nova-13.0.0/0000775000567000056710000000000012701410205013667 5ustar jenkinsjenkins00000000000000nova-13.0.0/devstack/0000775000567000056710000000000012701410205015473 5ustar jenkinsjenkins00000000000000nova-13.0.0/devstack/tempest-dsvm-cells-rc0000664000567000056710000001235212701410011021546 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # This script is executed in the OpenStack CI *tempest-dsvm-cells job. # It's used to configure which tempest tests actually get run. You can find # the CI job configuration here: # # http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/devstack-gate.yaml # # NOTE(sdague): tempest (because of testr) only supports and additive # regex for specifying test selection. As such this is a series of # negative assertions ?: for strings. # # Being a regex, an unescaped '.' matches any character, so those # should be escaped. There is no need to specify .* at the end of a # pattern, as it's handled by the final match. # Construct a regex to use when limiting scope of tempest # to avoid features unsupported by Nova Cells. 
r="^(?!.*" # skip security group tests r="$r(?:tempest\.api\.compute\.security_groups)" # skip test that requires security groups r="$r|(?:tempest\.thirdparty\.boto\.test_ec2_instance_run\.InstanceRunTest\.test_compute_with_volumes)" # skip aggregates tests r="$r|(?:tempest\.api\.compute\.admin\.test_aggregates)" r="$r|(?:tempest\.scenario\.test_aggregates_basic_ops)" # skip availability zone tests r="$r|(?:(tempest\.api\.compute\.)(servers\.|admin\.)(test_availability_zone*))" # skip fixed-ip tests r="$r|(?:tempest\.api\.compute\.admin\.test_fixed_ips)" # skip floating-ip tests r="$r|(?:tempest\.api\.compute\.floating_ips)" # https://bugs.launchpad.net/tempest/+bug/1513983 - The follow scenario tests rely on Neutron but use floating IPs r="$r|(?:tempest\.scenario\.test_network_advanced_server_ops\.TestNetworkAdvancedServerOps\.test_server_connectivity_pause_unpause)" r="$r|(?:tempest\.scenario\.test_network_basic_ops\.TestNetworkBasicOps\.test_network_basic_ops)" r="$r|(?:tempest\.scenario\.test_network_basic_ops\.TestNetworkBasicOps\.test_update_router_admin_state)" r="$r|(?:tempest\.scenario\.test_network_v6\.TestGettingAddress\.test_slaac_from_os)" r="$r|(?:tempest\.scenario\.test_security_groups_basic_ops\.TestSecurityGroupsBasicOps\.test_cross_tenant_traffic)" # exclude the slow tag r="$r|(?:.*\[.*\bslow\b.*\])" # skip current regressions; when adding new entries to this list, add the bug # reference with it since this list should shrink # NOTE(mriedem): Resize tests are skipped in devstack until custom flavors # in devstack used in Tempest runs are synced to the cells database. 
r="$r|(?:tempest\.api\.compute\.admin\.test_networks\.NetworksTest\.test_get_network)" r="$r|(?:tempest\.api\.compute\.admin\.test_networks\.NetworksTest\.test_list_all_networks)" r="$r|(?:tempest\.api\.compute\.servers\.test_server_rescue\.ServerRescueTestJSON)" r="$r|(?:tempest\.api\.compute\.servers\.test_create_server\.ServersTestJSON\.test_create_server_with_scheduler_hint_group)" r="$r|(?:tempest\.api\.compute\.servers\.test_create_server\.ServersTestManualDisk\.test_create_server_with_scheduler_hint_group)" r="$r|(?:tempest\.api\.compute\.servers\.test_virtual_interfaces\.VirtualInterfacesTestJSON\.test_list_virtual_interfaces)" r="$r|(?:tempest\.api\.compute\.test_networks\.ComputeNetworksTest\.test_list_networks)" r="$r|(?:tempest\.scenario\.test_minimum_basic\.TestMinimumBasicScenario\.test_minimum_basic_scenario)" r="$r|(?:tempest\.api\.compute\.servers\.test_server_rescue_negative\.ServerRescueNegativeTestJSON)" r="$r|(?:tempest\.scenario\.test_encrypted_cinder_volumes\.TestEncryptedCinderVolumes\.test_encrypted_cinder_volumes_cryptsetup)" r="$r|(?:tempest\.scenario\.test_encrypted_cinder_volumes\.TestEncryptedCinderVolumes\.test_encrypted_cinder_volumes_luks)" r="$r|(?:tempest\.thirdparty\.boto\.test_ec2_network\.EC2NetworkTest\.test_disassociate_not_associated_floating_ip)" r="$r|(?:tempest\.scenario\.test_server_basic_ops\.TestServerBasicOps\.(test_server_basicops|test_server_basic_ops))" r="$r|(?:tempest\.scenario\.test_snapshot_pattern\.TestSnapshotPattern\.test_snapshot_pattern)" r="$r|(?:tempest\.api\.compute\.admin\.test_hosts\.HostsAdminTestJSON\.test_show_host_detail)" r="$r|(?:tempest\.api\.compute\.test_tenant_networks\.ComputeTenantNetworksTest\.test_list_show_tenant_networks)" # https://bugs.launchpad.net/nova/+bug/1489581 r="$r|(?:tempest\.scenario\.test_volume_boot_pattern\.)" # https://bugs.launchpad.net/nova/+bug/1445628 r="$r|(?:tempest\.thirdparty\.boto\.test_ec2_instance_run\.InstanceRunTest\.test_run_idempotent_instances)" # 
https://bugs.launchpad.net/nova/+bug/1466696 - Cells: Race between instance 'unlock' and 'stop' can cause 'stop' to fail r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_lock_unlock_server)" # scheduler hints apparently don't work in devstack cells r="$r|(?:tempest\.scenario\.test_server_multinode\.TestServerMultinode\.test_schedule_to_all_nodes)" r="$r).*$" export DEVSTACK_GATE_TEMPEST_REGEX="$r" nova-13.0.0/devstack/tempest-dsvm-lxc-rc0000664000567000056710000000534212701407773021260 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # This script is executed in the OpenStack CI *tempest-dsvm-lxc job. # It's used to configure which tempest tests actually get run. You can find # the CI job configuration here: # # http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/devstack-gate.yaml # # Construct a regex to use when limiting scope of tempest # to avoid features unsupported by Nova's LXC support. # Note that several tests are disabled by the use of tempest # feature toggles in devstack/lib/tempest for an lxc config, # so this regex is not entirely representative of what's excluded. # When adding entries to the regex, add a comment explaining why # since this list should not grow. r="^(?!.*" r="$r(?:.*\[.*\bslow\b.*\])" # NOTE(thomasem): Skipping these tests due to Ubuntu bug: # https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1536280. 
# These exclusions should be able to be removed once that bug is addressed. r="$r|(?:tempest\.api\.compute\.servers\.test_server_personality\.ServerPersonalityTestJSON\.test_rebuild_server_with_personality)" r="$r|(?:tempest\.api\.compute\.admin\.test_servers\.ServersAdminTestJSON\.test_rebuild_server_in_error_state)" r="$r|(?:tempest\.api\.compute\.servers\.test_list_server_filters\.ListServerFiltersTestJSON\.test_list_servers_filter_by_shutoff_status)" r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_lock_unlock_server)" r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_rebuild_server)" r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_rebuild_server_in_stop_state)" r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_stop_start_server)" r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_get_console_output_server_id_in_shutoff_status)" r="$r|(?:tempest\.api\.compute\.servers\.test_servers\.ServersTestJSON\.test_update_server_name_in_stop_state)" r="$r|(?:tempest\.api\.compute\.servers\.test_disk_config\.ServerDiskConfigTestJSON*)" r="$r|(?:tempest\.api\.compute\.servers\.test_delete_server\.DeleteServersTestJSON*)" r="$r).*$" export DEVSTACK_GATE_TEMPEST_REGEX="$r" nova-13.0.0/etc/0000775000567000056710000000000012701410205014442 5ustar jenkinsjenkins00000000000000nova-13.0.0/etc/nova/0000775000567000056710000000000012701410205015405 5ustar jenkinsjenkins00000000000000nova-13.0.0/etc/nova/rootwrap.d/0000775000567000056710000000000012701410205017504 5ustar jenkinsjenkins00000000000000nova-13.0.0/etc/nova/rootwrap.d/compute.filters0000664000567000056710000002266112701410011022554 0ustar jenkinsjenkins00000000000000# nova-rootwrap command filters for compute nodes # This file should be owned by (and only-writeable by) the root user [Filters] # nova/virt/disk/mount/api.py: 
'kpartx', '-a', device # nova/virt/disk/mount/api.py: 'kpartx', '-d', device kpartx: CommandFilter, kpartx, root # nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path # nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path tune2fs: CommandFilter, tune2fs, root # nova/virt/disk/mount/api.py: 'mount', mapped_device # nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'.. # nova/virt/configdrive.py: 'mount', device, mountdir # nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ... mount: CommandFilter, mount, root # nova/virt/disk/mount/api.py: 'umount', mapped_device # nova/virt/disk/api.py: 'umount' target # nova/virt/xenapi/vm_utils.py: 'umount', dev_path # nova/virt/configdrive.py: 'umount', mountdir umount: CommandFilter, umount, root # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device qemu-nbd: CommandFilter, qemu-nbd, root # nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image # nova/virt/disk/mount/loop.py: 'losetup', '--detach', device losetup: CommandFilter, losetup, root # nova/virt/disk/vfs/localfs.py: 'blkid', '-o', 'value', '-s', 'TYPE', device blkid: CommandFilter, blkid, root # nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path # nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.* # nova/virt/disk/vfs/localfs.py: 'tee', canonpath tee: CommandFilter, tee, root # nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath mkdir: CommandFilter, mkdir, root # nova/virt/disk/vfs/localfs.py: 'chown' # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk') chown: CommandFilter, chown, root # nova/virt/disk/vfs/localfs.py: 'chmod' chmod: 
CommandFilter, chmod, root # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap' # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up' # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i.. # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'.. # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',.. # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',.. # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev) # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1] # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', .. # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',.. # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ... # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,.. # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up' # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up' # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, .. # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, .. # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up' # nova/network/linux_net.py: 'ip', 'route', 'add', .. # nova/network/linux_net.py: 'ip', 'route', 'del', . # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev ip: CommandFilter, ip, root # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev tunctl: CommandFilter, tunctl, root # nova/virt/libvirt/vif.py: 'ovs-vsctl', ... # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ... # nova/network/linux_net.py: 'ovs-vsctl', .... ovs-vsctl: CommandFilter, ovs-vsctl, root # nova/virt/libvirt/vif.py: 'vrouter-port-control', ... 
vrouter-port-control: CommandFilter, vrouter-port-control, root # nova/virt/libvirt/vif.py: 'ebrctl', ... ebrctl: CommandFilter, ebrctl, root # nova/virt/libvirt/vif.py: 'mm-ctl', ... mm-ctl: CommandFilter, mm-ctl, root # nova/network/linux_net.py: 'ovs-ofctl', .... ovs-ofctl: CommandFilter, ovs-ofctl, root # nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ... dd: CommandFilter, dd, root # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ... iscsiadm: CommandFilter, iscsiadm, root # nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev # nova/virt/libvirt/volume/aoe.py: 'aoe-discover' aoe-revalidate: CommandFilter, aoe-revalidate, root aoe-discover: CommandFilter, aoe-discover, root # nova/virt/xenapi/vm_utils.py: parted, --script, ... # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*. parted: CommandFilter, parted, root # nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path pygrub: CommandFilter, pygrub, root # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s fdisk: CommandFilter, fdisk, root # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path # nova/virt/disk/api.py: e2fsck, -f, -p, image e2fsck: CommandFilter, e2fsck, root # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path # nova/virt/disk/api.py: resize2fs, image resize2fs: CommandFilter, resize2fs, root # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... iptables-save: CommandFilter, iptables-save, root ip6tables-save: CommandFilter, ip6tables-save, root # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) iptables-restore: CommandFilter, iptables-restore, root ip6tables-restore: CommandFilter, ip6tables-restore, root # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ... # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],.. 
arping: CommandFilter, arping, root # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address dhcp_release: CommandFilter, dhcp_release, root # nova/network/linux_net.py: 'kill', '-9', pid # nova/network/linux_net.py: 'kill', '-HUP', pid kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP # nova/network/linux_net.py: 'kill', pid kill_radvd: KillFilter, root, /usr/sbin/radvd # nova/network/linux_net.py: dnsmasq call dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'.. radvd: CommandFilter, radvd, root # nova/network/linux_net.py: 'brctl', 'addbr', bridge # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0 # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off' # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface brctl: CommandFilter, brctl, root # nova/virt/libvirt/utils.py: 'mkswap' # nova/virt/xenapi/vm_utils.py: 'mkswap' mkswap: CommandFilter, mkswap, root # nova/virt/libvirt/utils.py: 'nova-idmapshift' nova-idmapshift: CommandFilter, nova-idmapshift, root # nova/virt/xenapi/vm_utils.py: 'mkfs' # nova/utils.py: 'mkfs', fs, path, label mkfs: CommandFilter, mkfs, root # nova/virt/libvirt/utils.py: 'qemu-img' qemu-img: CommandFilter, qemu-img, root # nova/virt/disk/vfs/localfs.py: 'readlink', '-e' readlink: CommandFilter, readlink, root # nova/virt/disk/api.py: mkfs.ext3: CommandFilter, mkfs.ext3, root mkfs.ext4: CommandFilter, mkfs.ext4, root mkfs.ntfs: CommandFilter, mkfs.ntfs, root # nova/virt/libvirt/connection.py: lvremove: CommandFilter, lvremove, root # nova/virt/libvirt/utils.py: lvcreate: CommandFilter, lvcreate, root # nova/virt/libvirt/utils.py: lvs: CommandFilter, lvs, root # nova/virt/libvirt/utils.py: vgs: CommandFilter, vgs, root # nova/utils.py:read_file_as_root: 'cat', file_path # (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file) read_passwd: RegExpFilter, cat, root, cat, 
(/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow # os-brick needed commands read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi multipath: CommandFilter, multipath, root # multipathd show status multipathd: CommandFilter, multipathd, root systool: CommandFilter, systool, root vgc-cluster: CommandFilter, vgc-cluster, root # os_brick/initiator/connector.py drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid # TODO(smcginnis) Temporary fix. # Need to pull in os-brick os-brick.filters file instead and clean # out stale brick values from this file. scsi_id: CommandFilter, /lib/udev/scsi_id, root # nova/storage/linuxscsi.py: sg_scan device sg_scan: CommandFilter, sg_scan, root # nova/volume/encryptors/cryptsetup.py: # nova/volume/encryptors/luks.py: ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/.*, .* # nova/volume/encryptors.py: # nova/virt/libvirt/dmcrypt.py: cryptsetup: CommandFilter, cryptsetup, root # nova/virt/xenapi/vm_utils.py: xenstore-read: CommandFilter, xenstore-read, root # nova/virt/libvirt/utils.py: rbd: CommandFilter, rbd, root # nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path shred: CommandFilter, shred, root # nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control.. 
cp: CommandFilter, cp, root # nova/virt/xenapi/vm_utils.py: sync: CommandFilter, sync, root # nova/virt/libvirt/imagebackend.py: ploop: CommandFilter, ploop, root # nova/virt/libvirt/utils.py: 'xend', 'status' xend: CommandFilter, xend, root # nova/virt/libvirt/utils.py: touch: CommandFilter, touch, root nova-13.0.0/etc/nova/rootwrap.d/api-metadata.filters0000664000567000056710000000111512701407773023443 0ustar jenkinsjenkins00000000000000# nova-rootwrap command filters for api-metadata nodes # This is needed on nova-api hosts running with "metadata" in enabled_apis # or when running nova-api-metadata # This file should be owned by (and only-writeable by) the root user [Filters] # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... iptables-save: CommandFilter, iptables-save, root ip6tables-save: CommandFilter, ip6tables-save, root # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) iptables-restore: CommandFilter, iptables-restore, root ip6tables-restore: CommandFilter, ip6tables-restore, root nova-13.0.0/etc/nova/rootwrap.d/network.filters0000664000567000056710000000764312701407773022621 0ustar jenkinsjenkins00000000000000# nova-rootwrap command filters for network nodes # This file should be owned by (and only-writeable by) the root user [Filters] # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap' # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up' # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i.. # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'.. # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',.. # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',.. 
# nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev) # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1] # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', .. # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',.. # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ... # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,.. # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up' # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up' # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, .. # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, .. # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up' # nova/network/linux_net.py: 'ip', 'route', 'add', .. # nova/network/linux_net.py: 'ip', 'route', 'del', . # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev ip: CommandFilter, ip, root # nova/virt/libvirt/vif.py: 'ovs-vsctl', ... # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ... # nova/network/linux_net.py: 'ovs-vsctl', .... ovs-vsctl: CommandFilter, ovs-vsctl, root # nova/network/linux_net.py: 'ovs-ofctl', .... ovs-ofctl: CommandFilter, ovs-ofctl, root # nova/virt/libvirt/vif.py: 'ivs-ctl', ... # nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ... # nova/network/linux_net.py: 'ivs-ctl', .... ivs-ctl: CommandFilter, ivs-ctl, root # nova/virt/libvirt/vif.py: 'ifc_ctl', ... ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root # nova/network/linux_net.py: 'ebtables', '-D' ... # nova/network/linux_net.py: 'ebtables', '-I' ... ebtables: CommandFilter, ebtables, root ebtables_usr: CommandFilter, ebtables, root # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... 
iptables-save: CommandFilter, iptables-save, root ip6tables-save: CommandFilter, ip6tables-save, root # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) iptables-restore: CommandFilter, iptables-restore, root ip6tables-restore: CommandFilter, ip6tables-restore, root # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ... # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],.. arping: CommandFilter, arping, root # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address dhcp_release: CommandFilter, dhcp_release, root # nova/network/linux_net.py: 'kill', '-9', pid # nova/network/linux_net.py: 'kill', '-HUP', pid kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP # nova/network/linux_net.py: 'kill', pid kill_radvd: KillFilter, root, /usr/sbin/radvd # nova/network/linux_net.py: dnsmasq call dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'.. radvd: CommandFilter, radvd, root # nova/network/linux_net.py: 'brctl', 'addbr', bridge # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0 # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off' # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface brctl: CommandFilter, brctl, root # nova/network/linux_net.py: 'sysctl', .... 
sysctl: CommandFilter, sysctl, root # nova/network/linux_net.py: 'conntrack' conntrack: CommandFilter, conntrack, root # nova/network/linux_net.py: 'fp-vdev' fp-vdev: CommandFilter, fp-vdev, root nova-13.0.0/etc/nova/README-nova.conf.txt0000664000567000056710000000017412701407773021012 0ustar jenkinsjenkins00000000000000To generate the sample nova.conf file, run the following command from the top level of the nova directory: tox -egenconfig nova-13.0.0/etc/nova/release.sample0000664000567000056710000000011112701407773020241 0ustar jenkinsjenkins00000000000000[Nova] vendor = Fedora Project product = OpenStack Nova package = 1.fc18 nova-13.0.0/etc/nova/rootwrap.conf0000664000567000056710000000170612701407773020155 0ustar jenkinsjenkins00000000000000# Configuration for nova-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap # List of directories to search executables in, in case filters do not # explicitly specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, local0, local1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. 
# INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR nova-13.0.0/etc/nova/policy.json0000664000567000056710000006641212701407773017630 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "admin_or_owner": "is_admin:True or project_id:%(project_id)s", "default": "rule:admin_or_owner", "cells_scheduler_filter:TargetCellFilter": "is_admin:True", "compute:create": "rule:admin_or_owner", "compute:create:attach_network": "rule:admin_or_owner", "compute:create:attach_volume": "rule:admin_or_owner", "compute:create:forced_host": "is_admin:True", "compute:get": "rule:admin_or_owner", "compute:get_all": "rule:admin_or_owner", "compute:get_all_tenants": "is_admin:True", "compute:update": "rule:admin_or_owner", "compute:get_instance_metadata": "rule:admin_or_owner", "compute:get_all_instance_metadata": "rule:admin_or_owner", "compute:get_all_instance_system_metadata": "rule:admin_or_owner", "compute:update_instance_metadata": "rule:admin_or_owner", "compute:delete_instance_metadata": "rule:admin_or_owner", "compute:get_diagnostics": "rule:admin_or_owner", "compute:get_instance_diagnostics": "rule:admin_or_owner", "compute:start": "rule:admin_or_owner", "compute:stop": "rule:admin_or_owner", "compute:lock": "rule:admin_or_owner", "compute:unlock": "rule:admin_or_owner", "compute:unlock_override": "rule:admin_api", "compute:get_vnc_console": "rule:admin_or_owner", "compute:get_spice_console": "rule:admin_or_owner", "compute:get_rdp_console": "rule:admin_or_owner", "compute:get_serial_console": "rule:admin_or_owner", "compute:get_mks_console": "rule:admin_or_owner", "compute:get_console_output": "rule:admin_or_owner", "compute:reset_network": "rule:admin_or_owner", "compute:inject_network_info": "rule:admin_or_owner", "compute:add_fixed_ip": "rule:admin_or_owner", "compute:remove_fixed_ip": "rule:admin_or_owner", "compute:attach_volume": "rule:admin_or_owner", "compute:detach_volume": "rule:admin_or_owner", 
"compute:swap_volume": "rule:admin_or_owner", "compute:attach_interface": "rule:admin_or_owner", "compute:detach_interface": "rule:admin_or_owner", "compute:set_admin_password": "rule:admin_or_owner", "compute:rescue": "rule:admin_or_owner", "compute:unrescue": "rule:admin_or_owner", "compute:suspend": "rule:admin_or_owner", "compute:resume": "rule:admin_or_owner", "compute:pause": "rule:admin_or_owner", "compute:unpause": "rule:admin_or_owner", "compute:shelve": "rule:admin_or_owner", "compute:shelve_offload": "rule:admin_or_owner", "compute:unshelve": "rule:admin_or_owner", "compute:snapshot": "rule:admin_or_owner", "compute:snapshot_volume_backed": "rule:admin_or_owner", "compute:backup": "rule:admin_or_owner", "compute:resize": "rule:admin_or_owner", "compute:confirm_resize": "rule:admin_or_owner", "compute:revert_resize": "rule:admin_or_owner", "compute:rebuild": "rule:admin_or_owner", "compute:reboot": "rule:admin_or_owner", "compute:delete": "rule:admin_or_owner", "compute:soft_delete": "rule:admin_or_owner", "compute:force_delete": "rule:admin_or_owner", "compute:security_groups:add_to_instance": "rule:admin_or_owner", "compute:security_groups:remove_from_instance": "rule:admin_or_owner", "compute:restore": "rule:admin_or_owner", "compute:volume_snapshot_create": "rule:admin_or_owner", "compute:volume_snapshot_delete": "rule:admin_or_owner", "admin_api": "is_admin:True", "compute_extension:accounts": "rule:admin_api", "compute_extension:admin_actions": "rule:admin_api", "compute_extension:admin_actions:pause": "rule:admin_or_owner", "compute_extension:admin_actions:unpause": "rule:admin_or_owner", "compute_extension:admin_actions:suspend": "rule:admin_or_owner", "compute_extension:admin_actions:resume": "rule:admin_or_owner", "compute_extension:admin_actions:lock": "rule:admin_or_owner", "compute_extension:admin_actions:unlock": "rule:admin_or_owner", "compute_extension:admin_actions:resetNetwork": "rule:admin_api", 
"compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api", "compute_extension:admin_actions:createBackup": "rule:admin_or_owner", "compute_extension:admin_actions:migrateLive": "rule:admin_api", "compute_extension:admin_actions:resetState": "rule:admin_api", "compute_extension:admin_actions:migrate": "rule:admin_api", "compute_extension:aggregates": "rule:admin_api", "compute_extension:agents": "rule:admin_api", "compute_extension:attach_interfaces": "rule:admin_or_owner", "compute_extension:baremetal_nodes": "rule:admin_api", "compute_extension:cells": "rule:admin_api", "compute_extension:cells:create": "rule:admin_api", "compute_extension:cells:delete": "rule:admin_api", "compute_extension:cells:update": "rule:admin_api", "compute_extension:cells:sync_instances": "rule:admin_api", "compute_extension:certificates": "rule:admin_or_owner", "compute_extension:cloudpipe": "rule:admin_api", "compute_extension:cloudpipe_update": "rule:admin_api", "compute_extension:config_drive": "rule:admin_or_owner", "compute_extension:console_output": "rule:admin_or_owner", "compute_extension:consoles": "rule:admin_or_owner", "compute_extension:createserverext": "rule:admin_or_owner", "compute_extension:deferred_delete": "rule:admin_or_owner", "compute_extension:disk_config": "rule:admin_or_owner", "compute_extension:evacuate": "rule:admin_api", "compute_extension:extended_server_attributes": "rule:admin_api", "compute_extension:extended_status": "rule:admin_or_owner", "compute_extension:extended_availability_zone": "rule:admin_or_owner", "compute_extension:extended_ips": "rule:admin_or_owner", "compute_extension:extended_ips_mac": "rule:admin_or_owner", "compute_extension:extended_vif_net": "rule:admin_or_owner", "compute_extension:extended_volumes": "rule:admin_or_owner", "compute_extension:fixed_ips": "rule:admin_api", "compute_extension:flavor_access": "rule:admin_or_owner", "compute_extension:flavor_access:addTenantAccess": "rule:admin_api", 
"compute_extension:flavor_access:removeTenantAccess": "rule:admin_api", "compute_extension:flavor_disabled": "rule:admin_or_owner", "compute_extension:flavor_rxtx": "rule:admin_or_owner", "compute_extension:flavor_swap": "rule:admin_or_owner", "compute_extension:flavorextradata": "rule:admin_or_owner", "compute_extension:flavorextraspecs:index": "rule:admin_or_owner", "compute_extension:flavorextraspecs:show": "rule:admin_or_owner", "compute_extension:flavorextraspecs:create": "rule:admin_api", "compute_extension:flavorextraspecs:update": "rule:admin_api", "compute_extension:flavorextraspecs:delete": "rule:admin_api", "compute_extension:flavormanage": "rule:admin_api", "compute_extension:floating_ip_dns": "rule:admin_or_owner", "compute_extension:floating_ip_pools": "rule:admin_or_owner", "compute_extension:floating_ips": "rule:admin_or_owner", "compute_extension:floating_ips_bulk": "rule:admin_api", "compute_extension:fping": "rule:admin_or_owner", "compute_extension:fping:all_tenants": "rule:admin_api", "compute_extension:hide_server_addresses": "is_admin:False", "compute_extension:hosts": "rule:admin_api", "compute_extension:hypervisors": "rule:admin_api", "compute_extension:image_size": "rule:admin_or_owner", "compute_extension:instance_actions": "rule:admin_or_owner", "compute_extension:instance_actions:events": "rule:admin_api", "compute_extension:instance_usage_audit_log": "rule:admin_api", "compute_extension:keypairs": "rule:admin_or_owner", "compute_extension:keypairs:index": "rule:admin_or_owner", "compute_extension:keypairs:show": "rule:admin_or_owner", "compute_extension:keypairs:create": "rule:admin_or_owner", "compute_extension:keypairs:delete": "rule:admin_or_owner", "compute_extension:multinic": "rule:admin_or_owner", "compute_extension:networks": "rule:admin_api", "compute_extension:networks:view": "rule:admin_or_owner", "compute_extension:networks_associate": "rule:admin_api", "compute_extension:os-tenant-networks": "rule:admin_or_owner", 
"compute_extension:quotas:show": "rule:admin_or_owner", "compute_extension:quotas:update": "rule:admin_api", "compute_extension:quotas:delete": "rule:admin_api", "compute_extension:quota_classes": "rule:admin_or_owner", "compute_extension:rescue": "rule:admin_or_owner", "compute_extension:security_group_default_rules": "rule:admin_api", "compute_extension:security_groups": "rule:admin_or_owner", "compute_extension:server_diagnostics": "rule:admin_api", "compute_extension:server_groups": "rule:admin_or_owner", "compute_extension:server_password": "rule:admin_or_owner", "compute_extension:server_usage": "rule:admin_or_owner", "compute_extension:services": "rule:admin_api", "compute_extension:shelve": "rule:admin_or_owner", "compute_extension:shelveOffload": "rule:admin_api", "compute_extension:simple_tenant_usage:show": "rule:admin_or_owner", "compute_extension:simple_tenant_usage:list": "rule:admin_api", "compute_extension:unshelve": "rule:admin_or_owner", "compute_extension:users": "rule:admin_api", "compute_extension:virtual_interfaces": "rule:admin_or_owner", "compute_extension:virtual_storage_arrays": "rule:admin_or_owner", "compute_extension:volumes": "rule:admin_or_owner", "compute_extension:volume_attachments:index": "rule:admin_or_owner", "compute_extension:volume_attachments:show": "rule:admin_or_owner", "compute_extension:volume_attachments:create": "rule:admin_or_owner", "compute_extension:volume_attachments:update": "rule:admin_or_owner", "compute_extension:volume_attachments:delete": "rule:admin_or_owner", "compute_extension:volumetypes": "rule:admin_or_owner", "compute_extension:availability_zone:list": "rule:admin_or_owner", "compute_extension:availability_zone:detail": "rule:admin_api", "compute_extension:used_limits_for_admin": "rule:admin_api", "compute_extension:migrations:index": "rule:admin_api", "compute_extension:os-assisted-volume-snapshots:create": "rule:admin_api", "compute_extension:os-assisted-volume-snapshots:delete": "rule:admin_api", 
"compute_extension:console_auth_tokens": "rule:admin_api", "compute_extension:os-server-external-events:create": "rule:admin_api", "network:get_all": "rule:admin_or_owner", "network:get": "rule:admin_or_owner", "network:create": "rule:admin_or_owner", "network:delete": "rule:admin_or_owner", "network:associate": "rule:admin_or_owner", "network:disassociate": "rule:admin_or_owner", "network:get_vifs_by_instance": "rule:admin_or_owner", "network:allocate_for_instance": "rule:admin_or_owner", "network:deallocate_for_instance": "rule:admin_or_owner", "network:validate_networks": "rule:admin_or_owner", "network:get_instance_uuids_by_ip_filter": "rule:admin_or_owner", "network:get_instance_id_by_floating_address": "rule:admin_or_owner", "network:setup_networks_on_host": "rule:admin_or_owner", "network:get_backdoor_port": "rule:admin_or_owner", "network:get_floating_ip": "rule:admin_or_owner", "network:get_floating_ip_pools": "rule:admin_or_owner", "network:get_floating_ip_by_address": "rule:admin_or_owner", "network:get_floating_ips_by_project": "rule:admin_or_owner", "network:get_floating_ips_by_fixed_address": "rule:admin_or_owner", "network:allocate_floating_ip": "rule:admin_or_owner", "network:associate_floating_ip": "rule:admin_or_owner", "network:disassociate_floating_ip": "rule:admin_or_owner", "network:release_floating_ip": "rule:admin_or_owner", "network:migrate_instance_start": "rule:admin_or_owner", "network:migrate_instance_finish": "rule:admin_or_owner", "network:get_fixed_ip": "rule:admin_or_owner", "network:get_fixed_ip_by_address": "rule:admin_or_owner", "network:add_fixed_ip_to_instance": "rule:admin_or_owner", "network:remove_fixed_ip_from_instance": "rule:admin_or_owner", "network:add_network_to_project": "rule:admin_or_owner", "network:get_instance_nw_info": "rule:admin_or_owner", "network:get_dns_domains": "rule:admin_or_owner", "network:add_dns_entry": "rule:admin_or_owner", "network:modify_dns_entry": "rule:admin_or_owner", 
"network:delete_dns_entry": "rule:admin_or_owner", "network:get_dns_entries_by_address": "rule:admin_or_owner", "network:get_dns_entries_by_name": "rule:admin_or_owner", "network:create_private_dns_domain": "rule:admin_or_owner", "network:create_public_dns_domain": "rule:admin_or_owner", "network:delete_dns_domain": "rule:admin_or_owner", "network:attach_external_network": "rule:admin_api", "network:get_vif_by_mac_address": "rule:admin_or_owner", "os_compute_api:servers:detail:get_all_tenants": "is_admin:True", "os_compute_api:servers:index:get_all_tenants": "is_admin:True", "os_compute_api:servers:confirm_resize": "rule:admin_or_owner", "os_compute_api:servers:create": "rule:admin_or_owner", "os_compute_api:servers:create:attach_network": "rule:admin_or_owner", "os_compute_api:servers:create:attach_volume": "rule:admin_or_owner", "os_compute_api:servers:create:forced_host": "rule:admin_api", "os_compute_api:servers:delete": "rule:admin_or_owner", "os_compute_api:servers:update": "rule:admin_or_owner", "os_compute_api:servers:detail": "rule:admin_or_owner", "os_compute_api:servers:index": "rule:admin_or_owner", "os_compute_api:servers:reboot": "rule:admin_or_owner", "os_compute_api:servers:rebuild": "rule:admin_or_owner", "os_compute_api:servers:resize": "rule:admin_or_owner", "os_compute_api:servers:revert_resize": "rule:admin_or_owner", "os_compute_api:servers:show": "rule:admin_or_owner", "os_compute_api:servers:show:host_status": "rule:admin_api", "os_compute_api:servers:create_image": "rule:admin_or_owner", "os_compute_api:servers:create_image:allow_volume_backed": "rule:admin_or_owner", "os_compute_api:servers:start": "rule:admin_or_owner", "os_compute_api:servers:stop": "rule:admin_or_owner", "os_compute_api:servers:trigger_crash_dump": "rule:admin_or_owner", "os_compute_api:servers:migrations:force_complete": "rule:admin_api", "os_compute_api:servers:migrations:delete": "rule:admin_api", "os_compute_api:servers:discoverable": "@", 
"os_compute_api:servers:migrations:index": "rule:admin_api", "os_compute_api:servers:migrations:show": "rule:admin_api", "os_compute_api:os-access-ips:discoverable": "@", "os_compute_api:os-access-ips": "rule:admin_or_owner", "os_compute_api:os-admin-actions": "rule:admin_api", "os_compute_api:os-admin-actions:discoverable": "@", "os_compute_api:os-admin-actions:reset_network": "rule:admin_api", "os_compute_api:os-admin-actions:inject_network_info": "rule:admin_api", "os_compute_api:os-admin-actions:reset_state": "rule:admin_api", "os_compute_api:os-admin-password": "rule:admin_or_owner", "os_compute_api:os-admin-password:discoverable": "@", "os_compute_api:os-aggregates:discoverable": "@", "os_compute_api:os-aggregates:index": "rule:admin_api", "os_compute_api:os-aggregates:create": "rule:admin_api", "os_compute_api:os-aggregates:show": "rule:admin_api", "os_compute_api:os-aggregates:update": "rule:admin_api", "os_compute_api:os-aggregates:delete": "rule:admin_api", "os_compute_api:os-aggregates:add_host": "rule:admin_api", "os_compute_api:os-aggregates:remove_host": "rule:admin_api", "os_compute_api:os-aggregates:set_metadata": "rule:admin_api", "os_compute_api:os-agents": "rule:admin_api", "os_compute_api:os-agents:discoverable": "@", "os_compute_api:os-attach-interfaces": "rule:admin_or_owner", "os_compute_api:os-attach-interfaces:discoverable": "@", "os_compute_api:os-baremetal-nodes": "rule:admin_api", "os_compute_api:os-baremetal-nodes:discoverable": "@", "os_compute_api:os-block-device-mapping-v1:discoverable": "@", "os_compute_api:os-cells": "rule:admin_api", "os_compute_api:os-cells:create": "rule:admin_api", "os_compute_api:os-cells:delete": "rule:admin_api", "os_compute_api:os-cells:update": "rule:admin_api", "os_compute_api:os-cells:sync_instances": "rule:admin_api", "os_compute_api:os-cells:discoverable": "@", "os_compute_api:os-certificates:create": "rule:admin_or_owner", "os_compute_api:os-certificates:show": "rule:admin_or_owner", 
"os_compute_api:os-certificates:discoverable": "@", "os_compute_api:os-cloudpipe": "rule:admin_api", "os_compute_api:os-cloudpipe:discoverable": "@", "os_compute_api:os-config-drive": "rule:admin_or_owner", "os_compute_api:os-consoles:discoverable": "@", "os_compute_api:os-consoles:create": "rule:admin_or_owner", "os_compute_api:os-consoles:delete": "rule:admin_or_owner", "os_compute_api:os-consoles:index": "rule:admin_or_owner", "os_compute_api:os-consoles:show": "rule:admin_or_owner", "os_compute_api:os-console-output:discoverable": "@", "os_compute_api:os-console-output": "rule:admin_or_owner", "os_compute_api:os-remote-consoles": "rule:admin_or_owner", "os_compute_api:os-remote-consoles:discoverable": "@", "os_compute_api:os-create-backup:discoverable": "@", "os_compute_api:os-create-backup": "rule:admin_or_owner", "os_compute_api:os-deferred-delete": "rule:admin_or_owner", "os_compute_api:os-deferred-delete:discoverable": "@", "os_compute_api:os-disk-config": "rule:admin_or_owner", "os_compute_api:os-disk-config:discoverable": "@", "os_compute_api:os-evacuate": "rule:admin_api", "os_compute_api:os-evacuate:discoverable": "@", "os_compute_api:os-extended-server-attributes": "rule:admin_api", "os_compute_api:os-extended-server-attributes:discoverable": "@", "os_compute_api:os-extended-status": "rule:admin_or_owner", "os_compute_api:os-extended-status:discoverable": "@", "os_compute_api:os-extended-availability-zone": "rule:admin_or_owner", "os_compute_api:os-extended-availability-zone:discoverable": "@", "os_compute_api:extensions": "rule:admin_or_owner", "os_compute_api:extensions:discoverable": "@", "os_compute_api:extension_info:discoverable": "@", "os_compute_api:os-extended-volumes": "rule:admin_or_owner", "os_compute_api:os-extended-volumes:discoverable": "@", "os_compute_api:os-fixed-ips": "rule:admin_api", "os_compute_api:os-fixed-ips:discoverable": "@", "os_compute_api:os-flavor-access": "rule:admin_or_owner", 
"os_compute_api:os-flavor-access:discoverable": "@", "os_compute_api:os-flavor-access:remove_tenant_access": "rule:admin_api", "os_compute_api:os-flavor-access:add_tenant_access": "rule:admin_api", "os_compute_api:os-flavor-rxtx": "rule:admin_or_owner", "os_compute_api:os-flavor-rxtx:discoverable": "@", "os_compute_api:flavors": "rule:admin_or_owner", "os_compute_api:flavors:discoverable": "@", "os_compute_api:os-flavor-extra-specs:discoverable": "@", "os_compute_api:os-flavor-extra-specs:index": "rule:admin_or_owner", "os_compute_api:os-flavor-extra-specs:show": "rule:admin_or_owner", "os_compute_api:os-flavor-extra-specs:create": "rule:admin_api", "os_compute_api:os-flavor-extra-specs:update": "rule:admin_api", "os_compute_api:os-flavor-extra-specs:delete": "rule:admin_api", "os_compute_api:os-flavor-manage:discoverable": "@", "os_compute_api:os-flavor-manage": "rule:admin_api", "os_compute_api:os-floating-ip-dns": "rule:admin_or_owner", "os_compute_api:os-floating-ip-dns:discoverable": "@", "os_compute_api:os-floating-ip-dns:domain:update": "rule:admin_api", "os_compute_api:os-floating-ip-dns:domain:delete": "rule:admin_api", "os_compute_api:os-floating-ip-pools": "rule:admin_or_owner", "os_compute_api:os-floating-ip-pools:discoverable": "@", "os_compute_api:os-floating-ips": "rule:admin_or_owner", "os_compute_api:os-floating-ips:discoverable": "@", "os_compute_api:os-floating-ips-bulk": "rule:admin_api", "os_compute_api:os-floating-ips-bulk:discoverable": "@", "os_compute_api:os-fping": "rule:admin_or_owner", "os_compute_api:os-fping:discoverable": "@", "os_compute_api:os-fping:all_tenants": "rule:admin_api", "os_compute_api:os-hide-server-addresses": "is_admin:False", "os_compute_api:os-hide-server-addresses:discoverable": "@", "os_compute_api:os-hosts": "rule:admin_api", "os_compute_api:os-hosts:discoverable": "@", "os_compute_api:os-hypervisors": "rule:admin_api", "os_compute_api:os-hypervisors:discoverable": "@", "os_compute_api:images:discoverable": "@", 
"os_compute_api:image-size": "rule:admin_or_owner", "os_compute_api:image-size:discoverable": "@", "os_compute_api:os-instance-actions": "rule:admin_or_owner", "os_compute_api:os-instance-actions:discoverable": "@", "os_compute_api:os-instance-actions:events": "rule:admin_api", "os_compute_api:os-instance-usage-audit-log": "rule:admin_api", "os_compute_api:os-instance-usage-audit-log:discoverable": "@", "os_compute_api:ips:discoverable": "@", "os_compute_api:ips:index": "rule:admin_or_owner", "os_compute_api:ips:show": "rule:admin_or_owner", "os_compute_api:os-keypairs:discoverable": "@", "os_compute_api:os-keypairs": "rule:admin_or_owner", "os_compute_api:os-keypairs:index": "rule:admin_api or user_id:%(user_id)s", "os_compute_api:os-keypairs:show": "rule:admin_api or user_id:%(user_id)s", "os_compute_api:os-keypairs:create": "rule:admin_api or user_id:%(user_id)s", "os_compute_api:os-keypairs:delete": "rule:admin_api or user_id:%(user_id)s", "os_compute_api:limits:discoverable": "@", "os_compute_api:limits": "rule:admin_or_owner", "os_compute_api:os-lock-server:discoverable": "@", "os_compute_api:os-lock-server:lock": "rule:admin_or_owner", "os_compute_api:os-lock-server:unlock": "rule:admin_or_owner", "os_compute_api:os-lock-server:unlock:unlock_override": "rule:admin_api", "os_compute_api:os-migrate-server:discoverable": "@", "os_compute_api:os-migrate-server:migrate": "rule:admin_api", "os_compute_api:os-migrate-server:migrate_live": "rule:admin_api", "os_compute_api:os-multinic": "rule:admin_or_owner", "os_compute_api:os-multinic:discoverable": "@", "os_compute_api:os-networks": "rule:admin_api", "os_compute_api:os-networks:view": "rule:admin_or_owner", "os_compute_api:os-networks:discoverable": "@", "os_compute_api:os-networks-associate": "rule:admin_api", "os_compute_api:os-networks-associate:discoverable": "@", "os_compute_api:os-pause-server:discoverable": "@", "os_compute_api:os-pause-server:pause": "rule:admin_or_owner", 
"os_compute_api:os-pause-server:unpause": "rule:admin_or_owner", "os_compute_api:os-pci:pci_servers": "rule:admin_or_owner", "os_compute_api:os-pci:discoverable": "@", "os_compute_api:os-pci:index": "rule:admin_api", "os_compute_api:os-pci:detail": "rule:admin_api", "os_compute_api:os-pci:show": "rule:admin_api", "os_compute_api:os-personality:discoverable": "@", "os_compute_api:os-preserve-ephemeral-rebuild:discoverable": "@", "os_compute_api:os-quota-sets:discoverable": "@", "os_compute_api:os-quota-sets:show": "rule:admin_or_owner", "os_compute_api:os-quota-sets:defaults": "@", "os_compute_api:os-quota-sets:update": "rule:admin_api", "os_compute_api:os-quota-sets:delete": "rule:admin_api", "os_compute_api:os-quota-sets:detail": "rule:admin_api", "os_compute_api:os-quota-class-sets:update": "rule:admin_api", "os_compute_api:os-quota-class-sets:show": "is_admin:True or quota_class:%(quota_class)s", "os_compute_api:os-quota-class-sets:discoverable": "@", "os_compute_api:os-rescue": "rule:admin_or_owner", "os_compute_api:os-rescue:discoverable": "@", "os_compute_api:os-scheduler-hints:discoverable": "@", "os_compute_api:os-security-group-default-rules:discoverable": "@", "os_compute_api:os-security-group-default-rules": "rule:admin_api", "os_compute_api:os-security-groups": "rule:admin_or_owner", "os_compute_api:os-security-groups:discoverable": "@", "os_compute_api:os-server-diagnostics": "rule:admin_api", "os_compute_api:os-server-diagnostics:discoverable": "@", "os_compute_api:os-server-password": "rule:admin_or_owner", "os_compute_api:os-server-password:discoverable": "@", "os_compute_api:os-server-usage": "rule:admin_or_owner", "os_compute_api:os-server-usage:discoverable": "@", "os_compute_api:os-server-groups": "rule:admin_or_owner", "os_compute_api:os-server-groups:discoverable": "@", "os_compute_api:os-services": "rule:admin_api", "os_compute_api:os-services:discoverable": "@", "os_compute_api:server-metadata:discoverable": "@", 
"os_compute_api:server-metadata:index": "rule:admin_or_owner", "os_compute_api:server-metadata:show": "rule:admin_or_owner", "os_compute_api:server-metadata:delete": "rule:admin_or_owner", "os_compute_api:server-metadata:create": "rule:admin_or_owner", "os_compute_api:server-metadata:update": "rule:admin_or_owner", "os_compute_api:server-metadata:update_all": "rule:admin_or_owner", "os_compute_api:os-shelve:shelve": "rule:admin_or_owner", "os_compute_api:os-shelve:shelve:discoverable": "@", "os_compute_api:os-shelve:shelve_offload": "rule:admin_api", "os_compute_api:os-simple-tenant-usage:discoverable": "@", "os_compute_api:os-simple-tenant-usage:show": "rule:admin_or_owner", "os_compute_api:os-simple-tenant-usage:list": "rule:admin_api", "os_compute_api:os-suspend-server:discoverable": "@", "os_compute_api:os-suspend-server:suspend": "rule:admin_or_owner", "os_compute_api:os-suspend-server:resume": "rule:admin_or_owner", "os_compute_api:os-tenant-networks": "rule:admin_or_owner", "os_compute_api:os-tenant-networks:discoverable": "@", "os_compute_api:os-shelve:unshelve": "rule:admin_or_owner", "os_compute_api:os-user-data:discoverable": "@", "os_compute_api:os-virtual-interfaces": "rule:admin_or_owner", "os_compute_api:os-virtual-interfaces:discoverable": "@", "os_compute_api:os-volumes": "rule:admin_or_owner", "os_compute_api:os-volumes:discoverable": "@", "os_compute_api:os-volumes-attachments:index": "rule:admin_or_owner", "os_compute_api:os-volumes-attachments:show": "rule:admin_or_owner", "os_compute_api:os-volumes-attachments:create": "rule:admin_or_owner", "os_compute_api:os-volumes-attachments:update": "rule:admin_or_owner", "os_compute_api:os-volumes-attachments:delete": "rule:admin_or_owner", "os_compute_api:os-volumes-attachments:discoverable": "@", "os_compute_api:os-availability-zone:list": "rule:admin_or_owner", "os_compute_api:os-availability-zone:discoverable": "@", "os_compute_api:os-availability-zone:detail": "rule:admin_api", 
"os_compute_api:os-used-limits": "rule:admin_api", "os_compute_api:os-used-limits:discoverable": "@", "os_compute_api:os-migrations:index": "rule:admin_api", "os_compute_api:os-migrations:discoverable": "@", "os_compute_api:os-assisted-volume-snapshots:create": "rule:admin_api", "os_compute_api:os-assisted-volume-snapshots:delete": "rule:admin_api", "os_compute_api:os-assisted-volume-snapshots:discoverable": "@", "os_compute_api:os-console-auth-tokens": "rule:admin_api", "os_compute_api:os-server-external-events:create": "rule:admin_api" } nova-13.0.0/etc/nova/api-paste.ini0000664000567000056710000000713112701407773020013 0ustar jenkinsjenkins00000000000000############ # Metadata # ############ [composite:metadata] use = egg:Paste#urlmap /: meta [pipeline:meta] pipeline = cors metaapp [app:metaapp] paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory ############# # OpenStack # ############# [composite:osapi_compute] use = call:nova.api.openstack.urlmap:urlmap_factory /: oscomputeversions # starting in Liberty the v21 implementation replaces the v2 # implementation and is suggested that you use it as the default. If # this causes issues with your clients you can rollback to the # *frozen* v2 api by commenting out the above stanza and using the # following instead:: # /v2: openstack_compute_api_legacy_v2 # if rolling back to v2 fixes your issue please file a critical bug # at - https://bugs.launchpad.net/nova/+bugs # # v21 is an exactly feature match for v2, except it has more stringent # input validation on the wsgi surface (prevents fuzzing early on the # API). It also provides new features via API microversions which are # opt into for clients. 
Unaware clients will receive the same frozen # v2 API feature set, but with some relaxed validation /v2: openstack_compute_api_v21_legacy_v2_compatible /v2.1: openstack_compute_api_v21 # NOTE: this is deprecated in favor of openstack_compute_api_v21_legacy_v2_compatible [composite:openstack_compute_api_legacy_v2] use = call:nova.api.auth:pipeline_factory noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_ratelimit osapi_compute_app_legacy_v2 keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext legacy_ratelimit osapi_compute_app_legacy_v2 keystone_nolimit = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_legacy_v2 [composite:openstack_compute_api_v21] use = call:nova.api.auth:pipeline_factory_v21 noauth2 = cors compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21 keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21 [composite:openstack_compute_api_v21_legacy_v2_compatible] use = call:nova.api.auth:pipeline_factory_v21 noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21 keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext legacy_v2_compatible osapi_compute_app_v21 [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:compute_req_id] paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory [filter:faultwrap] paste.filter_factory = nova.api.openstack:FaultWrapper.factory [filter:noauth2] paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory [filter:legacy_ratelimit] paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory [filter:sizelimit] paste.filter_factory = oslo_middleware:RequestBodySizeLimiter.factory [filter:legacy_v2_compatible] paste.filter_factory = nova.api.openstack:LegacyV2CompatibleWrapper.factory [app:osapi_compute_app_legacy_v2] paste.app_factory = 
nova.api.openstack.compute:APIRouter.factory [app:osapi_compute_app_v21] paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory [pipeline:oscomputeversions] pipeline = faultwrap oscomputeversionapp [app:oscomputeversionapp] paste.app_factory = nova.api.openstack.compute.versions:Versions.factory ########## # Shared # ########## [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = nova [filter:keystonecontext] paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory nova-13.0.0/etc/nova/nova-config-generator.conf0000664000567000056710000000113512701407773022466 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/nova/nova.conf.sample wrap_width = 79 namespace = nova namespace = nova.conf namespace = nova.api namespace = nova.cache_utils namespace = nova.cells namespace = nova.compute namespace = nova.network namespace = nova.network.neutronv2 namespace = nova.virt namespace = oslo.cache namespace = oslo.log namespace = oslo.messaging namespace = oslo.policy namespace = oslo.service.periodic_task namespace = oslo.service.service namespace = oslo.service.sslutils namespace = oslo.db namespace = oslo.middleware namespace = oslo.concurrency namespace = keystonemiddleware.auth_token nova-13.0.0/etc/nova/logging_sample.conf0000664000567000056710000000305312701407773021264 0ustar jenkinsjenkins00000000000000[loggers] keys = root, nova [handlers] keys = stderr, stdout, watchedfile, syslog, null [formatters] keys = context, default [logger_root] level = WARNING handlers = null [logger_nova] level = INFO handlers = stderr qualname = nova [logger_amqp] level = WARNING handlers = stderr qualname = amqp [logger_amqplib] level = WARNING handlers = stderr qualname = amqplib [logger_sqlalchemy] level = WARNING handlers = stderr qualname = sqlalchemy # "level = INFO" logs SQL queries. 
# "level = DEBUG" logs SQL queries and results. # "level = WARNING" logs neither. (Recommended for production systems.) [logger_boto] level = WARNING handlers = stderr qualname = boto # NOTE(mikal): suds is used by the vmware driver, removing this will # cause many extraneous log lines for their tempest runs. Refer to # https://review.openstack.org/#/c/219225/ for details. [logger_suds] level = INFO handlers = stderr qualname = suds [logger_eventletwsgi] level = WARNING handlers = stderr qualname = eventlet.wsgi.server [handler_stderr] class = StreamHandler args = (sys.stderr,) formatter = context [handler_stdout] class = StreamHandler args = (sys.stdout,) formatter = context [handler_watchedfile] class = handlers.WatchedFileHandler args = ('nova.log',) formatter = context [handler_syslog] class = handlers.SysLogHandler args = ('/dev/log', handlers.SysLogHandler.LOG_USER) formatter = context [handler_null] class = logging.NullHandler formatter = default args = () [formatter_context] class = oslo_log.formatters.ContextFormatter [formatter_default] format = %(message)s nova-13.0.0/etc/nova/cells.json0000664000567000056710000000132312701407773017421 0ustar jenkinsjenkins00000000000000{ "parent": { "name": "parent", "api_url": "http://api.example.com:8774", "transport_url": "rabbit://rabbit.example.com", "weight_offset": 0.0, "weight_scale": 1.0, "is_parent": true }, "cell1": { "name": "cell1", "api_url": "http://api.example.com:8774", "transport_url": "rabbit://rabbit1.example.com", "weight_offset": 0.0, "weight_scale": 1.0, "is_parent": false }, "cell2": { "name": "cell2", "api_url": "http://api.example.com:8774", "transport_url": "rabbit://rabbit2.example.com", "weight_offset": 0.0, "weight_scale": 1.0, "is_parent": false } } nova-13.0.0/babel.cfg0000664000567000056710000000002112701407773015426 0ustar jenkinsjenkins00000000000000[python: **.py] nova-13.0.0/openstack-common.conf0000664000567000056710000000021712701410011020006 0ustar 
jenkinsjenkins00000000000000[DEFAULT] # The list of modules to copy from oslo-incubator module=cliutils # The base module to hold the copy of openstack.common base=nova nova-13.0.0/.coveragerc0000664000567000056710000000014612701407773016031 0ustar jenkinsjenkins00000000000000[run] branch = True source = nova omit = nova/tests/*,nova/openstack/* [report] ignore_errors = True nova-13.0.0/MAINTAINERS0000664000567000056710000000133712701407773015410 0ustar jenkinsjenkins00000000000000Nova doesn't have maintainers in the same way as the Linux Kernel. However, we do have sub-teams who maintain parts of Nova and a series of nominated "czars" to deal with cross functional tasks. Each of these sub-teams and roles are documented on our wiki at https://wiki.openstack.org/wiki/Nova You can find helpful contacts for many parts of our code repository at https://wiki.openstack.org/wiki/Nova#Developer_Contacts We also have a page which documents tips and mentoring opportunities for new Nova developers at https://wiki.openstack.org/wiki/Nova/Mentoring Finally, you should also check out our developer reference at http://docs.openstack.org/developer/nova/devref/ Thanks for your interest in Nova, please come again! 
nova-13.0.0/setup.cfg0000664000567000056710000002552212701410205015516 0ustar jenkinsjenkins00000000000000[metadata] name = nova summary = Cloud computing fabric controller description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://docs.openstack.org/developer/nova/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 [global] setup-hooks = pbr.hooks.setup_hook [files] packages = nova [entry_points] oslo.config.opts = nova = nova.opts:list_opts nova.conf = nova.conf.opts:list_opts nova.api = nova.api.opts:list_opts nova.cells = nova.cells.opts:list_opts nova.compute = nova.compute.opts:list_opts nova.network = nova.network.opts:list_opts nova.network.neutronv2 = nova.network.neutronv2.api:list_opts nova.virt = nova.virt.opts:list_opts nova.cache_utils = nova.cache_utils:list_opts oslo.config.opts.defaults = nova.api = nova.common.config:set_middleware_defaults nova.compute.monitors.cpu = virt_driver = nova.compute.monitors.cpu.virt_driver:Monitor nova.compute.resources = nova.image.download.modules = file = nova.image.download.file console_scripts = nova-all = nova.cmd.all:main nova-api = nova.cmd.api:main nova-api-metadata = nova.cmd.api_metadata:main nova-api-os-compute = nova.cmd.api_os_compute:main nova-cells = nova.cmd.cells:main nova-cert = nova.cmd.cert:main nova-compute = nova.cmd.compute:main nova-conductor = nova.cmd.conductor:main nova-console = nova.cmd.console:main nova-consoleauth = nova.cmd.consoleauth:main nova-dhcpbridge = nova.cmd.dhcpbridge:main nova-idmapshift = nova.cmd.idmapshift:main nova-manage = nova.cmd.manage:main nova-network = nova.cmd.network:main nova-novncproxy = nova.cmd.novncproxy:main nova-rootwrap = 
oslo_rootwrap.cmd:main nova-rootwrap-daemon = oslo_rootwrap.cmd:daemon nova-scheduler = nova.cmd.scheduler:main nova-serialproxy = nova.cmd.serialproxy:main nova-spicehtml5proxy = nova.cmd.spicehtml5proxy:main nova-xvpvncproxy = nova.cmd.xvpvncproxy:main nova.api.v21.extensions = access_ips = nova.api.openstack.compute.access_ips:AccessIPs admin_actions = nova.api.openstack.compute.admin_actions:AdminActions admin_password = nova.api.openstack.compute.admin_password:AdminPassword agents = nova.api.openstack.compute.agents:Agents aggregates = nova.api.openstack.compute.aggregates:Aggregates assisted_volume_snapshots = nova.api.openstack.compute.assisted_volume_snapshots:AssistedVolumeSnapshots attach_interfaces = nova.api.openstack.compute.attach_interfaces:AttachInterfaces availability_zone = nova.api.openstack.compute.availability_zone:AvailabilityZone baremetal_nodes = nova.api.openstack.compute.baremetal_nodes:BareMetalNodes block_device_mapping = nova.api.openstack.compute.block_device_mapping:BlockDeviceMapping cells = nova.api.openstack.compute.cells:Cells certificates = nova.api.openstack.compute.certificates:Certificates cloudpipe = nova.api.openstack.compute.cloudpipe:Cloudpipe config_drive = nova.api.openstack.compute.config_drive:ConfigDrive console_auth_tokens = nova.api.openstack.compute.console_auth_tokens:ConsoleAuthTokens console_output = nova.api.openstack.compute.console_output:ConsoleOutput consoles = nova.api.openstack.compute.consoles:Consoles create_backup = nova.api.openstack.compute.create_backup:CreateBackup deferred_delete = nova.api.openstack.compute.deferred_delete:DeferredDelete disk_config = nova.api.openstack.compute.disk_config:DiskConfig evacuate = nova.api.openstack.compute.evacuate:Evacuate extended_availability_zone = nova.api.openstack.compute.extended_availability_zone:ExtendedAvailabilityZone extended_server_attributes = nova.api.openstack.compute.extended_server_attributes:ExtendedServerAttributes extended_status = 
nova.api.openstack.compute.extended_status:ExtendedStatus extended_volumes = nova.api.openstack.compute.extended_volumes:ExtendedVolumes extension_info = nova.api.openstack.compute.extension_info:ExtensionInfo fixed_ips = nova.api.openstack.compute.fixed_ips:FixedIps flavors = nova.api.openstack.compute.flavors:Flavors flavors_extraspecs = nova.api.openstack.compute.flavors_extraspecs:FlavorsExtraSpecs flavor_access = nova.api.openstack.compute.flavor_access:FlavorAccess flavor_rxtx = nova.api.openstack.compute.flavor_rxtx:FlavorRxtx flavor_manage = nova.api.openstack.compute.flavor_manage:FlavorManage floating_ip_dns = nova.api.openstack.compute.floating_ip_dns:FloatingIpDns floating_ip_pools = nova.api.openstack.compute.floating_ip_pools:FloatingIpPools floating_ips = nova.api.openstack.compute.floating_ips:FloatingIps floating_ips_bulk = nova.api.openstack.compute.floating_ips_bulk:FloatingIpsBulk fping = nova.api.openstack.compute.fping:Fping hide_server_addresses = nova.api.openstack.compute.hide_server_addresses:HideServerAddresses hosts = nova.api.openstack.compute.hosts:Hosts hypervisors = nova.api.openstack.compute.hypervisors:Hypervisors images = nova.api.openstack.compute.images:Images image_metadata = nova.api.openstack.compute.image_metadata:ImageMetadata image_size = nova.api.openstack.compute.image_size:ImageSize instance_actions = nova.api.openstack.compute.instance_actions:InstanceActions instance_usage_audit_log = nova.api.openstack.compute.instance_usage_audit_log:InstanceUsageAuditLog ips = nova.api.openstack.compute.ips:IPs keypairs = nova.api.openstack.compute.keypairs:Keypairs limits = nova.api.openstack.compute.limits:Limits lock_server = nova.api.openstack.compute.lock_server:LockServer migrate_server = nova.api.openstack.compute.migrate_server:MigrateServer migrations = nova.api.openstack.compute.migrations:Migrations multinic = nova.api.openstack.compute.multinic:Multinic multiple_create = 
nova.api.openstack.compute.multiple_create:MultipleCreate networks = nova.api.openstack.compute.networks:Networks networks_associate = nova.api.openstack.compute.networks_associate:NetworksAssociate pause_server = nova.api.openstack.compute.pause_server:PauseServer personality = nova.api.openstack.compute.personality:Personality preserve_ephemeral_rebuild = nova.api.openstack.compute.preserve_ephemeral_rebuild:PreserveEphemeralRebuild quota_classes = nova.api.openstack.compute.quota_classes:QuotaClasses quota_sets = nova.api.openstack.compute.quota_sets:QuotaSets remote_consoles = nova.api.openstack.compute.remote_consoles:RemoteConsoles rescue = nova.api.openstack.compute.rescue:Rescue scheduler_hints = nova.api.openstack.compute.scheduler_hints:SchedulerHints security_group_default_rules = nova.api.openstack.compute.security_group_default_rules:SecurityGroupDefaultRules security_groups = nova.api.openstack.compute.security_groups:SecurityGroups server_diagnostics = nova.api.openstack.compute.server_diagnostics:ServerDiagnostics server_external_events = nova.api.openstack.compute.server_external_events:ServerExternalEvents server_metadata = nova.api.openstack.compute.server_metadata:ServerMetadata server_migrations = nova.api.openstack.compute.server_migrations:ServerMigrations server_password = nova.api.openstack.compute.server_password:ServerPassword server_usage = nova.api.openstack.compute.server_usage:ServerUsage server_groups = nova.api.openstack.compute.server_groups:ServerGroups servers = nova.api.openstack.compute.servers:Servers services = nova.api.openstack.compute.services:Services shelve = nova.api.openstack.compute.shelve:Shelve simple_tenant_usage = nova.api.openstack.compute.simple_tenant_usage:SimpleTenantUsage suspend_server = nova.api.openstack.compute.suspend_server:SuspendServer tenant_networks = nova.api.openstack.compute.tenant_networks:TenantNetworks used_limits = nova.api.openstack.compute.used_limits:UsedLimits user_data = 
nova.api.openstack.compute.user_data:UserData versions = nova.api.openstack.compute.versionsV21:Versions virtual_interfaces = nova.api.openstack.compute.virtual_interfaces:VirtualInterfaces volumes = nova.api.openstack.compute.volumes:Volumes nova.api.v21.extensions.server.create = access_ips = nova.api.openstack.compute.access_ips:AccessIPs availability_zone = nova.api.openstack.compute.availability_zone:AvailabilityZone block_device_mapping = nova.api.openstack.compute.block_device_mapping:BlockDeviceMapping block_device_mapping_v1 = nova.api.openstack.compute.block_device_mapping_v1:BlockDeviceMappingV1 config_drive = nova.api.openstack.compute.config_drive:ConfigDrive disk_config = nova.api.openstack.compute.disk_config:DiskConfig keypairs_create = nova.api.openstack.compute.keypairs:Keypairs multiple_create = nova.api.openstack.compute.multiple_create:MultipleCreate personality = nova.api.openstack.compute.personality:Personality scheduler_hints = nova.api.openstack.compute.scheduler_hints:SchedulerHints security_groups = nova.api.openstack.compute.security_groups:SecurityGroups user_data = nova.api.openstack.compute.user_data:UserData nova.api.v21.extensions.server.rebuild = access_ips = nova.api.openstack.compute.access_ips:AccessIPs disk_config = nova.api.openstack.compute.disk_config:DiskConfig personality = nova.api.openstack.compute.personality:Personality preserve_ephemeral_rebuild = nova.api.openstack.compute.preserve_ephemeral_rebuild:PreserveEphemeralRebuild nova.api.v21.extensions.server.update = access_ips = nova.api.openstack.compute.access_ips:AccessIPs disk_config = nova.api.openstack.compute.disk_config:DiskConfig nova.api.v21.extensions.server.resize = disk_config = nova.api.openstack.compute.disk_config:DiskConfig nova.api.v21.test_extensions = basic = nova.tests.unit.api.openstack.compute.basic:Basic microversions = nova.tests.unit.api.openstack.compute.microversions:Microversions nova.ipv6_backend = rfc2462 = nova.ipv6.rfc2462 
account_identifier = nova.ipv6.account_identifier nova.scheduler.host_manager = host_manager = nova.scheduler.host_manager:HostManager ironic_host_manager = nova.scheduler.ironic_host_manager:IronicHostManager nova.scheduler.driver = filter_scheduler = nova.scheduler.filter_scheduler:FilterScheduler caching_scheduler = nova.scheduler.caching_scheduler:CachingScheduler chance_scheduler = nova.scheduler.chance:ChanceScheduler fake_scheduler = nova.tests.unit.scheduler.fakes:FakeScheduler [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [build_apiguide] all_files = 1 build-dir = api-guide/build source-dir = api-guide/source [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 [compile_catalog] directory = nova/locale domain = nova [update_catalog] domain = nova output_dir = nova/locale input_file = nova/locale/nova.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = nova/locale/nova.pot [wheel] universal = 1 [pbr] autodoc_index_modules = True autodoc_exclude_modules = nova.wsgi.nova-* nova.tests.* warnerrors = true nova-13.0.0/tools/0000775000567000056710000000000012701410205015027 5ustar jenkinsjenkins00000000000000nova-13.0.0/tools/pretty_tox.sh0000775000567000056710000000065212701407773017632 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash set -o pipefail TESTRARGS=$1 # --until-failure is not compatible with --subunit see: # # https://bugs.launchpad.net/testrepository/+bug/1411804 # # this work around exists until that is addressed if [[ "$TESTARGS" =~ "until-failure" ]]; then python setup.py testr --slowest --testr-args="$TESTRARGS" else python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f fi nova-13.0.0/tools/flake8wrap.sh0000775000567000056710000000073312701407773017455 0ustar jenkinsjenkins00000000000000#!/bin/sh # # A simple wrapper around flake8 which makes it possible # to ask it to only verify files changed in the current # git HEAD 
patch. # # Intended to be invoked via tox: # # tox -epep8 -- -HEAD # if test "x$1" = "x-HEAD" ; then shift files=$(git diff --name-only HEAD~1 | tr '\n' ' ') echo "Running flake8 on ${files}" diff -u --from-file /dev/null ${files} | flake8 --diff "$@" else echo "Running flake8 on all files" exec flake8 "$@" fi nova-13.0.0/tools/regression_tester.py0000775000567000056710000000672012701407773021177 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tool for checking if patch contains a regression test. By default runs against current patch but can be set to use any gerrit review as specified by change number (uses 'git review -d'). Idea: take tests from patch to check, and run against code from previous patch. If new tests pass, then no regression test, if new tests fails against old code then either * new tests depend on new code and cannot confirm regression test is valid (false positive) * new tests detects the bug being fixed (detect valid regression test) Due to the risk of false positives, the results from this need some human interpretation. 
""" from __future__ import print_function import optparse import string import subprocess import sys def run(cmd, fail_ok=False): print("running: %s" % cmd) obj = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) obj.wait() if obj.returncode != 0 and not fail_ok: print("The above command terminated with an error.") sys.exit(obj.returncode) return obj.stdout.read() def main(): usage = """ Tool for checking if a patch includes a regression test. Usage: %prog [options]""" parser = optparse.OptionParser(usage) parser.add_option("-r", "--review", dest="review", help="gerrit review number to test") (options, args) = parser.parse_args() if options.review: original_branch = run("git rev-parse --abbrev-ref HEAD") run("git review -d %s" % options.review) else: print ("no gerrit review number specified, running on latest commit" "on current branch.") test_works = False # run new tests with old code run("git checkout HEAD^ nova") run("git checkout HEAD nova/tests") # identify which tests have changed tests = run("git whatchanged --format=oneline -1 | grep \"nova/tests\" " "| cut -f2").split() test_list = [] for test in tests: test_list.append(string.replace(test[0:-3], '/', '.')) if not test_list: test_works = False expect_failure = "" else: # run new tests, expect them to fail expect_failure = run(("tox -epy27 %s 2>&1" % string.join(test_list)), fail_ok=True) if "FAILED (id=" in expect_failure: test_works = True # cleanup run("git checkout HEAD nova") if options.review: new_branch = run("git status | head -1 | cut -d ' ' -f 4") run("git checkout %s" % original_branch) run("git branch -D %s" % new_branch) print(expect_failure) print("") print("*******************************") if test_works: print("FOUND a regression test") else: print("NO regression test") sys.exit(1) if __name__ == "__main__": main() nova-13.0.0/tools/install_venv_common.py0000664000567000056710000001350712701407773021503 0ustar jenkinsjenkins00000000000000# Copyright 2013 
OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides methods needed by installation script for OpenStack development virtual environments. Since this script is used to bootstrap a virtualenv from the system's Python environment, it should be kept strictly compatible with Python 2.6. Synced in from openstack-common """ from __future__ import print_function import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. 
""" if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. 
""" if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install.") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. 
Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() nova-13.0.0/tools/ebtables.workaround0000664000567000056710000000253512701407773020752 0ustar jenkinsjenkins00000000000000#!/bin/bash # # Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # # This is a terrible, terrible, truly terrible work around for # environments that have libvirt < 1.2.11. ebtables requires that you # specifically tell it you would like to not race and get punched in # the face when 2 run at the same time with a --concurrent flag. # # INSTALL instructions # # * Copy /sbin/ebtables to /sbin/ebtables.real # * Copy the ebtables.workaround script to /sbin/ebtables # # Note: upgrades to ebtables will overwrite this work around. If you # are packaging this file consider putting a trigger in place so that # the workaround is replaced after ebtables upgrade. # # Additional Note: this file can be removed from nova once our libvirt # minimum is >= 1.2.11. 
flock -w 300 /var/lock/ebtables.nova /sbin/ebtables.real $@ nova-13.0.0/tools/install_venv.py0000664000567000056710000000454212701407773020132 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import os import sys import install_venv_common as install_venv def print_help(venv, root): help = """ Nova development environment setup is complete. Nova development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Nova virtualenv for the extent of your current shell session you can run: $ source %s/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ %s/tools/with_venv.sh Also, make test will automatically use the virtualenv. 
""" print(help % (venv, root)) def main(argv): root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) if os.environ.get('tools_path'): root = os.environ['tools_path'] venv = os.path.join(root, '.venv') if os.environ.get('venv'): venv = os.environ['venv'] pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'Nova' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help(venv, root) if __name__ == '__main__': main(sys.argv) nova-13.0.0/tools/clean-vlans0000775000567000056710000000214012701407773017175 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
export LC_ALL=C sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo brctl delbr foo sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -ifoo ip link del foo nova-13.0.0/tools/build_latex_pdf.sh0000775000567000056710000000154012701407773020533 0ustar jenkinsjenkins00000000000000#!/bin/bash # Build tox venv and use it tox -edocs --notest source .tox/docs/bin/activate # Build latex source sphinx-build -b latex doc/source doc/build/latex pushd doc/build/latex # Workaround all the sphinx latex bugs # Convert svg to png (requires ImageMagick) convert architecture.svg architecture.png # Update the latex to point to the new image, switch unicode chars to latex # markup, and add packages for symbols sed -i -e 's/architecture.svg/architecture.png/g' -e 's/\\code{✔}/\\checkmark/g' -e 's/\\code{✖}/\\ding{54}/g' -e 's/\\usepackage{multirow}/\\usepackage{multirow}\n\\usepackage{amsmath,amssymb,latexsym}\n\\usepackage{pifont}/g' Nova.tex # To run the actual latex build you need to ensure that you have latex installed # on ubuntu the texlive-full package will take care of this make deactivate popd cp doc/build/latex/Nova.pdf . nova-13.0.0/tools/abandon_old_reviews.sh0000775000567000056710000000534612701407773021422 0ustar jenkinsjenkins00000000000000#!/bin/bash # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # # # # before you run this modify your .ssh/config to create a # review.openstack.org entry: # # Host review.openstack.org # User # Port 29418 # # Note: due to gerrit bug somewhere, this double posts messages. :( # first purge the all reviews that are more than 4w old and blocked by a core -2 set -o errexit function abandon_review { local gitid=$1 shift local msg=$@ echo "Abandoning $gitid" # echo ssh review.openstack.org gerrit review $gitid --abandon --message \"$msg\" ssh review.openstack.org gerrit review $gitid --abandon --message \"$msg\" } PROJECTS="(project:openstack/nova OR project:openstack/python-novaclient)" blocked_reviews=$(ssh review.openstack.org "gerrit query --current-patch-set --format json $PROJECTS status:open age:4w label:Code-Review<=-2" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g') blocked_msg=$(cat < 4 weeks without comment and currently blocked by a core reviewer with a -2. We are abandoning this for now. Feel free to reactivate the review by pressing the restore button and contacting the reviewer with the -2 on this review to ensure you address their concerns. EOF ) # For testing, put in a git rev of something you own and uncomment # blocked_reviews="b6c4218ae4d75b86c33fa3d37c27bc23b46b6f0f" for review in $blocked_reviews; do # echo ssh review.openstack.org gerrit review $review --abandon --message \"$msg\" echo "Blocked review $review" abandon_review $review $blocked_msg done # then purge all the reviews that are > 4w with no changes and Jenkins has -1ed failing_reviews=$(ssh review.openstack.org "gerrit query --current-patch-set --format json $PROJECTS status:open age:4w NOT label:Verified>=1,jenkins" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g') failing_msg=$(cat < 4 weeks without comment, and failed Jenkins the last time it was checked. We are abandoning this for now. 
Feel free to reactivate the review by pressing the restore button and leaving a 'recheck' comment to get fresh test results. EOF ) for review in $failing_reviews; do echo "Failing review $review" abandon_review $review $failing_msg done nova-13.0.0/tools/xenserver/0000775000567000056710000000000012701410205017050 5ustar jenkinsjenkins00000000000000nova-13.0.0/tools/xenserver/vdi_chain_cleanup.py0000664000567000056710000000713612701407773023104 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script is designed to cleanup any VHDs (and their descendents) which have a bad parent pointer. The script needs to be run in the dom0 of the affected host. 
The available actions are: - print: display the filenames of the affected VHDs - delete: remove the affected VHDs - move: move the affected VHDs out of the SR into another directory """ import glob import os import subprocess import sys class ExecutionFailed(Exception): def __init__(self, returncode, stdout, stderr, max_stream_length=32): self.returncode = returncode self.stdout = stdout[:max_stream_length] self.stderr = stderr[:max_stream_length] self.max_stream_length = max_stream_length def __repr__(self): return "" % ( self.returncode, self.stdout, self.stderr) __str__ = __repr__ def execute(cmd, ok_exit_codes=None): if ok_exit_codes is None: ok_exit_codes = [0] proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, stderr) = proc.communicate() if proc.returncode not in ok_exit_codes: raise ExecutionFailed(proc.returncode, stdout, stderr) return proc.returncode, stdout, stderr def usage(): print "usage: %s " % sys.argv[0] sys.exit(1) def main(): if len(sys.argv) < 3: usage() sr_path = sys.argv[1] action = sys.argv[2] if action not in ('print', 'delete', 'move'): usage() if action == 'move': if len(sys.argv) < 4: print "error: must specify where to move bad VHDs" sys.exit(1) bad_vhd_path = sys.argv[3] if not os.path.exists(bad_vhd_path): os.makedirs(bad_vhd_path) bad_leaves = [] descendents = {} for fname in glob.glob(os.path.join(sr_path, "*.vhd")): (returncode, stdout, stderr) = execute( ['vhd-util', 'query', '-n', fname, '-p'], ok_exit_codes=[0, 22]) stdout = stdout.strip() if stdout.endswith('.vhd'): try: descendents[stdout].append(fname) except KeyError: descendents[stdout] = [fname] elif 'query failed' in stdout: bad_leaves.append(fname) def walk_vhds(root): yield root if root in descendents: for child in descendents[root]: for vhd in walk_vhds(child): yield vhd for bad_leaf in bad_leaves: for bad_vhd in walk_vhds(bad_leaf): print bad_vhd if action == "print": pass elif action == "delete": os.unlink(bad_vhd) elif action == 
"move": new_path = os.path.join(bad_vhd_path, os.path.basename(bad_vhd)) os.rename(bad_vhd, new_path) else: raise Exception("invalid action %s" % action) if __name__ == '__main__': main() nova-13.0.0/tools/xenserver/stress_test.py0000664000567000056710000001206212701407773022025 0ustar jenkinsjenkins00000000000000""" This script concurrently builds and migrates instances. This can be useful when troubleshooting race-conditions in virt-layer code. Expects: novarc to be sourced in the environment Helper Script for Xen Dom0: # cat /tmp/destroy_cache_vdis #!/bin/bash xe vdi-list | grep "Glance Image" -C1 | grep "^uuid" | awk '{print $5}' | xargs -n1 -I{} xe vdi-destroy uuid={} """ import argparse import contextlib import multiprocessing import subprocess import sys import time DOM0_CLEANUP_SCRIPT = "/tmp/destroy_cache_vdis" def run(cmd): ret = subprocess.call(cmd, shell=True) if ret != 0: print >> sys.stderr, "Command exited non-zero: %s" % cmd @contextlib.contextmanager def server_built(server_name, image_name, flavor=1, cleanup=True): run("nova boot --image=%(image_name)s --flavor=%(flavor)s" " --poll %(server_name)s" % locals()) try: yield finally: if cleanup: run("nova delete %(server_name)s" % locals()) @contextlib.contextmanager def snapshot_taken(server_name, snapshot_name, cleanup=True): run("nova image-create %(server_name)s %(snapshot_name)s" " --poll" % locals()) try: yield finally: if cleanup: run("nova image-delete %(snapshot_name)s" % locals()) def migrate_server(server_name): run("nova migrate %(server_name)s --poll" % locals()) cmd = "nova list | grep %(server_name)s | awk '{print $6}'" % locals() proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) stdout, stderr = proc.communicate() status = stdout.strip() if status.upper() != 'VERIFY_RESIZE': print >> sys.stderr, "Server %(server_name)s failed to rebuild"\ % locals() return False # Confirm the resize run("nova resize-confirm %(server_name)s" % locals()) return True def 
test_migrate(context): count, args = context server_name = "server%d" % count cleanup = args.cleanup with server_built(server_name, args.image, cleanup=cleanup): # Migrate A -> B result = migrate_server(server_name) if not result: return False # Migrate B -> A return migrate_server(server_name) def rebuild_server(server_name, snapshot_name): run("nova rebuild %(server_name)s %(snapshot_name)s --poll" % locals()) cmd = "nova list | grep %(server_name)s | awk '{print $6}'" % locals() proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) stdout, stderr = proc.communicate() status = stdout.strip() if status != 'ACTIVE': print >> sys.stderr, "Server %(server_name)s failed to rebuild"\ % locals() return False return True def test_rebuild(context): count, args = context server_name = "server%d" % count snapshot_name = "snap%d" % count cleanup = args.cleanup with server_built(server_name, args.image, cleanup=cleanup): with snapshot_taken(server_name, snapshot_name, cleanup=cleanup): return rebuild_server(server_name, snapshot_name) def _parse_args(): parser = argparse.ArgumentParser( description='Test Nova for Race Conditions.') parser.add_argument('tests', metavar='TESTS', type=str, nargs='*', default=['rebuild', 'migrate'], help='tests to run: [rebuilt|migrate]') parser.add_argument('-i', '--image', help="image to build from", required=True) parser.add_argument('-n', '--num-runs', type=int, help="number of runs", default=1) parser.add_argument('-c', '--concurrency', type=int, default=5, help="number of concurrent processes") parser.add_argument('--no-cleanup', action='store_false', dest="cleanup", default=True) parser.add_argument('-d', '--dom0-ips', help="IP of dom0's to run cleanup script") return parser.parse_args() def main(): dom0_cleanup_script = DOM0_CLEANUP_SCRIPT args = _parse_args() if args.dom0_ips: dom0_ips = args.dom0_ips.split(',') else: dom0_ips = [] start_time = time.time() batch_size = min(args.num_runs, args.concurrency) pool = 
multiprocessing.Pool(processes=args.concurrency) results = [] for test in args.tests: test_func = globals().get("test_%s" % test) if not test_func: print >> sys.stderr, "test '%s' not found" % test sys.exit(1) contexts = [(x, args) for x in range(args.num_runs)] try: results += pool.map(test_func, contexts) finally: if args.cleanup: for dom0_ip in dom0_ips: run("ssh root@%(dom0_ip)s %(dom0_cleanup_script)s" % locals()) success = all(results) result = "SUCCESS" if success else "FAILED" duration = time.time() - start_time print "%s, finished in %.2f secs" % (result, duration) sys.exit(0 if success else 1) if __name__ == "__main__": main() nova-13.0.0/tools/xenserver/destroy_cached_images.py0000664000567000056710000000442312701407773023752 0ustar jenkinsjenkins00000000000000""" destroy_cached_images.py This script is used to clean up Glance images that are cached in the SR. By default, this script will only cleanup unused cached images. Options: --dry_run - Don't actually destroy the VDIs --all_cached - Destroy all cached images instead of just unused cached images. """ import eventlet eventlet.monkey_patch() import os import sys from oslo_config import cfg # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir, os.pardir)) if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')): sys.path.insert(0, POSSIBLE_TOPDIR) from nova import config from nova import utils from nova.virt.xenapi.client import session from nova.virt.xenapi import vm_utils destroy_opts = [ cfg.BoolOpt('all_cached', default=False, help='Destroy all cached images instead of just unused cached' ' images.'), cfg.BoolOpt('dry_run', default=False, help='Don\'t actually delete the VDIs.') ] CONF = cfg.CONF CONF.register_cli_opts(destroy_opts) CONF.import_opt('connection_url', 'nova.virt.xenapi.driver', group='xenserver') CONF.import_opt('connection_username', 'nova.virt.xenapi.driver', group='xenserver') CONF.import_opt('connection_password', 'nova.virt.xenapi.driver', group='xenserver') def main(): config.parse_args(sys.argv) utils.monkey_patch() _session = session.XenAPISession(CONF.xenserver.connection_url, CONF.xenserver.connection_username, CONF.xenserver.connection_password) sr_ref = vm_utils.safe_find_sr(_session) destroyed = vm_utils.destroy_cached_images( _session, sr_ref, all_cached=CONF.all_cached, dry_run=CONF.dry_run) if '--verbose' in sys.argv: print '\n'.join(destroyed) print "Destroyed %d cached VDIs" % len(destroyed) if __name__ == "__main__": main() nova-13.0.0/tools/xenserver/vm_vdi_cleaner.py0000775000567000056710000002542112701407773022426 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """vm_vdi_cleaner.py - List or clean orphaned VDIs/instances on XenServer.""" import doctest import os import sys from oslo_config import cfg import XenAPI possible_topdir = os.getcwd() if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): sys.path.insert(0, possible_topdir) from nova import config from nova import context from nova import db from nova import exception from oslo_utils import timeutils from nova.virt import virtapi from nova.virt.xenapi import driver as xenapi_driver cleaner_opts = [ cfg.IntOpt('zombie_instance_updated_at_window', default=172800, help='Number of seconds zombie instances are cleaned up.'), ] cli_opt = cfg.StrOpt('command', help='Cleaner command') CONF = cfg.CONF CONF.register_opts(cleaner_opts) CONF.register_cli_opt(cli_opt) CONF.import_opt('verbose', 'nova.openstack.common.log') CONF.import_opt("resize_confirm_window", "nova.compute.manager") ALLOWED_COMMANDS = ["list-vdis", "clean-vdis", "list-instances", "clean-instances", "test"] def call_xenapi(xenapi, method, *args): """Make a call to xapi.""" return xenapi._session.call_xenapi(method, *args) def find_orphaned_instances(xenapi): """Find and return a list of orphaned instances.""" ctxt = context.get_admin_context(read_deleted="only") orphaned_instances = [] for vm_ref, vm_rec in _get_applicable_vm_recs(xenapi): try: uuid = vm_rec['other_config']['nova_uuid'] instance = db.instance_get_by_uuid(ctxt, uuid) except (KeyError, exception.InstanceNotFound): # NOTE(jk0): Err on the side of caution here. If we don't know # anything about the particular instance, ignore it. print_xen_object("INFO: Ignoring VM", vm_rec, indent_level=0) continue # NOTE(jk0): This would be triggered if a VM was deleted but the # actual deletion process failed somewhere along the line. 
is_active_and_deleting = (instance.vm_state == "active" and instance.task_state == "deleting") # NOTE(jk0): A zombie VM is an instance that is not active and hasn't # been updated in over the specified period. is_zombie_vm = (instance.vm_state != "active" and timeutils.is_older_than(instance.updated_at, CONF.zombie_instance_updated_at_window)) if is_active_and_deleting or is_zombie_vm: orphaned_instances.append((vm_ref, vm_rec, instance)) return orphaned_instances def cleanup_instance(xenapi, instance, vm_ref, vm_rec): """Delete orphaned instances.""" xenapi._vmops._destroy(instance, vm_ref) def _get_applicable_vm_recs(xenapi): """An 'applicable' VM is one that is not a template and not the control domain. """ for vm_ref in call_xenapi(xenapi, 'VM.get_all'): try: vm_rec = call_xenapi(xenapi, 'VM.get_record', vm_ref) except XenAPI.Failure, e: if e.details[0] != 'HANDLE_INVALID': raise continue if vm_rec["is_a_template"] or vm_rec["is_control_domain"]: continue yield vm_ref, vm_rec def print_xen_object(obj_type, obj, indent_level=0, spaces_per_indent=4): """Pretty-print a Xen object. 
Looks like: VM (abcd-abcd-abcd): 'name label here' """ if not CONF.verbose: return uuid = obj["uuid"] try: name_label = obj["name_label"] except KeyError: name_label = "" msg = "%(obj_type)s (%(uuid)s) '%(name_label)s'" % locals() indent = " " * spaces_per_indent * indent_level print "".join([indent, msg]) def _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids): """Find VDIs which are connected to VBDs which are connected to VMs.""" def _is_null_ref(ref): return ref == "OpaqueRef:NULL" def _add_vdi_and_parents_to_connected(vdi_rec, indent_level): indent_level += 1 vdi_and_parent_uuids = [] cur_vdi_rec = vdi_rec while True: cur_vdi_uuid = cur_vdi_rec["uuid"] print_xen_object("VDI", vdi_rec, indent_level=indent_level) connected_vdi_uuids.add(cur_vdi_uuid) vdi_and_parent_uuids.append(cur_vdi_uuid) try: parent_vdi_uuid = vdi_rec["sm_config"]["vhd-parent"] except KeyError: parent_vdi_uuid = None # NOTE(sirp): VDI's can have themselves as a parent?! if parent_vdi_uuid and parent_vdi_uuid != cur_vdi_uuid: indent_level += 1 cur_vdi_ref = call_xenapi(xenapi, 'VDI.get_by_uuid', parent_vdi_uuid) try: cur_vdi_rec = call_xenapi(xenapi, 'VDI.get_record', cur_vdi_ref) except XenAPI.Failure, e: if e.details[0] != 'HANDLE_INVALID': raise break else: break for vm_ref, vm_rec in _get_applicable_vm_recs(xenapi): indent_level = 0 print_xen_object("VM", vm_rec, indent_level=indent_level) vbd_refs = vm_rec["VBDs"] for vbd_ref in vbd_refs: try: vbd_rec = call_xenapi(xenapi, 'VBD.get_record', vbd_ref) except XenAPI.Failure, e: if e.details[0] != 'HANDLE_INVALID': raise continue indent_level = 1 print_xen_object("VBD", vbd_rec, indent_level=indent_level) vbd_vdi_ref = vbd_rec["VDI"] if _is_null_ref(vbd_vdi_ref): continue try: vdi_rec = call_xenapi(xenapi, 'VDI.get_record', vbd_vdi_ref) except XenAPI.Failure, e: if e.details[0] != 'HANDLE_INVALID': raise continue _add_vdi_and_parents_to_connected(vdi_rec, indent_level) def _find_all_vdis_and_system_vdis(xenapi, all_vdi_uuids, 
connected_vdi_uuids): """Collects all VDIs and adds system VDIs to the connected set.""" def _system_owned(vdi_rec): vdi_name = vdi_rec["name_label"] return (vdi_name.startswith("USB") or vdi_name.endswith(".iso") or vdi_rec["type"] == "system") for vdi_ref in call_xenapi(xenapi, 'VDI.get_all'): try: vdi_rec = call_xenapi(xenapi, 'VDI.get_record', vdi_ref) except XenAPI.Failure, e: if e.details[0] != 'HANDLE_INVALID': raise continue vdi_uuid = vdi_rec["uuid"] all_vdi_uuids.add(vdi_uuid) # System owned and non-managed VDIs should be considered 'connected' # for our purposes. if _system_owned(vdi_rec): print_xen_object("SYSTEM VDI", vdi_rec, indent_level=0) connected_vdi_uuids.add(vdi_uuid) elif not vdi_rec["managed"]: print_xen_object("UNMANAGED VDI", vdi_rec, indent_level=0) connected_vdi_uuids.add(vdi_uuid) def find_orphaned_vdi_uuids(xenapi): """Walk VM -> VBD -> VDI change and accumulate connected VDIs.""" connected_vdi_uuids = set() _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids) all_vdi_uuids = set() _find_all_vdis_and_system_vdis(xenapi, all_vdi_uuids, connected_vdi_uuids) orphaned_vdi_uuids = all_vdi_uuids - connected_vdi_uuids return orphaned_vdi_uuids def list_orphaned_vdis(vdi_uuids): """List orphaned VDIs.""" for vdi_uuid in vdi_uuids: if CONF.verbose: print "ORPHANED VDI (%s)" % vdi_uuid else: print vdi_uuid def clean_orphaned_vdis(xenapi, vdi_uuids): """Clean orphaned VDIs.""" for vdi_uuid in vdi_uuids: if CONF.verbose: print "CLEANING VDI (%s)" % vdi_uuid vdi_ref = call_xenapi(xenapi, 'VDI.get_by_uuid', vdi_uuid) try: call_xenapi(xenapi, 'VDI.destroy', vdi_ref) except XenAPI.Failure, exc: print >> sys.stderr, "Skipping %s: %s" % (vdi_uuid, exc) def list_orphaned_instances(orphaned_instances): """List orphaned instances.""" for vm_ref, vm_rec, orphaned_instance in orphaned_instances: if CONF.verbose: print "ORPHANED INSTANCE (%s)" % orphaned_instance.name else: print orphaned_instance.name def clean_orphaned_instances(xenapi, 
orphaned_instances): """Clean orphaned instances.""" for vm_ref, vm_rec, instance in orphaned_instances: if CONF.verbose: print "CLEANING INSTANCE (%s)" % instance.name cleanup_instance(xenapi, instance, vm_ref, vm_rec) def main(): """Main loop.""" config.parse_args(sys.argv) args = CONF(args=sys.argv[1:], usage='%(prog)s [options] --command={' + '|'.join(ALLOWED_COMMANDS) + '}') command = CONF.command if not command or command not in ALLOWED_COMMANDS: CONF.print_usage() sys.exit(1) if CONF.zombie_instance_updated_at_window < CONF.resize_confirm_window: raise Exception("`zombie_instance_updated_at_window` has to be longer" " than `resize_confirm_window`.") # NOTE(blamar) This tool does not require DB access, so passing in the # 'abstract' VirtAPI class is acceptable xenapi = xenapi_driver.XenAPIDriver(virtapi.VirtAPI()) if command == "list-vdis": if CONF.verbose: print "Connected VDIs:\n" orphaned_vdi_uuids = find_orphaned_vdi_uuids(xenapi) if CONF.verbose: print "\nOrphaned VDIs:\n" list_orphaned_vdis(orphaned_vdi_uuids) elif command == "clean-vdis": orphaned_vdi_uuids = find_orphaned_vdi_uuids(xenapi) clean_orphaned_vdis(xenapi, orphaned_vdi_uuids) elif command == "list-instances": orphaned_instances = find_orphaned_instances(xenapi) list_orphaned_instances(orphaned_instances) elif command == "clean-instances": orphaned_instances = find_orphaned_instances(xenapi) clean_orphaned_instances(xenapi, orphaned_instances) elif command == "test": doctest.testmod() else: print "Unknown command '%s'" % command sys.exit(1) if __name__ == "__main__": main() nova-13.0.0/tools/xenserver/populate_other_config.py0000664000567000056710000000616012701407773024024 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ One-time script to populate VDI.other_config. We use metadata stored in VDI.other_config to associate a VDI with a given instance so that we may safely cleanup orphaned VDIs. We had a bug in the code that meant that the vast majority of VDIs created would not have the other_config populated. After deploying the fixed code, this script is intended to be run against all compute-workers in a cluster so that existing VDIs can have their other_configs populated. Run on compute-worker (not Dom0): python ./tools/xenserver/populate_other_config.py [--dry-run|--verbose] """ import os import sys possible_topdir = os.getcwd() if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): sys.path.insert(0, possible_topdir) from nova import config from nova.virt import virtapi from nova.virt.xenapi import driver as xenapi_driver from nova.virt.xenapi import vm_utils from oslo_config import cfg from oslo_utils import uuidutils cli_opts = [ cfg.BoolOpt('dry-run', default=False, help='Whether to actually update other_config.'), ] CONF = cfg.CONF CONF.register_cli_opts(cli_opts) def main(): config.parse_args(sys.argv) xenapi = xenapi_driver.XenAPIDriver(virtapi.VirtAPI()) session = xenapi._session vdi_refs = session.call_xenapi('VDI.get_all') for vdi_ref in vdi_refs: vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref) other_config = vdi_rec['other_config'] # Already set... 
if 'nova_instance_uuid' in other_config: continue name_label = vdi_rec['name_label'] # We only want name-labels of form instance--[optional-suffix] if not name_label.startswith('instance-'): continue # Parse out UUID instance_uuid = name_label.replace('instance-', '')[:36] if not uuidutils.is_uuid_like(instance_uuid): print "error: name label '%s' wasn't UUID-like" % name_label continue vdi_type = vdi_rec['name_description'] # We don't need a full instance record, just the UUID instance = {'uuid': instance_uuid} if not CONF.dry_run: vm_utils._set_vdi_info(session, vdi_ref, vdi_type, name_label, vdi_type, instance) if CONF.verbose: print "Setting other_config for instance_uuid=%s vdi_uuid=%s" % ( instance_uuid, vdi_rec['uuid']) if CONF.dry_run: print "Dry run completed" if __name__ == "__main__": main() nova-13.0.0/tools/xenserver/rotate_xen_guest_logs.sh0000775000567000056710000000472212701407773024037 0ustar jenkinsjenkins00000000000000#!/bin/bash set -eux # Script to rotate console logs # # Should be run on Dom0, with cron, every minute: # * * * * * /root/rotate_xen_guest_logs.sh # # Should clear out the guest logs on every boot # because the domain ids may get re-used for a # different tenant after the reboot # # /var/log/xen/guest should be mounted into a # small loopback device to stop any guest being # able to fill dom0 file system log_dir="/var/log/xen/guest" kb=1024 max_size_bytes=$(($kb*$kb)) truncated_size_bytes=$((5*$kb)) syslog_tag='rotate_xen_guest_logs' log_file_base="${log_dir}/console." 
# Only delete log files older than this number of minutes # to avoid a race where Xen creates the domain and starts # logging before the XAPI VM start returns (and allows us # to preserve the log file using last_dom_id) min_logfile_age=10 # Ensure logging is setup correctly for all domains xenstore-write /local/logconsole/@ "${log_file_base}%d" # Grab the list of logs now to prevent a race where the domain is # started after we get the valid last_dom_ids, but before the logs are # deleted. Add spaces to ensure we can do containment tests below current_logs=$(find "$log_dir" -type f) # Ensure the last_dom_id is set + updated for all running VMs for vm in $(xe vm-list power-state=running --minimal | tr ',' ' '); do xe vm-param-set uuid=$vm other-config:last_dom_id=$(xe vm-param-get uuid=$vm param-name=dom-id) done # Get the last_dom_id for all VMs valid_last_dom_ids=$(xe vm-list params=other-config --minimal | tr ';,' '\n\n' | grep last_dom_id | sed -e 's/last_dom_id: //g' | xargs) echo "Valid dom IDs: $valid_last_dom_ids" | /usr/bin/logger -t $syslog_tag # Remove old console files that do not correspond to valid last_dom_id's allowed_consoles=".*console.\(${valid_last_dom_ids// /\\|}\)$" delete_logs=`find "$log_dir" -type f -mmin +${min_logfile_age} -not -regex "$allowed_consoles"` for log in $delete_logs; do if echo "$current_logs" | grep -q -w "$log"; then echo "Deleting: $log" | /usr/bin/logger -t $syslog_tag rm $log fi done # Truncate all remaining logs for log in `find "$log_dir" -type f -regex '.*console.*' -size +${max_size_bytes}c`; do echo "Truncating log: $log" | /usr/bin/logger -t $syslog_tag tmp="$log.tmp" tail -c $truncated_size_bytes "$log" > "$tmp" mv -f "$tmp" "$log" # Notify xen that it needs to reload the file domid="${log##*.}" xenstore-write /local/logconsole/$domid "$log" xenstore-rm /local/logconsole/$domid done nova-13.0.0/tools/xenserver/cleanup_sm_locks.py0000775000567000056710000000753512701407773023000 0ustar 
jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script to cleanup old XenServer /var/lock/sm locks. XenServer 5.6 and 6.0 do not appear to always cleanup locks when using a FileSR. ext3 has a limit of 32K inode links, so when we have 32K-2 (31998) locks laying around, builds will begin to fail because we can't create any additional locks. This cleanup script is something we can run periodically as a stop-gap measure until this is fixed upstream. This script should be run on the dom0 of the affected machine. """ import errno import optparse import os import sys import time BASE = '/var/lock/sm' def _get_age_days(secs): return float(time.time() - secs) / 86400 def _parse_args(): parser = optparse.OptionParser() parser.add_option("-d", "--dry-run", action="store_true", dest="dry_run", default=False, help="don't actually remove locks") parser.add_option("-l", "--limit", action="store", type='int', dest="limit", default=sys.maxint, help="max number of locks to delete (default: no limit)") parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="don't print status messages to stdout") options, args = parser.parse_args() try: days_old = int(args[0]) except (IndexError, ValueError): parser.print_help() sys.exit(1) return options, days_old def main(): options, days_old = _parse_args() if not os.path.exists(BASE): print >> sys.stderr, "error: '%s' doesn't exist. 
Make sure you're"\ " running this on the dom0." % BASE sys.exit(1) lockpaths_removed = 0 nspaths_removed = 0 for nsname in os.listdir(BASE)[:options.limit]: nspath = os.path.join(BASE, nsname) if not os.path.isdir(nspath): continue # Remove old lockfiles removed = 0 locknames = os.listdir(nspath) for lockname in locknames: lockpath = os.path.join(nspath, lockname) lock_age_days = _get_age_days(os.path.getmtime(lockpath)) if lock_age_days > days_old: lockpaths_removed += 1 removed += 1 if options.verbose: print 'Removing old lock: %03d %s' % (lock_age_days, lockpath) if not options.dry_run: os.unlink(lockpath) # Remove empty namespace paths if len(locknames) == removed: nspaths_removed += 1 if options.verbose: print 'Removing empty namespace: %s' % nspath if not options.dry_run: try: os.rmdir(nspath) except OSError, e: if e.errno == errno.ENOTEMPTY: print >> sys.stderr, "warning: directory '%s'"\ " not empty" % nspath else: raise if options.dry_run: print "** Dry Run **" print "Total locks removed: ", lockpaths_removed print "Total namespaces removed: ", nspaths_removed if __name__ == '__main__': main() nova-13.0.0/tools/with_venv.sh0000775000567000056710000000033212701407773017415 0ustar jenkinsjenkins00000000000000#!/bin/bash tools_path=${tools_path:-$(dirname $0)} venv_path=${venv_path:-${tools_path}} venv_dir=${venv_name:-/../.venv} TOOLS=${tools_path} VENV=${venv:-${venv_path}/${venv_dir}} source ${VENV}/bin/activate && "$@" nova-13.0.0/tools/nova-manage.bash_completion0000664000567000056710000000214012701407773022325 0ustar jenkinsjenkins00000000000000# bash completion for openstack nova-manage _nova_manage_opts="" # lazy init _nova_manage_opts_exp="" # lazy init # dict hack for bash 3 _set_nova_manage_subopts () { eval _nova_manage_subopts_"$1"='$2' } _get_nova_manage_subopts () { eval echo '${_nova_manage_subopts_'"$1"'#_nova_manage_subopts_}' } _nova_manage() { local cur prev subopts COMPREPLY=() cur="${COMP_WORDS[COMP_CWORD]}" 
prev="${COMP_WORDS[COMP_CWORD-1]}" if [ "x$_nova_manage_opts" == "x" ] ; then _nova_manage_opts="`nova-manage bash-completion 2>/dev/null`" _nova_manage_opts_exp="`echo $_nova_manage_opts | sed -e "s/\s/|/g"`" fi if [[ " `echo $_nova_manage_opts` " =~ " $prev " ]] ; then if [ "x$(_get_nova_manage_subopts "$prev")" == "x" ] ; then subopts="`nova-manage bash-completion $prev 2>/dev/null`" _set_nova_manage_subopts "$prev" "$subopts" fi COMPREPLY=($(compgen -W "$(_get_nova_manage_subopts "$prev")" -- ${cur})) elif [[ ! " ${COMP_WORDS[@]} " =~ " "($_nova_manage_opts_exp)" " ]] ; then COMPREPLY=($(compgen -W "${_nova_manage_opts}" -- ${cur})) fi return 0 } complete -F _nova_manage nova-manage nova-13.0.0/tools/colorizer.py0000775000567000056710000002677612701407773017456 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2013, Nebula, Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Colorizer Code is borrowed from Twisted: # Copyright (c) 2001-2010 Twisted Matrix Laboratories. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Display a subunit stream through a colorized unittest test runner.""" import heapq import sys import unittest import subunit import testtools class _AnsiColorizer(object): """A colorizer is an object that loosely wraps around a stream, allowing callers to write text to the stream in a particular color. Colorizer classes must implement C{supported()} and C{write(text, color)}. """ _colors = dict(black=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37) def __init__(self, stream): self.stream = stream def supported(cls, stream=sys.stdout): """A class method that returns True if the current platform supports coloring terminal output using this method. Returns False otherwise. 
""" if not stream.isatty(): return False # auto color only on TTYs try: import curses except ImportError: return False else: try: try: return curses.tigetnum("colors") > 2 except curses.error: curses.setupterm() return curses.tigetnum("colors") > 2 except Exception: # guess false in case of error return False supported = classmethod(supported) def write(self, text, color): """Write the given text to the stream in the given color. @param text: Text to be written to the stream. @param color: A string label for a color. e.g. 'red', 'white'. """ color = self._colors[color] self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) class _Win32Colorizer(object): """See _AnsiColorizer docstring.""" def __init__(self, stream): import win32console red, green, blue, bold = (win32console.FOREGROUND_RED, win32console.FOREGROUND_GREEN, win32console.FOREGROUND_BLUE, win32console.FOREGROUND_INTENSITY) self.stream = stream self.screenBuffer = win32console.GetStdHandle( win32console.STD_OUT_HANDLE) self._colors = { 'normal': red | green | blue, 'red': red | bold, 'green': green | bold, 'blue': blue | bold, 'yellow': red | green | bold, 'magenta': red | blue | bold, 'cyan': green | blue | bold, 'white': red | green | blue | bold } def supported(cls, stream=sys.stdout): try: import win32console screenBuffer = win32console.GetStdHandle( win32console.STD_OUT_HANDLE) except ImportError: return False import pywintypes try: screenBuffer.SetConsoleTextAttribute( win32console.FOREGROUND_RED | win32console.FOREGROUND_GREEN | win32console.FOREGROUND_BLUE) except pywintypes.error: return False else: return True supported = classmethod(supported) def write(self, text, color): color = self._colors[color] self.screenBuffer.SetConsoleTextAttribute(color) self.stream.write(text) self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) class _NullColorizer(object): """See _AnsiColorizer docstring.""" def __init__(self, stream): self.stream = stream def supported(cls, stream=sys.stdout): 
return True supported = classmethod(supported) def write(self, text, color): self.stream.write(text) def get_elapsed_time_color(elapsed_time): if elapsed_time > 1.0: return 'red' elif elapsed_time > 0.25: return 'yellow' else: return 'green' class NovaTestResult(testtools.TestResult): def __init__(self, stream, descriptions, verbosity): super(NovaTestResult, self).__init__() self.stream = stream self.showAll = verbosity > 1 self.num_slow_tests = 10 self.slow_tests = [] # this is a fixed-sized heap self.colorizer = None # NOTE(vish): reset stdout for the terminal check stdout = sys.stdout sys.stdout = sys.__stdout__ for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: if colorizer.supported(): self.colorizer = colorizer(self.stream) break sys.stdout = stdout self.start_time = None self.last_time = {} self.results = {} self.last_written = None def _writeElapsedTime(self, elapsed): color = get_elapsed_time_color(elapsed) self.colorizer.write(" %.2f" % elapsed, color) def _addResult(self, test, *args): try: name = test.id() except AttributeError: name = 'Unknown.unknown' test_class, test_name = name.rsplit('.', 1) elapsed = (self._now() - self.start_time).total_seconds() item = (elapsed, test_class, test_name) if len(self.slow_tests) >= self.num_slow_tests: heapq.heappushpop(self.slow_tests, item) else: heapq.heappush(self.slow_tests, item) self.results.setdefault(test_class, []) self.results[test_class].append((test_name, elapsed) + args) self.last_time[test_class] = self._now() self.writeTests() def _writeResult(self, test_name, elapsed, long_result, color, short_result, success): if self.showAll: self.stream.write(' %s' % str(test_name).ljust(66)) self.colorizer.write(long_result, color) if success: self._writeElapsedTime(elapsed) self.stream.writeln() else: self.colorizer.write(short_result, color) def addSuccess(self, test): super(NovaTestResult, self).addSuccess(test) self._addResult(test, 'OK', 'green', '.', True) def addFailure(self, test, err): 
if test.id() == 'process-returncode': return super(NovaTestResult, self).addFailure(test, err) self._addResult(test, 'FAIL', 'red', 'F', False) def addError(self, test, err): super(NovaTestResult, self).addFailure(test, err) self._addResult(test, 'ERROR', 'red', 'E', False) def addSkip(self, test, reason=None, details=None): super(NovaTestResult, self).addSkip(test, reason, details) self._addResult(test, 'SKIP', 'blue', 'S', True) def startTest(self, test): self.start_time = self._now() super(NovaTestResult, self).startTest(test) def writeTestCase(self, cls): if not self.results.get(cls): return if cls != self.last_written: self.colorizer.write(cls, 'white') self.stream.writeln() for result in self.results[cls]: self._writeResult(*result) del self.results[cls] self.stream.flush() self.last_written = cls def writeTests(self): time = self.last_time.get(self.last_written, self._now()) if not self.last_written or (self._now() - time).total_seconds() > 2.0: diff = 3.0 while diff > 2.0: classes = self.results.keys() oldest = min(classes, key=lambda x: self.last_time[x]) diff = (self._now() - self.last_time[oldest]).total_seconds() self.writeTestCase(oldest) else: self.writeTestCase(self.last_written) def done(self): self.stopTestRun() def stopTestRun(self): for cls in list(self.results): self.writeTestCase(cls) self.stream.writeln() self.writeSlowTests() def writeSlowTests(self): # Pare out 'fast' tests slow_tests = [item for item in self.slow_tests if get_elapsed_time_color(item[0]) != 'green'] if slow_tests: slow_total_time = sum(item[0] for item in slow_tests) slow = ("Slowest %i tests took %.2f secs:" % (len(slow_tests), slow_total_time)) self.colorizer.write(slow, 'yellow') self.stream.writeln() last_cls = None # sort by name for elapsed, cls, name in sorted(slow_tests, key=lambda x: x[1] + x[2]): if cls != last_cls: self.colorizer.write(cls, 'white') self.stream.writeln() last_cls = cls self.stream.write(' %s' % str(name).ljust(68)) self._writeElapsedTime(elapsed) 
self.stream.writeln() def printErrors(self): if self.showAll: self.stream.writeln() self.printErrorList('ERROR', self.errors) self.printErrorList('FAIL', self.failures) def printErrorList(self, flavor, errors): for test, err in errors: self.colorizer.write("=" * 70, 'red') self.stream.writeln() self.colorizer.write(flavor, 'red') self.stream.writeln(": %s" % test.id()) self.colorizer.write("-" * 70, 'red') self.stream.writeln() self.stream.writeln("%s" % err) test = subunit.ProtocolTestCase(sys.stdin, passthrough=None) if sys.version_info[0:2] <= (2, 6): runner = unittest.TextTestRunner(verbosity=2) else: runner = unittest.TextTestRunner(verbosity=2, resultclass=NovaTestResult) if runner.run(test).wasSuccessful(): exit_code = 0 else: exit_code = 1 sys.exit(exit_code) nova-13.0.0/tools/reserve-migrations.py0000775000567000056710000000464312701407773021260 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python import argparse import glob import os import subprocess BASE = 'nova/db/sqlalchemy/migrate_repo/versions'.split('/') API_BASE = 'nova/db/sqlalchemy/api_migrations/migrate_repo/versions'.split('/') STUB = """ # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for backports. # Do not use this number for new work. New work starts after # all the placeholders. 
# # See this for more information: # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass """ def get_last_migration(base): path = os.path.join(*tuple(base + ['[0-9]*.py'])) migrations = sorted([os.path.split(fn)[-1] for fn in glob.glob(path)]) return int(migrations[-1].split('_')[0]) def reserve_migrations(base, number, git_add): last = get_last_migration(base) for i in range(last + 1, last + number + 1): name = '%03i_placeholder.py' % i path = os.path.join(*tuple(base + [name])) with open(path, 'w') as f: f.write(STUB) print('Created %s' % path) if git_add: subprocess.call('git add %s' % path, shell=True) def main(): parser = argparse.ArgumentParser() parser.add_argument('-n', '--number', default=10, type=int, help='Number of migrations to reserve') parser.add_argument('-g', '--git-add', action='store_const', const=True, default=False, help='Automatically git-add new migrations') parser.add_argument('-a', '--api', action='store_const', const=True, default=False, help='Reserve migrations for the API database') args = parser.parse_args() if args.api: base = API_BASE else: base = BASE reserve_migrations(base, args.number, args.git_add) if __name__ == '__main__': main() nova-13.0.0/tools/db/0000775000567000056710000000000012701410205015414 5ustar jenkinsjenkins00000000000000nova-13.0.0/tools/db/schema_diff.py0000775000567000056710000002036312701407773020245 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Utility for diff'ing two versions of the DB schema. Each release cycle the plan is to compact all of the migrations from that release into a single file. This is a manual and, unfortunately, error-prone process. To ensure that the schema doesn't change, this tool can be used to diff the compacted DB schema to the original, uncompacted form. The database is specified by providing a SQLAlchemy connection URL WITHOUT the database-name portion (that will be filled in automatically with a temporary database name). The schema versions are specified by providing a git ref (a branch name or commit hash) and a SQLAlchemy-Migrate version number: Run like: MYSQL: ./tools/db/schema_diff.py mysql+pymysql://root@localhost \ master:latest my_branch:82 POSTGRESQL: ./tools/db/schema_diff.py postgresql://localhost \ master:latest my_branch:82 DB2: ./tools/db/schema_diff.py ibm_db_sa://localhost \ master:latest my_branch:82 """ from __future__ import print_function import datetime import glob import os import subprocess import sys from nova.i18n import _ # Dump def dump_db(db_driver, db_name, db_url, migration_version, dump_filename): if not db_url.endswith('/'): db_url += '/' db_url += db_name db_driver.create(db_name) try: _migrate(db_url, migration_version) db_driver.dump(db_name, dump_filename) finally: db_driver.drop(db_name) # Diff def diff_files(filename1, filename2): pipeline = ['diff -U 3 %(filename1)s %(filename2)s' % {'filename1': filename1, 'filename2': filename2}] # Use colordiff if available if subprocess.call(['which', 'colordiff']) == 0: pipeline.append('colordiff') pipeline.append('less -R') cmd = ' | '.join(pipeline) subprocess.check_call(cmd, shell=True) # Database class Mysql(object): def create(self, name): subprocess.check_call(['mysqladmin', '-u', 'root', 'create', name]) def drop(self, name): subprocess.check_call(['mysqladmin', '-f', '-u', 'root', 
'drop', name]) def dump(self, name, dump_filename): subprocess.check_call( 'mysqldump -u root %(name)s > %(dump_filename)s' % {'name': name, 'dump_filename': dump_filename}, shell=True) class Postgresql(object): def create(self, name): subprocess.check_call(['createdb', name]) def drop(self, name): subprocess.check_call(['dropdb', name]) def dump(self, name, dump_filename): subprocess.check_call( 'pg_dump %(name)s > %(dump_filename)s' % {'name': name, 'dump_filename': dump_filename}, shell=True) class Ibm_db_sa(object): @classmethod def db2cmd(cls, cmd): """Wraps a command to be run under the DB2 instance user.""" subprocess.check_call('su - $(db2ilist) -c "%s"' % cmd, shell=True) def create(self, name): self.db2cmd('db2 \'create database %s\'' % name) def drop(self, name): self.db2cmd('db2 \'drop database %s\'' % name) def dump(self, name, dump_filename): self.db2cmd('db2look -d %(name)s -e -o %(dump_filename)s' % {'name': name, 'dump_filename': dump_filename}) # The output file gets dumped to the db2 instance user's home directory # so we have to copy it back to our current working directory. subprocess.check_call('cp /home/$(db2ilist)/%s ./' % dump_filename, shell=True) def _get_db_driver_class(db_url): try: return globals()[db_url.split('://')[0].capitalize()] except KeyError: raise Exception(_("database %s not supported") % db_url) # Migrate MIGRATE_REPO = os.path.join(os.getcwd(), "nova/db/sqlalchemy/migrate_repo") def _migrate(db_url, migration_version): earliest_version = _migrate_get_earliest_version() # NOTE(sirp): sqlalchemy-migrate currently cannot handle the skipping of # migration numbers. 
_migrate_cmd( db_url, 'version_control', str(earliest_version - 1)) upgrade_cmd = ['upgrade'] if migration_version != 'latest': upgrade_cmd.append(str(migration_version)) _migrate_cmd(db_url, *upgrade_cmd) def _migrate_cmd(db_url, *cmd): manage_py = os.path.join(MIGRATE_REPO, 'manage.py') args = ['python', manage_py] args += cmd args += ['--repository=%s' % MIGRATE_REPO, '--url=%s' % db_url] subprocess.check_call(args) def _migrate_get_earliest_version(): versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py') versions = [] for path in glob.iglob(versions_glob): filename = os.path.basename(path) prefix = filename.split('_', 1)[0] try: version = int(prefix) except ValueError: pass versions.append(version) versions.sort() return versions[0] # Git def git_current_branch_name(): ref_name = git_symbolic_ref('HEAD', quiet=True) current_branch_name = ref_name.replace('refs/heads/', '') return current_branch_name def git_symbolic_ref(ref, quiet=False): args = ['git', 'symbolic-ref', ref] if quiet: args.append('-q') proc = subprocess.Popen(args, stdout=subprocess.PIPE) stdout, stderr = proc.communicate() return stdout.strip() def git_checkout(branch_name): subprocess.check_call(['git', 'checkout', branch_name]) def git_has_uncommited_changes(): return subprocess.call(['git', 'diff', '--quiet', '--exit-code']) == 1 # Command def die(msg): print("ERROR: %s" % msg, file=sys.stderr) sys.exit(1) def usage(msg=None): if msg: print("ERROR: %s" % msg, file=sys.stderr) prog = "schema_diff.py" args = ["", "", ""] print("usage: %s %s" % (prog, ' '.join(args)), file=sys.stderr) sys.exit(1) def parse_options(): try: db_url = sys.argv[1] except IndexError: usage("must specify DB connection url") try: orig_branch, orig_version = sys.argv[2].split(':') except IndexError: usage('original branch and version required (e.g. master:82)') try: new_branch, new_version = sys.argv[3].split(':') except IndexError: usage('new branch and version required (e.g. 
master:82)') return db_url, orig_branch, orig_version, new_branch, new_version def main(): timestamp = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S") ORIG_DB = 'orig_db_%s' % timestamp NEW_DB = 'new_db_%s' % timestamp ORIG_DUMP = ORIG_DB + ".dump" NEW_DUMP = NEW_DB + ".dump" options = parse_options() db_url, orig_branch, orig_version, new_branch, new_version = options # Since we're going to be switching branches, ensure user doesn't have any # uncommitted changes if git_has_uncommited_changes(): die("You have uncommitted changes. Please commit them before running " "this command.") db_driver = _get_db_driver_class(db_url)() users_branch = git_current_branch_name() git_checkout(orig_branch) try: # Dump Original Schema dump_db(db_driver, ORIG_DB, db_url, orig_version, ORIG_DUMP) # Dump New Schema git_checkout(new_branch) dump_db(db_driver, NEW_DB, db_url, new_version, NEW_DUMP) diff_files(ORIG_DUMP, NEW_DUMP) finally: git_checkout(users_branch) if os.path.exists(ORIG_DUMP): os.unlink(ORIG_DUMP) if os.path.exists(NEW_DUMP): os.unlink(NEW_DUMP) if __name__ == "__main__": main() nova-13.0.0/tools/enable-pre-commit-hook.sh0000775000567000056710000000232512701407773021646 0ustar jenkinsjenkins00000000000000#!/bin/sh # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
PRE_COMMIT_SCRIPT=.git/hooks/pre-commit make_hook() { echo "exec ./run_tests.sh -N -p" >> $PRE_COMMIT_SCRIPT chmod +x $PRE_COMMIT_SCRIPT if [ -w $PRE_COMMIT_SCRIPT -a -x $PRE_COMMIT_SCRIPT ]; then echo "pre-commit hook was created successfully" else echo "unable to create pre-commit hook" fi } # NOTE(jk0): Make sure we are in nova's root directory before adding the hook. if [ ! -d ".git" ]; then echo "unable to find .git; moving up a directory" cd .. if [ -d ".git" ]; then make_hook else echo "still unable to find .git; hook not created" fi else make_hook fi nova-13.0.0/plugins/0000775000567000056710000000000012701410205015350 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/0000775000567000056710000000000012701410205017371 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/doc/0000775000567000056710000000000012701410205020136 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/doc/networking.rst0000664000567000056710000001367312701407773023111 0ustar jenkinsjenkins00000000000000Multi Tenancy Networking Protections in XenServer ================================================= The purpose of the vif_rules script is to allow multi-tenancy on a XenServer host. In a multi-tenant cloud environment a host machine needs to be able to enforce network isolation amongst guest instances, at both layer two and layer three. The rules prevent guests from taking and using unauthorized IP addresses, sniffing other guests traffic, and prevents ARP poisoning attacks. This current revision only supports IPv4, but will support IPv6 in the future. Kernel Requirements =================== - physdev module - arptables support - ebtables support - iptables support If the kernel doesn't support these, you will need to obtain the Source RPMS for the proper version of XenServer to recompile the dom0 kernel. 
XenServer Requirements (32-bit dom0) ==================================== - arptables 32-bit rpm - ebtables 32-bit rpm - python-simplejson XenServer Environment Specific Notes ==================================== - XenServer 5.5 U1 based on the 2.6.18 kernel didn't include physdev module support. Support for this had to be recompiled into the kernel. - XenServer 5.6 based on the 2.6.27 kernel didn't include physdev, ebtables, or arptables. - XenServer 5.6 FP1 didn't include physdev, ebtables, or arptables but they do have a Cloud Supplemental pack available to partners which swaps out the kernels for kernels that support the networking rules. How it works - tl;dr ==================== iptables, ebtables, and arptables drop rules are applied to all forward chains on the host. These are applied at boot time with an init script. They ensure all forwarded packets are dropped by default. Allow rules are then applied to the instances to ensure they have permission to talk on the internet. How it works - Long =================== Any time an underprivileged domain or domU is started or stopped, it gets a unique domain id (dom_id). This dom_id is utilized in a number of places, one of which is that it is assigned to the virtual interface (vif). The vifs are attached to the bridge that is attached to the physical network. For instance, if you had a public bridge attached to eth0 and your domain id was 5, your vif would be vif5.0. The networking rules are applied to the VIF directly so they apply at the lowest level of the networking stack. Because the VIF changes along with the domain id on any start, stop, or reboot of the instance, the rules need to be removed and re-added any time that occurs. Because the dom_id can change often, the vif_rules script is hooked into the /etc/xensource/scripts/vif script that gets called anytime an instance is started, or stopped, which includes pauses and resumes. 
Examples of the rules ran for the host on boot: iptables -P FORWARD DROP iptables -A FORWARD -m physdev --physdev-in eth0 -j ACCEPT ebtables -P FORWARD DROP ebtables -A FORWARD -o eth0 -j ACCEPT arptables -P FORWARD DROP arptables -A FORWARD --opcode Request --in-interface eth0 -j ACCEPT arptables -A FORWARD --opcode Reply --in-interface eth0 -j ACCEPT Examples of the rules that are ran per instance state change: iptables -A FORWARD -m physdev --physdev-in vif1.0 -s 10.1.135.22/32 -j ACCEPT arptables -A FORWARD --opcode Request --in-interface "vif1.0" \ --source-ip 10.1.135.22 -j ACCEPT arptables -A FORWARD --opcode Reply --in-interface "vif1.0" \ --source-ip 10.1.135.22 --source-mac 9e:6e:cc:19:7f:fe -j ACCEPT ebtables -A FORWARD -p 0806 -o vif1.0 --arp-ip-dst 10.1.135.22 -j ACCEPT ebtables -A FORWARD -p 0800 -o vif1.0 --ip-dst 10.1.135.22 -j ACCEPT ebtables -I FORWARD 1 -s ! 9e:6e:cc:19:7f:fe -i vif1.0 -j DROP Typically when you see a vif, it'll look like vif.. vif2.1 for example would be domain 2 on the second interface. The vif_rules.py script needs to pull information about the IPs and MAC addresses assigned to the instance. The current implementation assumes that information is put into the VM Record into the xenstore-data key in a JSON string. The vif_rules.py script reads out of the JSON string to determine the IPs, and MAC addresses to protect. 
An example format is given below: # xe vm-param-get uuid= param-name=xenstore-data xenstore-data (MRW): vm-data/networking/4040fa7292e4: {"label": "public", "ips": [{"netmask":"255.255.255.0", "enabled":"1", "ip":"173.200.100.10"}], "mac":"40:40:fa:72:92:e4", "gateway":"173.200.100.1", "vm_id":"123456", "dns":["72.3.128.240","72.3.128.241"]}; vm-data/networking/40402321c9b8: {"label":"private", "ips":[{"netmask":"255.255.224.0", "enabled":"1", "ip":"10.177.10.10"}], "routes":[{"route":"10.176.0.0", "netmask":"255.248.0.0", "gateway":"10.177.10.1"}, {"route":"10.191.192.0", "netmask":"255.255.192.0", "gateway":"10.177.10.1"}], "mac":"40:40:23:21:c9:b8"} The key is used for two purposes. First, the vif_rules.py script reads from it to apply the rules needed after parsing the JSON. Second, because it is put into the xenstore-data field, the xenstore is populated with this data on boot. This allows a guest agent the ability to read out data about the instance and apply configurations as needed. Installation ============ - Copy host-rules into /etc/init.d/ and make sure to chmod +x host-rules. - Run 'chkconfig host-rules on' to add the init script to start up. - Copy vif_rules.py into /etc/xensource/scripts - Patch /etc/xensource/scripts/vif using the supplied patch file. It may vary for different versions of XenServer but it should be pretty self explanatory. It calls the vif_rules.py script on domain creation and tear down. - Run '/etc/init.d/host-rules start' to start up the host based rules. - The instance rules will then fire on creation of the VM as long as the correct JSON is in place. 
- You can check to see if the rules are in place with: iptables --list, arptables --list, or ebtables --list nova-13.0.0/plugins/xenserver/networking/0000775000567000056710000000000012701410205021560 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/networking/etc/0000775000567000056710000000000012701410205022333 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/networking/etc/sysconfig/0000775000567000056710000000000012701410205024337 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/networking/etc/sysconfig/openvswitch-nova0000664000567000056710000000141212701407773027612 0ustar jenkinsjenkins00000000000000# The interfaces that you want to apply base OVS rules to. If this is # unspecified then rules are applied to all eth* interfaces, which is a good # default. # # If you are worried about the performance of having rules on interfaces # that aren't carrying tenant traffic, or you want to do something # custom, then here you can explicitly choose the interfaces that should have # rules applied. # # Note that if there is an IP address on the bridge in domain 0 (i.e. the # xenbrX interface) then a rule will be applied that allows traffic to it. # Make sure that this is what you want. If you don't want tenant traffic # to be able to reach domain 0 -- the usual case -- then you should have # tenant traffic and domain 0 on entirely separate bridges. #INTERFACES="eth0 eth1" nova-13.0.0/plugins/xenserver/networking/etc/init.d/0000775000567000056710000000000012701410205023520 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/networking/etc/init.d/host-rules0000775000567000056710000000542712701407773025603 0ustar jenkinsjenkins00000000000000#!/bin/bash # # host-rules Start/Stop the networking host rules # # chkconfig: 2345 85 15 # description: Networking Host Rules for Multi Tenancy Protections # Copyright 2010 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. IPTABLES=/sbin/iptables EBTABLES=/sbin/ebtables ARPTABLES=/sbin/arptables iptables-up() { $IPTABLES -P FORWARD DROP $IPTABLES -A FORWARD -m physdev --physdev-in eth0 -j ACCEPT $IPTABLES -A FORWARD -m physdev --physdev-in eth1 -j ACCEPT } ebtables-up() { $EBTABLES -P FORWARD DROP $EBTABLES -A FORWARD -o eth0 -j ACCEPT $EBTABLES -A FORWARD -o eth1 -j ACCEPT } arptables-up() { $ARPTABLES -P FORWARD DROP $ARPTABLES -A FORWARD --opcode Request --in-interface eth0 -j ACCEPT $ARPTABLES -A FORWARD --opcode Reply --in-interface eth0 -j ACCEPT $ARPTABLES -A FORWARD --opcode Request --in-interface eth1 -j ACCEPT $ARPTABLES -A FORWARD --opcode Reply --in-interface eth1 -j ACCEPT } iptables-down() { $IPTABLES -P FORWARD ACCEPT $IPTABLES -D FORWARD -m physdev --physdev-in eth0 -j ACCEPT $IPTABLES -D FORWARD -m physdev --physdev-in eth1 -j ACCEPT } ebtables-down() { $EBTABLES -P FORWARD ACCEPT $EBTABLES -D FORWARD -o eth0 -j ACCEPT $EBTABLES -D FORWARD -o eth1 -j ACCEPT } arptables-down() { $ARPTABLES -P FORWARD ACCEPT $ARPTABLES -D FORWARD --opcode Request --in-interface eth0 -j ACCEPT $ARPTABLES -D FORWARD --opcode Reply --in-interface eth0 -j ACCEPT $ARPTABLES -D FORWARD --opcode Request --in-interface eth1 -j ACCEPT $ARPTABLES -D FORWARD --opcode Reply --in-interface eth1 -j ACCEPT } start() { iptables-up ebtables-up arptables-up } stop() { iptables-down ebtables-down arptables-down } case "$1" in start) start RETVAL=$? ;; stop) stop RETVAL=$? 
;; restart) stop start RETVAL=$? ;; *) echo $"Usage: $0 {start|stop|restart}" exit 1 ;; esac exit $RETVAL nova-13.0.0/plugins/xenserver/networking/etc/init.d/openvswitch-nova0000775000567000056710000000456112701407773027006 0ustar jenkinsjenkins00000000000000#!/bin/bash # # openvswitch-nova # # chkconfig: 2345 23 89 # description: Apply initial OVS flows for Nova # Copyright 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # Copyright (C) 2009, 2010, 2011 Nicira Networks, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # source function library if [ -f /etc/init.d/functions ]; then . /etc/init.d/functions elif [ -f /etc/rc.d/init.d/functions ]; then . /etc/rc.d/init.d/functions elif [ -f /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "$0: missing LSB shell function library" >&2 exit 1 fi OVS_CONFIGURE_BASE_FLOWS=/etc/xensource/scripts/ovs_configure_base_flows.py if test -e /etc/sysconfig/openvswitch-nova; then . 
/etc/sysconfig/openvswitch-nova else echo "$0: missing configuration file: /etc/sysconfig/openvswitch-nova" exit 1 fi if test -e /etc/xensource/network.conf; then NETWORK_MODE=$(cat /etc/xensource/network.conf) fi case ${NETWORK_MODE:=openvswitch} in vswitch|openvswitch) ;; bridge) exit 0 ;; *) echo "Open vSwitch disabled (/etc/xensource/network.conf is invalid)" >&2 exit 0 ;; esac function run_ovs_conf_base_flows { local action="$1" local all_interfaces=$(cd /sys/class/net/; /bin/ls -d eth*) local interfaces="${INTERFACES-$all_interfaces}" for interface in $interfaces; do /usr/bin/python $OVS_CONFIGURE_BASE_FLOWS $action $interface done } function start { run_ovs_conf_base_flows online } function stop { run_ovs_conf_base_flows offline } function restart { run_ovs_conf_base_flows reset } case "$1" in start) start ;; stop) stop ;; restart) restart ;; *) echo "usage: openvswitch-nova [start|stop|restart]" exit 1 ;; esac nova-13.0.0/plugins/xenserver/networking/etc/udev/0000775000567000056710000000000012701410205023276 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/networking/etc/udev/rules.d/0000775000567000056710000000000012701410205024652 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules0000664000567000056710000000040512701407773032047 0ustar jenkinsjenkins00000000000000SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" # is this one needed? 
#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" nova-13.0.0/plugins/xenserver/networking/etc/xensource/0000775000567000056710000000000012701410205024346 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/networking/etc/xensource/scripts/0000775000567000056710000000000012701410205026035 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch0000664000567000056710000000120112701407773030730 0ustar jenkinsjenkins00000000000000--- vif 2010-12-20 16:39:46.000000000 +0000 +++ vif_modified 2010-11-19 23:24:37.000000000 +0000 @@ -213,6 +213,7 @@ # xs-xen.pq.hq:91e986b8e49f netback-wait-for-hotplug xenstore-write "/local/domain/0/backend/vif/${DOMID}/${DEVID}/hotplug-status" "connected" + python /etc/xensource/scripts/vif_rules.py ${DOMID} online 2>&1 > /dev/null fi ;; @@ -224,6 +225,7 @@ remove) if [ "${TYPE}" = "vif" ] ;then + python /etc/xensource/scripts/vif_rules.py ${DOMID} offline 2>&1 > /dev/null xenstore-rm "${HOTPLUG}/hotplug" fi logger -t scripts-vif "${dev} has been removed" nova-13.0.0/plugins/xenserver/networking/etc/xensource/scripts/novalib.py0000664000567000056710000000236012701407773030062 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import subprocess def execute_get_output(*command): """Execute and return stdout.""" devnull = open(os.devnull, 'w') command = map(str, command) proc = subprocess.Popen(command, close_fds=True, stdout=subprocess.PIPE, stderr=devnull) devnull.close() stdout = proc.communicate()[0] return stdout.strip() def execute(*command): """Execute without returning stdout.""" devnull = open(os.devnull, 'w') command = map(str, command) subprocess.call(command, close_fds=True, stdout=devnull, stderr=devnull) devnull.close() nova-13.0.0/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py0000775000567000056710000002414212701407773033363 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This script is used to configure openvswitch flows on XenServer hosts. 
""" import os import sys # This is written to Python 2.4, since that is what is available on XenServer import netaddr import simplejson as json import novalib # noqa OVS_OFCTL = '/usr/bin/ovs-ofctl' class OvsFlow(object): def __init__(self, bridge, params): self.bridge = bridge self.params = params def add(self, rule): novalib.execute(OVS_OFCTL, 'add-flow', self.bridge, rule % self.params) def clear_flows(self, ofport): novalib.execute(OVS_OFCTL, 'del-flows', self.bridge, "in_port=%s" % ofport) def main(command, vif_raw, net_type): if command not in ('online', 'offline'): return vif_name, dom_id, vif_index = vif_raw.split('-') vif = "%s%s.%s" % (vif_name, dom_id, vif_index) bridge = novalib.execute_get_output('/usr/bin/ovs-vsctl', 'iface-to-br', vif) xsls = novalib.execute_get_output('/usr/bin/xenstore-ls', '/local/domain/%s/vm-data/networking' % dom_id) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: xsread = novalib.execute_get_output('/usr/bin/xenstore-read', '/local/domain/%s/vm-data/networking/%s' % (dom_id, mac)) data = json.loads(xsread) if data["label"] == "public": this_vif = "vif%s.0" % dom_id phys_dev = "eth0" else: this_vif = "vif%s.1" % dom_id phys_dev = "eth1" if vif == this_vif: vif_ofport = novalib.execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', vif, 'ofport') phys_ofport = novalib.execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', phys_dev, 'ofport') params = dict(VIF_NAME=vif, MAC=data['mac'], OF_PORT=vif_ofport, PHYS_PORT=phys_ofport) ovs = OvsFlow(bridge, params) if command == 'offline': # I haven't found a way to clear only IPv4 or IPv6 rules. 
ovs.clear_flows(vif_ofport) if command == 'online': if net_type in ('ipv4', 'all') and 'ips' in data: for ip4 in data['ips']: ovs.params.update({'IPV4_ADDR': ip4['ip']}) apply_ovs_ipv4_flows(ovs, bridge, params) if net_type in ('ipv6', 'all') and 'ip6s' in data: for ip6 in data['ip6s']: mac_eui64 = netaddr.EUI(data['mac']).eui64() link_local = str(mac_eui64.ipv6_link_local()) ovs.params.update({'IPV6_LINK_LOCAL_ADDR': link_local}) ovs.params.update({'IPV6_GLOBAL_ADDR': ip6['ip']}) apply_ovs_ipv6_flows(ovs, bridge, params) def apply_ovs_ipv4_flows(ovs, bridge, params): # When ARP traffic arrives from a vif, push it to virtual port # 9999 for further processing ovs.add("priority=4,arp,in_port=%(OF_PORT)s,dl_src=%(MAC)s," "nw_src=%(IPV4_ADDR)s,arp_sha=%(MAC)s,actions=resubmit:9999") ovs.add("priority=4,arp,in_port=%(OF_PORT)s,dl_src=%(MAC)s," "nw_src=0.0.0.0,arp_sha=%(MAC)s,actions=resubmit:9999") # When IP traffic arrives from a vif, push it to virtual port 9999 # for further processing ovs.add("priority=4,ip,in_port=%(OF_PORT)s,dl_src=%(MAC)s," "nw_src=%(IPV4_ADDR)s,actions=resubmit:9999") # Drop IP bcast/mcast ovs.add("priority=6,ip,in_port=%(OF_PORT)s,dl_dst=ff:ff:ff:ff:ff:ff," "actions=drop") ovs.add("priority=5,ip,in_port=%(OF_PORT)s,nw_dst=224.0.0.0/4," "actions=drop") ovs.add("priority=5,ip,in_port=%(OF_PORT)s,nw_dst=240.0.0.0/4," "actions=drop") # Pass ARP requests coming from any VMs on the local HV (port # 9999) or coming from external sources (PHYS_PORT) to the VM and # physical NIC. We output this to the physical NIC as well, since # with instances of shared ip groups, the active host for the # destination IP might be elsewhere... 
ovs.add("priority=3,arp,in_port=9999,nw_dst=%(IPV4_ADDR)s," "actions=output:%(OF_PORT)s,output:%(PHYS_PORT)s") # Pass ARP traffic originating from external sources the VM with # the matching IP address ovs.add("priority=3,arp,in_port=%(PHYS_PORT)s,nw_dst=%(IPV4_ADDR)s," "actions=output:%(OF_PORT)s") # Pass ARP traffic from one VM (src mac already validated) to # another VM on the same HV ovs.add("priority=3,arp,in_port=9999,dl_dst=%(MAC)s," "actions=output:%(OF_PORT)s") # Pass ARP replies coming from the external environment to the # target VM ovs.add("priority=3,arp,in_port=%(PHYS_PORT)s,dl_dst=%(MAC)s," "actions=output:%(OF_PORT)s") # ALL IP traffic: Pass IP data coming from any VMs on the local HV # (port 9999) or coming from external sources (PHYS_PORT) to the # VM and physical NIC. We output this to the physical NIC as # well, since with instances of shared ip groups, the active host # for the destination IP might be elsewhere... ovs.add("priority=3,ip,in_port=9999,dl_dst=%(MAC)s," "nw_dst=%(IPV4_ADDR)s,actions=output:%(OF_PORT)s," "output:%(PHYS_PORT)s") # Pass IP traffic from the external environment to the VM ovs.add("priority=3,ip,in_port=%(PHYS_PORT)s,dl_dst=%(MAC)s," "nw_dst=%(IPV4_ADDR)s,actions=output:%(OF_PORT)s") # Send any local traffic to the physical NIC's OVS port for # physical network learning ovs.add("priority=2,in_port=9999,actions=output:%(PHYS_PORT)s") def apply_ovs_ipv6_flows(ovs, bridge, params): # allow valid IPv6 ND outbound (are both global and local IPs needed?) 
# Neighbor Solicitation ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s," "actions=normal") ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,actions=normal") ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s," "actions=normal") ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,actions=normal") # Neighbor Advertisement ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136," "nd_target=%(IPV6_LINK_LOCAL_ADDR)s,actions=normal") ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,actions=normal") ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136," "nd_target=%(IPV6_GLOBAL_ADDR)s,actions=normal") ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,actions=normal") # drop all other neighbor discovery (req b/c we permit all icmp6 below) ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=135,actions=drop") ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=136,actions=drop") # do not allow sending specific ICMPv6 types # Router Advertisement ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=134,actions=drop") # Redirect Gateway ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=137,actions=drop") # Mobile Prefix Solicitation ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=146,actions=drop") # Mobile Prefix Advertisement ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=147,actions=drop") # Multicast Router Advertisement ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=151,actions=drop") # Multicast Router Solicitation 
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=152,actions=drop") # Multicast Router Termination ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=153,actions=drop") # allow valid IPv6 outbound, by type ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp6,actions=normal") ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp6,actions=normal") ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," "ipv6_src=%(IPV6_GLOBAL_ADDR)s,tcp6,actions=normal") ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,tcp6,actions=normal") ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," "ipv6_src=%(IPV6_GLOBAL_ADDR)s,udp6,actions=normal") ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,udp6,actions=normal") # all else will be dropped ... if __name__ == "__main__": if len(sys.argv) != 4: print ("usage: %s [online|offline] vif-domid-idx [ipv4|ipv6|all] " % os.path.basename(sys.argv[0])) sys.exit(1) else: command, vif_raw, net_type = sys.argv[1:4] main(command, vif_raw, net_type) nova-13.0.0/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py0000775000567000056710000000526612701407773033517 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ This script is used to configure base openvswitch flows for XenServer hosts. """ import os import sys import novalib # noqa def main(command, phys_dev_name): ovs_ofctl = lambda *rule: novalib.execute('/usr/bin/ovs-ofctl', *rule) bridge_name = novalib.execute_get_output('/usr/bin/ovs-vsctl', 'iface-to-br', phys_dev_name) # always clear all flows first ovs_ofctl('del-flows', bridge_name) if command in ('online', 'reset'): pnic_ofport = novalib.execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', phys_dev_name, 'ofport') # these flows are lower priority than all VM-specific flows. # allow all traffic from the physical NIC, as it is trusted (i.e., # from a filtered vif, or from the physical infrastructure) ovs_ofctl('add-flow', bridge_name, "priority=2,in_port=%s,actions=normal" % pnic_ofport) # Allow traffic from dom0 if there is a management interface # present (its IP address is on the bridge itself) bridge_addr = novalib.execute_get_output('/sbin/ip', '-o', '-f', 'inet', 'addr', 'show', bridge_name) if bridge_addr != '': ovs_ofctl('add-flow', bridge_name, "priority=2,in_port=LOCAL,actions=normal") # default drop ovs_ofctl('add-flow', bridge_name, 'priority=1,actions=drop') if __name__ == "__main__": if len(sys.argv) != 3 or sys.argv[1] not in ('online', 'offline', 'reset'): print(sys.argv) script_name = os.path.basename(sys.argv[0]) print("This script configures base ovs flows.") print("usage: %s [online|offline|reset] phys-dev-name" % script_name) print(" ex: %s online eth0" % script_name) sys.exit(1) else: command, phys_dev_name = sys.argv[1:3] main(command, phys_dev_name) nova-13.0.0/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py0000775000567000056710000001137212701407773030434 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This script is used to configure iptables, ebtables, and arptables rules on XenServer hosts. """ import os import sys # This is written to Python 2.4, since that is what is available on XenServer import simplejson as json import novalib # noqa def main(dom_id, command, only_this_vif=None): xsls = novalib.execute_get_output('/usr/bin/xenstore-ls', '/local/domain/%s/vm-data/networking' % dom_id) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: xsread = novalib.execute_get_output('/usr/bin/xenstore-read', '/local/domain/%s/vm-data/networking/%s' % (dom_id, mac)) data = json.loads(xsread) for ip in data['ips']: if data["label"] == "public": vif = "vif%s.0" % dom_id else: vif = "vif%s.1" % dom_id if (only_this_vif is None) or (vif == only_this_vif): params = dict(IP=ip['ip'], VIF=vif, MAC=data['mac']) apply_ebtables_rules(command, params) apply_arptables_rules(command, params) apply_iptables_rules(command, params) # A note about adding rules: # Whenever we add any rule to iptables, arptables or ebtables we first # delete the same rule to ensure the rule only exists once. 
def apply_iptables_rules(command, params): iptables = lambda *rule: novalib.execute('/sbin/iptables', *rule) iptables('-D', 'FORWARD', '-m', 'physdev', '--physdev-in', params['VIF'], '-s', params['IP'], '-j', 'ACCEPT') if command == 'online': iptables('-A', 'FORWARD', '-m', 'physdev', '--physdev-in', params['VIF'], '-s', params['IP'], '-j', 'ACCEPT') def apply_arptables_rules(command, params): arptables = lambda *rule: novalib.execute('/sbin/arptables', *rule) arptables('-D', 'FORWARD', '--opcode', 'Request', '--in-interface', params['VIF'], '--source-ip', params['IP'], '--source-mac', params['MAC'], '-j', 'ACCEPT') arptables('-D', 'FORWARD', '--opcode', 'Reply', '--in-interface', params['VIF'], '--source-ip', params['IP'], '--source-mac', params['MAC'], '-j', 'ACCEPT') if command == 'online': arptables('-A', 'FORWARD', '--opcode', 'Request', '--in-interface', params['VIF'], '--source-mac', params['MAC'], '-j', 'ACCEPT') arptables('-A', 'FORWARD', '--opcode', 'Reply', '--in-interface', params['VIF'], '--source-ip', params['IP'], '--source-mac', params['MAC'], '-j', 'ACCEPT') def apply_ebtables_rules(command, params): ebtables = lambda *rule: novalib.execute("/sbin/ebtables", *rule) ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'], '--arp-ip-dst', params['IP'], '-j', 'ACCEPT') ebtables('-D', 'FORWARD', '-p', '0800', '-o', params['VIF'], '--ip-dst', params['IP'], '-j', 'ACCEPT') if command == 'online': ebtables('-A', 'FORWARD', '-p', '0806', '-o', params['VIF'], '--arp-ip-dst', params['IP'], '-j', 'ACCEPT') ebtables('-A', 'FORWARD', '-p', '0800', '-o', params['VIF'], '--ip-dst', params['IP'], '-j', 'ACCEPT') ebtables('-D', 'FORWARD', '-s', '!', params['MAC'], '-i', params['VIF'], '-j', 'DROP') if command == 'online': ebtables('-I', 'FORWARD', '1', '-s', '!', params['MAC'], '-i', params['VIF'], '-j', 'DROP') if __name__ == "__main__": if len(sys.argv) < 3: print ("usage: %s dom_id online|offline [vif]" % os.path.basename(sys.argv[0])) sys.exit(1) else: 
dom_id, command = sys.argv[1:3] vif = len(sys.argv) == 4 and sys.argv[3] or None main(dom_id, command, vif) nova-13.0.0/plugins/xenserver/xenapi/0000775000567000056710000000000012701410205020655 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/xenapi/README0000664000567000056710000000051612701407773021557 0ustar jenkinsjenkins00000000000000This directory contains files that are required for the XenAPI support. They should be installed in the XenServer / Xen Cloud Platform dom0. If you install them manually, you will need to ensure that the newly added files are executable. You can do this by running the following command (from dom0): chmod a+x /etc/xapi.d/plugins/* nova-13.0.0/plugins/xenserver/xenapi/etc/0000775000567000056710000000000012701410205021430 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/0000775000567000056710000000000012701410205022613 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/0000775000567000056710000000000012701410205024274 5ustar jenkinsjenkins00000000000000nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py0000664000567000056710000001150112701407773027674 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features # # Helper functions for the Nova xapi plugins. In time, this will merge # with the pluginlib.py shipped with xapi, but for now, that file is not # very stable, so it's easiest just to have a copy of all the functions # that we need. # import gettext import logging import logging.handlers import time import XenAPI translations = gettext.translation('nova', fallback=True) _ = translations.ugettext # Logging setup def configure_logging(name): log = logging.getLogger() log.setLevel(logging.DEBUG) sysh = logging.handlers.SysLogHandler('/dev/log') sysh.setLevel(logging.DEBUG) formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name) sysh.setFormatter(formatter) log.addHandler(sysh) # Exceptions class PluginError(Exception): """Base Exception class for all plugin errors.""" def __init__(self, *args): Exception.__init__(self, *args) class ArgumentError(PluginError): """Raised when required arguments are missing, argument values are invalid, or incompatible arguments are given. """ def __init__(self, *args): PluginError.__init__(self, *args) # Argument validation def exists(args, key): """Validates that a freeform string argument to a RPC method call is given. Returns the string. 
""" if key in args: return args[key] else: raise ArgumentError(_('Argument %s is required.') % key) def optional(args, key): """If the given key is in args, return the corresponding value, otherwise return None """ return key in args and args[key] or None def _get_domain_0(session): this_host_ref = session.xenapi.session.get_this_host(session.handle) expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' expr = expr % this_host_ref return list(session.xenapi.VM.get_all_records_where(expr).keys())[0] def with_vdi_in_dom0(session, vdi, read_only, f): dom0 = _get_domain_0(session) vbd_rec = {} vbd_rec['VM'] = dom0 vbd_rec['VDI'] = vdi vbd_rec['userdevice'] = 'autodetect' vbd_rec['bootable'] = False vbd_rec['mode'] = read_only and 'RO' or 'RW' vbd_rec['type'] = 'disk' vbd_rec['unpluggable'] = True vbd_rec['empty'] = False vbd_rec['other_config'] = {} vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] logging.debug(_('Creating VBD for VDI %s ... '), vdi) vbd = session.xenapi.VBD.create(vbd_rec) logging.debug(_('Creating VBD for VDI %s done.'), vdi) try: logging.debug(_('Plugging VBD %s ... '), vbd) session.xenapi.VBD.plug(vbd) logging.debug(_('Plugging VBD %s done.'), vbd) return f(session.xenapi.VBD.get_device(vbd)) finally: logging.debug(_('Destroying VBD for VDI %s ... '), vdi) _vbd_unplug_with_retry(session, vbd) try: session.xenapi.VBD.destroy(vbd) except XenAPI.Failure, e: # noqa logging.error(_('Ignoring XenAPI.Failure %s'), e) logging.debug(_('Destroying VBD for VDI %s done.'), vdi) def _vbd_unplug_with_retry(session, vbd): """Call VBD.unplug on the given VBD, with a retry if we get DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're seeing the device still in use, even when all processes using the device should be dead. 
""" while True: try: session.xenapi.VBD.unplug(vbd) logging.debug(_('VBD.unplug successful first time.')) return except XenAPI.Failure, e: # noqa if (len(e.details) > 0 and e.details[0] == 'DEVICE_DETACH_REJECTED'): logging.debug(_('VBD.unplug rejected: retrying...')) time.sleep(1) elif (len(e.details) > 0 and e.details[0] == 'DEVICE_ALREADY_DETACHED'): logging.debug(_('VBD.unplug successful eventually.')) return else: logging.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'), e) return nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance0000775000567000056710000004406712701407773025506 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features # TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true """Handle the uploading and downloading of images via Glance.""" try: import httplib except ImportError: from six.moves import http_client as httplib import md5 # noqa import socket import urllib2 from urlparse import urlparse import pluginlib_nova import utils pluginlib_nova.configure_logging('glance') logging = pluginlib_nova.logging PluginError = pluginlib_nova.PluginError SOCKET_TIMEOUT_SECONDS = 90 class RetryableError(Exception): pass def _download_tarball_and_verify(request, staging_path): # NOTE(johngarbutt) By default, there is no timeout. # To ensure the script does not hang if we lose connection # to glance, we add this socket timeout. # This is here so there is no chance the timeout out has # been adjusted by other library calls. socket.setdefaulttimeout(SOCKET_TIMEOUT_SECONDS) try: response = urllib2.urlopen(request) except urllib2.HTTPError, error: # noqa raise RetryableError(error) except urllib2.URLError, error: # noqa raise RetryableError(error) except httplib.HTTPException, error: # noqa # httplib.HTTPException and derivatives (BadStatusLine in particular) # don't have a useful __repr__ or __str__ raise RetryableError('%s: %s' % (error.__class__.__name__, error)) url = request.get_full_url() logging.info("Reading image data from %s" % url) callback_data = {'bytes_read': 0} checksum = md5.new() def update_md5(chunk): callback_data['bytes_read'] += len(chunk) checksum.update(chunk) try: try: utils.extract_tarball(response, staging_path, callback=update_md5) except Exception, error: # noqa raise RetryableError(error) finally: bytes_read = callback_data['bytes_read'] logging.info("Read %d bytes from %s", bytes_read, url) # Use ETag if available, otherwise X-Image-Meta-Checksum etag = response.info().getheader('etag', None) if etag is 
None: etag = response.info().getheader('x-image-meta-checksum', None) # Verify checksum using ETag checksum = checksum.hexdigest() if etag is None: msg = "No ETag found for comparison to checksum %(checksum)s" logging.info(msg % {'checksum': checksum}) elif checksum != etag: msg = 'ETag %(etag)s does not match computed md5sum %(checksum)s' raise RetryableError(msg % {'checksum': checksum, 'etag': etag}) else: msg = "Verified image checksum %(checksum)s" logging.info(msg % {'checksum': checksum}) def _download_tarball(sr_path, staging_path, image_id, glance_host, glance_port, glance_use_ssl, extra_headers): """Download the tarball image from Glance and extract it into the staging area. Retry if there is any failure. """ if glance_use_ssl: scheme = 'https' else: scheme = 'http' endpoint = "%(scheme)s://%(glance_host)s:%(glance_port)d" % { 'scheme': scheme, 'glance_host': glance_host, 'glance_port': glance_port} _download_tarball_by_url(sr_path, staging_path, image_id, endpoint, extra_headers) def _download_tarball_by_url(sr_path, staging_path, image_id, glance_endpoint, extra_headers): """Download the tarball image from Glance and extract it into the staging area. Retry if there is any failure. """ url = ("%(glance_endpoint)s/v1/images/%(image_id)s" % { 'glance_endpoint': glance_endpoint, 'image_id': image_id}) logging.info("Downloading %s" % url) request = urllib2.Request(url, headers=extra_headers) try: _download_tarball_and_verify(request, staging_path) except Exception: logging.exception('Failed to retrieve %(url)s' % {'url': url}) raise def _upload_tarball(staging_path, image_id, glance_host, glance_port, glance_use_ssl, extra_headers, properties): if glance_use_ssl: scheme = 'https' else: scheme = 'http' url = '%s://%s:%s' % (scheme, glance_host, glance_port) _upload_tarball_by_url(staging_path, image_id, url, extra_headers, properties) def _upload_tarball_by_url(staging_path, image_id, glance_endpoint, extra_headers, properties): """Upload an image to Glance. 
Create a tarball of the image and then stream that into Glance using chunked-transfer-encoded HTTP. """ # NOTE(johngarbutt) By default, there is no timeout. # To ensure the script does not hang if we lose connection # to glance, we add this socket timeout. # This is here so there is no chance the timeout out has # been adjusted by other library calls. socket.setdefaulttimeout(SOCKET_TIMEOUT_SECONDS) url = '%(glance_endpoint)s/v1/images/%(image_id)s' % { 'glance_endpoint': glance_endpoint, 'image_id': image_id} logging.info("Writing image data to %s" % url) # NOTE(sdague): this is python 2.4, which means urlparse returns a # tuple, not a named tuple. # 0 - scheme # 1 - host:port (aka netloc) # 2 - path parts = urlparse(url) try: if parts[0] == 'https': conn = httplib.HTTPSConnection(parts[1]) else: conn = httplib.HTTPConnection(parts[1]) conn.connect() except Exception, error: # noqa logging.exception('Failed to connect %(url)s' % {'url': url}) raise RetryableError(error) try: validate_image_status_before_upload(conn, url, extra_headers) try: # NOTE(sirp): httplib under python2.4 won't accept # a file-like object to request conn.putrequest('PUT', parts[2]) # NOTE(sirp): There is some confusion around OVF. Here's a summary # of where we currently stand: # 1. OVF as a container format is misnamed. We really should be # using OVA since that is the name for the container format; # OVF is the standard applied to the manifest file contained # within. # 2. We're currently uploading a vanilla tarball. In order to be # OVF/OVA compliant, we'll need to embed a minimal OVF # manifest as the first file. # NOTE(dprince): In order to preserve existing Glance properties # we set X-Glance-Registry-Purge-Props on this request. 
headers = { 'content-type': 'application/octet-stream', 'transfer-encoding': 'chunked', 'x-image-meta-is-public': 'False', 'x-image-meta-status': 'queued', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-glance-registry-purge-props': 'False'} headers.update(**extra_headers) for key, value in properties.items(): header_key = "x-image-meta-property-%s" % key.replace('_', '-') headers[header_key] = str(value) for header, value in headers.items(): conn.putheader(header, value) conn.endheaders() except Exception, error: # noqa logging.exception('Failed to upload %(url)s' % {'url': url}) raise RetryableError(error) callback_data = {'bytes_written': 0} def send_chunked_transfer_encoded(chunk): chunk_len = len(chunk) callback_data['bytes_written'] += chunk_len try: conn.send("%x\r\n%s\r\n" % (chunk_len, chunk)) except Exception, error: # noqa logging.exception('Failed to upload when sending chunks') raise RetryableError(error) compression_level = properties.get('xenapi_image_compression_level') utils.create_tarball( None, staging_path, callback=send_chunked_transfer_encoded, compression_level=compression_level) send_chunked_transfer_encoded('') # Chunked-Transfer terminator bytes_written = callback_data['bytes_written'] logging.info("Wrote %d bytes to %s" % (bytes_written, url)) resp = conn.getresponse() if resp.status == httplib.OK: return logging.error("Unexpected response while writing image data to %s: " "Response Status: %i, Response body: %s" % (url, resp.status, resp.read())) check_resp_status_and_retry(resp, image_id, url) finally: conn.close() def check_resp_status_and_retry(resp, image_id, url): # Note(Jesse): This branch sorts errors into those that are permanent, # those that are ephemeral, and those that are unexpected. 
if resp.status in (httplib.BAD_REQUEST, # 400 httplib.UNAUTHORIZED, # 401 httplib.PAYMENT_REQUIRED, # 402 httplib.FORBIDDEN, # 403 httplib.NOT_FOUND, # 404 httplib.METHOD_NOT_ALLOWED, # 405 httplib.NOT_ACCEPTABLE, # 406 httplib.PROXY_AUTHENTICATION_REQUIRED, # 407 httplib.CONFLICT, # 409 httplib.GONE, # 410 httplib.LENGTH_REQUIRED, # 411 httplib.PRECONDITION_FAILED, # 412 httplib.REQUEST_ENTITY_TOO_LARGE, # 413 httplib.REQUEST_URI_TOO_LONG, # 414 httplib.UNSUPPORTED_MEDIA_TYPE, # 415 httplib.REQUESTED_RANGE_NOT_SATISFIABLE, # 416 httplib.EXPECTATION_FAILED, # 417 httplib.UNPROCESSABLE_ENTITY, # 422 httplib.LOCKED, # 423 httplib.FAILED_DEPENDENCY, # 424 httplib.UPGRADE_REQUIRED, # 426 httplib.NOT_IMPLEMENTED, # 501 httplib.HTTP_VERSION_NOT_SUPPORTED, # 505 httplib.NOT_EXTENDED, # 510 ): raise PluginError("Got Permanent Error response [%i] while " "uploading image [%s] to glance [%s]" % (resp.status, image_id, url)) # NOTE(nikhil): Only a sub-set of the 500 errors are retryable. We # optimistically retry on 500 errors below. elif resp.status in (httplib.REQUEST_TIMEOUT, # 408 httplib.INTERNAL_SERVER_ERROR, # 500 httplib.BAD_GATEWAY, # 502 httplib.SERVICE_UNAVAILABLE, # 503 httplib.GATEWAY_TIMEOUT, # 504 httplib.INSUFFICIENT_STORAGE, # 507 ): raise RetryableError("Got Ephemeral Error response [%i] while " "uploading image [%s] to glance [%s]" % (resp.status, image_id, url)) else: # Note(Jesse): Assume unexpected errors are retryable. If you are # seeing this error message, the error should probably be added # to either the ephemeral or permanent error list. raise RetryableError("Got Unexpected Error response [%i] while " "uploading image [%s] to glance [%s]" % (resp.status, image_id, url)) def validate_image_status_before_upload(conn, url, extra_headers): try: parts = urlparse(url) path = parts[2] image_id = path.split('/')[-1] # NOTE(nikhil): Attempt to determine if the Image has a status # of 'queued'. 
Because data will continued to be sent to Glance # until it has a chance to check the Image state, discover that # it is not 'active' and send back a 409. Hence, the data will be # unnecessarily buffered by Glance. This wastes time and bandwidth. # LP bug #1202785 conn.request('HEAD', path, headers=extra_headers) head_resp = conn.getresponse() # NOTE(nikhil): read the response to re-use the conn object. body_data = head_resp.read(8192) if len(body_data) > 8: err_msg = ('Cannot upload data for image %(image_id)s as the ' 'HEAD call had more than 8192 bytes of data in ' 'the response body.' % {'image_id': image_id}) raise PluginError("Got Permanent Error while uploading image " "[%s] to glance [%s]. " "Message: %s" % (image_id, url, err_msg)) else: head_resp.read() except Exception, error: # noqa logging.exception('Failed to HEAD the image %(image_id)s while ' 'checking image status before attempting to ' 'upload %(url)s' % {'image_id': image_id, 'url': url}) raise RetryableError(error) if head_resp.status != httplib.OK: logging.error("Unexpected response while doing a HEAD call " "to image %s , url = %s , Response Status: " "%i" % (image_id, url, head_resp.status)) check_resp_status_and_retry(head_resp, image_id, url) else: image_status = head_resp.getheader('x-image-meta-status') if image_status not in ('queued', ): err_msg = ('Cannot upload data for image %(image_id)s as the ' 'image status is %(image_status)s' % {'image_id': image_id, 'image_status': image_status}) logging.exception(err_msg) raise PluginError("Got Permanent Error while uploading image " "[%s] to glance [%s]. " "Message: %s" % (image_id, url, err_msg)) else: logging.info('Found image %(image_id)s in status ' '%(image_status)s. Attempting to ' 'upload.' 
% {'image_id': image_id, 'image_status': image_status}) def download_vhd2(session, image_id, endpoint, uuid_stack, sr_path, extra_headers): """Download an image from Glance, unbundle it, and then deposit the VHDs into the storage repository """ staging_path = utils.make_staging_area(sr_path) try: # Download tarball into staging area and extract it _download_tarball_by_url( sr_path, staging_path, image_id, endpoint, extra_headers) # Move the VHDs from the staging area into the storage repository return utils.import_vhds(sr_path, staging_path, uuid_stack) finally: utils.cleanup_staging_area(staging_path) def download_vhd(session, image_id, glance_host, glance_port, glance_use_ssl, uuid_stack, sr_path, extra_headers): """Download an image from Glance, unbundle it, and then deposit the VHDs into the storage repository """ staging_path = utils.make_staging_area(sr_path) try: # Download tarball into staging area and extract it _download_tarball( sr_path, staging_path, image_id, glance_host, glance_port, glance_use_ssl, extra_headers) # Move the VHDs from the staging area into the storage repository return utils.import_vhds(sr_path, staging_path, uuid_stack) finally: utils.cleanup_staging_area(staging_path) def upload_vhd2(session, vdi_uuids, image_id, endpoint, sr_path, extra_headers, properties): """Bundle the VHDs comprising an image and then stream them into Glance. """ staging_path = utils.make_staging_area(sr_path) try: utils.prepare_staging_area(sr_path, staging_path, vdi_uuids) _upload_tarball_by_url(staging_path, image_id, endpoint, extra_headers, properties) finally: utils.cleanup_staging_area(staging_path) def upload_vhd(session, vdi_uuids, image_id, glance_host, glance_port, glance_use_ssl, sr_path, extra_headers, properties): """Bundle the VHDs comprising an image and then stream them into Glance. 
""" staging_path = utils.make_staging_area(sr_path) try: utils.prepare_staging_area(sr_path, staging_path, vdi_uuids) _upload_tarball(staging_path, image_id, glance_host, glance_port, glance_use_ssl, extra_headers, properties) finally: utils.cleanup_staging_area(staging_path) if __name__ == '__main__': utils.register_plugin_calls(download_vhd, upload_vhd, download_vhd2, upload_vhd2) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/bittorrent0000775000567000056710000002731212701407773026443 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features # TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true """Download images via BitTorrent.""" import errno import inspect import os import random import shutil import tempfile import time import libtorrent import urllib2 import utils import pluginlib_nova pluginlib_nova.configure_logging('bittorrent') logging = pluginlib_nova.logging # Taken from units since we don't pull down full library Mi = 1024 ** 2 DEFAULT_TORRENT_CACHE = '/images/torrents' DEFAULT_SEED_CACHE = '/images/seeds' SEEDER_PROCESS = '_bittorrent_seeder' DEFAULT_MMA = int(libtorrent.bandwidth_mixed_algo_t.prefer_tcp) DEFAULT_MORQ = 400 DEFAULT_MQDB = 8 * Mi DEFAULT_MQDBLW = 0 def _make_torrent_cache(): torrent_cache_path = os.environ.get( 'TORRENT_CACHE', DEFAULT_TORRENT_CACHE) if not os.path.exists(torrent_cache_path): os.mkdir(torrent_cache_path) return torrent_cache_path def _fetch_torrent_file(torrent_cache_path, image_id, torrent_url): torrent_path = os.path.join( torrent_cache_path, image_id + '.torrent') if not os.path.exists(torrent_path): logging.info("Downloading %s" % torrent_url) # Write contents to temporary path to ensure we don't have partially # completed files in the cache. 
temp_directory = tempfile.mkdtemp(dir=torrent_cache_path) try: temp_path = os.path.join( temp_directory, os.path.basename(torrent_path)) temp_file = open(temp_path, 'wb') try: remote_torrent_file = urllib2.urlopen(torrent_url) shutil.copyfileobj(remote_torrent_file, temp_file) finally: temp_file.close() os.rename(temp_path, torrent_path) finally: shutil.rmtree(temp_directory) return torrent_path def _reap_old_torrent_files(torrent_cache_path, torrent_max_last_accessed): """Delete any torrent files that haven't been accessed recently.""" if not torrent_max_last_accessed: logging.debug("Reaping old torrent files disabled, skipping...") return logging.debug("Preparing to reap old torrent files," " torrent_max_last_accessed=%d" % torrent_max_last_accessed) for fname in os.listdir(torrent_cache_path): torrent_path = os.path.join(torrent_cache_path, fname) last_accessed = time.time() - os.path.getatime(torrent_path) if last_accessed > torrent_max_last_accessed: logging.debug("Reaping '%s', last_accessed=%d" % ( torrent_path, last_accessed)) utils.delete_if_exists(torrent_path) def _download(torrent_path, save_as_path, torrent_listen_port_start, torrent_listen_port_end, torrent_download_stall_cutoff): session = libtorrent.session() session.listen_on(torrent_listen_port_start, torrent_listen_port_end) mixed_mode_algorithm = os.environ.get( 'DEFAULT_MIXED_MODE_ALGORITHM', DEFAULT_MMA) max_out_request_queue = os.environ.get( 'DEFAULT_MAX_OUT_REQUEST_QUEUE', DEFAULT_MORQ) max_queued_disk_bytes = os.environ.get( 'DEFAULT_MAX_QUEUED_DISK_BYTES', DEFAULT_MQDB) max_queued_disk_bytes_low_watermark = os.environ.get( 'DEFAULT_MAX_QUEUED_DISK_BYTES_LOW_WATERMARK', DEFAULT_MQDBLW) session_opts = {'mixed_mode_algorithm': mixed_mode_algorithm, 'max_queued_disk_bytes': max_queued_disk_bytes, 'max_out_request_queue': max_out_request_queue, 'max_queued_disk_bytes_low_watermark': max_queued_disk_bytes_low_watermark} session.set_settings(session_opts) info = libtorrent.torrent_info( 
libtorrent.bdecode(open(torrent_path, 'rb').read())) torrent = session.add_torrent( info, save_as_path, storage_mode=libtorrent.storage_mode_t.storage_mode_sparse) try: last_progress = 0 last_progress_updated = time.time() log_time = 0 while not torrent.is_seed(): s = torrent.status() progress = s.progress * 100 if progress != last_progress: last_progress = progress last_progress_updated = time.time() stall_duration = time.time() - last_progress_updated if stall_duration > torrent_download_stall_cutoff: logging.error( "Download stalled: stall_duration=%d," " torrent_download_stall_cutoff=%d" % ( stall_duration, torrent_download_stall_cutoff)) raise Exception("Bittorrent download stall detected, bailing!") log_time += 1 if log_time % 10 == 0: logging.debug( '%.2f%% complete (down: %.1f kb/s up: %.1f kB/s peers: %d)' ' %s %s' % (progress, s.download_rate / 1000, s.upload_rate / 1000, s.num_peers, s.state, torrent_path)) time.sleep(1) finally: session.remove_torrent(torrent) logging.debug("Download of '%s' finished" % torrent_path) def _should_seed(seed_path, torrent_seed_duration, torrent_seed_chance, torrent_max_seeder_processes_per_host): if not torrent_seed_duration: logging.debug("Seeding disabled, skipping...") return False if os.path.exists(seed_path): logging.debug("Seed is already present, skipping....") return False rand = random.random() if rand > torrent_seed_chance: logging.debug("%.2f > %.2f, seeding randomly skipping..." % ( rand, torrent_seed_chance)) return False num_active_seeders = len(list(_active_seeder_processes())) if (torrent_max_seeder_processes_per_host >= 0 and num_active_seeders >= torrent_max_seeder_processes_per_host): logging.debug("max number of seeder processes for this host reached" " (%d), skipping..." 
% torrent_max_seeder_processes_per_host) return False return True def _seed(torrent_path, seed_cache_path, torrent_seed_duration, torrent_listen_port_start, torrent_listen_port_end): plugin_path = os.path.dirname(inspect.getabsfile(inspect.currentframe())) seeder_path = os.path.join(plugin_path, SEEDER_PROCESS) seed_cmd = map(str, [seeder_path, torrent_path, seed_cache_path, torrent_seed_duration, torrent_listen_port_start, torrent_listen_port_end]) utils.run_command(seed_cmd) def _seed_if_needed(seed_cache_path, tarball_path, torrent_path, torrent_seed_duration, torrent_seed_chance, torrent_listen_port_start, torrent_listen_port_end, torrent_max_seeder_processes_per_host): seed_filename = os.path.basename(tarball_path) seed_path = os.path.join(seed_cache_path, seed_filename) if _should_seed(seed_path, torrent_seed_duration, torrent_seed_chance, torrent_max_seeder_processes_per_host): logging.debug("Preparing to seed '%s' for %d secs" % ( seed_path, torrent_seed_duration)) utils._rename(tarball_path, seed_path) # Daemonize and seed the image _seed(torrent_path, seed_cache_path, torrent_seed_duration, torrent_listen_port_start, torrent_listen_port_end) else: utils.delete_if_exists(tarball_path) def _extract_tarball(tarball_path, staging_path): """Extract the tarball into the staging directory.""" tarball_fileobj = open(tarball_path, 'rb') try: utils.extract_tarball(tarball_fileobj, staging_path) finally: tarball_fileobj.close() def _active_seeder_processes(): """Yields command-line of active seeder processes. 
Roughly equivalent to performing ps | grep _bittorrent_seeder """ pids = [pid for pid in os.listdir('/proc') if pid.isdigit()] for pid in pids: try: cmdline = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read() except IOError, e: # noqa if e.errno != errno.ENOENT: raise if SEEDER_PROCESS in cmdline: yield cmdline def _reap_finished_seeds(seed_cache_path): """Delete any cached seeds where the seeder process has died.""" logging.debug("Preparing to reap finished seeds") missing = {} for fname in os.listdir(seed_cache_path): seed_path = os.path.join(seed_cache_path, fname) missing[seed_path] = None for cmdline in _active_seeder_processes(): for seed_path in missing.keys(): seed_filename = os.path.basename(seed_path) if seed_filename in cmdline: del missing[seed_path] for seed_path in missing: logging.debug("Reaping cached seed '%s'" % seed_path) utils.delete_if_exists(seed_path) def _make_seed_cache(): seed_cache_path = os.environ.get('SEED_CACHE', DEFAULT_SEED_CACHE) if not os.path.exists(seed_cache_path): os.mkdir(seed_cache_path) return seed_cache_path def download_vhd(session, image_id, torrent_url, torrent_seed_duration, torrent_seed_chance, torrent_max_last_accessed, torrent_listen_port_start, torrent_listen_port_end, torrent_download_stall_cutoff, uuid_stack, sr_path, torrent_max_seeder_processes_per_host): """Download an image from BitTorrent, unbundle it, and then deposit the VHDs into the storage repository """ seed_cache_path = _make_seed_cache() torrent_cache_path = _make_torrent_cache() # Housekeeping _reap_finished_seeds(seed_cache_path) _reap_old_torrent_files(torrent_cache_path, torrent_max_last_accessed) torrent_path = _fetch_torrent_file( torrent_cache_path, image_id, torrent_url) staging_path = utils.make_staging_area(sr_path) try: tarball_filename = os.path.basename(torrent_path).replace( '.torrent', '') tarball_path = os.path.join(staging_path, tarball_filename) # Download tarball into staging area _download(torrent_path, staging_path, 
torrent_listen_port_start, torrent_listen_port_end, torrent_download_stall_cutoff) # Extract the tarball into the staging area _extract_tarball(tarball_path, staging_path) # Move the VHDs from the staging area into the storage repository vdi_list = utils.import_vhds(sr_path, staging_path, uuid_stack) # Seed image for others in the swarm _seed_if_needed(seed_cache_path, tarball_path, torrent_path, torrent_seed_duration, torrent_seed_chance, torrent_listen_port_start, torrent_listen_port_end, torrent_max_seeder_processes_per_host) finally: utils.cleanup_staging_area(staging_path) return vdi_list if __name__ == '__main__': utils.register_plugin_calls(download_vhd) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/ipxe0000775000567000056710000001020112701407773025201 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features # TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true """Inject network configuration into iPXE ISO for boot.""" import logging import os import shutil import utils # FIXME(sirp): should this use pluginlib from 5.6? 
import pluginlib_nova pluginlib_nova.configure_logging('ipxe') ISOLINUX_CFG = """SAY iPXE ISO boot image TIMEOUT 30 DEFAULT ipxe.krn LABEL ipxe.krn KERNEL ipxe.krn INITRD netcfg.ipxe """ NETCFG_IPXE = """#!ipxe :start imgfree ifclose net0 set net0/ip %(ip_address)s set net0/netmask %(netmask)s set net0/gateway %(gateway)s set dns %(dns)s ifopen net0 goto menu :menu chain %(boot_menu_url)s goto boot :boot sanboot --no-describe --drive 0x80 """ def _write_file(filename, data): # If the ISO was tampered with such that the destination is a symlink, # that could allow a malicious user to write to protected areas of the # dom0 filesystem. /HT to comstud for pointing this out. # # Short-term, checking that the destination is not a symlink should be # sufficient. # # Long-term, we probably want to perform all file manipulations within a # chroot jail to be extra safe. if os.path.islink(filename): raise RuntimeError('SECURITY: Cannot write to symlinked destination') logging.debug("Writing to file '%s'" % filename) f = open(filename, 'w') try: f.write(data) finally: f.close() def _unbundle_iso(sr_path, filename, path): logging.debug("Unbundling ISO '%s'" % filename) read_only_path = utils.make_staging_area(sr_path) try: utils.run_command(['mount', '-o', 'loop', filename, read_only_path]) try: shutil.copytree(read_only_path, path) finally: utils.run_command(['umount', read_only_path]) finally: utils.cleanup_staging_area(read_only_path) def _create_iso(mkisofs_cmd, filename, path): logging.debug("Creating ISO '%s'..." 
% filename) orig_dir = os.getcwd() os.chdir(path) try: utils.run_command([mkisofs_cmd, '-quiet', '-l', '-o', filename, '-c', 'boot.cat', '-b', 'isolinux.bin', '-no-emul-boot', '-boot-load-size', '4', '-boot-info-table', '.']) finally: os.chdir(orig_dir) def inject(session, sr_path, vdi_uuid, boot_menu_url, ip_address, netmask, gateway, dns, mkisofs_cmd): iso_filename = '%s.img' % os.path.join(sr_path, 'iso', vdi_uuid) # Create staging area so we have a unique path but remove it since # shutil.copytree will recreate it staging_path = utils.make_staging_area(sr_path) utils.cleanup_staging_area(staging_path) try: _unbundle_iso(sr_path, iso_filename, staging_path) # Write Configs _write_file(os.path.join(staging_path, 'netcfg.ipxe'), NETCFG_IPXE % {"ip_address": ip_address, "netmask": netmask, "gateway": gateway, "dns": dns, "boot_menu_url": boot_menu_url}) _write_file(os.path.join(staging_path, 'isolinux.cfg'), ISOLINUX_CFG) _create_iso(mkisofs_cmd, iso_filename, staging_path) finally: utils.cleanup_staging_area(staging_path) if __name__ == "__main__": utils.register_plugin_calls(inject) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/_bittorrent_seeder0000775000567000056710000000762312701407773030134 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features # TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true """Seed a bittorent image. This file should not be executed directly, rather it should be kicked off by the `bittorent` dom0 plugin.""" import os import sys import time import libtorrent import pluginlib_nova pluginlib_nova.configure_logging('_bittorrent_seeder') logging = pluginlib_nova.logging def _daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'): """Daemonize the current process. Do the UNIX double-fork magic, see Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177). Source: http://www.jejik.com/articles/2007/02/ a_simple_unix_linux_daemon_in_python/ """ # 1st fork try: pid = os.fork() if pid > 0: # first parent returns return False except OSError, e: # noqa logging.error("fork #1 failed: %d (%s)" % ( e.errno, e.strerror)) return # decouple from parent environment os.chdir("/") os.setsid() os.umask(0) # 2nd fork try: pid = os.fork() if pid > 0: # second parent exits sys.exit(0) except OSError, e: # noqa logging.error("fork #2 failed: %d (%s)" % ( e.errno, e.strerror)) return # redirect standard file descriptors sys.stdout.flush() sys.stderr.flush() si = open(stdin, 'r') so = open(stdout, 'a+') se = open(stderr, 'a+', 0) os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(se.fileno(), sys.stderr.fileno()) return True def main(torrent_path, seed_cache_path, torrent_seed_duration, torrent_listen_port_start, torrent_listen_port_end): seed_time = time.time() + torrent_seed_duration logging.debug("Seeding '%s' for %d secs" % ( torrent_path, torrent_seed_duration)) child = _daemonize() if not child: return # At this point we're the daemonized child... 
session = libtorrent.session() session.listen_on(torrent_listen_port_start, torrent_listen_port_end) torrent_file = open(torrent_path, 'rb') try: torrent_data = torrent_file.read() finally: torrent_file.close() decoded_data = libtorrent.bdecode(torrent_data) info = libtorrent.torrent_info(decoded_data) torrent = session.add_torrent( info, seed_cache_path, storage_mode=libtorrent.storage_mode_t.storage_mode_sparse) try: while time.time() < seed_time: time.sleep(5) finally: session.remove_torrent(torrent) logging.debug("Seeding of '%s' finished" % torrent_path) if __name__ == "__main__": (torrent_path, seed_cache_path, torrent_seed_duration, torrent_listen_port_start, torrent_listen_port_end) = sys.argv[1:] torrent_seed_duration = int(torrent_seed_duration) torrent_listen_port_start = int(torrent_listen_port_start) torrent_listen_port_end = int(torrent_listen_port_end) main(torrent_path, seed_cache_path, torrent_seed_duration, torrent_listen_port_start, torrent_listen_port_end) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/bandwidth0000775000567000056710000000325312701407773026211 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features """Fetch Bandwidth data from VIF network devices.""" import utils import pluginlib_nova pluginlib_nova.configure_logging('bandwidth') def _read_proc_net(): devs = [l.strip() for l in open('/proc/net/dev', 'r').readlines()] # Ignore headers devs = devs[2:] dlist = [d.split(':', 1) for d in devs if d.startswith('vif')] devmap = dict() for name, stats in dlist: slist = stats.split() dom, vifnum = name[3:].split('.', 1) dev = devmap.get(dom, {}) # Note, we deliberately swap in and out, as instance traffic # shows up inverted due to going though the bridge. (mdragon) dev[vifnum] = dict(bw_in=int(slist[8]), bw_out=int(slist[0])) devmap[dom] = dev return devmap def fetch_all_bandwidth(session): return _read_proc_net() if __name__ == '__main__': utils.register_plugin_calls(fetch_all_bandwidth) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/nova_plugin_version0000775000567000056710000000243012701407773030327 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2013 OpenStack Foundation # Copyright (c) 2013 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features """Returns the version of the nova plugins""" import utils # MAJOR VERSION: Incompatible changes # MINOR VERSION: Compatible changes, new plugins, etc # 1.0 - Initial version. # 1.1 - New call to check GC status # 1.2 - Added support for pci passthrough devices # 1.3 - Add vhd2 functions for doing glance operations by url PLUGIN_VERSION = "1.3" def get_version(session): return PLUGIN_VERSION if __name__ == '__main__': utils.register_plugin_calls(get_version) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/workarounds0000775000567000056710000000315112701407773026620 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features """Handle the uploading and downloading of images via Glance.""" import os import shutil import utils import pluginlib_nova pluginlib_nova.configure_logging('workarounds') def _copy_vdis(sr_path, staging_path, vdi_uuids): seq_num = 0 for vdi_uuid in vdi_uuids: src = os.path.join(sr_path, "%s.vhd" % vdi_uuid) dst = os.path.join(staging_path, "%d.vhd" % seq_num) shutil.copyfile(src, dst) seq_num += 1 def safe_copy_vdis(session, sr_path, vdi_uuids, uuid_stack): staging_path = utils.make_staging_area(sr_path) try: _copy_vdis(sr_path, staging_path, vdi_uuids) return utils.import_vhds(sr_path, staging_path, uuid_stack) finally: utils.cleanup_staging_area(staging_path) if __name__ == '__main__': utils.register_plugin_calls(safe_copy_vdis) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/utils.py0000664000567000056710000004060612701407773026034 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features """Various utilities used by XenServer plugins.""" try: import cPickle as pickle except ImportError: import pickle import errno import logging import os import shutil import signal import subprocess import tempfile import XenAPIPlugin LOG = logging.getLogger(__name__) CHUNK_SIZE = 8192 class CommandNotFound(Exception): pass def delete_if_exists(path): try: os.unlink(path) except OSError, e: # noqa if e.errno == errno.ENOENT: LOG.warning("'%s' was already deleted, skipping delete" % path) else: raise def _link(src, dst): LOG.info("Hard-linking file '%s' -> '%s'" % (src, dst)) os.link(src, dst) def _rename(src, dst): LOG.info("Renaming file '%s' -> '%s'" % (src, dst)) try: os.rename(src, dst) except OSError, e: # noqa if e.errno == errno.EXDEV: LOG.error("Invalid cross-device link. Perhaps %s and %s should " "be symlinked on the same filesystem?" % (src, dst)) raise def make_subprocess(cmdline, stdout=False, stderr=False, stdin=False, universal_newlines=False, close_fds=True, env=None): """Make a subprocess according to the given command-line string """ LOG.info("Running cmd '%s'" % " ".join(cmdline)) kwargs = {} kwargs['stdout'] = stdout and subprocess.PIPE or None kwargs['stderr'] = stderr and subprocess.PIPE or None kwargs['stdin'] = stdin and subprocess.PIPE or None kwargs['universal_newlines'] = universal_newlines kwargs['close_fds'] = close_fds kwargs['env'] = env try: proc = subprocess.Popen(cmdline, **kwargs) except OSError, e: # noqa if e.errno == errno.ENOENT: raise CommandNotFound else: raise return proc class SubprocessException(Exception): def __init__(self, cmdline, ret, out, err): Exception.__init__(self, "'%s' returned non-zero exit code: " "retcode=%i, out='%s', stderr='%s'" % (cmdline, ret, out, err)) self.cmdline = cmdline self.ret = ret self.out = out self.err = err def finish_subprocess(proc, cmdline, 
cmd_input=None, ok_exit_codes=None): """Ensure that the process returned a zero exit code indicating success """ if ok_exit_codes is None: ok_exit_codes = [0] out, err = proc.communicate(cmd_input) ret = proc.returncode if ret not in ok_exit_codes: LOG.error("Command '%(cmdline)s' with process id '%(pid)s' expected " "return code in '%(ok)s' but got '%(rc)s': %(err)s" % {'cmdline': cmdline, 'pid': proc.pid, 'ok': ok_exit_codes, 'rc': ret, 'err': err}) raise SubprocessException(' '.join(cmdline), ret, out, err) return out def run_command(cmd, cmd_input=None, ok_exit_codes=None): """Abstracts out the basics of issuing system commands. If the command returns anything in stderr, an exception is raised with that information. Otherwise, the output from stdout is returned. cmd_input is passed to the process on standard input. """ proc = make_subprocess(cmd, stdout=True, stderr=True, stdin=True, close_fds=True) return finish_subprocess(proc, cmd, cmd_input=cmd_input, ok_exit_codes=ok_exit_codes) def try_kill_process(proc): """Sends the given process the SIGKILL signal.""" pid = proc.pid LOG.info("Killing process %s" % pid) try: os.kill(pid, signal.SIGKILL) except Exception: LOG.exception("Failed to kill %s" % pid) def make_staging_area(sr_path): """The staging area is a place where we can temporarily store and manipulate VHDs. The use of the staging area is different for upload and download: Download ======== When we download the tarball, the VHDs contained within will have names like "snap.vhd" and "image.vhd". We need to assign UUIDs to them before moving them into the SR. However, since 'image.vhd' may be a base_copy, we need to link it to 'snap.vhd' (using vhd-util modify) before moving both into the SR (otherwise the SR.scan will cause 'image.vhd' to be deleted). The staging area gives us a place to perform these operations before they are moved to the SR, scanned, and then registered with XenServer. 
Upload ====== On upload, we want to rename the VHDs to reflect what they are, 'snap.vhd' in the case of the snapshot VHD, and 'image.vhd' in the case of the base_copy. The staging area provides a directory in which we can create hard-links to rename the VHDs without affecting what's in the SR. NOTE ==== The staging area is created as a subdirectory within the SR in order to guarantee that it resides within the same filesystem and therefore permit hard-linking and cheap file moves. """ staging_path = tempfile.mkdtemp(dir=sr_path) return staging_path def cleanup_staging_area(staging_path): """Remove staging area directory On upload, the staging area contains hard-links to the VHDs in the SR; it's safe to remove the staging-area because the SR will keep the link count > 0 (so the VHDs in the SR will not be deleted). """ if os.path.exists(staging_path): shutil.rmtree(staging_path) def _handle_old_style_images(staging_path): """Rename files to conform to new image format, if needed. Old-Style: snap.vhd -> image.vhd -> base.vhd New-Style: 0.vhd -> 1.vhd -> ... (n-1).vhd The New-Style format has the benefit of being able to support a VDI chain of arbitrary length. """ file_num = 0 for filename in ('snap.vhd', 'image.vhd', 'base.vhd'): path = os.path.join(staging_path, filename) if os.path.exists(path): _rename(path, os.path.join(staging_path, "%d.vhd" % file_num)) file_num += 1 # Rename any format of name to 0.vhd when there is only single one contents = os.listdir(staging_path) if len(contents) == 1: filename = contents[0] if filename != '0.vhd' and filename.endswith('.vhd'): _rename( os.path.join(staging_path, filename), os.path.join(staging_path, '0.vhd')) def _assert_vhd_not_hidden(path): """Sanity check to ensure that only appropriate VHDs are marked as hidden. If this flag is incorrectly set, then when we move the VHD into the SR, it will be deleted out from under us. 
""" query_cmd = ["vhd-util", "query", "-n", path, "-f"] out = run_command(query_cmd) for line in out.splitlines(): if line.lower().startswith('hidden'): value = line.split(':')[1].strip() if value == "1": raise Exception( "VHD %s is marked as hidden without child" % path) def _vhd_util_check(vdi_path): check_cmd = ["vhd-util", "check", "-n", vdi_path, "-p"] out = run_command(check_cmd, ok_exit_codes=[0, 22]) first_line = out.splitlines()[0].strip() return out, first_line def _validate_vhd(vdi_path): """This checks for several errors in the VHD structure. Most notably, it checks that the timestamp in the footer is correct, but may pick up other errors also. This check ensures that the timestamps listed in the VHD footer aren't in the future. This can occur during a migration if the clocks on the two Dom0's are out-of-sync. This would corrupt the SR if it were imported, so generate an exception to bail. """ out, first_line = _vhd_util_check(vdi_path) if 'invalid' in first_line: LOG.warning("VHD invalid, attempting repair.") repair_cmd = ["vhd-util", "repair", "-n", vdi_path] run_command(repair_cmd) out, first_line = _vhd_util_check(vdi_path) if 'invalid' in first_line: if 'footer' in first_line: part = 'footer' elif 'header' in first_line: part = 'header' else: part = 'setting' details = first_line.split(':', 1) if len(details) == 2: details = details[1] else: details = first_line extra = '' if 'timestamp' in first_line: extra = (" ensure source and destination host machines have " "time set correctly") LOG.info("VDI Error details: %s" % out) raise Exception( "VDI '%(vdi_path)s' has an invalid %(part)s: '%(details)s'" "%(extra)s" % {'vdi_path': vdi_path, 'part': part, 'details': details, 'extra': extra}) LOG.info("VDI is valid: %s" % vdi_path) def _validate_vdi_chain(vdi_path): """This check ensures that the parent pointers on the VHDs are valid before we move the VDI chain to the SR. 
This is *very* important because a bad parent pointer will corrupt the SR causing a cascade of failures. """ def get_parent_path(path): query_cmd = ["vhd-util", "query", "-n", path, "-p"] out = run_command(query_cmd, ok_exit_codes=[0, 22]) first_line = out.splitlines()[0].strip() if first_line.endswith(".vhd"): return first_line elif 'has no parent' in first_line: return None elif 'query failed' in first_line: raise Exception("VDI '%s' not present which breaks" " the VDI chain, bailing out" % path) else: raise Exception("Unexpected output '%s' from vhd-util" % out) cur_path = vdi_path while cur_path: _validate_vhd(cur_path) cur_path = get_parent_path(cur_path) def _validate_sequenced_vhds(staging_path): """This check ensures that the VHDs in the staging area are sequenced properly from 0 to n-1 with no gaps. """ seq_num = 0 filenames = os.listdir(staging_path) for filename in filenames: if not filename.endswith('.vhd'): continue # Ignore legacy swap embedded in the image, generated on-the-fly now if filename == "swap.vhd": continue vhd_path = os.path.join(staging_path, "%d.vhd" % seq_num) if not os.path.exists(vhd_path): raise Exception("Corrupt image. Expected seq number: %d. Files: %s" % (seq_num, filenames)) seq_num += 1 def import_vhds(sr_path, staging_path, uuid_stack): """Move VHDs from staging area into the SR. The staging area is necessary because we need to perform some fixups (assigning UUIDs, relinking the VHD chain) before moving into the SR, otherwise the SR manager process could potentially delete the VHDs out from under us. Returns: A dict of imported VHDs: {'root': {'uuid': 'ffff-aaaa'}} """ _handle_old_style_images(staging_path) _validate_sequenced_vhds(staging_path) files_to_move = [] # Collect sequenced VHDs and assign UUIDs to them seq_num = 0 while True: orig_vhd_path = os.path.join(staging_path, "%d.vhd" % seq_num) if not os.path.exists(orig_vhd_path): break # Rename (0, 1 .. 
N).vhd -> aaaa-bbbb-cccc-dddd.vhd vhd_uuid = uuid_stack.pop() vhd_path = os.path.join(staging_path, "%s.vhd" % vhd_uuid) _rename(orig_vhd_path, vhd_path) if seq_num == 0: leaf_vhd_path = vhd_path leaf_vhd_uuid = vhd_uuid files_to_move.append(vhd_path) seq_num += 1 # Re-link VHDs, in reverse order, from base-copy -> leaf parent_path = None for vhd_path in reversed(files_to_move): if parent_path: # Link to parent modify_cmd = ["vhd-util", "modify", "-n", vhd_path, "-p", parent_path] run_command(modify_cmd) parent_path = vhd_path # Sanity check the leaf VHD _assert_vhd_not_hidden(leaf_vhd_path) _validate_vdi_chain(leaf_vhd_path) # Move files into SR for orig_path in files_to_move: new_path = os.path.join(sr_path, os.path.basename(orig_path)) _rename(orig_path, new_path) imported_vhds = dict(root=dict(uuid=leaf_vhd_uuid)) return imported_vhds def prepare_staging_area(sr_path, staging_path, vdi_uuids, seq_num=0): """Hard-link VHDs into staging area.""" for vdi_uuid in vdi_uuids: source = os.path.join(sr_path, "%s.vhd" % vdi_uuid) link_name = os.path.join(staging_path, "%d.vhd" % seq_num) _link(source, link_name) seq_num += 1 def create_tarball(fileobj, path, callback=None, compression_level=None): """Create a tarball from a given path. :param fileobj: a file-like object holding the tarball byte-stream. If None, then only the callback will be used. :param path: path to create tarball from :param callback: optional callback to call on each chunk written :param compression_level: compression level, e.g., 9 for gzip -9. 
""" tar_cmd = ["tar", "-zc", "--directory=%s" % path, "."] env = os.environ.copy() if compression_level and 1 <= compression_level <= 9: env["GZIP"] = "-%d" % compression_level tar_proc = make_subprocess(tar_cmd, stdout=True, stderr=True, env=env) try: while True: chunk = tar_proc.stdout.read(CHUNK_SIZE) if chunk == '': break if callback: callback(chunk) if fileobj: fileobj.write(chunk) except Exception: try_kill_process(tar_proc) raise finish_subprocess(tar_proc, tar_cmd) def extract_tarball(fileobj, path, callback=None): """Extract a tarball to a given path. :param fileobj: a file-like object holding the tarball byte-stream :param path: path to extract tarball into :param callback: optional callback to call on each chunk read """ tar_cmd = ["tar", "-zx", "--directory=%s" % path] tar_proc = make_subprocess(tar_cmd, stderr=True, stdin=True) try: while True: chunk = fileobj.read(CHUNK_SIZE) if chunk == '': break if callback: callback(chunk) tar_proc.stdin.write(chunk) # NOTE(tpownall): If we do not poll for the tar process exit # code when tar has exited pre maturely there is the chance # that tar will become a defunct zombie child under glance plugin # and re parented under init forever waiting on the stdin pipe to # close. Polling for the exit code allows us to break the pipe. 
returncode = tar_proc.poll() tar_pid = tar_proc.pid if returncode is not None: LOG.error("tar extract with process id '%(pid)s' " "exited early with '%(rc)s'" % {'pid': tar_pid, 'rc': returncode}) raise SubprocessException( ' '.join(tar_cmd), returncode, "", "") except SubprocessException: # no need to kill already dead process raise except Exception: LOG.exception("Failed while sending data to tar pid: %s" % tar_pid) try_kill_process(tar_proc) raise finish_subprocess(tar_proc, tar_cmd) def _handle_serialization(func): def wrapped(session, params): params = pickle.loads(params['params']) rv = func(session, *params['args'], **params['kwargs']) return pickle.dumps(rv) return wrapped def register_plugin_calls(*funcs): """Wrapper around XenAPIPlugin.dispatch which handles pickle serialization. """ wrapped_dict = {} for func in funcs: wrapped_dict[func.__name__] = _handle_serialization(func) XenAPIPlugin.dispatch(wrapped_dict) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration0000775000567000056710000000564412701407773026244 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features """ XenAPI Plugin for transferring data between host nodes """ import utils import pluginlib_nova pluginlib_nova.configure_logging('migration') logging = pluginlib_nova.logging def move_vhds_into_sr(session, instance_uuid, sr_path, uuid_stack): """Moves the VHDs from their copied location to the SR.""" staging_path = "/images/instance%s" % instance_uuid imported_vhds = utils.import_vhds(sr_path, staging_path, uuid_stack) utils.cleanup_staging_area(staging_path) return imported_vhds def _rsync_vhds(instance_uuid, host, staging_path, user="root"): if not staging_path.endswith('/'): staging_path += '/' dest_path = '/images/instance%s/' % (instance_uuid) ip_cmd = ["/sbin/ip", "addr", "show"] output = utils.run_command(ip_cmd) if ' %s/' % host in output: # If copying to localhost, don't use SSH rsync_cmd = ["/usr/bin/rsync", "-av", "--progress", staging_path, dest_path] else: ssh_cmd = 'ssh -o StrictHostKeyChecking=no' rsync_cmd = ["/usr/bin/rsync", "-av", "--progress", "-e", ssh_cmd, staging_path, '%s@%s:%s' % (user, host, dest_path)] # NOTE(hillad): rsync's progress is carriage returned, requiring # universal_newlines for real-time output. 
rsync_proc = utils.make_subprocess(rsync_cmd, stdout=True, stderr=True, universal_newlines=True) while True: rsync_progress = rsync_proc.stdout.readline() if not rsync_progress: break logging.debug("[%s] %s" % (instance_uuid, rsync_progress)) utils.finish_subprocess(rsync_proc, rsync_cmd) def transfer_vhd(session, instance_uuid, host, vdi_uuid, sr_path, seq_num): """Rsyncs a VHD to an adjacent host.""" staging_path = utils.make_staging_area(sr_path) try: utils.prepare_staging_area( sr_path, staging_path, [vdi_uuid], seq_num=seq_num) _rsync_vhds(instance_uuid, host, staging_path) finally: utils.cleanup_staging_area(staging_path) if __name__ == '__main__': utils.register_plugin_calls(move_vhds_into_sr, transfer_vhd) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/console0000775000567000056710000000521312701407773025705 0ustar jenkinsjenkins00000000000000#!/usr/bin/python # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features # TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true """ To configure this plugin, you must set the following xenstore key: /local/logconsole/@ = "/var/log/xen/guest/console.%d" This can be done by running: xenstore-write /local/logconsole/@ "/var/log/xen/guest/console.%d" WARNING: You should ensure appropriate log rotation to ensure guests are not able to consume too much Dom0 disk space, and equally should not be able to stop other guests from logging. Adding and removing the following xenstore key will reopen the log, as will be required after a log rotate: /local/logconsole/ """ import base64 import logging import zlib import XenAPIPlugin import pluginlib_nova pluginlib_nova.configure_logging("console") CONSOLE_LOG_DIR = '/var/log/xen/guest' CONSOLE_LOG_FILE_PATTERN = CONSOLE_LOG_DIR + '/console.%d' MAX_CONSOLE_BYTES = 102400 SEEK_SET = 0 SEEK_END = 2 def _last_bytes(file_like_object): try: file_like_object.seek(-MAX_CONSOLE_BYTES, SEEK_END) except IOError, e: # noqa if e.errno == 22: file_like_object.seek(0, SEEK_SET) else: raise return file_like_object.read() def get_console_log(session, arg_dict): try: raw_dom_id = arg_dict['dom_id'] except KeyError: raise pluginlib_nova.PluginError("Missing dom_id") try: dom_id = int(raw_dom_id) except ValueError: raise pluginlib_nova.PluginError("Invalid dom_id") logfile = open(CONSOLE_LOG_FILE_PATTERN % dom_id, 'rb') try: try: log_content = _last_bytes(logfile) except IOError, e: # noqa msg = "Error reading console: %s" % e logging.debug(msg) raise pluginlib_nova.PluginError(msg) finally: logfile.close() return base64.b64encode(zlib.compress(log_content)) if __name__ == "__main__": XenAPIPlugin.dispatch({"get_console_log": get_console_log}) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/kernel0000775000567000056710000001101312701407773025516 0ustar 
jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features # TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true """Handle the manipulation of kernel images.""" import errno import os import shutil import XenAPIPlugin import pluginlib_nova pluginlib_nova.configure_logging('kernel') logging = pluginlib_nova.logging exists = pluginlib_nova.exists optional = pluginlib_nova.optional with_vdi_in_dom0 = pluginlib_nova.with_vdi_in_dom0 KERNEL_DIR = '/boot/guest' def _copy_vdi(dest, copy_args): vdi_uuid = copy_args['vdi_uuid'] vdi_size = copy_args['vdi_size'] cached_image = copy_args['cached-image'] logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s", dest, vdi_uuid) filename = KERNEL_DIR + '/' + vdi_uuid # Make sure KERNEL_DIR exists, otherwise create it if not os.path.isdir(KERNEL_DIR): logging.debug("Creating directory %s", KERNEL_DIR) os.makedirs(KERNEL_DIR) # Read data from /dev/ and write into a file on /boot/guest of = open(filename, 'wb') f = open(dest, 'rb') # Copy only vdi_size bytes data = f.read(vdi_size) of.write(data) 
if cached_image: # Create a cache file. If caching is enabled, kernel images do not have # to be fetched from glance. cached_image = KERNEL_DIR + '/' + cached_image logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s", dest, cached_image) cache_file = open(cached_image, 'wb') cache_file.write(data) cache_file.close() logging.debug("Done. Filename: %s", cached_image) f.close() of.close() logging.debug("Done. Filename: %s", filename) return filename def copy_vdi(session, args): vdi = exists(args, 'vdi-ref') size = exists(args, 'image-size') cached_image = optional(args, 'cached-image') # Use the uuid as a filename vdi_uuid = session.xenapi.VDI.get_uuid(vdi) copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size), 'cached-image': cached_image} filename = with_vdi_in_dom0(session, vdi, False, lambda dev: _copy_vdi('/dev/%s' % dev, copy_args)) return filename def create_kernel_ramdisk(session, args): """Creates a copy of the kernel/ramdisk image if it is present in the cache. If the image is not present in the cache, it does nothing. """ cached_image = exists(args, 'cached-image') image_uuid = exists(args, 'new-image-uuid') cached_image_filename = KERNEL_DIR + '/' + cached_image filename = KERNEL_DIR + '/' + image_uuid if os.path.isfile(cached_image_filename): shutil.copyfile(cached_image_filename, filename) logging.debug("Done. 
Filename: %s", filename) else: filename = "" logging.debug("Cached kernel/ramdisk image not found") return filename def _remove_file(filepath): try: os.remove(filepath) except OSError, exc: # noqa if exc.errno != errno.ENOENT: raise def remove_kernel_ramdisk(session, args): """Removes kernel and/or ramdisk from dom0's file system.""" kernel_file = optional(args, 'kernel-file') ramdisk_file = optional(args, 'ramdisk-file') if kernel_file: _remove_file(kernel_file) if ramdisk_file: _remove_file(ramdisk_file) return "ok" if __name__ == '__main__': XenAPIPlugin.dispatch({'copy_vdi': copy_vdi, 'create_kernel_ramdisk': create_kernel_ramdisk, 'remove_kernel_ramdisk': remove_kernel_ramdisk}) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost0000775000567000056710000003652212701407773025742 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011 OpenStack Foundation # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features # TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true # # XenAPI plugin for host operations # try: import json except ImportError: import simplejson as json import logging import re import sys import time import utils import pluginlib_nova as pluginlib import XenAPI import XenAPIPlugin try: import xmlrpclib except ImportError: import six.moves.xmlrpc_client as xmlrpclib pluginlib.configure_logging("xenhost") _ = pluginlib._ host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)") config_file_path = "/usr/etc/xenhost.conf" DEFAULT_TRIES = 23 DEFAULT_SLEEP = 10 def jsonify(fnc): def wrapper(*args, **kwargs): return json.dumps(fnc(*args, **kwargs)) return wrapper class TimeoutError(StandardError): pass def _run_command(cmd, cmd_input=None): """Wrap utils.run_command to raise PluginError on failure """ try: return utils.run_command(cmd, cmd_input=cmd_input) except utils.SubprocessException, e: # noqa raise pluginlib.PluginError(e.err) def _resume_compute(session, compute_ref, compute_uuid): """Resume compute node on slave host after pool join. This has to happen regardless of the success or failure of the join operation. """ try: # session is valid if the join operation has failed session.xenapi.VM.start(compute_ref, False, True) except XenAPI.Failure: # if session is invalid, e.g. xapi has restarted, then the pool # join has been successful, wait for xapi to become alive again for c in range(0, DEFAULT_TRIES): try: _run_command(["xe", "vm-start", "uuid=%s" % compute_uuid]) return except pluginlib.PluginError: logging.exception('Waited %d seconds for the slave to ' 'become available.' 
% (c * DEFAULT_SLEEP)) time.sleep(DEFAULT_SLEEP) raise pluginlib.PluginError('Unrecoverable error: the host has ' 'not come back for more than %d seconds' % (DEFAULT_SLEEP * (DEFAULT_TRIES + 1))) @jsonify def set_host_enabled(self, arg_dict): """Sets this host's ability to accept new instances. It will otherwise continue to operate normally. """ enabled = arg_dict.get("enabled") if enabled is None: raise pluginlib.PluginError( _("Missing 'enabled' argument to set_host_enabled")) host_uuid = arg_dict['host_uuid'] if enabled == "true": result = _run_command(["xe", "host-enable", "uuid=%s" % host_uuid]) elif enabled == "false": result = _run_command(["xe", "host-disable", "uuid=%s" % host_uuid]) else: raise pluginlib.PluginError(_("Illegal enabled status: %s") % enabled) # Should be empty string if result: raise pluginlib.PluginError(result) # Return the current enabled status cmd = ["xe", "host-param-get", "uuid=%s" % host_uuid, "param-name=enabled"] host_enabled = _run_command(cmd) if host_enabled == "true": status = "enabled" else: status = "disabled" return {"status": status} def _write_config_dict(dct): conf_file = file(config_file_path, "w") json.dump(dct, conf_file) conf_file.close() def _get_config_dict(): """Returns a dict containing the key/values in the config file. If the file doesn't exist, it is created, and an empty dict is returned. 
""" try: conf_file = file(config_file_path) config_dct = json.load(conf_file) conf_file.close() except IOError: # File doesn't exist config_dct = {} # Create the file _write_config_dict(config_dct) return config_dct @jsonify def get_config(self, arg_dict): """Return the value stored for the specified key, or None if no match.""" conf = _get_config_dict() params = arg_dict["params"] try: dct = json.loads(params) except Exception: dct = params key = dct["key"] ret = conf.get(key) if ret is None: # Can't jsonify None return "None" return ret @jsonify def set_config(self, arg_dict): """Write the specified key/value pair, overwriting any existing value.""" conf = _get_config_dict() params = arg_dict["params"] try: dct = json.loads(params) except Exception: dct = params key = dct["key"] val = dct["value"] if val is None: # Delete the key, if present conf.pop(key, None) else: conf.update({key: val}) _write_config_dict(conf) def iptables_config(session, args): # command should be either save or restore logging.debug("iptables_config:enter") logging.debug("iptables_config: args=%s", args) cmd_args = pluginlib.exists(args, 'cmd_args') logging.debug("iptables_config: cmd_args=%s", cmd_args) process_input = pluginlib.optional(args, 'process_input') logging.debug("iptables_config: process_input=%s", process_input) cmd = json.loads(cmd_args) cmd = map(str, cmd) # either execute iptable-save or iptables-restore # command must be only one of these two # process_input must be used only with iptables-restore if len(cmd) > 0 and cmd[0] in ('iptables-save', 'iptables-restore', 'ip6tables-save', 'ip6tables-restore'): result = _run_command(cmd, process_input) ret_str = json.dumps(dict(out=result, err='')) logging.debug("iptables_config:exit") return ret_str # else don't do anything and return an error else: raise pluginlib.PluginError(_("Invalid iptables command")) def _power_action(action, arg_dict): # Host must be disabled first host_uuid = arg_dict['host_uuid'] result = 
_run_command(["xe", "host-disable", "uuid=%s" % host_uuid]) if result: raise pluginlib.PluginError(result) # All running VMs must be shutdown result = _run_command(["xe", "vm-shutdown", "--multiple", "resident-on=%s" % host_uuid]) if result: raise pluginlib.PluginError(result) cmds = {"reboot": "host-reboot", "startup": "host-power-on", "shutdown": "host-shutdown"} result = _run_command(["xe", cmds[action], "uuid=%s" % host_uuid]) # Should be empty string if result: raise pluginlib.PluginError(result) return {"power_action": action} @jsonify def host_reboot(self, arg_dict): """Reboots the host.""" return _power_action("reboot", arg_dict) @jsonify def host_shutdown(self, arg_dict): """Reboots the host.""" return _power_action("shutdown", arg_dict) @jsonify def host_start(self, arg_dict): """Starts the host. Currently not feasible, since the host runs on the same machine as Xen. """ return _power_action("startup", arg_dict) @jsonify def host_join(self, arg_dict): """Join a remote host into a pool. The pool's master is the host where the plugin is called from. The following constraints apply: - The host must have no VMs running, except nova-compute, which will be shut down (and restarted upon pool-join) automatically, - The host must have no shared storage currently set up, - The host must have the same license of the master, - The host must have the same supplemental packs as the master. 
""" session = XenAPI.Session(arg_dict.get("url")) session.login_with_password(arg_dict.get("user"), arg_dict.get("password")) compute_ref = session.xenapi.VM.get_by_uuid(arg_dict.get('compute_uuid')) session.xenapi.VM.clean_shutdown(compute_ref) try: if arg_dict.get("force"): session.xenapi.pool.join(arg_dict.get("master_addr"), arg_dict.get("master_user"), arg_dict.get("master_pass")) else: session.xenapi.pool.join_force(arg_dict.get("master_addr"), arg_dict.get("master_user"), arg_dict.get("master_pass")) finally: _resume_compute(session, compute_ref, arg_dict.get("compute_uuid")) @jsonify def host_data(self, arg_dict): """Runs the commands on the xenstore host to return the current status information. """ host_uuid = arg_dict['host_uuid'] resp = _run_command(["xe", "host-param-list", "uuid=%s" % host_uuid]) parsed_data = parse_response(resp) # We have the raw dict of values. Extract those that we need, # and convert the data types as needed. ret_dict = cleanup(parsed_data) # Add any config settings config = _get_config_dict() ret_dict.update(config) return ret_dict def parse_response(resp): data = {} for ln in resp.splitlines(): if not ln: continue mtch = host_data_pattern.match(ln.strip()) try: k, v = mtch.groups() data[k] = v except AttributeError: # Not a valid line; skip it continue return data @jsonify def host_uptime(self, arg_dict): """Returns the result of the uptime command on the xenhost.""" return {"uptime": _run_command(['uptime'])} def cleanup(dct): """Take the raw KV pairs returned and translate them into the appropriate types, discarding any we don't need. """ def safe_int(val): """Integer values will either be string versions of numbers, or empty strings. Convert the latter to nulls. 
""" try: return int(val) except ValueError: return None def strip_kv(ln): return [val.strip() for val in ln.split(":", 1)] out = {} # sbs = dct.get("supported-bootloaders", "") # out["host_supported-bootloaders"] = sbs.split("; ") # out["host_suspend-image-sr-uuid"] = dct.get("suspend-image-sr-uuid", "") # out["host_crash-dump-sr-uuid"] = dct.get("crash-dump-sr-uuid", "") # out["host_local-cache-sr"] = dct.get("local-cache-sr", "") out["enabled"] = dct.get("enabled", "true") == "true" out["host_memory"] = omm = {} omm["total"] = safe_int(dct.get("memory-total", "")) omm["overhead"] = safe_int(dct.get("memory-overhead", "")) omm["free"] = safe_int(dct.get("memory-free", "")) omm["free-computed"] = safe_int( dct.get("memory-free-computed", "")) # out["host_API-version"] = avv = {} # avv["vendor"] = dct.get("API-version-vendor", "") # avv["major"] = safe_int(dct.get("API-version-major", "")) # avv["minor"] = safe_int(dct.get("API-version-minor", "")) out["enabled"] = dct.get("enabled", True) out["host_uuid"] = dct.get("uuid", None) out["host_name-label"] = dct.get("name-label", "") out["host_name-description"] = dct.get("name-description", "") # out["host_host-metrics-live"] = dct.get( # "host-metrics-live", "false") == "true" out["host_hostname"] = dct.get("hostname", "") out["host_ip_address"] = dct.get("address", "") oc = dct.get("other-config", "") out["host_other-config"] = ocd = {} if oc: for oc_fld in oc.split("; "): ock, ocv = strip_kv(oc_fld) ocd[ock] = ocv capabilities = dct.get("capabilities", "") out["host_capabilities"] = capabilities.replace(";", "").split() # out["host_allowed-operations"] = dct.get( # "allowed-operations", "").split("; ") # lsrv = dct.get("license-server", "") # out["host_license-server"] = ols = {} # if lsrv: # for lspart in lsrv.split("; "): # lsk, lsv = lspart.split(": ") # if lsk == "port": # ols[lsk] = safe_int(lsv) # else: # ols[lsk] = lsv # sv = dct.get("software-version", "") # out["host_software-version"] = osv = {} # if sv: # 
for svln in sv.split("; "): # svk, svv = strip_kv(svln) # osv[svk] = svv cpuinf = dct.get("cpu_info", "") out["host_cpu_info"] = ocp = {} if cpuinf: for cpln in cpuinf.split("; "): cpk, cpv = strip_kv(cpln) if cpk in ("cpu_count", "family", "model", "stepping"): ocp[cpk] = safe_int(cpv) else: ocp[cpk] = cpv # out["host_edition"] = dct.get("edition", "") # out["host_external-auth-service-name"] = dct.get( # "external-auth-service-name", "") return out def query_gc(session, sr_uuid, vdi_uuid): result = _run_command(["/opt/xensource/sm/cleanup.py", "-q", "-u", sr_uuid]) # Example output: "Currently running: True" return result[19:].strip() == "True" def get_pci_device_details(session): """Returns a string that is a list of pci devices with details. This string is obtained by running the command lspci. With -vmm option, it dumps PCI device data in machine readable form. This verbose format display a sequence of records separated by a blank line. We will also use option "-n" to get vendor_id and device_id as numeric values and the "-k" option to get the kernel driver used if any. """ return _run_command(["lspci", "-vmmnk"]) def get_pci_type(session, pci_device): """Returns the type of the PCI device (type-PCI, type-VF or type-PF). 
pci-device -- The address of the pci device """ # We need to add the domain if it is missing if pci_device.count(':') == 1: pci_device = "0000:" + pci_device output = _run_command(["ls", "/sys/bus/pci/devices/" + pci_device + "/"]) if "physfn" in output: return "type-VF" if "virtfn" in output: return "type-PF" return "type-PCI" if __name__ == "__main__": # Support both serialized and non-serialized plugin approaches _, methodname = xmlrpclib.loads(sys.argv[1]) if methodname in ['query_gc', 'get_pci_device_details', 'get_pci_type']: utils.register_plugin_calls(query_gc, get_pci_device_details, get_pci_type) XenAPIPlugin.dispatch( {"host_data": host_data, "set_host_enabled": set_host_enabled, "host_shutdown": host_shutdown, "host_reboot": host_reboot, "host_start": host_start, "host_join": host_join, "get_config": get_config, "set_config": set_config, "iptables_config": iptables_config, "host_uptime": host_uptime}) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py0000775000567000056710000001636012701407773026546 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2010 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features # # XenAPI plugin for reading/writing information to xenstore # try: import json except ImportError: import simplejson as json import utils # noqa import XenAPIPlugin # noqa import pluginlib_nova as pluginlib # noqa pluginlib.configure_logging("xenstore") class XenstoreError(pluginlib.PluginError): """Errors that occur when calling xenstore-* through subprocesses.""" def __init__(self, cmd, return_code, stderr, stdout): msg = "cmd: %s; returncode: %d; stderr: %s; stdout: %s" msg = msg % (cmd, return_code, stderr, stdout) self.cmd = cmd self.return_code = return_code self.stderr = stderr self.stdout = stdout pluginlib.PluginError.__init__(self, msg) def jsonify(fnc): def wrapper(*args, **kwargs): ret = fnc(*args, **kwargs) try: json.loads(ret) except ValueError: # Value should already be JSON-encoded, but some operations # may write raw sting values; this will catch those and # properly encode them. ret = json.dumps(ret) return ret return wrapper def record_exists(arg_dict): """Returns whether or not the given record exists. The record path is determined from the given path and dom_id in the arg_dict. """ cmd = ["xenstore-exists", "/local/domain/%(dom_id)s/%(path)s" % arg_dict] try: _run_command(cmd) return True except XenstoreError, e: # noqa if e.stderr == '': # if stderr was empty, this just means the path did not exist return False # otherwise there was a real problem raise @jsonify def read_record(self, arg_dict): """Returns the value stored at the given path for the given dom_id. These must be encoded as key/value pairs in arg_dict. You can optionally include a key 'ignore_missing_path'; if this is present and boolean True, attempting to read a non-existent path will return the string 'None' instead of raising an exception. 
""" cmd = ["xenstore-read", "/local/domain/%(dom_id)s/%(path)s" % arg_dict] try: result = _run_command(cmd) return result.strip() except XenstoreError, e: # noqa if not arg_dict.get("ignore_missing_path", False): raise if not record_exists(arg_dict): return "None" # Just try again in case the agent write won the race against # the record_exists check. If this fails again, it will likely raise # an equally meaningful XenstoreError as the one we just caught result = _run_command(cmd) return result.strip() @jsonify def write_record(self, arg_dict): """Writes to xenstore at the specified path. If there is information already stored in that location, it is overwritten. As in read_record, the dom_id and path must be specified in the arg_dict; additionally, you must specify a 'value' key, whose value must be a string. Typically, you can json-ify more complex values and store the json output. """ cmd = ["xenstore-write", "/local/domain/%(dom_id)s/%(path)s" % arg_dict, arg_dict["value"]] _run_command(cmd) return arg_dict["value"] @jsonify def list_records(self, arg_dict): """Returns all the stored data at or below the given path for the given dom_id. The data is returned as a json-ified dict, with the path as the key and the stored value as the value. If the path doesn't exist, an empty dict is returned. """ dirpath = "/local/domain/%(dom_id)s/%(path)s" % arg_dict cmd = ["xenstore-ls", dirpath.rstrip("/")] try: recs = _run_command(cmd) except XenstoreError, e: # noqa if not record_exists(arg_dict): return {} # Just try again in case the path was created in between # the "ls" and the existence check. 
If this fails again, it will # likely raise an equally meaningful XenstoreError recs = _run_command(cmd) base_path = arg_dict["path"] paths = _paths_from_ls(recs) ret = {} for path in paths: if base_path: arg_dict["path"] = "%s/%s" % (base_path, path) else: arg_dict["path"] = path rec = read_record(self, arg_dict) try: val = json.loads(rec) except ValueError: val = rec ret[path] = val return ret @jsonify def delete_record(self, arg_dict): """Just like it sounds: it removes the record for the specified VM and the specified path from xenstore. """ cmd = ["xenstore-rm", "/local/domain/%(dom_id)s/%(path)s" % arg_dict] try: return _run_command(cmd) except XenstoreError, e: # noqa if 'could not remove path' in e.stderr: # Entry already gone. We're good to go. return '' raise def _paths_from_ls(recs): """The xenstore-ls command returns a listing that isn't terribly useful. This method cleans that up into a dict with each path as the key, and the associated string as the value. """ last_nm = "" level = 0 path = [] ret = [] for ln in recs.splitlines(): nm, val = ln.rstrip().split(" = ") barename = nm.lstrip() this_level = len(nm) - len(barename) if this_level == 0: ret.append(barename) level = 0 path = [] elif this_level == level: # child of same parent ret.append("%s/%s" % ("/".join(path), barename)) elif this_level > level: path.append(last_nm) ret.append("%s/%s" % ("/".join(path), barename)) level = this_level elif this_level < level: path = path[:this_level] ret.append("%s/%s" % ("/".join(path), barename)) level = this_level last_nm = barename return ret def _run_command(cmd): """Wrap utils.run_command to raise XenstoreError on failure """ try: return utils.run_command(cmd) except utils.SubprocessException, e: # noqa raise XenstoreError(e.cmdline, e.ret, e.err, e.out) if __name__ == "__main__": XenAPIPlugin.dispatch( {"read_record": read_record, "write_record": write_record, "list_records": list_records, "delete_record": delete_record}) 
nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent0000775000567000056710000002300112701407773025334 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features # TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true # # XenAPI plugin for reading/writing information to xenstore # import base64 import commands # noqa try: import json except ImportError: import simplejson as json import time import XenAPIPlugin import pluginlib_nova pluginlib_nova.configure_logging("agent") import xenstore DEFAULT_TIMEOUT = 30 PluginError = pluginlib_nova.PluginError _ = pluginlib_nova._ class TimeoutError(StandardError): pass class RebootDetectedError(StandardError): pass def version(self, arg_dict): """Get version of agent.""" timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT)) arg_dict["value"] = json.dumps({"name": "version", "value": "agent"}) request_id = arg_dict["id"] arg_dict["path"] = "data/host/%s" % request_id xenstore.write_record(self, arg_dict) try: resp = _wait_for_agent(self, request_id, arg_dict, timeout) except TimeoutError, e: # 
noqa raise PluginError(e) return resp def key_init(self, arg_dict): """Handles the Diffie-Hellman key exchange with the agent to establish the shared secret key used to encrypt/decrypt sensitive info to be passed, such as passwords. Returns the shared secret key value. """ timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT)) # WARNING: Some older Windows agents will crash if the public key isn't # a string pub = arg_dict["pub"] arg_dict["value"] = json.dumps({"name": "keyinit", "value": pub}) request_id = arg_dict["id"] arg_dict["path"] = "data/host/%s" % request_id xenstore.write_record(self, arg_dict) try: resp = _wait_for_agent(self, request_id, arg_dict, timeout) except TimeoutError, e: # noqa raise PluginError(e) return resp def password(self, arg_dict): """Writes a request to xenstore that tells the agent to set the root password for the given VM. The password should be encrypted using the shared secret key that was returned by a previous call to key_init. The encrypted password value should be passed as the value for the 'enc_pass' key in arg_dict. """ timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT)) enc_pass = arg_dict["enc_pass"] arg_dict["value"] = json.dumps({"name": "password", "value": enc_pass}) request_id = arg_dict["id"] arg_dict["path"] = "data/host/%s" % request_id xenstore.write_record(self, arg_dict) try: resp = _wait_for_agent(self, request_id, arg_dict, timeout) except TimeoutError, e: # noqa raise PluginError(e) return resp def resetnetwork(self, arg_dict): """Writes a resquest to xenstore that tells the agent to reset networking. 
""" timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT)) arg_dict['value'] = json.dumps({'name': 'resetnetwork', 'value': ''}) request_id = arg_dict['id'] arg_dict['path'] = "data/host/%s" % request_id xenstore.write_record(self, arg_dict) try: resp = _wait_for_agent(self, request_id, arg_dict, timeout) except TimeoutError, e: # noqa raise PluginError(e) return resp def inject_file(self, arg_dict): """Expects a file path and the contents of the file to be written. Both should be base64-encoded in order to eliminate errors as they are passed through the stack. Writes that information to xenstore for the agent, which will decode the file and intended path, and create it on the instance. The original agent munged both of these into a single entry; the new agent keeps them separate. We will need to test for the new agent, and write the xenstore records to match the agent version. We will also need to test to determine if the file injection method on the agent has been disabled, and raise a NotImplemented error if that is the case. """ timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT)) b64_path = arg_dict["b64_path"] b64_file = arg_dict["b64_contents"] request_id = arg_dict["id"] agent_features = _get_agent_features(self, arg_dict) if "file_inject" in agent_features: # New version of the agent. Agent should receive a 'value' # key whose value is a dictionary containing 'b64_path' and # 'b64_file'. See old version below. arg_dict["value"] = json.dumps({"name": "file_inject", "value": {"b64_path": b64_path, "b64_file": b64_file}}) elif "injectfile" in agent_features: # Old agent requires file path and file contents to be # combined into one base64 value. raw_path = base64.b64decode(b64_path) raw_file = base64.b64decode(b64_file) new_b64 = base64.b64encode("%s,%s" % (raw_path, raw_file)) arg_dict["value"] = json.dumps({"name": "injectfile", "value": new_b64}) else: # Either the methods don't exist in the agent, or they # have been disabled. 
raise NotImplementedError(_("NOT IMPLEMENTED: Agent does not" " support file injection.")) arg_dict["path"] = "data/host/%s" % request_id xenstore.write_record(self, arg_dict) try: resp = _wait_for_agent(self, request_id, arg_dict, timeout) except TimeoutError, e: # noqa raise PluginError(e) return resp def agent_update(self, arg_dict): """Expects an URL and md5sum of the contents, then directs the agent to update itself. """ timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT)) request_id = arg_dict["id"] url = arg_dict["url"] md5sum = arg_dict["md5sum"] arg_dict["value"] = json.dumps({"name": "agentupdate", "value": "%s,%s" % (url, md5sum)}) arg_dict["path"] = "data/host/%s" % request_id xenstore.write_record(self, arg_dict) try: resp = _wait_for_agent(self, request_id, arg_dict, timeout) except TimeoutError, e: # noqa raise PluginError(e) return resp def _get_agent_features(self, arg_dict): """Return an array of features that an agent supports.""" timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT)) tmp_id = commands.getoutput("uuidgen") dct = {} dct.update(arg_dict) dct["value"] = json.dumps({"name": "features", "value": ""}) dct["path"] = "data/host/%s" % tmp_id xenstore.write_record(self, dct) try: resp = _wait_for_agent(self, tmp_id, dct, timeout) except TimeoutError, e: # noqa raise PluginError(e) response = json.loads(resp) if response['returncode'] != 0: return response["message"].split(",") else: return {} def _wait_for_agent(self, request_id, arg_dict, timeout): """Periodically checks xenstore for a response from the agent. The request is always written to 'data/host/{id}', and the agent's response for that request will be in 'data/guest/{id}'. If no value appears from the agent within the timeout specified, the original request is deleted and a TimeoutError is raised. 
""" arg_dict["path"] = "data/guest/%s" % request_id arg_dict["ignore_missing_path"] = True start = time.time() reboot_detected = False while time.time() - start < timeout: ret = xenstore.read_record(self, arg_dict) # Note: the response for None with be a string that includes # double quotes. if ret != '"None"': # The agent responded return ret time.sleep(.5) # NOTE(johngarbutt) If we can't find this domid, then # the VM has rebooted, so we must trigger domid refresh. # Check after the sleep to give xenstore time to update # after the VM reboot. exists_args = { "dom_id": arg_dict["dom_id"], "path": "name", } dom_id_is_present = xenstore.record_exists(exists_args) if not dom_id_is_present: reboot_detected = True break # No response within the timeout period; bail out # First, delete the request record arg_dict["path"] = "data/host/%s" % request_id xenstore.delete_record(self, arg_dict) if reboot_detected: raise RebootDetectedError(_("REBOOT: dom_id %s no longer " "present") % arg_dict["dom_id"]) else: raise TimeoutError(_("TIMEOUT: No response from agent within" " %s seconds.") % timeout) if __name__ == "__main__": XenAPIPlugin.dispatch( {"version": version, "key_init": key_init, "password": password, "resetnetwork": resetnetwork, "inject_file": inject_file, "agentupdate": agent_update}) nova-13.0.0/plugins/xenserver/xenapi/etc/xapi.d/plugins/config_file0000775000567000056710000000224112701407773026505 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features import XenAPIPlugin def get_val(session, args): config_key = args['key'] config_file = open('/etc/xapi.conf') try: for line in config_file: split = line.split('=') if (len(split) == 2) and (split[0].strip() == config_key): return split[1].strip() return "" finally: config_file.close() if __name__ == '__main__': XenAPIPlugin.dispatch({"get_val": get_val}) nova-13.0.0/doc/0000775000567000056710000000000012701410205014434 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/source/0000775000567000056710000000000012701410205015734 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/source/vmstates.rst0000664000567000056710000001211612701407773020355 0ustar jenkinsjenkins00000000000000Virtual Machine States and Transitions ======================================= The following diagrams and tables show the required virtual machine (VM) states and task states for various commands issued by the user. Allowed State Transitions -------------------------- .. 
graphviz:: digraph states { graph [pad=".35", ranksep="0.65", nodesep="0.55", concentrate=true]; node [fontsize=10 fontname="Monospace"]; edge [arrowhead="normal", arrowsize="0.8"]; label="All states are allowed to transition to DELETED and ERROR."; forcelabels=true; labelloc=bottom; labeljust=left; /* states */ building [label="BUILDING"] active [label="ACTIVE"] paused [label="PAUSED"] suspended [label="SUSPENDED"] stopped [label="STOPPED"] rescued [label="RESCUED"] resized [label="RESIZED"] soft_deleted [label="SOFT_DELETED"] shelved [label="SHELVED"] shelved_offloaded [label="SHELVED_OFFLOADED"] deleted [label="DELETED", color="red"] error [label="ERROR", color="red"] /* transitions [action] */ building -> active active -> active [headport=nw, tailport=ne] // manual layout active -> soft_deleted [tailport=e] // prevent arrowhead overlap active -> suspended active -> paused [tailport=w] // prevent arrowhead overlap active -> stopped active -> shelved active -> shelved_offloaded active -> rescued active -> resized soft_deleted -> active [headport=e] // prevent arrowhead overlap suspended -> active suspended -> shelved suspended -> shelved_offloaded paused -> active paused -> shelved paused -> shelved_offloaded stopped -> active stopped -> stopped [headport=nw, tailport=ne] // manual layout stopped -> resized stopped -> rescued stopped -> shelved stopped -> shelved_offloaded resized -> active rescued -> active shelved -> shelved_offloaded shelved -> active shelved_offloaded -> active } Requirements for Commands ------------------------- ================== ================== ==================== ================ Command Req'd VM States Req'd Task States Target State ================== ================== ==================== ================ pause Active, Shutoff, Resize Verify, unset Paused Rescued unpause Paused N/A Active suspend Active, Shutoff N/A Suspended resume Suspended N/A Active rescue Active, Shutoff Resize Verify, unset Rescued unrescue Rescued N/A 
Active set admin password Active N/A Active rebuild Active, Shutoff Resize Verify, unset Active force delete Soft Deleted N/A Deleted restore Soft Deleted N/A Active soft delete Active, Shutoff, N/A Soft Deleted Error delete Active, Shutoff, N/A Deleted Building, Rescued, Error backup Active, Shutoff N/A Active, Shutoff snapshot Active, Shutoff N/A Active, Shutoff start Shutoff, Stopped N/A Active stop Active, Shutoff, Resize Verify, unset Stopped Rescued reboot Active, Shutoff, Resize Verify, unset Active Rescued resize Active, Shutoff Resize Verify, unset Resized revert resize Active, Shutoff Resize Verify, unset Active confirm resize Active, Shutoff Resize Verify, unset Active ================== ================== ==================== ================ VM states and Possible Commands ------------------------------- ============ ================================================================= VM State Commands ============ ================================================================= Paused unpause Suspended resume Active set admin password, suspend, pause, rescue, rebuild, soft delete, delete, backup, snapshot, stop, reboot, resize, revert resize, confirm resize Shutoff suspend, pause, rescue, rebuild, soft delete, delete, backup, start, snapshot, stop, reboot, resize, revert resize, confirm resize Rescued unrescue, pause Stopped rescue, delete, start Soft Deleted force delete, restore Error soft delete, delete Building delete Rescued delete, stop, reboot ============ ================================================================= Create Instance States ---------------------- The following diagram shows the sequence of VM states, task states, and power states when a new VM instance is created. .. image:: ./images/create_vm_states.svg :alt: Sequence of VM states, task states, and power states when a new VM instance is created. nova-13.0.0/doc/source/index.rst0000664000567000056710000001560212701410011017574 0ustar jenkinsjenkins00000000000000.. 
Copyright 2010-2012 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================================== Welcome to Nova's developer documentation! ========================================== Nova is an OpenStack project designed to provide power massively scalable, on demand, self service access to compute resources. The developer documentation provided here is continually kept up-to-date based on the latest code, and may not represent the state of the project at any specific prior release. .. note:: This is documentation for developers, if you are looking for more general documentation including API, install, operator and user guides see `docs.openstack.org`_ .. _`docs.openstack.org`: http://docs.openstack.org This documentation is intended to help explain what the Nova developers think is the current scope of the Nova project, as well as the architectural decisions we have made in order to support that scope. We also document our plans for evolving our architecture over time. Finally, we documented our current development process and policies. Compute API References ====================== Nova has had a v2 API for a long time. We are currently in the process of moving to a new implementation of that API, which we have called v2.1. v2.1 started life as an API called v3, but that name should never be used any more. 
We are currently in the process of transitioning users over to the v2.1 implementation, at which point the v2 code will be deleted. * `v2.1 (CURRENT)`_ * `v2 (SUPPORTED)`_ and `v2 extensions (SUPPORTED)`_ (Will be deprecated in the near future.) Changes to the Compute API post v2.1 are made using microversions. You can see a history of our microversions here: .. toctree:: :maxdepth: 1 api_microversion_history We also publish end-user API docs as an API Guide. * `Compute API Guide`_ .. _`v2.1 (CURRENT)`: http://developer.openstack.org/api-ref-compute-v2.1.html .. _`v2 (SUPPORTED)`: http://developer.openstack.org/api-ref-compute-v2.html .. _`v2 extensions (SUPPORTED)`: http://developer.openstack.org/api-ref-compute-v2-ext.html .. _`Compute API Guide`: http://developer.openstack.org/api-guide/compute/ There was a session on the v2.1 API at the Liberty summit which you can watch `here `_. Feature Status ============== Nova aims to have a single compute API that works the same across all deployments of Nova. While many features are well-tested, well-documented, support live upgrade, and are ready for production, some are not. Also the choice of underlying technology affects the list of features that are ready for production. Our first attempt to communicate this is the feature support matrix (previously called the hypervisor support matrix). Over time we hope to evolve that to include a classification of each feature's maturity and exactly what technology combinations are covered by current integration testing efforts. .. toctree:: :maxdepth: 1 test_strategy feature_classification support-matrix Developer Guide =============== If you are new to Nova, this should help you start to understand what Nova actually does, and why. .. toctree:: :maxdepth: 1 how_to_get_involved process architecture project_scope development.environment Development Policies -------------------- The Nova community is a large community. 
We have lots of users, and they all have a lot of expectations around upgrade and backwards compatibility. For example, having a good stable API, with discoverable versions and capabilities is important for maintaining the strong ecosystem around Nova. Our process is always evolving, just as Nova and the community around Nova evolves over time. If there are things that seem strange, or you have ideas on how to improve things, please engage in that debate, so we continue to improve how the Nova community operates. This section looks at the processes and why. The main aim behind all the process is to aid good communication between all members of the Nova community, while keeping users happy and keeping developers productive. .. toctree:: :maxdepth: 1 process blueprints policies Architecture Concepts ---------------------- This follows on for the discussion in the introduction, and digs into details on specific parts of the Nova architecture. We find it important to document the reasons behind our architectural decisions, so its easier for people to engage in the debates about the future of Nova's architecture. This is all part of Open Design and Open Development. .. toctree:: :maxdepth: 1 aggregates threading vmstates i18n filter_scheduler rpc block_device_mapping addmethod.openstackapi conductor notifications Architecture Evolution Plans ----------------------------- The following section includes documents that describe the overall plan behind groups of nova-specs. Most of these cover items relating to the evolution of various parts of Nova's architecture. Once the work is complete, these documents will move into the "Concepts" section. If you want to get involved in shaping the future of Nova's architecture, these are a great place to start reading up on the current plans. .. toctree:: :maxdepth: 1 cells upgrade api_plugins api_microversion_dev policy_enforcement stable_api code-review scheduler_evolution Advanced testing and guides ---------------------------- .. 
toctree:: :maxdepth: 1 gmr testing/libvirt-numa testing/serial-console Sample Configuration File ------------------------- .. toctree:: :maxdepth: 1 sample_config Man Pages ---------- .. toctree:: :maxdepth: 1 man/index Module Reference ---------------- .. toctree:: :maxdepth: 1 services .. # NOTE(mriedem): This is the section where we hide things that we don't # actually want in the table of contents but sphinx build would fail if # they aren't in the toctree somewhere. For example, we hide api/autoindex # since that's already covered with modindex below. .. toctree:: :hidden: api/autoindex Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` nova-13.0.0/doc/source/blueprints.rst0000664000567000056710000000473012701407773020701 0ustar jenkinsjenkins00000000000000================================== Blueprints, Specs and Priorities ================================== Like most OpenStack projects, Nova uses `blueprints`_ and specifications (specs) to track new features, but not all blueprints require a spec. This document covers when a spec is needed. .. note:: Nova's specs live at: `specs.openstack.org`_ .. _`blueprints`: http://docs.openstack.org/infra/manual/developers.html#working-on-specifications-and-blueprints .. _`specs.openstack.org`: http://specs.openstack.org/openstack/nova-specs/ Specs ===== A spec is needed for any feature that requires a design discussion. All features need a blueprint but not all blueprints require a spec. If a new feature is straightforward enough that it doesn't need any design discussion, then no spec is required. In order to provide the sort of documentation that would otherwise be provided via a spec, the commit message should include a ``DocImpact`` flag and a thorough description of the feature from a user/operator perspective. Guidelines for when a feature doesn't need a spec. * Is the feature a single self contained change? 
* If the feature touches code all over the place, it probably should have a design discussion. * If the feature is big enough that it needs more then one commit, it probably should have a design discussion. * Not an API change. * API changes always require a design discussion. Project Priorities =================== * Pick several project priority themes, in the form of use cases, to help us prioritize work * Generate list of improvement blueprints based on the themes * Produce rough draft of list going into summit and finalize the list at the summit * Publish list of project priorities and look for volunteers to work on them * Update spec template to include * Specific use cases * State if the spec is project priority or not * Keep an up to date list of project priority blueprints that need code review in an etherpad. * Consumers of project priority and project priority blueprint lists: * Reviewers looking for direction of where to spend their blueprint review time. If a large subset of nova-core doesn't use the project priorities it means the core team is not aligned properly and should revisit the list of project priorities * The blueprint approval team, to help find the right balance of blueprints * Contributors looking for something to work on * People looking for what they can expect in the next release nova-13.0.0/doc/source/how_to_get_involved.rst0000664000567000056710000003443612701407773022564 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. 
_getting_involved: ===================================== How to get (more) involved with Nova ===================================== So you want to get more involved with Nova? Or you are new to Nova and wondering where to start? We are working on building easy ways for you to get help and ideas on how to learn more about Nova and how the Nova community works. Any questions, please ask! If you are unsure who to ask, then please contact the `Mentoring Czar`__. __ `Nova People`_ How do I get started? ===================== There are quite a few global docs on this: - http://www.openstack.org/assets/welcome-guide/OpenStackWelcomeGuide.pdf - https://wiki.openstack.org/wiki/How_To_Contribute - http://www.openstack.org/community/ There is more general info, non Nova specific info here: - https://wiki.openstack.org/wiki/Mentors - https://wiki.openstack.org/wiki/OpenStack_Upstream_Training What should I work on? ~~~~~~~~~~~~~~~~~~~~~~ So you are starting out your Nova journey, where is a good place to start? If you'd like to learn how Nova works before changing anything (good idea!), we recommend looking for reviews with -1s and -2s and seeing why they got downvoted. There is also the :ref:`code-review`. Once you have some understanding, start reviewing patches. It's OK to ask people to explain things you don't understand. It's also OK to see some potential problems but put a +0. Another way is to look for a subteam you'd like to get involved with and review their patches. See: https://etherpad.openstack.org/p/mitaka-nova-priorities-tracking Once you're ready to write code, take a look at some of the work already marked as low-hanging fruit: * https://bugs.launchpad.net/nova/+bugs?field.tag=low-hanging-fruit * https://etherpad.openstack.org/p/nova-low-hanging-fruit How do I get my feature in? ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The best way of getting your feature in is... well it depends. 
First concentrate on solving your problem and/or use case, don't fixate on getting the code you have working merged. Its likely things will need significant re-work after you discuss how your needs match up with all the existing ways Nova is currently being used. The good news, is this process should leave you with a feature thats more flexible and doesn't lock you into your current way of thinking. A key part of getting code merged, is helping with reviewing other people's code. Great reviews of others code will help free up more core reviewer time to look at your own patches. In addition, you will understand how the review is thinking when they review your code. Also, work out if any on going efforts are blocking your feature and helping out speeding those up. The spec review process should help with this effort. For more details on our process, please see: :ref:`process`. What is expected of a good contributor? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TODO - need more info on this Top Tips for working with the Nova community ============================================ Here are some top tips around engaging with the Nova community: - IRC - we talk a lot in #openstack-nova - do ask us questions in there, and we will try to help you - not sure about asking questions? feel free to listen in around other people's questions - we recommend you setup an IRC bouncer: https://wiki.openstack.org/wiki/IRC - Email - Use the [nova] tag in the mailing lists - Filtering on [nova] and [all] can help tame the list - Be Open - i.e. don't review your teams code in private, do it publicly in gerrit - i.e. be ready to talk about openly about problems you are having, not "theoretical" issues - that way you can start to gain the trust of the wider community - Got a problem? Please ask! - Please raise any problems and ask questions early - we want to help you before you are frustrated or annoyed - unsure who to ask? Just ask in IRC, or check out the list of `Nova people`_. 
- Talk about problems first, then solutions - Nova is a big project. At first, it can be hard to see the big picture - Don't think about "merging your patch", instead think about "solving your problem" - conversations are more productive that way - Its not the decision thats important, it's the reason behind it thats important - Don't like the way the community is going? - Please ask why we ware going that way, and please engage with the debate - If you don't, we are unable to learn from what you have to offer - No one will decide, this is stuck, who can help me? - it's rare, but it happens - it's the `Nova PTL`__'s job to help you - ...but if you don't ask, it's hard for them to help you __ `Nova People`_ Process ======= It can feel like you are faced with a wall of process. We are a big community, to make sure the right communication happens, we do use a minimal amount of process. If you find something that doesn't make sense, please: - ask questions to find out \*why\* it happens - if you know of a better way to do it, please speak up - one "better way" might be to remove the process if it no longer helps To learn more about Nova's process, please read :ref:`process`. Why bother with any process? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Why is it worth creating a bug or blueprint to track your code review? This may seem like silly process, but there is usually a good reason behind it. We have lots of code to review, and we have tools to try and get to really important code reviews first. If yours is really important, but not picked up by our tools, it's possible you just get lost in the bottom of a big queue. If you have a bug fix, you have done loads of work to identify the issue, and test out your fix, and submit it. By adding a bug report, you are making it easier for other folks who hit the same problem to find your work, possibly saving them the hours of pain you went through. 
With any luck that gives all those people the time to fix different bugs, all that might have affected you, if you had not given them the time go fix it. Its similar with blueprints. You have worked out how to scratch your itch, lets tell others about that great new feature you have added, so they can use that. Also, it stops someone with a similar idea going through all the pain of creating a feature only to find you already have that feature ready and up for review, or merged into the latest release. Hopefully this gives you an idea why we have applied a small layer of process to what we are doing. Having said all this, we need to unlearn old habits to move forward, there may be better ways to do things, and we are open to trying them. Please help be part of the solution. .. _why_plus1: Why do code reviews if I am not in nova-core? ============================================= Code reviews are the life blood of the Nova developer community. There is a good discussion on how you do good reviews, and how anyone can be a reviewer: http://docs.openstack.org/infra/manual/developers.html#peer-review In the draft process guide, I discuss how doing reviews can help get your code merged faster: :ref:`process`. Lets look at some of the top reasons why participating with code reviews really helps you: - Doing more reviews, and seeing what other reviewers notice, will help you better understand what is expected of code that gets merged into master - Having more non-core people do great reviews, leaves less review work for the core reviewers to do, so we are able get more code merged - Empathy is one of the keys to a happy community. If you are used to doing code reviews, you will better understand the comments you get when people review your code. As you do more code reviews, and see what others notice, you will get a better idea of what people are looking for when then apply a +2 to your code. 
- TODO - needs more detail What are the most useful types of code review comments? Well here are a few to the top ones: - Fundamental flaws are the biggest thing to spot. Does the patch break a whole set of existing users, or an existing feature? - Consistency of behaviour is really important. Does this bit of code do things differently to where similar things happen else where in Nova? - Is the code easy to maintain, well tested and easy to read? Code is read order of magnitude times more than it is written, so optimise for the reader of the code, not the writer. - TODO - what others should go here? Let's look at some problems people hit when starting out doing code reviews: - My +1 doesn't mean anything, why should I bother? - So your +1 really does help. Some really useful -1 votes that lead to a +1 vote helps get code into a position - When to use -1 vs 0 vs +1 - Please see the guidelines here: http://docs.openstack.org/infra/manual/developers.html#peer-review - I have already reviewed this code internally, no point in adding a +1 externally? - Please talk to your company about doing all code reviews in the public, that is a much better way to get involved. showing how the code has evolved upstream, is much better than trying to 'perfect' code internally, before uploading for public review. You can use Draft mode, and mark things as WIP if you prefer, but please do the reviews upstream. - Where do I start? What should I review? 
- There are various tools, but a good place to start is: https://etherpad.openstack.org/p/ -nova-priorities-tracking - Depending on the time in the cycle, it's worth looking at NeedsCodeReview blueprints: https://blueprints.launchpad.net/nova/ - Maybe take a look at things you want to see merged, bug fixes and features, or little code fixes - Look for things that have been waiting a long time for a review: http://5885fef486164bb8596d-41634d3e64ee11f37e8658ed1b4d12ec.r44.cf3.rackcdn.com/nova-openreviews.html - If you get through the above lists, try other tools, such as: http://status.openstack.org/reviews - TODO - I think there is more to add here How to do great code reviews? ============================= http://docs.openstack.org/infra/manual/developers.html#peer-review For more tips, please see: `Why do code reviews if I am not in nova-core?`_ How do I become nova-core? ========================== You don't have to be nova-core to be a valued member of the Nova community. There are many, many ways you can help. Every quality review that helps someone get their patch closer to being ready to merge helps everyone get their code merged faster. The first step to becoming nova-core is learning how to be an active member of the Nova community, including learning how to do great code reviews. For more details see: https://wiki.openstack.org/wiki/Nova/CoreTeam#Membership_Expectations If you feel like you have the time to commit to all the nova-core membership expectations, reach out to the Nova PTL who will be able to find you an existing member of nova-core to help mentor you. If all goes well, and you seem like a good candidate, your mentor will contact the rest of the nova-core team to ask them to start looking at your reviews, so they are able to vote for you, if you get nominated for join nova-core. We encourage all mentoring, where possible, to occur on #openstack-nova so everyone can learn and benefit from your discussions. 
The above mentoring is available to every one who wants to learn how to better code reviews, even if you don't ever want to commit to becoming nova-core. If you already have a mentor, that's great, the process is only there for folks who are still trying to find a mentor. Being admitted to the mentoring program no way guarantees you will become a member of nova-core eventually, it's here to help you improve, and help you have the sort of involvement and conversations that can lead to becoming a member of nova-core. How to do great nova-spec reviews? ================================== http://specs.openstack.org/openstack/nova-specs/specs/mitaka/template.html http://docs.openstack.org/developer/nova/devref/kilo.blueprints.html#when-is-a-blueprint-needed Spec reviews are always a step ahead of the normal code reviews. Follow the above links for some great information on specs/reviews. The following could be some important tips: 1. The specs are published as html documents. Ensure that the author has a proper render of the same via the .rst file. 2. More often than not, it's important to know that there are no overlaps across multiple specs. 3. Ensure that a proper dependency of the spec is identified. For example - a user desired feature that requires a proper base enablement should be a dependent spec. 4. Ask for clarity on changes that appear ambiguous to you. 5. Every release nova gets a huge set of spec proposals and that's a huge task for the limited set of nova cores to complete. Helping the cores with additional reviews is always a great thing. How to do great bug triage? =========================== https://wiki.openstack.org/wiki/Nova/BugTriage More details coming soon... How to step up into a project leadership role? 
============================================== There are many ways to help lead the Nova project: * Mentoring efforts, and getting started tips: https://wiki.openstack.org/wiki/Nova/Mentoring * Info on process, with a focus on how you can go from an idea to getting code merged Nova: https://wiki.openstack.org/wiki/Nova/Mitaka_Release_Schedule * Consider leading an existing `Nova subteam`_ or forming a new one. * Consider becoming a `Bug tag owner`_. * Contact the PTL about becoming a Czar `Nova People`_. .. _`Nova people`: https://wiki.openstack.org/wiki/Nova#People .. _`Nova subteam`: https://wiki.openstack.org/wiki/Nova#Nova_subteams .. _`Bug tag owner`: https://wiki.openstack.org/wiki/Nova/BugTriage#Step_2:_Triage_Tagged_Bugs nova-13.0.0/doc/source/threading.rst0000664000567000056710000000473212701407773020461 0ustar jenkinsjenkins00000000000000Threading model =============== All OpenStack services use *green thread* model of threading, implemented through using the Python `eventlet `_ and `greenlet `_ libraries. Green threads use a cooperative model of threading: thread context switches can only occur when specific eventlet or greenlet library calls are made (e.g., sleep, certain I/O calls). From the operating system's point of view, each OpenStack service runs in a single thread. The use of green threads reduces the likelihood of race conditions, but does not completely eliminate them. In some cases, you may need to use the ``@lockutils.synchronized(...)`` decorator to avoid races. In addition, since there is only one operating system thread, a call that blocks that main thread will block the entire process. Yielding the thread in long-running tasks ----------------------------------------- If a code path takes a long time to execute and does not contain any methods that trigger an eventlet context switch, the long-running thread will block any pending threads. 
This scenario can be avoided by adding calls to the eventlet sleep method in the long-running code path. The sleep call will trigger a context switch if there are pending threads, and using an argument of 0 will avoid introducing delays in the case that there is only a single green thread:: from eventlet import greenthread ... greenthread.sleep(0) In current code, time.sleep(0)does the same thing as greenthread.sleep(0) if time module is patched through eventlet.monkey_patch(). To be explicit, we recommend contributors use ``greenthread.sleep()`` instead of ``time.sleep()``. MySQL access and eventlet ------------------------- Queries to the MySQL database will block the main thread of a service. This is because OpenStack services use an external C library for accessing the MySQL database. Since eventlet cannot use monkey-patching to intercept blocking calls in a C library, the resulting database query blocks the thread. The Diablo release contained a thread-pooling implementation that did not block, but this implementation resulted in a `bug`_ and was removed. See this `mailing list thread`_ for a discussion of this issue, including a discussion of the `impact on performance`_. .. _bug: https://bugs.launchpad.net/nova/+bug/838581 .. _mailing list thread: https://lists.launchpad.net/openstack/msg08118.html .. _impact on performance: https://lists.launchpad.net/openstack/msg08217.html nova-13.0.0/doc/source/cells.rst0000664000567000056710000002051012701407773017606 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. ======= Cells ======= Cells V1 ======== Historically, Nova has depended on a single logical database and message queue that all nodes depend on for communication and data persistence. This becomes an issue for deployers as scaling and providing fault tolerance for these systems is difficult. We have an experimental feature in Nova called "cells", hereafter referred to as "cells v1", which is used by some large deployments to partition compute nodes into smaller groups, coupled with a database and queue. This seems to be a well-liked and easy-to-understand arrangement of resources, but the implementation of it has issues for maintenance and correctness. See `Comparison with Cells V1`_ for more detail. Status ~~~~~~ Cells v1 is considered experimental and receives much less testing than the rest of Nova. For example, there is no job for testing cells v1 with Neutron. The priority for the core team is implementation of and migration to cells v2. Because of this, there are a few restrictions placed on cells v1: #. Cells v1 is in feature freeze. This means no new feature proposals for cells v1 will be accepted by the core team, which includes but is not limited to API parity, e.g. supporting virtual interface attach/detach with Neutron. #. Latent bugs caused by the cells v1 design will not be fixed, e.g. `bug 1489581 `_. So if new tests are added to Tempest which trigger a latent bug in cells v1 it may not be fixed. However, regressions in working function should be tracked with bugs and fixed. **Suffice it to say, new deployments of cells v1 are not encouraged.** The restrictions above are basically meant to prioritize effort and focus on getting cells v2 completed, and feature requests and hard to fix latent bugs detract from that effort. Further discussion on this can be found in the `2015/11/12 Nova meeting minutes `_. 
There are no plans to remove Cells V1 until V2 is usable by existing deployments and there is a migration path. Cells V2 ======== Manifesto ~~~~~~~~~ Proposal -------- Right now, when a request hits the Nova API for a particular instance, the instance information is fetched from the database, which contains the hostname of the compute node on which the instance currently lives. If the request needs to take action on the instance (which is most of them), the hostname is used to calculate the name of a queue, and a message is written there which finds its way to the proper compute node. The meat of this proposal is changing the above hostname lookup into two parts that yield three pieces of information instead of one. Basically, instead of merely looking up the *name* of the compute node on which an instance lives, we will also obtain database and queue connection information. Thus, when asked to take action on instance $foo, we will: 1. Lookup the three-tuple of (database, queue, hostname) for that instance 2. Connect to that database and fetch the instance record 3. Connect to the queue and send the message to the proper hostname queue The above differs from the current organization in two ways. First, we need to do two database lookups before we know where the instance lives. Second, we need to demand-connect to the appropriate database and queue. Both of these have performance implications, but we believe we can mitigate the impacts through the use of things like a memcache of instance mapping information and pooling of connections to database and queue systems. The number of cells will always be much smaller than the number of instances. There are availability implications with this change since something like a 'nova list' which might query multiple cells could end up with a partial result if there is a database failure in a cell. 
A database failure within a cell would cause larger issues than a partial list result so the expectation is that it would be addressed quickly and cellsv2 will handle it by indicating in the response that the data may not be complete. Since this is very similar to what we have with current cells, in terms of organization of resources, we have decided to call this "cellsv2" for disambiguation. After this work is complete there will no longer be a "no cells" deployment. The default installation of Nova will be a single cell setup. Benefits -------- The benefits of this new organization are: * Native sharding of the database and queue as a first-class-feature in nova. All of the code paths will go through the lookup procedure and thus we won't have the same feature parity issues as we do with current cells. * No high-level replication of all the cell databases at the top. The API will need a database of its own for things like the instance index, but it will not need to replicate all the data at the top level. * It draws a clear line between global and local data elements. Things like flavors and keypairs are clearly global concepts that need only live at the top level. Providing this separation allows compute nodes to become even more stateless and insulated from things like deleted/changed global data. * Existing non-cells users will suddenly gain the ability to spawn a new "cell" from their existing deployment without changing their architecture. Simply adding information about the new database and queue systems to the new index will allow them to consume those resources. * Existing cells users will need to fill out the cells mapping index, shutdown their existing cells synchronization service, and ultimately clean up their top level database. However, since the high-level organization is not substantially different, they will not have to re-architect their systems to move to cellsv2. 
* Adding new sets of hosts as a new "cell" allows them to be plugged into a deployment and tested before allowing builds to be scheduled to them. Comparison with Cells V1 ------------------------ In reality, the proposed organization is nearly the same as what we currently have in cells today. A cell mostly consists of a database, queue, and set of compute nodes. The primary difference is that current cells require a nova-cells service that synchronizes information up and down from the top level to the child cell. Additionally, there are alternate code paths in compute/api.py which handle routing messages to cells instead of directly down to a compute host. Both of these differences are relevant to why we have a hard time achieving feature and test parity with regular nova (because many things take an alternate path with cells) and why it's hard to understand what is going on (all the extra synchronization of data). The new proposed cellsv2 organization avoids both of these problems by letting things live where they should, teaching nova to natively find the right db, queue, and compute node to handle a given request. Database split ~~~~~~~~~~~~~~ As mentioned above there is a split between global data and data that is local to a cell. The following is a breakdown of what data can uncontroversially considered global versus local to a cell. Missing data will be filled in as consensus is reached on the data that is more difficult to cleanly place. The missing data is mostly concerned with scheduling and networking. 
Global (API-level) Tables ------------------------- instance_types instance_type_projects instance_type_extra_specs quotas project_user_quotas quota_classes quota_usages security_groups security_group_rules security_group_default_rules provider_fw_rules key_pairs migrations networks tags Cell-level Tables ----------------- instances instance_info_caches instance_extra instance_metadata instance_system_metadata instance_faults instance_actions instance_actions_events instance_id_mappings pci_devices block_device_mapping virtual_interfaces nova-13.0.0/doc/source/project_scope.rst0000664000567000056710000003205712701407773021354 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Scope of the Nova project ========================== Nova is focusing on doing an awesome job of its core mission. This document aims to clarify that core mission. This is a living document to help record where we agree about what Nova should and should not be doing, and why. Please treat this as a discussion of interesting, and hopefully useful, examples. It is not intended to be an exhaustive policy statement. Mission -------- Our mission statement starts with: To implement services and associated libraries to provide massively scalable, on demand, self service access to compute resources. Our official mission statement also includes the following examples of compute resources: bare metal, virtual machines, and containers. 
For the full official mission statement see: http://governance.openstack.org/reference/projects/nova.html#mission This document aims to help clarify what the mission statement means. Compute Resources ------------------ Nova is all about access to compute resources. This section looks at the types of compute resource Nova works with. Virtual Servers **************** Nova was originally focused purely on providing access to virtual servers running on a variety of different hypervisors. The majority of users use Nova only to provide access to virtual servers from a single hypervisor, however, its possible to have a Nova deployment include multiple different types of hypervisors, while at the same time offering containers and bare metal servers. Containers *********** The Nova API is not a good fit for a lot of container use cases. The Magnum project intends to deliver a good container experience built on top of Nova. Nova allows you to use containers in a similar way to how you would use on demand virtual machines. We want to maintain this distinction, so we maintain the integrity and usefulness of the existing Nova API. For example, Nova is not designed to spin up new containers for every apache request, nor do we plan to control what goes on inside containers. They get the same metadata provided to them as virtual machines, to do with as they see fit. Bare Metal Servers ******************* Ironic project has been pioneering the idea of treating physical machines in a similar way to on demand virtual machines. Nova's driver is able to allow a multi-tenant cloud style use of Ironic controlled resources. While currently there are operations that are a fundamental part of our virtual machine abstraction that are not currently available in ironic, such as attaching iSCSI volumes, it does not fundamentally change the semantics of our API, and as such is a suitable Nova driver. Moreover, it is expected that gap with shrink over time. 
Driver Parity ************** Our goal for the Nova API to provide a consistent abstraction to access on demand compute resources. We are not aiming to expose all features of all hypervisors. Where the details of the underlying hypervisor leak through our APIs, we have failed in this goal, and we must work towards better abstractions that are more interoperable. This is one reason why we put so much emphasis on the use of Tempest in third party CI systems. The key tenant of driver parity is that if a feature is supported in a driver, it must feel the same to users, as if they where using any of the other drivers that also support that feature. The exception is that, if possible for widely different performance characteristics, but the effect of that API call must be identical. Following on from that, should a feature only be added to one of the drivers, we must make every effort to ensure another driver could be implemented to match that behavior. Its important that drivers support enough features, so the API actually provides a consistent abstraction. For example, being unable to create a server or delete a server, would severely undermine that goal. In fact, Nova only ever manages resources it creates. Upgrades --------- Nova is widely used in production. As such we need to respect the needs of our existing users. At the same time we need evolve the current code base, including both adding and removing features. This section outlines how we expect people to upgrade, and what we do to help existing users that upgrade in the way we expect. Upgrade expectations ********************* Our upgrade plan is to concentrate on upgrades from N-1 to the Nth release. So for someone running juno, they would have to upgrade to kilo before upgrading to liberty. This is designed to balance the need for a smooth upgrade, against having to keep maintaining the compatibility code to make that upgrade possible. We talk about this approach as users consuming the stable branch. 
In addition, we also support users upgrading from the master branch, technically, between any two commits within the same release cycle. In certain cases, when crossing release boundaries, you must upgrade to the stable branch, before upgrading to the tip of master. This is to support those that are doing some level of "Continuous Deployment" from the tip of master into production. Many of the public cloud provides running OpenStack use this approach so they are able to get access to bug fixes and features they work on into production sooner. This becomes important when you consider reverting a commit that turns out to have been bad idea. We have to assume any public API change may have already been deployed into production, and as such cannot be reverted. In a similar way, a database migration may have been deployed. Any commit that will affect an upgrade gets the UpgradeImpact tag added to the commit message, so there is no requirement to wait for release notes. Don't break existing users **************************** As a community we are aiming towards a smooth upgrade process, where users must be unaware you have just upgraded your deployment, except that there might be additional feature available and improved stability and performance of some existing features. We don't ever want to remove features our users rely on. Sometimes we need to migrate users to a new implementation of that feature, which may require extra steps by the deployer, but the end users must be unaffected by such changes. However there are times when some features become a problem to maintain, and fall into disrepair. We aim to be honest with our users and highlight the issues we have, so we are in a position to find help to fix that situation. Ideally we are able to rework the feature so it can be maintained, but in some rare cases, the feature no longer works, is not tested, and no one is stepping forward to maintain that feature, the best option can be to remove that feature. 
When we remove features, we need to warn users by first marking those features as deprecated, before we finally remove the feature. The idea is to get feedback on how important the feature is to our user base. Where a feature is important we work with the whole community to find a path forward for those users. API Scope ---------- Nova aims to provide a highly interoperable and stable REST API for our users to get self-service access to compute resources. No more API Proxies ******************** Nova API current has some APIs that are now (in kilo) mostly just a proxy to other OpenStack services. If it were possible to remove a public API, these are some we might start with. As such, we don't want to add any more. The first example is the API that is a proxy to the Glance v1 API. As Glance moves to deprecate its v1 API, we need to translate calls from the old v1 API we expose, to Glance's v2 API. The next API to mention is the networking APIs, in particular the security groups API. If you are using nova-network, Nova is still the only way to perform these network operations. But if you use Neutron, security groups has a much richer Neutron API, and if you use both Nova API and Neutron API, the miss match can lead to some very unexpected results, in certain cases. Our intention is to avoid adding to the problems we already have in this area. No more Orchestration ********************** Nova is a low level infrastructure API. It is plumbing upon which richer ideas can be built. Heat and Magnum being great examples of that. While we have some APIs that could be considered orchestration, and we must continue to maintain those, we do not intend to add any more APIs that do orchestration. Third Party APIs ***************** Nova aims to focus on making a great API that is highly interoperable across all Nova deployments. We have historically done a very poor job of implementing and maintaining compatibility with third party APIs inside the Nova tree. 
As such, all new efforts should instead focus on external projects that provide third party compatibility on top of the Nova API. Where needed, we will work with those projects to extend the Nova API such that its possible to add that functionality on top of the Nova API. However, we do not intend to add API calls for those services to persist third party API specific information in the Nova database. Instead we want to focus on additions that enhance the existing Nova API. Scalability ------------ Our mission includes the text "massively scalable". Lets discuss what that means. Nova has three main axes of scale: Number of API requests, number of compute nodes and number of active instances. In many cases the number of compute nodes and active instances are so closely related, you rarely need to consider those separately. There are other items, such as the number of tenants, and the number of instances per tenant. But, again, these are very rarely the key scale issue. Its possible to have a small cloud with lots of requests for very short lived VMs, or a large cloud with lots of longer lived VMs. These need to scale out different components of the Nova system to reach their required level of scale. Ideally all Nova components are either scaled out to match the number of API requests and build requests, or scaled out to match the number of running servers. If we create components that have their load increased relative to both of these items, we can run into inefficiencies or resource contention. Although it is possible to make that work in some cases, this should always be considered. We intend Nova to be usable for both small and massive deployments. Where small involves 1-10 hypervisors and massive deployments are single regions with greater than 10,000 hypervisors. That should be seen as our current goal, not an upper limit. There are some features that would not scale well for either the small scale or the very large scale. 
Ideally we would not accept these features, but if there is a strong case to add such features, we must work hard to ensure you can run without that feature at the scale you are required to run. IaaS not Batch Processing -------------------------- Currently Nova focuses on providing on-demand compute resources in the style of classic Infrastructure-as-a-service clouds. A large pool of compute resources that people can consume in a self-service way. Nova is not currently optimized for dealing with a larger number of requests for compute resources compared with the amount of compute resources currently available. We generally assume that a level of spare capacity is maintained for future requests. This is needed for users who want to quickly scale out, and extra capacity becomes available again as users scale in. While spare capacity is also not required, we are not optimizing for a system that aims to run at 100% capacity at all times. As such our quota system is more focused on limiting the current level of resource usage, rather than ensuring a fair balance of resources between all incoming requests. This doesn't exclude adding features to support making a better use of spare capacity, such as "spot instances". There have been discussions around how to change Nova to work better for batch job processing. But the current focus is on how to layer such an abstraction on top of the basic primitives Nova currently provides, possibly adding additional APIs where that makes good sense. Should this turn out to be impractical, we may have to revise our approach. Deployment and Packaging ------------------------- Nova does not plan on creating its own packaging or deployment systems. Our CI infrastructure is powered by Devstack. This can also be used by developers to test their work on a full deployment of Nova. We do not develop any deployment or packaging for production deployments. 
Since Nova is widely adopted by many distributions and commercial products, we instead choose to work with all those parties to ensure they are able to effectively package and deploy Nova.
Patches and Reviews =================== Merging a patch requires a non-trivial amount of reviewer resources. As a patch author, you should try to offset the reviewer resources spent on your patch by reviewing other patches. If no one does this, the review team (cores and otherwise) become spread too thin. For review guidelines see: http://docs.openstack.org/infra/manual/developers.html#peer-review Reverts for Retrospective Vetos =============================== Sometimes our simple "2 +2s" approval policy will result in errors. These errors might be a bug that was missed, or equally importantly, it might be that other cores feel that there is a need for more discussion on the implementation of a given piece of code. Rather than `an enforced time-based solution`_ - for example, a patch couldn't be merged until it has been up for review for 3 days - we have chosen an honor-based system where core reviewers would not approve potentially contentious patches until the proposal had been sufficiently socialized and everyone had a chance to raise any concerns. Recognising that mistakes can happen, we also have a policy where contentious patches which were quickly approved should be reverted so that the discussion around the proposal can continue as if the patch had never been merged in the first place. In such a situation, the procedure is: 0. The commit to be reverted must not have been released. 1. The core team member who has a -2 worthy objection should propose a revert, stating the specific concerns that they feel need addressing. 2. Any subsequent patches depending on the to-be-reverted patch may need to be reverted also. 3. Other core team members should quickly approve the revert. No detailed debate should be needed at this point. A -2 vote on a revert is strongly discouraged, because it effectively blocks the right of cores approving the revert from -2 voting on the original patch. 4. 
The original patch submitter should re-submit the change, with a reference to the original patch and the revert. 5. The original reviewers of the patch should restore their votes and attempt to summarize their previous reasons for their votes. 6. The patch should not be re-approved until the concerns of the people proposing the revert are worked through. A mailing list discussion or design spec might be the best way to achieve this. .. _`an enforced time-based solution`: https://lists.launchpad.net/openstack/msg08574.html nova-13.0.0/doc/source/conductor.rst0000664000567000056710000000617212701407773020514 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Conductor as a place for orchestrating tasks ============================================ In addition to its roles as a database proxy and object backporter the conductor service also serves as a centralized place to manage the execution of workflows which involve the scheduler. Rebuild, resize/migrate, and building an instance are managed here. This was done in order to have a better separation of responsibilities between what compute nodes should handle and what the scheduler should handle, and to clean up the path of execution. Conductor was chosen because in order to query the scheduler in a synchronous manner it needed to happen after the API had returned a response otherwise API response times would increase. And changing the scheduler call from asynchronous to synchronous helped to clean up the code. 
To illustrate this the old process for building an instance was: * API receives request to build an instance. * API sends an RPC cast to the scheduler to pick a compute. * Scheduler sends an RPC cast to the compute to build the instance, which means the scheduler needs to be able to communicate with all computes. * If the build succeeds it stops here. * If the build fails then the compute decides if the max number of scheduler retries has been hit. If so the build stops there. * If the build should be rescheduled the compute sends an RPC cast to the scheduler in order to pick another compute. This was overly complicated and meant that the logic for scheduling/rescheduling was distributed throughout the code. The answer to this was to change to process to be the following: * API receives request to build an instance. * API sends an RPC cast to the conductor to build an instance. (or runs locally if conductor is configured to use local_mode) * Conductor sends an RPC call to the scheduler to pick a compute and waits for the response. If there is a scheduler fail it stops the build at the conductor. * Conductor sends an RPC cast to the compute to build the instance. * If the build succeeds it stops here. * If the build fails then compute sends an RPC cast to conductor to build an instance. This is the same RPC message that was sent by the API. This new process means the scheduler only deals with scheduling, the compute only deals with building an instance, and the conductor manages the workflow. The code is now cleaner in the scheduler and computes. The resize/migrate process has not yet been fully converted to a style to take advantage of what conductor can provide so expect that this will change over time. nova-13.0.0/doc/source/support-matrix.ini0000664000567000056710000011724412701410011021457 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # # # ========================================= # Nova Hypervisor Feature Capability Matrix # ========================================= # # This obsoletes the information previously at # # https://wiki.openstack.org/wiki/HypervisorSupportMatrix # # This file contains a specification of what feature capabilities each # hypervisor driver in Nova is able to support. Feature capabilities include # what API operations are supported, what storage / networking features can be # used and what aspects of the guest machine can be configured. The capabilities # can be considered to be structured into nested groups, but in this file they # have been flattened for ease of representation. The section names represent # the group structure. At the top level there are the following groups defined # # - operation - public API operations # - storage - host storage configuration options # - networking - host networking configuration options # - guest - guest hardware configuration options # # When considering which capabilities should be marked as mandatory, # consider the general guiding principles listed in the support-matrix.rst # file # # The 'status' field takes possible values # # - mandatory - unconditionally required to be implemented # - optional - optional to support, nice to have # - choice(group) - at least one of the options within the named group # must be implemented # - conditional(cond) - required, if the referenced condition is met. 
# # The value against each 'driver-impl-XXXX' entry refers to the level # of the implementation of the feature in that driver # # - complete - fully implemented, expected to work at all times # - partial - implemented, but with caveats about when it will work # eg some configurations or hardware or guest OS may not # support it # - missing - not implemented at all # # In the case of the driver being marked as 'partial', then # 'driver-notes-XXX' entry should be used to explain the caveats # around the implementation. # # The 'cli' field takes a list of nova client commands, separated by semicolon. # These CLi commands are related to that feature. # Example: # cli=nova list;nova show # [targets] # List of driver impls we are going to record info for later # This list only covers drivers that are in the Nova source # tree. Out of tree drivers should maintain their own equivalent # document, and merge it with this when their code merges into # Nova core. driver-impl-xenserver=XenServer driver-impl-libvirt-kvm-x86=Libvirt KVM (x86) driver-impl-libvirt-kvm-ppc64=Libvirt KVM (ppc64) driver-impl-libvirt-kvm-s390x=Libvirt KVM (s390x) driver-impl-libvirt-qemu-x86=Libvirt QEMU (x86) driver-impl-libvirt-lxc=Libvirt LXC driver-impl-libvirt-xen=Libvirt Xen driver-impl-libvirt-vz-vm=Libvirt Virtuozzo VM driver-impl-libvirt-vz-ct=Libvirt Virtuozzo CT driver-impl-vmware=VMware vCenter driver-impl-hyperv=Hyper-V driver-impl-ironic=Ironic [operation.attach-volume] title=Attach block volume to instance status=optional notes=The attach volume operation provides a means to hotplug additional block storage to a running instance. This allows storage capabilities to be expanded without interruption of service. In a cloud model it would be more typical to just spin up a new instance with large storage, so the ability to hotplug extra storage is for those cases where the instance is considered to be more of a pet than cattle. 
Therefore this operation is not considered to be mandatory to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [operation.detach-volume] title=Detach block volume from instance status=optional notes=See notes for attach volume operation. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [operation.maintenance-mode] title=Set the host in a maintenance mode status=optional notes=This operation allows a host to be placed into maintenance mode, automatically triggering migration of any running instances to an alternative host and preventing new instances from being launched. This is not considered to be a mandatory operation to support. The CLI command is "nova host-update ". The driver methods to implement are "host_maintenance_mode" and "set_host_enabled". 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=missing driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=missing driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.evacuate] title=Evacuate instances from a host status=optional notes=A possible failure scenario in a cloud environment is the outage of one of the compute nodes. In such a case the instances of the down host can be evacuated to another host. It is assumed that the old host is unlikely ever to be powered back on, otherwise the evacuation attempt will be rejected. When the instances get moved to the new host, their volumes get re-attached and the locally stored data is dropped. That happens in the same way as a rebuild. This is not considered to be a mandatory operation to support. cli=nova evacuate ;nova host-evacuate driver-impl-xenserver=unknown driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=unknown driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=unknown driver-impl-libvirt-lxc=unknown driver-impl-libvirt-xen=unknown driver-impl-vmware=unknown driver-impl-hyperv=unknown driver-impl-ironic=unknown driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.get-guest-info] title=Guest instance status status=mandatory notes=Provides a quick report on information about the guest instance, including the power state, memory allocation, CPU allocation, number of vCPUs and cummulative CPU execution time. As well as being informational, the power state is used by the compute manager for tracking changes in guests. Therefore this operation is considered mandatory to support. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.get-host-info] title=Guest host status status=optional notes=Unclear what this refers to cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.live-migrate] title=Live migrate instance across hosts status=optional notes=Live migration provides a way to move an instance off one compute host, to another compute host. Administrators may use this to evacuate instances from a host that needs to undergo maintenance tasks, though of course this may not help if the host is already suffering a failure. In general instances are considered cattle rather than pets, so it is expected that an instance is liable to be killed if host maintenance is required. It is technically challenging for some hypervisors to provide support for the live migration operation, particularly those built on the container based virtualization. Therefore this operation is not considered mandatory to support. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=missing driver-notes-vmware=https://bugs.launchpad.net/nova/+bug/1192192 driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.launch] title=Launch instance status=mandatory notes=Importing pre-existing running virtual machines on a host is considered out of scope of the cloud paradigm. Therefore this operation is mandatory to support in drivers. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.pause] title=Stop instance CPUs (pause) status=optional notes=Stopping an instances CPUs can be thought of as roughly equivalent to suspend-to-RAM. The instance is still present in memory, but execution has stopped. The problem, however, is that there is no mechanism to inform the guest OS that this takes place, so upon unpausing, its clocks will no longer report correct time. For this reason hypervisor vendors generally discourage use of this feature and some do not even implement it. Therefore this operation is considered optional to support in drivers. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=missing driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [operation.reboot] title=Reboot instance status=optional notes=It is reasonable for a guest OS administrator to trigger a graceful reboot from inside the instance. A host initiated graceful reboot requires guest co-operation and a non-graceful reboot can be achieved by a combination of stop+start. Therefore this operation is considered optional. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.rescue] title=Rescue instance status=optional notes=The rescue operation starts an instance in a special configuration whereby it is booted from an special root disk image. The goal is to allow an administrator to recover the state of a broken virtual machine. In general the cloud model considers instances to be cattle, so if an instance breaks the general expectation is that it be thrown away and a new instance created. Therefore this operation is considered optional to support in drivers. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.resize] title=Resize instance status=optional notes=The resize operation allows the user to change a running instance to match the size of a different flavor from the one it was initially launched with. There are many different flavor attributes that potentially need to be updated. In general it is technically challenging for a hypervisor to support the alteration of all relevant config settings for a running instance. Therefore this operation is considered optional to support in drivers. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=partial driver-notes-ironic=Only certain ironic drivers support this driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.resume] title=Restore instance status=optional notes=See notes for the suspend operation cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.service-control.wtf.com] title=Service control status=optional notes=Something something, dark side, 
something something. Hard to claim this is mandatory when no one seems to know what "Service control" refers to in the context of virt drivers. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=complete driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.set-admin-password] title=Set instance admin password status=optional notes=Provides a mechanism to re(set) the password of the administrator account inside the instance operating system. This requires that the hypervisor has a way to communicate with the running guest operating system. Given the wide range of operating systems in existence it is unreasonable to expect this to be practical in the general case. The configdrive and metadata service both provide a mechanism for setting the administrator password at initial boot time. In the case where this operation were not available, the administrator would simply have to login to the guest and change the password in the normal manner, so this is just a convenient optimization. Therefore this operation is not considered mandatory for drivers to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-notes-libvirt-kvm-x86=Requires libvirt>=1.2.16 and hw_qemu_guest_agent. driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=complete driver-notes-libvirt-qemu-x86=Requires libvirt>=1.2.16 and hw_qemu_guest_agent. 
driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.snapshot] title=Save snapshot of instance disk status=optional notes=The snapshot operation allows the current state of the instance root disk to be saved and uploaded back into the glance image repository. The instance can later be booted again using this saved image. This is in effect making the ephemeral instance root disk into a semi-persistent storage, in so much as it is preserved even though the guest is no longer running. In general though, the expectation is that the root disks are ephemeral so the ability to take a snapshot cannot be assumed. Therefore this operation is not considered mandatory to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=partial driver-notes-libvirt-xen=Only cold snapshots (pause + snapshot) supported driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.suspend] title=Suspend instance status=optional notes=Suspending an instance can be thought of as roughly equivalent to suspend-to-disk. The instance no longer consumes any RAM or CPUs, with its live running state having been preserved in a file on disk. It can later be restored, at which point it should continue execution where it left off. As with stopping instance CPUs, it suffers from the fact that the guest OS will typically be left with a clock that is no longer telling correct time. For container based virtualization solutions, this operation is particularly technically challenging to implement and is an area of active research. 
This operation tends to make more sense when thinking of instances as pets, rather than cattle, since with cattle it would be simpler to just terminate the instance instead of suspending. Therefore this operation is considered optional to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.swap-volume] title=Swap block volumes status=optional notes=The swap volume operation is a mechanism for changing running instance so that its attached volume(s) are backed by different storage in the host. An alternative to this would be to simply terminate the existing instance and spawn a new instance with the new storage. In other words this operation is primarily targeted towards the pet use case rather than cattle. Therefore this is considered optional to support. cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [operation.terminate] title=Shutdown instance status=mandatory notes=The ability to terminate a virtual machine is required in order for a cloud user to stop utilizing resources and thus avoid indefinitely ongoing billing. Therefore this operation is mandatory to support in drivers. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-notes-libvirt-lxc=Fails in latest Ubuntu Trusty kernel from security repository (3.13.0-76-generic), but works in upstream 3.13.x kernels as well as default Ubuntu Trusty latest kernel (3.13.0-58-generic). driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.trigger-crash-dump] title=Trigger crash dump status=optional notes=The trigger crash dump operation is a mechanism for triggering a crash dump in an instance. The feature is typically implemented by injecting an NMI (Non-maskable Interrupt) into the instance. It provides a means to dump the production memory image as a dump file which is useful for users. Therefore this operation is considered optional to support. 
cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.unpause] title=Resume instance CPUs (unpause) status=optional notes=See notes for the "Stop instance CPUs" operation cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=missing driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [guest.disk.autoconfigure.wtf.com] title=Auto configure disk status=optional notes=something something, dark side, something something. Unclear just what this is about. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=missing driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=missing driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [guest.disk.rate-limit] title=Instance disk I/O limits status=optional notes=The ability to set rate limits on virtual disks allows for greater performance isolation between instances running on the same host storage. It is valid to delegate scheduling of I/O operations to the hypervisor with its default settings, instead of doing fine grained tuning. Therefore this is not considered to be an mandatory configuration to support. 
cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [guest.setup.configdrive] title=Config drive support status=choice(guest.setup) notes=The config drive provides an information channel into the guest operating system, to enable configuration of the administrator password, file injection, registration of SSH keys, etc. Since cloud images typically ship with all login methods locked, a mechanism to set the administrator password of keys is required to get login access. Alternatives include the metadata service and disk injection. At least one of the guest setup mechanisms is required to be supported by drivers, in order to enable login access. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [guest.setup.inject.file] title=Inject files into disk image status=optional notes=This allows for the end user to provide data for multiple files to be injected into the root filesystem before an instance is booted. This requires that the compute node understand the format of the filesystem and any partitioning scheme it might use on the block device. This is a non-trivial problem considering the vast number of filesystems in existence. The problem of injecting files to a guest OS is better solved by obtaining via the metadata service or config drive. 
Therefore this operation is considered optional to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [guest.setup.inject.networking] title=Inject guest networking config status=optional notes=This allows for static networking configuration (IP address, netmask, gateway and routes) to be injected directly into the root filesystem before an instance is booted. This requires that the compute node understand how networking is configured in the guest OS which is a non-trivial problem considering the vast number of operating system types. The problem of configuring networking is better solved by DHCP or by obtaining static config via the metadata service or config drive. Therefore this operation is considered optional to support. cli= driver-impl-xenserver=partial driver-notes-xenserver=Only for Debian derived guests driver-impl-libvirt-kvm-x86=partial driver-notes-libvirt-kvm-x86=Only for Debian derived guests driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=partial driver-notes-libvirt-qemu-x86=Only for Debian derived guests driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=partial driver-notes-vmware=requires vmware tools installed driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [console.rdp] title=Remote desktop over RDP status=choice(console) notes=This allows the administrator to interact with the graphical console of the guest OS via RDP. 
This provides a way to see boot up messages and login to the instance when networking configuration has failed, thus preventing a network based login. Some operating systems may prefer to emit messages via the serial console for easier consumption. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=missing driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=missing driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [console.serial.log] title=View serial console logs status=choice(console) notes=This allows the administrator to query the logs of data emitted by the guest OS on its virtualized serial port. For UNIX guests this typically includes all boot up messages and so is useful for diagnosing problems when an instance fails to successfully boot. Not all guest operating systems will be able to emit boot information on a serial console, others may only support graphical consoles. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [console.serial.interactive] title=Remote interactive serial console status=choice(console) notes=This allows the administrator to interact with the serial console of the guest OS. This provides a way to see boot up messages and login to the instance when networking configuration has failed, thus preventing a network based login. Not all guest operating systems will be able to emit boot information on a serial console, others may only support graphical consoles. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. This feature was introduced in the Juno release with blueprint https://blueprints.launchpad.net/nova/+spec/serial-ports cli=nova get-serial-console driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=unknown driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=unknown driver-impl-libvirt-lxc=unknown driver-impl-libvirt-xen=unknown driver-impl-vmware=missing driver-impl-hyperv=missing driver-notes-hyperv=Will be complete when this review is merged: https://review.openstack.org/#/c/145004/ driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [console.spice] title=Remote desktop over SPICE status=choice(console) notes=This allows the administrator to interact with the graphical console of the guest OS via SPICE. This provides a way to see boot up messages and login to the instance when networking configuration has failed, thus preventing a network based login. 
Some operating systems may prefer to emit messages via the serial console for easier consumption. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [console.vnc] title=Remote desktop over VNC status=choice(console) notes=This allows the administrator to interact with the graphical console of the guest OS via VNC. This provides a way to see boot up messages and login to the instance when networking configuration has failed, thus preventing a network based login. Some operating systems may prefer to emit messages via the serial console for easier consumption. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [storage.block] title=Block storage support status=optional notes=Block storage provides instances with direct attached virtual disks that can be used for persistent storage of data. As an alternative to direct attached disks, an instance may choose to use network based persistent storage. 
OpenStack provides object storage via the Swift service, or a traditional filesystem such as as NFS/GlusterFS may be used. Some types of instances may not require persistent storage at all, being simple transaction processing systems reading requests & sending results to and from the network. Therefore support for this configuration is not considered mandatory for drivers to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=partial driver-impl-libvirt-vz-ct=missing [storage.block.backend.fibrechannel] title=Block storage over fibre channel status=optional notes=To maximise performance of the block storage, it may be desirable to directly access fibre channel LUNs from the underlying storage technology on the compute hosts. Since this is just a performance optimization of the I/O path it is not considered mandatory to support. cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [storage.block.backend.iscsi] title=Block storage over iSCSI status=condition(storage.block==complete) notes=If the driver wishes to support block storage, it is common to provide an iSCSI based backend to access the storage from cinder. This isolates the compute layer for knowledge of the specific storage technology used by Cinder, albeit at a potential performance cost due to the longer I/O path involved. 
If the driver chooses to support block storage, then this is considered mandatory to support, otherwise it is considered optional. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [storage.block.backend.iscsi.auth.chap] title=CHAP authentication for iSCSI status=optional notes=If accessing the cinder iSCSI service over an untrusted LAN it is desirable to be able to enable authentication for the iSCSI protocol. CHAP is the commonly used authentication protocol for iSCSI. This is not considered mandatory to support. (?) cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [storage.image] title=Image storage support status=mandatory notes=This refers to the ability to boot an instance from an image stored in the glance image repository. Without this feature it would not be possible to bootstrap from a clean environment, since there would be no way to get block volumes populated and reliance on external PXE servers is out of scope. Therefore this is considered a mandatory storage feature to support. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [networking.firewallrules] title=Network firewall rules status=optional notes=Unclear how this is different from security groups cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [networking.routing] title=Network routing status=optional notes=Unclear what this refers to cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=missing driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [networking.securitygroups] title=Network security groups status=optional notes=The security groups feature provides a way to define rules to isolate the network traffic of different instances running on a compute host. This would prevent actions such as MAC and IP address spoofing, or the ability to setup rogue DHCP servers. In a private cloud environment this may be considered to be a superfluous requirement. Thereforce this is considered to be an optional configuration to support. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=partial driver-notes-vmware=This is supported by the Neutron NSX plugins driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [networking.topology.flat] title=Flat networking status=choice(networking.topology) notes=Provide network connectivity to guests using a flat topology across all compute nodes. At least one of the networking configurations is mandatory to support in the drivers. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [networking.topology.vlan] title=VLAN networking status=choice(networking.topology) notes=Provide network connectivity to guests using VLANs to define the topology. At least one of the networking configurations is mandatory to support in the drivers. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.uefi-boot] title=uefi boot status=optional notes=This allows users to boot a guest with uefi firmware. 
cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=partial driver-notes-ironic=depends on hardware support driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing nova-13.0.0/doc/source/_ga/0000775000567000056710000000000012701410205016462 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/source/_ga/layout.html0000664000567000056710000000110712701407773020704 0ustar jenkinsjenkins00000000000000{% extends "!layout.html" %} {% block footer %} {{ super() }} {% endblock %} nova-13.0.0/doc/source/block_device_mapping.rst0000664000567000056710000002451212701407773022636 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Block Device Mapping in Nova ============================ Nova has a concept of block devices that can be exposed to cloud instances. There are several types of block devices an instance can have (we will go into more details about this later in this document), and which ones are available depends on a particular deployment and the usage limitations set for tenants and users. Block device mapping is a way to organize and keep data about all of the block devices an instance has. When we talk about block device mapping, we usually refer to one of two things 1. 
API/CLI structure and syntax for specifying block devices for an instance boot request 2. The data structure internal to Nova that is used for recording and keeping, which is ultimately persisted in the block_device_mapping table. However, Nova internally has several "slightly" different formats for representing the same data. All of them are documented in the code and/or presented by a distinct set of classes, but not knowing that they exist might trip up people reading the code. So in addition to BlockDeviceMapping [1]_ objects that mirror the database schema, we have: 2.1 The API format - this is the set of raw key-value pairs received from the API client, and is almost immediately transformed into the object; however, some validations are done using this format. We will refer to this format as the 'API BDMs' from now on. 2.2 The virt driver format - this is the format defined by the classes in :mod:`nova.virt.block_device`. This format is used and expected by the code in the various virt drivers. These classes, in addition to exposing a different format (mimicking the Python dict interface), also provide a place to bundle some functionality common to certain types of block devices (for example attaching volumes which has to interact with both Cinder and the virt driver code). We will refer to this format as 'Driver BDMs' from now on. Data format and its history ---------------------------- In the early days of Nova, block device mapping general structure closely mirrored that of the EC2 API. During the Havana release of Nova, block device handling code, and in turn the block device mapping structure, had work done on improving the generality and usefulness. These improvements included exposing additional details and features in the API. In order to facilitate this, a new extension was added to the v2 API called `BlockDeviceMappingV2Boot` [2]_, that added an additional `block_device_mapping_v2` field to the instance boot API request. 
Block device mapping v1 (aka legacy) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This was the original format that supported only cinder volumes (similar to how EC2 block devices support only EBS volumes). Every entry was keyed by device name (we will discuss why this was problematic in its own section later on this page), and would accept only: * UUID of the Cinder volume or snapshot * Type field - used only to distinguish between volumes and Cinder volume snapshots * Optional size field * Optional `delete_on_termination` flag While all of Nova internal code only uses and stores the new data structure, we still need to handle API requests that use the legacy format. This is handled by the Nova API service on every request. As we will see later, since block device mapping information can also be stored in the image metadata in Glance, this is another place where we need to handle the v1 format. The code to handle legacy conversions is part of the :mod:`nova.block_device` module. Intermezzo - problem with device names ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Using device names as the primary per-instance identifier, and exposing them in the API, is problematic for Nova mostly because several hypervisors Nova supports with its drivers can't guarantee that the device names the guest OS assigns are the ones the user requested from Nova. Exposing such a detail in the public API of Nova is obviously not ideal, but it needed to stay for backwards compatibility. It is also required for some (slightly obscure) features around overloading a block device in a Glance image when booting an instance [3]_. The plan for fixing this was to allow users to not specify the device name of a block device, and Nova will determine it (with the help of the virt driver), so that it can still be discovered through the API and used when necessary, like for the features mentioned above (and preferably only then). 
Another use for specifying the device name was to allow the "boot from volume" functionality, by specifying a device name that matches the root device name for the instance (usually `/dev/vda`). Currently (mid Liberty) users are discouraged from specifying device names for all calls requiring or allowing block device mapping, except when trying to override the image block device mapping on instance boot, and it will likely remain like that in the future. Libvirt device driver will outright override any device names passed with its own values. Block device mapping v2 ^^^^^^^^^^^^^^^^^^^^^^^ New format was introduced in an attempt to solve issues with the original block device mapping format discussed above, and also to allow for more flexibility and addition of features that were not possible with the simple format we had. New block device mapping is a list of dictionaries containing the following fields (in addition to the ones that were already there): * source_type - this can have one of the following values: * `image` * `volume` * `snapshot` * `blank` * dest_type - this can have one of the following values: * `local` * `volume` Combination of the above two fields would define what kind of block device the entry is referring to. We currently support the following combinations: * `image` -> `local` - this is only currently reserved for the entry referring to the Glance image that the instance is being booted with (it should also be marked as a boot device). It is also worth noting that an API request that specifies this, also has to provide the same Glance uuid as the `image_ref` parameter to the boot request (this is done for backwards compatibility and may be changed in the future). This functionality might be extended to specify additional Glance images to be attached to an instance after boot (similar to kernel/ramdisk images) but this functionality is not supported by any of the current drivers. 
* `volume` -> `volume` - this is just a Cinder volume to be attached to the instance. It can be marked as a boot device. * `snapshot` -> `volume` - this works exactly as passing `type=snap` does. It would create a volume from a Cinder volume snapshot and attach that volume to the instance. Can be marked bootable. * `image` -> `volume` - As one would imagine, this would download a Glance image to a cinder volume and attach it to an instance. Can also be marked as bootable. This is really only a shortcut for creating a volume out of an image before booting an instance with the newly created volume. * `blank` -> `volume` - Creates a blank Cinder volume and attaches it. This will also require the volume size to be set. * `blank` -> `local` - Depending on the guest_format field (see below), this will either mean an ephemeral blank disk on hypervisor local storage, or a swap disk (instances can have only one of those). * guest_format - Tells Nova how/if to format the device prior to attaching, should be only used with blank local images. Denotes a swap disk if the value is `swap`. * device_name - See the previous section for a more in depth explanation of this - currently best left empty (not specified that is), unless the user wants to override the existing device specified in the image metadata. In case of Libvirt, even when passed in with the purpose of overriding the existing image metadata, final set of device names for the instance may still get changed by the driver. * disk_bus and device_type - low level details that some hypervisors (currently only libvirt) may support. Some example disk_bus values can be: `ide`, `usb`, `virtio`, `scsi`, while device_type may be `disk`, `cdrom`, `floppy`, `lun`. This is not an exhaustive list as it depends on the virtualization driver, and may change as more support is added. Leaving these empty is the most common thing to do. 
* boot_index - Defines the order in which a hypervisor will try devices when attempting to boot the guest from storage. Each device which is capable of being used as boot device should be given a unique boot index, starting from 0 in ascending order. Some hypervisors may not support booting from multiple devices, so will only consider the device with boot index of 0. Some hypervisors will support booting from multiple devices, but only if they are of different types - eg a disk and CD-ROM. Setting a negative value or None indicates that the device should not be used for booting. The simplest usage is to set it to 0 for the boot device and leave it as None for any other devices. Nova will not allow mixing of two formats in a single request, and will do basic validation to make sure that the requested block device mapping is valid before accepting a boot request. .. [1] In addition to the BlockDeviceMapping Nova object, we also have the BlockDeviceDict class in :mod: `nova.block_device` module. This class handles transforming and validating the API BDM format. .. [2] This work predates API microversions and thus the only way to add it was by means of an API extension. .. [3] This is a feature that the EC2 API offers as well and has been in Nova for a long time, although it has been broken in several releases. More info can be found on `this bug ` nova-13.0.0/doc/source/testing/0000775000567000056710000000000012701410205017411 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/source/testing/libvirt-numa.rst0000664000567000056710000005225212701407773022602 0ustar jenkinsjenkins00000000000000 ================================================ Testing NUMA related hardware setup with libvirt ================================================ This page describes how to test the libvirt driver's handling of the NUMA placement, large page allocation and CPU pinning features. 
It relies on setting up a virtual machine as the test environment and requires support for nested virtualization since plain QEMU is not sufficiently functional. The virtual machine will itself be given NUMA topology, so it can then act as a virtual "host" for testing purposes. ------------------------------------------ Provisioning a virtual machine for testing ------------------------------------------ The entire test process will take place inside a large virtual machine running Fedora 21. The instructions should work for any other Linux distribution which includes libvirt >= 1.2.9 and QEMU >= 2.1.2 The tests will require support for nested KVM, which is not enabled by default on hypervisor hosts. It must be explicitly turned on in the host when loading the kvm-intel/kvm-amd kernel modules. On Intel hosts verify it with .. code-block:: bash # cat /sys/module/kvm_intel/parameters/nested N # rmmod kvm-intel # echo "options kvm-intel nested=y" > /etc/modprobe.d/dist.conf # modprobe kvm-intel # cat /sys/module/kvm_intel/parameters/nested Y While on AMD hosts verify it with .. code-block:: bash # cat /sys/module/kvm_amd/parameters/nested 0 # rmmod kvm-amd # echo "options kvm-amd nested=1" > /etc/modprobe.d/dist.conf # modprobe kvm-amd # cat /sys/module/kvm_amd/parameters/nested 1 The virt-install command below shows how to provision a basic Fedora 21 x86_64 guest with 8 virtual CPUs, 8 GB of RAM and 20 GB of disk space: .. 
code-block:: bash # cd /var/lib/libvirt/images # wget http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Alpha/Server/x86_64/iso/Fedora-Server-netinst-x86_64-21_Alpha.iso # virt-install \ --name f21x86_64 \ --ram 8000 \ --vcpus 8 \ --file /var/lib/libvirt/images/f21x86_64.img \ --file-size 20 --cdrom /var/lib/libvirt/images/Fedora-Server-netinst-x86_64-21_Alpha.iso \ --os-variant fedora20 When the virt-viewer application displays the installer, follow the defaults for the installation with a couple of exceptions * The automatic disk partition setup can be optionally tweaked to reduce the swap space allocated. No more than 500MB is required, free'ing up an extra 1.5 GB for the root disk. * Select "Minimal install" when asked for the installation type since a desktop environment is not required. * When creating a user account be sure to select the option "Make this user administrator" so it gets 'sudo' rights Once the installation process has completed, the virtual machine will reboot into the final operating system. It is now ready to deploy an OpenStack development environment. --------------------------------- Setting up a devstack environment --------------------------------- For later ease of use, copy your SSH public key into the virtual machine .. code-block:: bash # ssh-copy-id Now login to the virtual machine .. code-block:: bash # ssh We'll install devstack under $HOME/src/cloud/. .. code-block:: bash # mkdir -p $HOME/src/cloud # cd $HOME/src/cloud # chmod go+rx $HOME The Fedora minimal install does not contain git and only has the crude & old-fashioned "vi" editor. .. code-block:: bash # sudo yum -y install git emacs At this point a fairly standard devstack setup can be done. The config below is just an example that is convenient to use to place everything in $HOME instead of /opt/stack. Change the IP addresses to something appropriate for your environment of course .. 
code-block:: bash # git clone git://github.com/openstack-dev/devstack.git # cd devstack # cat >>local.conf < select numa_topology from compute_nodes; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "NUMATopology", | "nova_object.data": { | "cells": [{ | "nova_object.name": "NUMACell", | "nova_object.data": { | "cpu_usage": 0, | "memory_usage": 0, | "cpuset": [0, 1, 2, 3, 4, 5, 6, 7], | "pinned_cpus": [], | "siblings": [], | "memory": 7793, | "mempages": [ | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 987430, | "used": 0, | "size_kb": 4 | }, | }, | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 0, | "used": 0, | "size_kb": 2048 | }, | } | ], | "id": 0 | }, | }, | ] | }, | } +----------------------------------------------------------------------------+ Meanwhile, the guest instance should not have any NUMA configuration recorded .. code-block:: bash MariaDB [nova]> select numa_topology from instance_extra; +---------------+ | numa_topology | +---------------+ | NULL | +---------------+ ----------------------------------------------------- Reconfiguring the test instance to have NUMA topology ----------------------------------------------------- Now that devstack is proved operational, it is time to configure some NUMA topology for the test VM, so that it can be used to verify the OpenStack NUMA support. To do the changes, the VM instance that is running devstack must be shut down. .. code-block:: bash # sudo shutdown -h now And now back on the physical host edit the guest config as root .. code-block:: bash # sudo virsh edit f21x86_64 The first thing is to change the block to do passthrough of the host CPU. In particular this exposes the "SVM" or "VMX" feature bits to the guest so that "Nested KVM" can work. 
At the same time we want to define the NUMA topology of the guest. To make things interesting we're going to give the guest an asymmetric topology with 4 CPUS and 4 GBs of RAM in the first NUMA node and 2 CPUs and 2 GB of RAM in the second and third NUMA nodes. So modify the guest XML to include the following CPU XML .. code-block:: bash The guest can now be started again, and ssh back into it .. code-block:: bash # virsh start f21x86_64 ...wait for it to finish booting # ssh Before starting OpenStack services again, it is necessary to reconfigure Nova to enable the NUMA scheduler filter. The libvirt virtualization type must also be explicitly set to KVM, so that guests can take advantage of nested KVM. .. code-block:: bash # sudo emacs /etc/nova/nova.conf Set the following parameters: .. code-block:: bash [DEFAULT] scheduler_default_filters=RetryFilter, AvailabilityZoneFilter, RamFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter, NUMATopologyFilter [libvirt] virt_type = kvm With that done, OpenStack can be started again .. code-block:: bash # cd $HOME/src/cloud/devstack # ./rejoin-stack.sh The first thing is to check that the compute node picked up the new NUMA topology setup for the guest .. 
code-block:: bash # mysql -u root -p nova MariaDB [nova]> select numa_topology from compute_nodes; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "NUMATopology", | "nova_object.data": { | "cells": [{ | "nova_object.name": "NUMACell", | "nova_object.data": { | "cpu_usage": 0, | "memory_usage": 0, | "cpuset": [0, 1, 2, 3], | "pinned_cpus": [], | "siblings": [], | "memory": 3857, | "mempages": [ | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 987430, | "used": 0, | "size_kb": 4 | }, | }, | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 0, | "used": 0, | "size_kb": 2048 | }, | } | ], | "id": 0 | }, | }, | { | "nova_object.name": "NUMACell", | "nova_object.data": { | "cpu_usage": 0, | "memory_usage": 0, | "cpuset": [4, 5], | "pinned_cpus": [], | "siblings": [], | "memory": 1969, | "mempages": [ | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 504216, | "used": 0, | "size_kb": 4 | }, | }, | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 0, | "used": 0, | "size_kb": 2048 | }, | } | ], | "id": 1 | }, | }, | { | "nova_object.name": "NUMACell", | "nova_object.data": { | "cpu_usage": 0, | "memory_usage": 0, | "cpuset": [6, 7], | "pinned_cpus": [], | "siblings": [], | "memory": 1967, | "mempages": [ | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 503575, | "used": 0, | "size_kb": 4 | }, | }, | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 0, | "used": 0, | "size_kb": 2048 | }, | } | ], | "id": 2 | }, | } | ] | }, | } +----------------------------------------------------------------------------+ This indeed shows that there are now 3 NUMA nodes for the "host" machine, the first with 4 GB of RAM and 4 CPUs, and others with 2 GB 
of RAM and 2 CPUs each. ----------------------------------------------------- Testing instance boot with no NUMA topology requested ----------------------------------------------------- For the sake of backwards compatibility, if the NUMA filter is enabled, but the flavor/image does not have any NUMA settings requested, it should be assumed that the guest will have a single NUMA node. The guest should be locked to a single host NUMA node too. Boot a guest with the m1.tiny flavor to test this condition .. code-block:: bash # . openrc admin admin # nova boot --image cirros-0.3.2-x86_64-uec --flavor m1.tiny cirros1 Now look at the libvirt guest XML. It should show that the vCPUs are locked to pCPUs within a particular node. .. code-block:: bash # virsh -c qemu:///system list .... # virsh -c qemu:///system dumpxml instanceXXXXXX ... 1 ... This example shows that the guest has been locked to the 3rd NUMA node (which contains pCPUs 6 and 7). Note that there is no explicit NUMA topology listed in the guest XML. ------------------------------------------------ Testing instance boot with 1 NUMA cell requested ------------------------------------------------ Moving forward a little, explicitly tell Nova that the NUMA topology for the guest should have a single NUMA node. This should operate in an identical manner to the default behavior where no NUMA policy is set. To define the topology we will create a new flavor .. code-block:: bash # nova flavor-create m1.numa 999 1024 1 4 # nova flavor-key m1.numa set hw:numa_nodes=1 # nova flavor-show m1.numa Now boot the guest using this new flavor .. code-block:: bash # nova boot --image cirros-0.3.2-x86_64-uec --flavor m1.numa cirros2 Looking at the resulting guest XML from libvirt .. code-block:: bash # virsh -c qemu:///system dumpxml instanceXXXXXX ... 4 ... ... 
The XML shows: * Each guest CPU has been pinned to the physical CPUs associated with a particular NUMA node * The emulator threads have been pinned to the union of all physical CPUs in the host NUMA node that the guest is placed on * The guest has been given a virtual NUMA topology with a single node holding all RAM and CPUs * The guest NUMA node has been strictly pinned to a host NUMA node. As a further sanity test, check what Nova recorded for the instance in the database. This should match the information .. code-block:: bash MariaDB [nova]> select numa_topology from instance_extra; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "InstanceNUMATopology", | "nova_object.data": { | "instance_uuid": "4c2302fe-3f0f-46f1-9f3e-244011f6e03a", | "cells": [ | { | "nova_object.name": "InstanceNUMACell", | "nova_object.data": { | "cpu_topology": null, | "pagesize": null, | "cpuset": [ | 0, | 1, | 2, | 3 | ], | "memory": 1024, | "cpu_pinning_raw": null, | "id": 0 | }, | } | ] | }, | } +----------------------------------------------------------------------------+ ------------------------------------------------- Testing instance boot with 2 NUMA cells requested ------------------------------------------------- Now getting more advanced we tell Nova that the guest will have two NUMA nodes. To define the topology we will change the previously defined flavor .. code-block:: bash # nova flavor-key m1.numa set hw:numa_nodes=2 # nova flavor-show m1.numa Now boot the guest using this changed flavor .. code-block:: bash # nova boot --image cirros-0.3.2-x86_64-uec --flavor m1.numa cirros2 Looking at the resulting guest XML from libvirt .. code-block:: bash # virsh -c qemu:///system dumpxml instanceXXXXXX ... 4 ... ... 
The XML shows: * Each guest CPU has been pinned to the physical CPUs associated with particular NUMA nodes * The emulator threads have been pinned to the union of all physical CPUs in the host NUMA nodes that the guest is placed on * The guest has been given a virtual NUMA topology with two nodes, each holding half the RAM and CPUs * The guest NUMA nodes have been strictly pinned to different host NUMA node. As a further sanity test, check what Nova recorded for the instance in the database. This should match the information .. code-block:: bash MariaDB [nova]> select numa_topology from instance_extra; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "InstanceNUMATopology", | "nova_object.data": { | "instance_uuid": "a14fcd68-567e-4d71-aaa4-a12f23f16d14", | "cells": [ | { | "nova_object.name": "InstanceNUMACell", | "nova_object.data": { | "cpu_topology": null, | "pagesize": null, | "cpuset": [ | 0, | 1 | ], | "memory": 512, | "cpu_pinning_raw": null, | "id": 0 | }, | }, | { | "nova_object.name": "InstanceNUMACell", | "nova_object.data": { | "cpu_topology": null, | "pagesize": null, | "cpuset": [ | 2, | 3 | ], | "memory": 512, | "cpu_pinning_raw": null, | "id": 1 | }, | } | ] | }, | } | +----------------------------------------------------------------------------+ nova-13.0.0/doc/source/testing/serial-console.rst0000664000567000056710000000527012701407773023106 0ustar jenkinsjenkins00000000000000 ====================== Testing Serial Console ====================== The main aim of this feature is exposing an interactive web-based serial consoles through a web-socket proxy. This page describes how to test it from a devstack environment. 
--------------------------------- Setting up a devstack environment --------------------------------- For instructions on how to setup devstack with serial console support enabled see `this guide `_. --------------- Testing the API --------------- Starting a new instance. .. code-block:: bash # cd devstack && . openrc # nova boot --flavor 1 --image cirros-0.3.2-x86_64-uec cirros1 Nova provides a command `nova get-serial-console` which will returns a URL with a valid token to connect to the serial console of VMs. .. code-block:: bash # nova get-serial-console cirros1 +--------+-----------------------------------------------------------------+ | Type | Url | +--------+-----------------------------------------------------------------+ | serial | ws://127.0.0.1:6083/?token=5f7854b7-bf3a-41eb-857a-43fc33f0b1ec | +--------+-----------------------------------------------------------------+ Currently nova does not provide any client able to connect from an interactive console through a web-socket. A simple client for *test purpose* can be written with few lines of Python. .. code-block:: python # sudo easy_install ws4py || sudo pip install ws4py # cat >> client.py < cirros1 login nova-13.0.0/doc/source/man/0000775000567000056710000000000012701410205016507 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/source/man/index.rst0000664000567000056710000000240412701407773020370 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Command-line Utilities ====================== In this section you will find information on Nova's command line utilities. Reference --------- .. toctree:: :maxdepth: 1 nova-all nova-api-metadata nova-api-os-compute nova-api nova-cells nova-cert nova-compute nova-conductor nova-console nova-consoleauth nova-dhcpbridge nova-idmapshift nova-manage nova-network nova-novncproxy nova-rootwrap nova-scheduler nova-spicehtml5proxy nova-xvpvncproxy nova-serialproxy nova-13.0.0/doc/source/man/nova-novncproxy.rst0000664000567000056710000000151512701407773022451 0ustar jenkinsjenkins00000000000000=============== nova-novncproxy =============== -------------------------------------------------------- Websocket novnc Proxy for OpenStack Nova noVNC consoles. -------------------------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-novncproxy [options] DESCRIPTION =========== Websocket proxy that is compatible with OpenStack Nova noVNC consoles. 
OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-network.rst0000664000567000056710000000133412701407773021714 0ustar jenkinsjenkins00000000000000============= nova-network ============= --------------------- Nova Network Server --------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-network [options] DESCRIPTION =========== Nova Network is responsible for allocating IPs and setting up the network OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-scheduler.rst0000664000567000056710000000127612701407773022206 0ustar jenkinsjenkins00000000000000============== nova-scheduler ============== -------------- Nova Scheduler -------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-scheduler [options] DESCRIPTION =========== Nova Scheduler picks a compute node to run a VM instance. 
OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-xvpvncproxy.rst0000664000567000056710000000132612701407773022652 0ustar jenkinsjenkins00000000000000================ nova-xvpvncproxy ================ ----------------------------- XVP VNC Console Proxy Server ----------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-xvpvncproxy [options] DESCRIPTION =========== XVP VNC Console Proxy Server OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-conductor.rst0000664000567000056710000000135712701407773022230 0ustar jenkinsjenkins00000000000000============== nova-conductor ============== ----------------------------- Server for the Nova Conductor ----------------------------- :Author: openstack@lists.openstack.org :Date: 2012-11-16 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-conductor [options] DESCRIPTION =========== nova-conductor is a server daemon that serves the Nova Conductor service, which provides coordination and database query support for Nova. 
OPTIONS ======= **General options** FILES ===== * /etc/nova/nova.conf SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-cells.rst0000664000567000056710000000144312701407773021326 0ustar jenkinsjenkins00000000000000========== nova-cells ========== -------------------------------- Server for the Nova Cells -------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-cells [options] DESCRIPTION =========== Starts the nova-cells service. The nova-cells service handles communication between cells and selects cells for new instances. OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-cert.rst0000664000567000056710000000150612701407773021161 0ustar jenkinsjenkins00000000000000========== nova-cert ========== -------------------------------- Server for the Nova Cert -------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-cert [options] DESCRIPTION =========== nova-cert is a server daemon that serves the Nova Cert service for X509 certificates. Used to generate certificates for euca-bundle-image. Only needed for EC2 API. 
OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-idmapshift.rst0000664000567000056710000000610112701407773022350 0ustar jenkinsjenkins00000000000000=============== nova-idmapshift =============== ----------------------------------------- Tool used by Nova libvirt-lxc virt driver ----------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-idmapshift [options] path DESCRIPTION =========== nova-idmapshift is a tool that properly sets the ownership of a filesystem for use with linux user namespaces. This tool can only be used with linux lxc containers. When using user namespaces with linux lxc containers, the filesystem of the container must be owned by the targeted user and group ids being applied to that container. Otherwise, processes inside the container won't be able to access the filesystem. For example: nova-idmapshift -i -u 0:10000:2000 -g 0:10000:2000 path This command will idempotently shift `path` to proper ownership using the provided uid and gid mappings. When using the uid map string '0:10000:2000', this means that user ids inside the container between 0 and 1999 will map to user ids on the host between 10000 and 11999. Root (0) becomes 10000, user 1 becomes 10001, user 50 becomes 10050 and user 1999 becomes 11999. This means that files that are owned by root need to actually be owned by user 10000, and files owned by 50 need to be owned by 10050, and so on. nova-idmapshift will take the uid and gid strings used for user namespaces and properly set up the filesystem for use by those users. 
Uids and gids outside of provided ranges will be mapped to nobody-id (default is max uid/gid) so that they are inaccessible inside the container. OPTIONS ======= Positional arguments ~~~~~~~~~~~~~~~~~~~~ path Root path of the filesystem to be shifted Optional arguments ~~~~~~~~~~~~~~~~~~ -h, --help Show this help message and exit. -u USER_MAPS, --uid=USER_MAPS User ID mappings, in the form: [[guest-uid:host-uid:count],...] -g GROUP_MAPS, --gid=GROUP_MAPS Group ID mappings, in the form: [[guest-gid:host-gid:count],...] -n nobody-id, --nobody nobody-id ID to map all unmapped uid and gids to. Defaults to 65534. -i, --idempotent Shift operation will only be performed if filesystem appears unshifted. Defaults to false. -c, --confirm Will perform check on the filesystem: Returns 0 when filesystem appears shifted. Returns 1 when filesystem appears unshifted. Defaults to false. -d, --dry-run Print chown operations, but won't perform them. Defaults to false. -v, --verbose Print chown operations while performing them. Defaults to false. 
SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-all.rst0000664000567000056710000000141712701407773020775 0ustar jenkinsjenkins00000000000000========= nova-all ========= ----------------------------- Server for all Nova services ----------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-all [options] DESCRIPTION =========== nova-all is a server daemon that serves all Nova services, each in a separate greenthread OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/api-paste.ini * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-rootwrap.rst0000664000567000056710000000221512701407773022077 0ustar jenkinsjenkins00000000000000============= nova-rootwrap ============= ----------------------- Root wrapper for Nova ----------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-rootwrap [options] DESCRIPTION =========== Filters which commands nova is allowed to run as another user. To use this, you should set the following in nova.conf: rootwrap_config=/etc/nova/rootwrap.conf You also need to let the nova user run nova-rootwrap as root in sudoers: nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf * To make allowed commands node-specific, your packaging should only install {compute,network}.filters respectively on compute and network nodes (i.e. nova-api nodes should not have any of those files installed). 
OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-api-metadata.rst0000664000567000056710000000145312701407773022554 0ustar jenkinsjenkins00000000000000================= nova-api-metadata ================= --------------------------------- Server for the Nova Metadata API --------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-api-metadata [options] DESCRIPTION =========== nova-api-metadata is a server daemon that serves the Nova Metadata API OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/api-paste.ini * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-compute.rst0000664000567000056710000000162112701407773021676 0ustar jenkinsjenkins00000000000000============ nova-compute ============ --------------------- Nova Compute Server --------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-compute [options] DESCRIPTION =========== Handles all processes relating to instances (guest vms). nova-compute is responsible for building a disk image, launching it via the underlying virtualization driver, responding to calls to check its state, attaching persistent storage, and terminating it. 
OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-consoleauth.rst0000664000567000056710000000140412701407773022545 0ustar jenkinsjenkins00000000000000================ nova-consoleauth ================ ------------------------------------------- Nova Console Authentication Server ------------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-consoleauth [options] DESCRIPTION =========== Provides Authentication for nova consoles OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-console.rst0000664000567000056710000000136612701407773021672 0ustar jenkinsjenkins00000000000000============ nova-console ============ ---------------------------- Nova Console Server ---------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-console [options] DESCRIPTION =========== nova-console is a console Proxy to set up multi-tenant VM console access (i.e. 
with xvp) OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-dhcpbridge.rst0000664000567000056710000000164612701407773022324 0ustar jenkinsjenkins00000000000000=============== nova-dhcpbridge =============== -------------------------------------------------- Handles Lease Database updates from DHCP servers -------------------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-dhcpbridge [options] DESCRIPTION =========== Handles lease database updates from DHCP servers. Used whenever nova is managing DHCP (vlan and flatDHCP). nova-dhcpbridge should not be run as a daemon. OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/api-paste.ini * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-api.rst0000664000567000056710000000147612701407773021003 0ustar jenkinsjenkins00000000000000======== nova-api ======== ------------------------------------------- Server for the Nova EC2 and OpenStack APIs ------------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-api [options] DESCRIPTION =========== nova-api is a server daemon that serves the nova EC2 and OpenStack APIs in separate greenthreads OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/api-paste.ini * /etc/nova/policy.json * /etc/nova/rootwrap.conf * 
/etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-manage.rst0000664000567000056710000001057712701407773021464 0ustar jenkinsjenkins00000000000000=========== nova-manage =========== ------------------------------------------------------ control and manage cloud computer instances and images ------------------------------------------------------ :Author: openstack@lists.openstack.org :Date: 2012-04-05 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-manage [] DESCRIPTION =========== nova-manage controls cloud computing instances by managing shell selection, vpn connections, and floating IP address configuration. More information about OpenStack Nova is at http://nova.openstack.org. OPTIONS ======= The standard pattern for executing a nova-manage command is: ``nova-manage []`` Run without arguments to see a list of available command categories: ``nova-manage`` Categories are project, shell, vpn, and floating. Detailed descriptions are below. You can also run with a category argument such as user to see a list of all commands in that category: ``nova-manage floating`` These sections describe the available categories and arguments for nova-manage. Nova Db ~~~~~~~ ``nova-manage db version`` Print the current main database version. ``nova-manage db sync`` Sync the main database up to the most recent version. This is the standard way to create the db as well. ``nova-manage db archive_deleted_rows [--max_rows ] [--verbose]`` Move deleted rows from production tables to shadow tables. Specifying --verbose will print the results of the archive operation for any tables that were changed. ``nova-manage db null_instance_uuid_scan [--delete]`` Lists and optionally deletes database records where instance_uuid is NULL. 
Nova ApiDb ~~~~~~~~~~ ``nova-manage api_db version`` Print the current cells api database version. ``nova-manage api_db sync`` Sync the api cells database up to the most recent version. This is the standard way to create the db as well. Nova Logs ~~~~~~~~~ ``nova-manage logs errors`` Displays nova errors from log files. ``nova-manage logs syslog `` Displays nova alerts from syslog. Nova Shell ~~~~~~~~~~ ``nova-manage shell bpython`` Starts a new bpython shell. ``nova-manage shell ipython`` Starts a new ipython shell. ``nova-manage shell python`` Starts a new python shell. ``nova-manage shell run`` Starts a new shell using python. ``nova-manage shell script `` Runs the named script from the specified path with flags set. Nova VPN ~~~~~~~~ ``nova-manage vpn list`` Displays a list of projects, their IP port numbers, and what state they're in. ``nova-manage vpn run `` Starts the VPN for the named project. ``nova-manage vpn spawn`` Runs all VPNs. Nova Floating IPs ~~~~~~~~~~~~~~~~~ ``nova-manage floating create [--pool ] [--interface ]`` Creates floating IP addresses for the given range, optionally specifying a floating pool and a network interface. ``nova-manage floating delete `` Deletes floating IP addresses in the range given. ``nova-manage floating list`` Displays a list of all floating IP addresses. Nova Images ~~~~~~~~~~~ ``nova-manage image image_register `` Registers an image with the image service. ``nova-manage image kernel_register `` Registers a kernel with the image service. ``nova-manage image ramdisk_register `` Registers a ramdisk with the image service. ``nova-manage image all_register `` Registers an image kernel and ramdisk with the image service. ``nova-manage image convert `` Converts all images in directory from the old (Bexar) format to the new format. Nova VM ~~~~~~~~~~~ ``nova-manage vm list [host]`` Show a list of all instances. Accepts optional hostname (to show only instances on specific host). 
``nova-manage live-migration `` Live migrate instance from current host to destination host. Requires instance id (which comes from euca-describe-instance) and destination host name (which can be found from nova-manage service list). SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-serialproxy.rst0000664000567000056710000000154512701407773022610 0ustar jenkinsjenkins00000000000000================ nova-serialproxy ================ -------------------------------------------------------- Websocket serial Proxy for OpenStack Nova serial ports. -------------------------------------------------------- :Author: openstack@lists.launchpad.net :Date: 2014-03-15 :Copyright: OpenStack Foundation :Version: 2014.2 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-serialproxy [options] DESCRIPTION =========== Websocket proxy that is compatible with OpenStack Nova serial ports. OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova is sourced in Launchpad so you can view current bugs at `OpenStack Nova `__ nova-13.0.0/doc/source/man/nova-api-os-compute.rst0000664000567000056710000000153512701407773023070 0ustar jenkinsjenkins00000000000000==================== nova-api-os-compute ==================== ------------------------------------------- Server for the Nova OpenStack Compute APIs ------------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-api-os-compute [options] DESCRIPTION =========== nova-api-os-compute is a server daemon that serves the Nova OpenStack Compute API OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * 
/etc/nova/api-paste.ini * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/man/nova-spicehtml5proxy.rst0000664000567000056710000000154712701407773023410 0ustar jenkinsjenkins00000000000000==================== nova-spicehtml5proxy ==================== -------------------------------------------------------- Websocket Proxy for OpenStack Nova SPICE HTML5 consoles. -------------------------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-spicehtml5proxy [options] DESCRIPTION =========== Websocket proxy that is compatible with OpenStack Nova SPICE HTML5 consoles. OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.0.0/doc/source/upgrade.rst0000664000567000056710000002501112701407773020134 0ustar jenkinsjenkins00000000000000.. Copyright 2014 Rackspace All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Upgrades ======== Nova aims to provide upgrades with minimal downtime. Firstly, the data plane. There should be no VM downtime when you upgrade Nova. 
Nova has had this since the early days, with the exception of some nova-network related services. Secondly, we want no downtime during upgrades of the Nova control plane. This document is trying to describe how we can achieve that. Once we have introduced the key concepts relating to upgrade, we will introduce the process needed for a no downtime upgrade of nova. Current Database Upgrade Types ------------------------------ Currently Nova has 2 types of database upgrades that are in use. #. Offline Migrations #. Online Migrations Offline Migrations consist of: '''''''''''''''''''''''''''''' #. Database schema migrations from pre-defined migrations in nova/db/sqlalchemy/migrate_repo/versions. #. *Deprecated* Database data migrations from pre-defined migrations in nova/db/sqlalchemy/migrate_repo/versions. Online Migrations consist of: ''''''''''''''''''''''''''''' #. Online data migrations from inside Nova object source code. #. *Future* Online schema migrations using auto-generation from models. An example of online data migrations are the flavor migrations done as part of Nova object version 1.18. This included a transient migration of flavor storage from one database location to another. :emphasis:`Note: Database downgrades are not supported.` Migration policy: ''''''''''''''''' The following guidelines for schema and data migrations are followed in order to ease upgrades: * Additive schema migrations - In general, almost all schema migrations should be additive. Put simply, they should only create elements like columns, indices, and tables. * Subtractive schema migrations - To remove an element like a column or table during the N release cycle: #. The element must be deprecated and retained for backward compatibility. (This allows for graceful upgrade from N to N+1.) #. Data migration, by the objects layer, must completely migrate data from the old version of the schema to the new version. 
* `Data migration example `_ * `Data migration enforcement example `_ (for sqlalchemy migrate/deprecated scripts): #. The column can then be removed with a migration at the start of N+2. * All schema migrations should be idempotent. (For example, a migration should check if an element exists in the schema before attempting to add it.) This logic comes for free in the autogenerated workflow of the online migrations. * Constraints - When adding a foreign or unique key constraint, the schema migration code needs to handle possible problems with data before applying the constraint. (Example: A unique constraint must clean up duplicate records before applying said constraint.) * Data migrations - As mentioned above, data migrations will be done in an online fashion by custom code in the object layer that handles moving data between the old and new portions of the schema. In addition, for each type of data migration performed, there should exist a nova-manage option for an operator to manually request that rows be migrated. * See `flavor migration spec `_ for an example of data migrations in the object layer. *Future* work - #. Adding plumbing to enforce that relevant data migrations are completed before running `contract` in the expand/migrate/contract schema migration workflow. A potential solution would be for `contract` to run a gating test for each specific subtract operation to determine if the operation can be completed. Concepts -------- Here are the key concepts you need to know before reading the section on the upgrade process: RPC version pinning Through careful RPC versioning, newer nodes are able to talk to older nova-compute nodes. When upgrading control plane nodes, we can pin them at an older version of the compute RPC API, until all the compute nodes are able to be upgraded. https://wiki.openstack.org/wiki/RpcMajorVersionUpdates .. note:: This does not apply to cells deployments since cells does not currently support rolling upgrades. 
It is assumed that cells deployments are upgraded in lockstep so n-1 cells compatibility does not work. Online Configuration Reload During the upgrade, we pin new serves at the older RPC version. When all services are updated to use newer code, we need to unpin them so we are able to use any new functionality. To avoid having to restart the service, using the current SIGHUP signal handling, or otherwise, ideally we need a way to update the currently running process to use the latest configuration. Graceful service shutdown Many nova services are python processes listening for messages on a AMQP queue, including nova-compute. When sending the process the SIGTERM the process stops getting new work from its queue, completes any outstanding work, then terminates. During this process, messages can be left on the queue for when the python process starts back up. This gives us a way to shutdown a service using older code, and start up a service using newer code with minimal impact. If its a service that can have multiple workers, like nova-conductor, you can usually add the new workers before the graceful shutdown of the old workers. In the case of singleton services, like nova-compute, some actions could be delayed during the restart, but ideally no actions should fail due to the restart. NOTE: while this is true for the RabbitMQ RPC backend, we need to confirm what happens for other RPC backends. API load balancer draining When upgrading API nodes, you can make your load balancer only send new connections to the newer API nodes, allowing for a seamless update of your API nodes. Expand/Contract DB Migrations Modern databases are able to make many schema changes while you are still writing to the database. Taking this a step further, we can make all DB changes by first adding the new structures, expanding. Then you can slowly move all the data into a new location and format. Once that is complete, you can drop bits of the scheme that are no long needed, i.e. contract. 
We have plans to implement this here: https://review.openstack.org/#/c/102545/5/specs/juno/online-schema-changes.rst,cm Online Data Migrations using objects In Kilo we are moving all data migration into the DB objects code. When trying to migrate data in the database from the old format to the new format, this is done in the object code when reading or saving things that are in the old format. For records that are not updated, you need to run a background process to convert those records into the newer format. This process must be completed before you contract the database schema. We have the first example of this happening here: http://specs.openstack.org/openstack/nova-specs/specs/kilo/approved/flavor-from-sysmeta-to-blob.html DB prune deleted rows Currently resources are soft deleted in the database, so users are able to track instances in the DB that are created and destroyed in production. However, most people have a data retention policy, of say 30 days or 90 days after which they will want to delete those entries. Not deleting those entries affects DB performance as indices grow very large and data migrations take longer as there is more data to migrate. nova-conductor object backports RPC pinning ensures new services can talk to the older service's method signatures. But many of the parameters are objects that may well be too new for the old service to understand, so you are able to send the object back to the nova-conductor to be downgraded to a version the older service can understand. Process ------- NOTE: This still requires much work before it can become reality. This is more an aspirational plan that helps describe how all the pieces of the jigsaw fit together. This is the planned process for a zero downtime upgrade: #. Prune deleted DB rows, check previous migrations are complete #. Expand DB schema (e.g. add new column) #. Pin RPC versions for all services that are upgraded from this point, using the current version #. 
Upgrade all nova-conductor nodes (to do object backports) #. Upgrade all other services, except nova-compute and nova-api, using graceful shutdown #. Upgrade nova-compute nodes (this is the bulk of the work). #. Unpin RPC versions #. Add new API nodes, and enable new features, while using a load balancer to "drain" the traffic from old API nodes #. Run the new nova-manage command that ensures all DB records are "upgraded" to new data version #. "Contract" DB schema (e.g. drop unused columns) Testing ------- Once we have all the pieces in place, we hope to move the Grenade testing to follow this new pattern. The current tests only cover the existing upgrade process where: * old computes can run with new control plane * but control plane is turned off for DB migrations Unresolved issues ----------------- Ideally you could rollback. We would need to add some kind of object data version pinning, so you can be running all new code to some extent, before there is no path back. Or have some way of reversing the data migration before the final contract. It is unknown how expensive on demand object backports would be. We could instead always send older versions of objects until the RPC pin is removed, but that means we might have new code getting old objects, which is currently not the case. nova-13.0.0/doc/source/notifications.rst0000664000567000056710000002705612701407773021371 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Notifications in Nova ===================== Similarly to other OpenStack services Nova emits notifications to the message bus with the Notifier class provided by oslo.messaging [1]_. From the notification consumer point of view a notification consists of two parts: an envelope with a fixed structure defined by oslo.messaging and a payload defined by the service emitting the notification. The envelope format is the following:: { "priority": , "event_type": , "timestamp": , "publisher_id": , "message_id": , "payload": } There are two types of notifications in Nova: legacy notifications which have an unversioned payload and newer notifications which have a versioned payload. Unversioned notifications ------------------------- Nova code uses the nova.rpc.get_notifier call to get a configured oslo.messaging Notifier object and it uses the oslo provided functions on the Notifier object to emit notifications. The configuration of the returned Notifier object depends on the parameters of the get_notifier call and the value of the oslo.messaging configuration options `driver` and `topics`. There are notification configuration options in Nova which are specific for certain notification types like `notify_on_state_change`, `notify_api_faults`, `default_notification_level`, etc. The structure of the payload of the unversioned notifications is defined in the code that emits the notification and no documentation or enforced backward compatibility contract exists for that format. Versioned notifications ----------------------- The versioned notification concept is created to fix the shortcomings of the unversioned notifications. The envelope structure of the emitted notification is the same as in the unversioned notification case as it is provided by oslo.messaging. However the payload is not a free form dictionary but a serialized oslo versionedobject [2]_. .. 
_service.update: For example the wire format of the `service.update` notification looks like the following:: { "priority":"INFO", "payload":{ "nova_object.namespace":"nova", "nova_object.name":"ServiceStatusPayload", "nova_object.version":"1.0", "nova_object.data":{ "host":"host1", "disabled":false, "last_seen_up":null, "binary":"nova-compute", "topic":"compute", "disabled_reason":null, "report_count":1, "forced_down":false, "version":2 } }, "event_type":"service.update", "publisher_id":"nova-compute:host1" } The serialized oslo versionedobject as a payload provides a version number to the consumer so the consumer can detect if the structure of the payload is changed. Nova provides the following contract regarding the versioned notification payload: * the payload version defined by the `nova_object.version` field of the payload will be increased if and only if the syntax or the semantics of the `nova_object.data` field of the payload is changed. * a minor version bump indicates a backward compatible change which means that only new fields are added to the payload so a well written consumer can still consume the new payload without any change. * a major version bump indicates a backward incompatible change of the payload which can mean removed fields, type change, etc in the payload. There is a Nova configuration parameter `notification_format` that can be used to specify which notifications are emitted by Nova. The possible values are `unversioned`, `versioned`, `both` and the default value is `both`. How to add a new versioned notification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To support the above contract from the Nova code every versioned notification is modeled with oslo versionedobjects. Every versioned notification class shall inherit from the `nova.objects.notification.NotificationBase` which already defines three mandatory fields of the notification `event_type`, `publisher_id` and `priority`. 
The new notification class shall add a new field `payload` with an appropriate payload type. The payload object of the notifications shall inherit from the `nova.objects.notification.NotificationPayloadBase` class and shall define the fields of the payload as versionedobject fields. The base classes are described in [3]_. The following code example defines the necessary model classes for a new notification `myobject.update`:: @notification.notification_sample('myobject-update.json') @base.NovaObjectRegistry.register class MyObjectNotification(notification.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('MyObjectUpdatePayload') } @base.NovaObjectRegistry.register class MyObjectUpdatePayload(notification.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'some_data': fields.StringField(), 'another_data': fields.StringField(), } After that the notification can be populated and emitted with the following code:: payload = MyObjectUpdatePayload(some_data="foo", another_data="bar") MyObjectNotification( publisher=notification.NotificationPublisher.from_service_obj( ), event_type=notification.EventType( object='myobject', action=fields.NotificationAction.UPDATE), priority=fields.NotificationPriority.INFO, payload=payload).emit(context) The above code will generate the following notification on the wire:: { "priority":"INFO", "payload":{ "nova_object.namespace":"nova", "nova_object.name":"MyObjectUpdatePayload", "nova_object.version":"1.0", "nova_object.data":{ "some_data":"foo", "another_data":"bar", } }, "event_type":"myobject.update", "publisher_id":":" } There is a possibility to reuse an existing versionedobject as notification payload by adding a `SCHEMA` field for the payload class that defines a mapping between the fields of existing objects and the fields of the new payload object. 
For example the service.status notification reuses the existing `nova.objects.service.Service` object when defines the notification's payload:: @notification.notification_sample('service-update.json') @base.NovaObjectRegistry.register class ServiceStatusNotification(notification.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('ServiceStatusPayload') } @base.NovaObjectRegistry.register class ServiceStatusPayload(notification.NotificationPayloadBase): SCHEMA = { 'host': ('service', 'host'), 'binary': ('service', 'binary'), 'topic': ('service', 'topic'), 'report_count': ('service', 'report_count'), 'disabled': ('service', 'disabled'), 'disabled_reason': ('service', 'disabled_reason'), 'availability_zone': ('service', 'availability_zone'), 'last_seen_up': ('service', 'last_seen_up'), 'forced_down': ('service', 'forced_down'), 'version': ('service', 'version') } # Version 1.0: Initial version VERSION = '1.0' fields = { 'host': fields.StringField(nullable=True), 'binary': fields.StringField(nullable=True), 'topic': fields.StringField(nullable=True), 'report_count': fields.IntegerField(), 'disabled': fields.BooleanField(), 'disabled_reason': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'last_seen_up': fields.DateTimeField(nullable=True), 'forced_down': fields.BooleanField(), 'version': fields.IntegerField(), } def populate_schema(self, service): super(ServiceStatusPayload, self).populate_schema(service=service) If the `SCHEMA` field is defined then the payload object needs to be populated with the `populate_schema` call before it can be emitted:: payload = ServiceStatusPayload() payload.populate_schema(service=) ServiceStatusNotification( publisher=notification.NotificationPublisher.from_service_obj( ), event_type=notification.EventType( object='service', action=fields.NotificationAction.UPDATE), priority=fields.NotificationPriority.INFO, payload=payload).emit(context) 
The above code will emit the :ref:`already shown notification` on the wire. Every item in the `SCHEMA` has the syntax of:: : (, ) The mapping defined in the `SCHEMA` field has the following semantics. When the `populate_schema` function is called the content of the `SCHEMA` field is enumerated and the value of the field of the pointed parameter object is copied to the requested payload field. So in the above example the `host` field of the payload object is populated from the value of the `host` field of the `service` object that is passed as a parameter to the `populate_schema` call. A notification payload object can reuse fields from multiple existing objects. Also a notification can have both new and reused fields in its payload. Note that the notification's publisher instance can be created two different ways. It can be created by instantiating the `NotificationPublisher` object with a `host` and a `binary` string parameter or it can be generated from a `Service` object by calling `NotificationPublisher.from_service_obj` function. Versioned notifications shall have a sample file stored under `doc/sample_notifications` directory and the notification object shall be decorated with the `notification_sample` decorator. For example the `service.update` notification has a sample file stored in `doc/sample_notifications/service-update.json` and the ServiceUpdateNotification class is decorated accordingly. Existing versioned notifications ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versioned_notifications:: .. [1] http://docs.openstack.org/developer/oslo.messaging/notifier.html .. [2] http://docs.openstack.org/developer/oslo.versionedobjects .. [3] http://docs.openstack.org/developer/nova/devref/api/nova.objects.notification.html nova-13.0.0/doc/source/test_strategy.rst0000664000567000056710000001000612701407773021404 0ustar jenkinsjenkins00000000000000.. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============== Test Strategy ============== A key part of the "four opens" is ensuring that OpenStack delivers well-tested and usable software. For more details see: http://docs.openstack.org/project-team-guide/introduction.html#the-four-opens Experience has shown that untested features are frequently broken, in part due to the velocity of upstream changes. As we aim to ensure we keep all features working across upgrades, we must aim to test all features. Reporting Test Coverage ======================= For details on plans to report the current test coverage, please see: :doc:`feature_classification` Running tests and reporting results =================================== Voting in Gerrit ---------------- On every review in gerrit, check tests are run on every patch set, and are able to report a +1 or -1 vote. For more details, please see: http://docs.openstack.org/infra/manual/developers.html Before merging any code, there is an integrated gate test queue, to ensure master is always passing all tests. For more details, please see: http://docs.openstack.org/infra/zuul/gating.html Infra vs Third-Party -------------------- Tests that use fully open source components are generally run by the OpenStack Infra teams. Test setups that use non-open technology must be run outside of that infrastructure, but should still report their results upstream. 
For more details, please see: http://docs.openstack.org/infra/system-config/third_party.html Ad-hoc testing -------------- It is particularly common for people to run ad-hoc tests on each released milestone, such as RC1, to stop regressions. While these efforts can help stabilize the release, as a community we have a much stronger preference for continuous integration testing. Partly this is because we encourage users to deploy master, and we generally have to assume that any upstream commit may have already been deployed in production. Types of tests ============== Unit tests ---------- Unit tests help document and enforce the contract for each component. Without good unit test coverage it is hard to continue to quickly evolve the codebase. The correct level of unit test coverage is very subjective, and as such we are not aiming for a particular percentage of coverage, rather we are aiming for good coverage. Generally, every code change should have a related unit test: http://docs.openstack.org/developer/hacking/#creating-unit-tests Integration tests ----------------- Today, our integration tests involve running the Tempest test suite on a variety of Nova deployment scenarios. In addition, we have third parties running the tests on their preferred Nova deployment scenario. Functional tests ---------------- Nova has a set of in-tree functional tests that focus on things that are out of scope for tempest testing and unit testing. Tempest tests run against a full live OpenStack deployment, generally deployed using devstack. At the other extreme, unit tests typically use mock to test a unit of code in isolation. Functional tests don't run an entire stack, they are isolated to nova code, and have no reliance on external services. They do have a WSGI app, nova services and a database, with minimal stubbing of nova internals. Interoperability tests ----------------------- The DefCore committee maintains a list that contains a subset of Tempest tests. 
These are used to verify if a particular Nova deployment's API responds as expected. For more details, see: https://github.com/openstack/defcore nova-13.0.0/doc/source/services.rst0000664000567000056710000000455312701407773020340 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _service_manager_driver: Services, Managers and Drivers ============================== The responsibilities of Services, Managers, and Drivers, can be a bit confusing to people that are new to nova. This document attempts to outline the division of responsibilities to make understanding the system a little bit easier. Currently, Managers and Drivers are specified by flags and loaded using utils.load_object(). This method allows for them to be implemented as singletons, classes, modules or objects. As long as the path specified by the flag leads to an object (or a callable that returns an object) that responds to getattr, it should work as a manager or driver. The :mod:`nova.service` Module ------------------------------ .. automodule:: nova.service :noindex: :members: :undoc-members: :show-inheritance: The :mod:`nova.manager` Module ------------------------------ .. 
automodule:: nova.manager :noindex: :members: :undoc-members: :show-inheritance: Implementation-Specific Drivers ------------------------------- A manager will generally load a driver for some of its tasks. The driver is responsible for specific implementation details. Anything running shell commands on a host, or dealing with other non-python code should probably be happening in a driver. Drivers should minimize touching the database, although it is currently acceptable for implementation specific data. This may be reconsidered at some point. It usually makes sense to define an Abstract Base Class for the specific driver (i.e. VolumeDriver), to define the methods that a different driver would need to implement. nova-13.0.0/doc/source/process.rst0000664000567000056710000011770312701407773020175 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _process: ================= Nova team process ================= Nova is always evolving its processes, but it's important to explain why we have them: so that we can all work to ensure the interactions we need to happen do happen. The process we have should always be there to make good communication between all members of our community easier. OpenStack Wide Patterns ======================= Nova follows most of the generally adopted norms for OpenStack projects. 
You can get more details here: * http://docs.openstack.org/infra/manual/developers.html * http://git.openstack.org/cgit/openstack/project-team-guide If you are new to Nova, please read this first: :ref:`getting_involved`. Dates overview ============== For Mitaka, please see: https://wiki.openstack.org/wiki/Nova/Mitaka_Release_Schedule For Liberty, please see: https://wiki.openstack.org/wiki/Nova/Liberty_Release_Schedule Feature Freeze ~~~~~~~~~~~~~~ This effort is primarily to help the horizontal teams help prepare their items for release, while at the same time giving developers time to focus on stabilising what is currently in master, and encouraging users and packages to perform tests (automated, and manual) on the release, to spot any major bugs. As such we have the following processes: - https://wiki.openstack.org/wiki/FeatureProposalFreeze - make sure all code is up for review - so we can optimise for completed features, not lots of half completed features - https://wiki.openstack.org/wiki/FeatureFreeze - make sure all feature code is merged - https://wiki.openstack.org/wiki/StringFreeze - give translators time to translate all our strings - Note: debug logs are no longer translated - https://wiki.openstack.org/wiki/DepFreeze - time to co-ordinate the final list of deps, and give packagers time to package them - generally it is also quite destabilising to take upgrades (beyond bug fixes) this late We align with this in Nova and the dates for this release are stated above. As with all processes here, there are exceptions. But the exceptions at this stage need to be discussed with the horizontal teams that might be affected by changes beyond this point, and as such are discussed with one of the OpenStack release managers. Spec and Blueprint Approval Freeze ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is a (mostly) Nova specific process. 
Why do we have a Spec Freeze: - specs take a long time to review, keeping it open distracts from code reviews - keeping them "open" and being slow at reviewing the specs (or just ignoring them) really annoys the spec submitters - we generally have more code submitted that we can review, this time bounding is a way to limit the number of submissions By the freeze date, we expect this to also be the complete list of approved blueprints for liberty: https://blueprints.launchpad.net/nova/liberty The date listed above is when we expect all specifications for Liberty to be merged and displayed here: http://specs.openstack.org/openstack/nova-specs/specs/liberty/approved/ New in Liberty, we will keep the backlog open for submission at all times. Note: the focus is on accepting and agreeing problem statements as being in scope, rather than queueing up work items for the next release. We are still working on a new lightweight process to get out of the backlog and approved for a particular release. For more details on backlog specs, please see: http://specs.openstack.org/openstack/nova-specs/specs/backlog/index.html Also new in Liberty, we will allow people to submit Mitaka specs from liberty-2 (rather than liberty-3 as normal). There can be exceptions, usually it's an urgent feature request that comes up after the initial deadline. These will generally be discussed at the weekly Nova meeting, by adding the spec or blueprint to discuss in the appropriate place in the meeting agenda here (ideally make yourself available to discuss the blueprint, or alternatively make your case on the ML before the meeting): https://wiki.openstack.org/wiki/Meetings/Nova Non-priority Feature Freeze ~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is a Nova specific process. This only applies to low priority blueprints in this list: https://blueprints.launchpad.net/nova/liberty We currently have a very finite amount of review bandwidth. 
In order to make code review time for the agreed community wide priorities, we have to not do some other things. To this end, we are reserving liberty-3 for priority features and bug fixes. As such, we intend not to merge any non-priority things during liberty-3, so around liberty-2 is the "Feature Freeze" for blueprints that are not a priority for liberty. For liberty, we are not aligning the Non-priority Feature Freeze with the tagging of liberty-2. That means the liberty-2 tag will not include some features that merge later in the week. This means, we only require the code to be approved before the end of July 30th, we don't require it to be merged by that date. This should help stop any gate issues disrupting our ability to merge all the code that we have managed to get reviewed in time. Ideally all code should be merged by the end of July 31st, but the state of the gate will determine how possible that is. You can see the list of priorities for each release: http://specs.openstack.org/openstack/nova-specs/#priorities For things that are very close to merging, it's possible it might get an exception for one week after the freeze date, given the patches get enough +2s from the core team to get the code merged. But we expect this list to be zero, if everything goes to plan (no massive gate failures, etc). 
For details, process see: http://lists.openstack.org/pipermail/openstack-dev/2015-July/070920.html Exception process: - Please add request in here: https://etherpad.openstack.org/p/liberty-nova-non-priority-feature-freeze (ideally with core reviewers to sponsor your patch, normally the folks who have already viewed those patches) - make sure you make your request before the end of Wednesday 5th August - nova-drivers will meet to decide what gets an exception (just like they did last release: http://lists.openstack.org/pipermail/openstack-dev/2015-February/056208.html) - an initial list of exceptions (probably just a PTL compiled list at that point) will be available for discussion during the Nova meeting on Thursday 6th August - the aim is to merge the code for all exceptions by the end of Monday 10th August Alternatives: - It was hoped to make this a continuous process using "slots" to control what gets reviewed, but this was rejected by the community when it was last discussed. There is hope this can be resurrected to avoid the "lumpy" nature of this process. - Currently the runways/kanban ideas are blocked on us adopting something like phabricator that could support such workflows String Freeze ~~~~~~~~~~~~~ NOTE: this is still a provisional idea There are general guidelines here: https://wiki.openstack.org/wiki/StringFreeze But below is an experiment for Nova during liberty, to trial a new process. There are four views onto this process. First, the user point of view: - Would like to see untranslated strings, rather than hiding error/info/warn log messages as debug Second, the translators: - Translators will start translation without string freeze, just after feature freeze. 
- Then we have a strict string freeze date (around RC1 date) - After at least 10 days to finish up the translations before the final release Third, the docs team: - Config string updates often mean there is a DocImpact and docs need updating - best to avoid those during feature freeze, where possible Fourth, the developer point of view: - Add any translated strings before Feature Freeze - Post Feature Freeze, allow string changes where an untranslated string is better than no string - i.e. allow new log message strings, until the hard freeze - Post Feature Freeze, have a soft string freeze, try not to change existing strings, where possible - Note: moving a string and re-using a existing string is fine, as the tooling deals with that automatically - Post Hard String Freeze, there should be no extra strings to translate - Assume any added strings will not be translated - Send email about the string freeze exception in this case only, but there should be zero of these So, what has changed from https://wiki.openstack.org/wiki/StringFreeze, well: - no need to block new strings until much later in the cycle - should stop the need to rework bug fixes to remove useful log messages - instead, just accept the idea of untranslated strings being better than no strings in those cases So for Liberty, 21st September, so we will call 21st September the hard freeze date, as we expect RC1 to be cut sometime after 21st September. Note the date is fixed, it's not aligned with the cutting of RC1. This means we must cut another tarball (RC2 or higher) at some point after 5th October to include new translations, even if there are no more bug fixes, to give time before the final release on 13th-16th October. How do I get my code merged? ============================ OK, so you are new to Nova, and you have been given a feature to implement. How do I make that happen? 
You can get most of your questions answered here: - http://docs.openstack.org/infra/manual/developers.html But let's put a Nova specific twist on things... Overview ~~~~~~~~ .. image:: ./images/Nova_spec_process.svg :alt: Flow chart showing the Nova bug/feature process Where do you track bugs? ~~~~~~~~~~~~~~~~~~~~~~~~ We track bugs here: - http://bugs.launchpad.net/nova If you fix an issue, please raise a bug so others who spot that issue can find the fix you kindly created for them. Also before submitting your patch it's worth checking to see if someone has already fixed it for you (Launchpad helps you with that, at little, when you create the bug report). When do I need a blueprint vs a spec? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For more details see: - http://docs.openstack.org/developer/nova/devref/kilo.blueprints.html#when-is-a-blueprint-needed To understand this question, we need to understand why blueprints and specs are useful. But here is the rough idea: - if it needs a spec, it will need a blueprint. - if it's an API change, it needs a spec. - if it's a single small patch that touches a small amount of code, with limited deployer and doc impact, it probably doesn't need a spec. If you are unsure, please ask johnthetubaguy on IRC, or one of the other nova-drivers. How do I get my blueprint approved? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ So you need your blueprint approved? Here is how: - if you don't need a spec, please add a link to your blueprint to the agenda for the next nova meeting: https://wiki.openstack.org/wiki/Meetings/Nova - be sure your blueprint description has enough context for the review in that meeting. - As of Mitaka, this list is stored in an etherpad: https://etherpad.openstack.org/p/mitaka-nova-spec-review-tracking - if you need a spec, then please submit a nova-spec for review, see: http://docs.openstack.org/infra/manual/developers.html Got any more questions? 
Contact johnthetubaguy or one of the other nova-specs-core who are awake at the same time as you. IRC is best as you will often get an immediate response, if they are too busy send him/her an email. How do I get a procedural -2 removed from my patch? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When feature freeze hits, any patches for blueprints that are still in review get a procedural -2 to stop them merging. In Nova a blueprint is only approved for a single release. To have the -2 removed, you need to get the blueprint approved for the current release (see `How do I get my blueprint approved?`_). Why are the reviewers being mean to me? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Code reviews take intense concentration and a lot of time. This tends to lead to terse responses with very little preamble or nicety. That said, there's no excuse for being actively rude or mean. OpenStack has a Code of Conduct (https://www.openstack.org/legal/community-code-of-conduct/) and if you feel this has been breached please raise the matter privately. Either with the relevant parties, the PTL or failing those, the OpenStack Foundation. That said, there are many objective reasons for applying a -1 or -2 to a patch: - Firstly and simply, patches must address their intended purpose successfully. - Patches must not have negative side-effects like wiping the database or causing a functional regression. Usually removing anything, however tiny, requires a deprecation warning be issued for a cycle. - Code must be maintainable, that is it must adhere to coding standards and be as readable as possible for an average OpenStack developer (acknowledging this person is ill-defined). - Patches must respect the direction of the project, for example they should not make approved specs substantially more difficult to implement. - Release coordinators need the correct process to be followed so scope can be tracked accurately. 
Bug fixes require bugs, features require blueprints and all but the simplest features require specs. If there is a blueprint, it must be approved for the release/milestone the patch is attempting to merge into. Please particularly bear in mind that a -2 does not mean "never ever" nor does it mean "your idea is bad and you are dumb". It simply means "do not merge today". You may need to wait some time, rethink your approach or even revisit the problem definition but there is almost always some way forward. The core who applied the -2 should tell you what you need to do. My code review seems stuck, what can I do? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ First and foremost - address any -1s and -2s! The review load on Nova is high enough that patches with negative reviews often get filtered out entirely. A few tips: - Be precise. Ensure you're not talking at cross purposes. - Try to understand where the reviewer is coming from. They may have a very different perspective and/or use-case to you. - If you don't understand the problem, ask them to explain - this is common and helpful behaviour. - Be positive. Everyone's patches have issues, including core reviewers. No-one cares once the issues are fixed. - Try not to flip-flop. When two reviewers are pulling you in different directions, stop pushing code and negotiate the best way forward. - If the reviewer does not respond to replies left on the patchset, reach out to them on IRC or email. If they still don't respond, you can try to ask their colleagues if they're on holiday (or simply wait). Finally, you can ask for mediation in the Nova meeting by adding it to the agenda (https://wiki.openstack.org/wiki/Meetings/Nova). This is also what you should do if you are unable to negotiate a resolution to an issue. Secondly, Nova is a big project, be aware of the average wait times: http://russellbryant.net/openstack-stats/nova-openreviews.html Eventually you should get some +1s from people working through the review queue. 
Expect to get -1s as well. You can ask for reviews within your company, 1-2 are useful (not more), especially if those reviewers are known to give good reviews. You can spend some time while you wait reviewing other people's code - they may reciprocate and you may learn something (:ref:`Why do code reviews when I'm not core? `). If you've waited an appropriate amount of time and you haven't had any +1s, you can ask on IRC for reviews. Please don't ask for core review straight away, especially not directly (IRC or email). Core reviewer time is very valuable and gaining some +1s is a good way to show your patch meets basic quality standards. Once you have a few +1s, be patient. Remember the average wait times. You can ask for reviews each week in IRC, it helps to ask when cores are awake. Bugs ^^^^ It helps to apply correct tracking information. - Put "Closes-Bug", "Partial-Bug" or "Related-Bug" in the commit message tags as necessary. - If you have to raise a bug in Launchpad first, do it - this helps someone else find your fix. - Make sure the bug has the correct priority and tag set: https://wiki.openstack.org/wiki/Nova/BugTriage#Step_2:_Triage_Tagged_Bugs - If it's a trivial fix (<100 lines as a rule of thumb), add it to: https://etherpad.openstack.org/p/liberty-nova-priorities-tracking Features ^^^^^^^^ Again, it helps to apply correct tracking information. For blueprint-only features: - Put your blueprint in the commit message, EG "blueprint simple-feature". - Mark the blueprint as NeedsCodeReview if you are finished. - Maintain the whiteboard on the blueprint so it's easy to understand which patches need reviews. - Use a single topic for all related patches. All patches for one blueprint should share a topic. 
For blueprint and spec features, do everything for blueprint-only features and also: - If it's a project or subteam priority, add it to: https://etherpad.openstack.org/p/liberty-nova-priorities-tracking - Ensure your spec is approved for the current release cycle. If your code is a project or subteam priority, the cores interested in that priority might not mind a ping after it has sat with +1s for a week. If you abuse this privilege, you'll lose respect. If it's not a priority, your blueprint/spec has been approved for the cycle and you have been patient, you can raise it during the Nova meeting. The outcome may be that your spec gets unapproved for the cycle, so that priority items can take focus. If this happens to you, sorry - it should not have been approved in the first place, Nova team bit off more than they could chew, it is their mistake not yours. You can re-propose it for the next cycle. If it's not a priority and your spec has not been approved, your code will not merge this cycle. Please re-propose your spec for the next cycle. Nova Process Mission ==================== This section takes a high level look at the guiding principles behind the Nova process. Open ~~~~ Our mission is to have: - Open Source - Open Design - Open Development - Open Community We have to work out how to keep communication open in all areas. We need to be welcoming and mentor new people, and make it easy for them to pickup the knowledge they need to get involved with OpenStack. For more info on Open, please see: https://wiki.openstack.org/wiki/Open Interoperable API, supporting a vibrant ecosystem ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An interoperable API that gives users on-demand access to compute resources is at the heart of Nova's mission: http://docs.openstack.org/developer/nova/project_scope.html#mission Nova has a vibrant ecosystem of tools built on top of the current Nova API. 
All features should be designed to work with all technology combinations, so
the feature can be adopted by our ecosystem. If a new feature is not adopted
by the ecosystem, it will make it hard for your users to make use of those
features, defeating most of the reason to add the feature in the first place.
The microversion system allows users to isolate themselves from API changes
until they are ready to opt in to the newer behaviour. This is a very
different aim to being "pluggable" or wanting to expose all capabilities to
end users. At the same time, it is not just a "lowest common denominator" set
of APIs. It should be discoverable which features are available, and while no
implementation details should leak to the end users, purely admin concepts may
need to understand technology specific details that back the interoperable and
more abstract concepts that are exposed to the end user. This is a hard goal,
and one area we currently don't do well is isolating image creators from these
technology specific details.

Smooth Upgrades
~~~~~~~~~~~~~~~

As part of our mission for a vibrant ecosystem around our APIs, we want to
make it easy for those deploying Nova to upgrade with minimal impact to their
users. Here is the scope of Nova's upgrade support:

- upgrade from any commit, to any future commit, within the same major release
- only support upgrades between N and N+1 major versions, to reduce technical
  debt relating to upgrades

Here are some of the things we require developers to do, to help with
upgrades:

- when replacing an existing feature or configuration option, make it clear
  how to transition to any replacement
- deprecate configuration options and features before removing them - i.e.
continue to support and test features for at least one release before they are removed - this gives time for operator feedback on any removals - End User API will always be kept backwards compatible Interaction goals ~~~~~~~~~~~~~~~~~ When thinking about the importance of process, we should take a look at: http://agilemanifesto.org With that in mind, let's look at how we want different members of the community to interact. Let's start with looking at issues we have tried to resolve in the past (currently in no particular order). We must: - have a way for everyone to review blueprints and designs, including allowing for input from operators and all types of users (keep it open) - take care to not expand Nova's scope any more than absolutely necessary - ensure we get sufficient focus on the core of Nova so that we can maintain or improve the stability and flexibility of the overall codebase - support any API we release approximately for ever. We currently release every commit, so we're motivate to get the API right first time - avoid low priority blueprints slowing work on high priority work, without blocking those forever - focus on a consistent experience for our users, rather than ease of development - optimise for completed blueprints, rather than more half completed blueprints, so we get maximum value for our users out of our review bandwidth - focus efforts on a subset of patches to allow our core reviewers to be more productive - set realistic expectations on what can be reviewed in a particular cycle, to avoid sitting in an expensive rebase loop - be aware of users that do not work on the project full time - be aware of users that are only able to work on the project at certain times that may not align with the overall community cadence - discuss designs for non-trivial work before implementing it, to avoid the expense of late-breaking design issues FAQs ==== Why bother with all this process? 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We are a large community, spread across multiple timezones, working with several horizontal teams. Good communication is a challenge and the processes we have are mostly there to try and help fix some communication challenges. If you have a problem with a process, please engage with the community, discover the reasons behind our current process, and help fix the issues you are experiencing. Why don't you remove old process? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We do! For example, in Liberty we stopped trying to predict the milestones when a feature will land. As we evolve, it is important to unlearn new habits and explore if things get better if we choose to optimise for a different set of issues. Why are specs useful? ~~~~~~~~~~~~~~~~~~~~~ Spec reviews allow anyone to step up and contribute to reviews, just like with code. Before we used gerrit, it was a very messy review process, that felt very "closed" to most people involved in that process. As Nova has grown in size, it can be hard to work out how to modify Nova to meet your needs. Specs are a great way of having that discussion with the wider Nova community. For Nova to be a success, we need to ensure we don't break our existing users. The spec template helps focus the mind on the impact your change might have on existing users and gives an opportunity to discuss the best way to deal with those issues. However, there are some pitfalls with the process. Here are some top tips to avoid them: - keep it simple. Shorter, simpler, more decomposed specs are quicker to review and merge much quicker (just like code patches). - specs can help with documentation but they are only intended to document the design discussion rather than document the final code. - don't add details that are best reviewed in code, it's better to leave those things for the code review. If we have specs, why still have blueprints? 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use specs to record the design agreement, we use blueprints to track progress on the implementation of the spec. Currently, in Nova, specs are only approved for one release, and must be re-submitted for each release you want to merge the spec, although that is currently under review. Why do we have priorities? ~~~~~~~~~~~~~~~~~~~~~~~~~~ To be clear, there is no "nova dev team manager", we are an open team of professional software developers, that all work for a variety of (mostly competing) companies that collaborate to ensure the Nova project is a success. Over time, a lot of technical debt has accumulated, because there was a lack of collective ownership to solve those cross-cutting concerns. Before the Kilo release, it was noted that progress felt much slower, because we were unable to get appropriate attention on the architectural evolution of Nova. This was important, partly for major concerns like upgrades and stability. We agreed it's something we all care about and it needs to be given priority to ensure that these things get fixed. Since Kilo, priorities have been discussed at the summit. This turns in to a spec review which eventually means we get a list of priorities here: http://specs.openstack.org/openstack/nova-specs/#priorities Allocating our finite review bandwidth to these efforts means we have to limit the reviews we do on non-priority items. This is mostly why we now have the non-priority Feature Freeze. For more on this, see below. Blocking a priority effort is one of the few widely acceptable reasons to block someone adding a feature. One of the great advantages of being more explicit about that relationship is that people can step up to help review and/or implement the work that is needed to unblock the feature they want to get landed. This is a key part of being an Open community. Why is there a Feature Freeze (and String Freeze) in Nova? 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The main reason Nova has a feature freeze is that it gives people working on docs and translations to sync up with the latest code. Traditionally this happens at the same time across multiple projects, so the docs are synced between what used to be called the "integrated release". We also use this time period as an excuse to focus our development efforts on bug fixes, ideally lower risk bug fixes, and improving test coverage. In theory, with a waterfall hat on, this would be a time for testing and stabilisation of the product. In Nova we have a much stronger focus on keeping every commit stable, by making use of extensive continuous testing. In reality, we frequently see the biggest influx of fixes in the few weeks after the release, as distributions do final testing of the released code. It is hoped that the work on Feature Classification will lead us to better understand the levels of testing of different Nova features, so we will be able to reduce and dependency between Feature Freeze and regression testing. It is also likely that the move away from "integrated" releases will help find a more developer friendly approach to keep the docs and translations in sync. Why is there a non-priority Feature Freeze in Nova? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We have already discussed why we have priority features. The rate at which code can be merged to Nova is primarily constrained by the amount of time able to be spent reviewing code. Given this, earmarking review time for priority items means depriving it from non-priority items. The simplest way to make space for the priority features is to stop reviewing and merging non-priority features for a whole milestone. The idea being developers should focus on bug fixes and priority features during that milestone, rather than working on non-priority features. A known limitation of this approach is developer frustration. 
Many developers are not being given permission to review code, work on bug fixes or work on priority features, and so feel very unproductive upstream. An alternative approach of "slots" or "runways" has been considered, that uses a kanban style approach to regulate the influx of work onto the review queue. We are yet to get agreement on a more balanced approach, so the existing system is being continued to ensure priority items are more likely to get the attention they require. Why do you still use Launchpad? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We are actively looking for an alternative to Launchpad's bugs and blueprints. Originally the idea was to create Storyboard. However the development has stalled. A more likely front runner is this: http://phabricator.org/applications/projects/ When should I submit my spec? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Ideally we want to get all specs for a release merged before the summit. For things that we can't get agreement on, we can then discuss those at the summit. There will always be ideas that come up at the summit and need to be finalised after the summit. This causes a rush which is best avoided. How can I get my code merged faster? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ So no-one is coming to review your code, how do you speed up that process? Firstly, make sure you are following the above process. If it's a feature, make sure you have an approved blueprint. If it's a bug, make sure it is triaged, has its priority set correctly, it has the correct bug tag and is marked as in progress. If the blueprint has all the code up for review, change it from Started into NeedsCodeReview so people know only reviews are blocking you, make sure it hasn't accidentally got marked as implemented. 
Secondly, if you have a negative review (-1 or -2) and you responded to that in a comment or uploading a new change with some updates, but that reviewer hasn't come back for over a week, it's probably a good time to reach out to the reviewer on IRC (or via email) to see if they could look again now you have addressed their comments. If you can't get agreement, and your review gets stuck (i.e. requires mediation), you can raise your patch during the Nova meeting and we will try to resolve any disagreement. Thirdly, is it in merge conflict with master or are any of the CI tests failing? Particularly any third-party CI tests that are relevant to the code you are changing. If you're fixing something that only occasionally failed before, maybe recheck a few times to prove the tests stay passing. Without green tests, reviews tend to move on and look at the other patches that have the tests passing. OK, so you have followed all the process (i.e. your patches are getting advertised via the project's tracking mechanisms), and your patches either have no reviews, or only positive reviews. Now what? Have you considered reviewing other people's patches? Firstly, participating in the review process is the best way for you to understand what reviewers are wanting to see in the code you are submitting. As you get more practiced at reviewing it will help you to write "merge-ready" code. Secondly, if you help review other peoples code and help get their patches ready for the core reviewers to add a +2, it will free up a lot of non-core and core reviewer time, so they are more likely to get time to review your code. For more details, please see: :ref:`Why do code reviews when I'm not core? ` Please note, I am not recommending you go to ask people on IRC or via email for reviews. Please try to get your code reviewed using the above process first. In many cases multiple direct pings generate frustration on both sides and that tends to be counter productive. 
Now you have got your code merged, lets make sure you don't need to fix this bug again. The fact the bug exists means there is a gap in our testing. Your patch should have included some good unit tests to stop the bug coming back. But don't stop there, maybe its time to add tempest tests, to make sure your use case keeps working? Maybe you need to set up a third party CI so your combination of drivers will keep working? Getting that extra testing in place should stop a whole heap of bugs, again giving reviewers more time to get to the issues or features you want to add in the future. Process Evolution Ideas ======================= We are always evolving our process as we try to improve and adapt to the changing shape of the community. Here we discuss some of the ideas, along with their pros and cons. Splitting out the virt drivers (or other bits of code) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Currently, Nova doesn't have strong enough interfaces to split out the virt drivers, scheduler or REST API. This is seen as the key blocker. Let's look at both sides of the debate here. 
Reasons for the split: - can have separate core teams for each repo - this leads to quicker turn around times, largely due to focused teams - splitting out things from core means less knowledge required to become core in a specific area Reasons against the split: - loss of interoperability between drivers - this is a core part of Nova's mission, to have a single API across all deployments, and a strong ecosystem of tools and apps built on that - we can overcome some of this with stronger interfaces and functional tests - new features often need changes in the API and virt driver anyway - the new "depends-on" can make these cross-repo dependencies easier - loss of code style consistency across the code base - fear of fragmenting the nova community, leaving few to work on the core of the project - could work in subteams within the main tree TODO - need to complete analysis Subteam recommendation as a +2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are groups of people with great knowledge of particular bits of the code base. It may be a good idea to give their recommendation of a merge. In addition, having the subteam focus review efforts on a subset of patches should help concentrate the nova-core reviews they get, and increase the velocity of getting code merged. The first part is for subgroups to show they can do a great job of recommending patches. This is starting in here: https://etherpad.openstack.org/p/liberty-nova-priorities-tracking Ideally this would be done with gerrit user "tags" rather than an etherpad. There are some investigations by sdague in how feasible it would be to add tags to gerrit. Stop having to submit a spec for each release ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As mentioned above, we use blueprints for tracking, and specs to record design decisions. Targeting specs to a specific release is a heavyweight solution and blurs the lines between specs and blueprints. 
At the same time, we don't want to lose the opportunity to revise existing blueprints. Maybe there is a better balance? What about this kind of process: - backlog has these folders: - backlog/incomplete - merge a partial spec - backlog/complete - merge complete specs (remove tracking details, such as assignee part of the template) - ?? backlog/expired - specs are moved here from incomplete or complete when no longer seem to be given attention (after 1 year, by default) - /implemented - when a spec is complete it gets moved into the release directory and possibly updated to reflect what actually happened - there will no longer be a per-release approved spec list To get your blueprint approved: - add it to the next nova meeting - if a spec is required, update the URL to point to the spec merged in a spec to the blueprint - ensure there is an assignee in the blueprint - a day before the meeting, a note is sent to the ML to review the list before the meeting - discuss any final objections in the nova-meeting - this may result in a request to refine the spec, if things have changed since it was merged - trivial cases can be approved in advance by a nova-driver, so not all folks need to go through the meeting This still needs more thought, but should decouple the spec review from the release process. It is also more compatible with a runway style system, that might be less focused on milestones. Runways ~~~~~~~ Runways are a form of Kanban, where we look at optimising the flow through the system, by ensure we focus our efforts on reviewing a specific subset of patches. 
The idea goes something like this: - define some states, such as: design backlog, design review, code backlog, code review, test+doc backlog, complete - blueprints must be in one of the above state - large or high priority bugs may also occupy a code review slot - core reviewer member moves item between the slots - must not violate the rules on the number of items in each state - states have a limited number of slots, to ensure focus - certain percentage of slots are dedicated to priorities, depending on point in the cycle, and the type of the cycle, etc Reasons for: - more focused review effort, get more things merged more quickly - more upfront about when your code is likely to get reviewed - smooth out current "lumpy" non-priority feature freeze system Reasons against: - feels like more process overhead - control is too centralised Replacing Milestones with SemVer Releases ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can deploy any commit of Nova and upgrade to a later commit in that same release. Making our milestones versioned more like an official release would help signal to our users that people can use the milestones in production, and get a level of upgrade support. It could go something like this: - 14.0.0 is milestone 1 - 14.0.1 is milestone 2 (maybe, because we add features, it should be 14.1.0?) - 14.0.2 is milestone 3 - we might do other releases (once a critical bug is fixed?), as it makes sense, but we will always be the time bound ones - 14.0.3 two weeks after milestone 3, adds only bug fixes (and updates to RPC versions?) - maybe a stable branch is created at this point? - 14.1.0 adds updated translations and co-ordinated docs - this is released from the stable branch? 
- 15.0.0 is the next milestone, in the following cycle - not the bump of the major version to signal an upgrade incompatibility with 13.x We are currently watching Ironic to see how their use of semver goes, and see what lessons need to be learnt before we look to maybe apply this technique during M. Feature Classification ~~~~~~~~~~~~~~~~~~~~~~ This is a look at moving forward this effort: - http://docs.openstack.org/developer/nova/support-matrix.html The things we need to cover: - note what is tested, and how often that test passes (via 3rd party CI, or otherwise) - link to current test results for stable and master (time since last pass, recent pass rate, etc) - TODO - sync with jogo on his third party CI audit and getting trends, ask infra - include experimental features (untested feature) - get better at the impact of volume drivers and network drivers on available features (not just hypervisor drivers) Main benefits: - users get a clear picture of what is known to work - be clear about when experimental features are removed, if no tests are added - allows a way to add experimental things into Nova, and track either their removal or maturation * https://wiki.openstack.org/wiki/Nova/Mitaka_Release_Schedule nova-13.0.0/doc/source/scheduler_evolution.rst0000664000567000056710000001545612701407773022603 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
=================== Scheduler Evolution =================== Evolving the scheduler has been a priority item over several releases: http://specs.openstack.org/openstack/nova-specs/#priorities The scheduler has become tightly coupled with the rest of nova, limiting its capabilities, accuracy, flexibility and maintainability. The goal of scheduler evolution is to bring about a better separation of concerns between scheduling functionality and the rest of nova. Once this effort has completed, its conceivable that the nova-scheduler could become a separate git repo, outside of nova but within the compute project. This is not the current focus. Problem Use Cases ================== Many users are wanting to do more advanced things with the scheduler, but the current architecture is not ready to support those use cases in a maintainable way. A few examples will help to illustrate where the scheduler falls short: Cross Project Affinity ----------------------- It can be desirable, when booting from a volume, to use a compute node that is close to the shared storage where that volume is. Similarly, for the sake of performance, it can be desirable to use a compute node that is in a particular location in relation to a pre-created port. Accessing Aggregates in Filters and Weights -------------------------------------------- Any DB access in a filter or weight slows down the scheduler. Until the end of kilo, there was no way to deal with the scheduler accessing information about aggregates without querying the DB in every call to host_passes() in a filter. Filter Scheduler Alternatives ------------------------------ For certain use cases, radically different schedulers may perform much better than the filter scheduler. We should not block this innovation. It is unreasonable to assume a single scheduler will work for all use cases. However, to enable this kind of innovation in a maintainable way, a single strong scheduler interface is required. 
Project Scale issues --------------------- There are many interesting ideas for new schedulers, like the solver scheduler, and frequent requests to add new filters and weights to the scheduling system. The current nova team does not have the bandwidth to deal with all these requests. A dedicated scheduler team could work on these items independently of the rest of nova. The tight coupling that currently exists makes it impossible to work on the scheduler in isolation. A stable interface is required before the code can be split out. Key areas we are evolving ========================== Here we discuss, at a high level, areas that are being addressed as part of the scheduler evolution work. Fixing the Scheduler DB model ------------------------------ We need the nova and scheduler data models to be independent of each other. The first step is breaking the link between the ComputeNode and Service DB tables. In theory where the Service information is stored should be pluggable through the service group API, and should be independent of the scheduler service. For example, it could be managed via zookeeper rather than polling the nova DB. There are also places where filters and weights call into the nova DB to find out information about aggregates. This needs to be sent to the scheduler, rather than reading directly from the nova database. Versioning Scheduler Placement Interfaces ------------------------------------------ At the start of kilo, the scheduler is passed a set of dictionaries across a versioned RPC interface. The dictionaries can create problems with the backwards compatibility needed for live-upgrades. Luckily we already have the oslo.versionedobjects infrastructure we can use to model this data in a way that can be versioned across releases. This effort is mostly focusing around the request_spec. See, for example, `this spec`_. 
Sending host and node stats to the scheduler
---------------------------------------------

Periodically nova-compute updates the scheduler state stored in the database.
We need a good way to model the data that is being sent from the compute
nodes into the scheduler, so over time, the scheduler can move to having its
own database. This is linked to the work on the resource tracker.

Updating the Scheduler about other data
----------------------------------------

For things like host aggregates, we need the scheduler to cache information
about those, and know when there are changes so it can update its cache. Over
time, it's possible that we need to send cinder and neutron data, so the
scheduler can use that data to help pick a nova-compute host.

Resource Tracker
-----------------

The recent work to add support for NUMA and PCI pass through has shown we
have no good pattern to extend the resource tracker. Ideally we want to keep
the innovation inside the nova tree, but we also need it to be easier. This
is very related to the effort to re-think how we model resources, as covered
by discussion about `resource providers`_.

Parallelism and Concurrency
----------------------------

The current design of the nova-scheduler is very racy, and can lead to
excessive numbers of build retries before the correct host is found. The
recent NUMA features are particularly impacted by how the scheduler works.
All this has led to many people running only a single nova-scheduler process
configured to use a very small greenthread pool.

The work on cells v2 will mean that we soon need the scheduler to scale for
much larger problems. The current scheduler works best with less than 1k
nodes but we will need the scheduler to work with at least 10k nodes.

Various ideas have been discussed to reduce races when running multiple
nova-scheduler processes. One idea is to use two-phase commit "style"
resource tracker claims.
Another idea involves using incremental updates so it is more efficient to keep the scheduler's state up to date, potentially using Kafka. For more details, see the `backlog spec`_ that describes more of the details around this problem. .. _this spec: http://specs.openstack.org/openstack/nova-specs/specs/kilo/approved/sched-select-destinations-use-request-spec-object.html .. _resource providers: https://blueprints.launchpad.net/nova/+spec/resource-providers .. _backlog spec: http://specs.openstack.org/openstack/nova-specs/specs/backlog/approved/parallel-scheduler.html nova-13.0.0/doc/source/conf.py0000664000567000056710000002214412701407773017256 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # nova documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import subprocess import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
# Sphinx extensions used to build the Nova docs: autodoc pulls in docstrings,
# the ext.* entries are Nova-local extensions (see doc/ext/), oslosphinx
# supplies the OpenStack theme and oslo_config.sphinxconfiggen generates the
# sample configuration file referenced by sample_config_basename below.
extensions = ['sphinx.ext.autodoc',
              'ext.nova_todo',
              'sphinx.ext.coverage',
              'sphinx.ext.graphviz',
              'oslosphinx',
              "ext.support_matrix",
              'oslo_config.sphinxconfiggen',
              'ext.versioned_notifications'
              ]

# Input for the oslo.config sample generator and the basename of the sample
# config it writes into the static output directory.
config_generator_config_file = '../../etc/nova/nova-config-generator.conf'
sample_config_basename = '_static/nova'

# Render .. todo:: directives in the built docs (consumed by ext.nova_todo).
todo_include_todos = True

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'nova'
copyright = u'2010-present, OpenStack Foundation'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from nova.version import version_info
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The short X.Y version.
version = version_info.version_string()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# A list of glob-style patterns that should be excluded when looking for
# source files. They are matched against the source file names relative to the
# source directory, using slashes as directory separators on all platforms.
exclude_patterns = [
    'api/nova.wsgi.nova-*',
    'api/nova.tests.*',
]

# The reST default role (used for this markup: `text`) to use
# for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['nova.']

# -- Options for man page output ----------------------------------------------

# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# One entry per Nova binary; all share the same title/author/section (1).
man_pages = [
    ('man/nova-all', 'nova-all', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-api-metadata', 'nova-api-metadata', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-api-os-compute', 'nova-api-os-compute',
     u'Cloud controller fabric', [u'OpenStack'], 1),
    ('man/nova-api', 'nova-api', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-cells', 'nova-cells', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-cert', 'nova-cert', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-compute', 'nova-compute', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-console', 'nova-console', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-consoleauth', 'nova-consoleauth', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-dhcpbridge', 'nova-dhcpbridge', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-idmapshift', 'nova-idmapshift', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-manage', 'nova-manage', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-network', 'nova-network', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-novncproxy', 'nova-novncproxy', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-spicehtml5proxy', 'nova-spicehtml5proxy',
     u'Cloud controller fabric', [u'OpenStack'], 1),
    ('man/nova-serialproxy', 'nova-serialproxy', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-rootwrap', 'nova-rootwrap',
     u'Cloud controller fabric', [u'OpenStack'], 1),
    ('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-conductor', 'nova-conductor', u'Cloud controller fabric',
     [u'OpenStack'], 1),
]

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] html_last_updated_fmt = subprocess.Popen( git_cmd, stdout=subprocess.PIPE).communicate()[0] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'novadoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Nova.tex', u'Nova Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. 
#latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True nova-13.0.0/doc/source/sample_config.rst0000664000567000056710000000072712701407773021322 0ustar jenkinsjenkins00000000000000========================== Nova Configuration Options ========================== The following is a sample Nova configuration for adaptation and use. It is auto-generated from Nova when this documentation is built, so if you are having issues with an option, please compare your version of Nova with the version of this documentation. The sample configuration can also be viewed in `file form <_static/nova.conf.sample>`_. .. literalinclude:: _static/nova.conf.sample nova-13.0.0/doc/source/_static/0000775000567000056710000000000012701410205017362 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/source/_static/support-matrix.css0000664000567000056710000000117612701407773023137 0ustar jenkinsjenkins00000000000000 .sp_feature_mandatory { font-weight: bold; } .sp_feature_optional { } .sp_feature_choice { font-style: italic; font-weight: bold; } .sp_feature_condition { font-style: italic; font-weight: bold; } .sp_impl_complete { color: rgb(0, 120, 0); font-weight: normal; } .sp_impl_missing { color: rgb(120, 0, 0); font-weight: normal; } .sp_impl_partial { color: rgb(170, 170, 0); font-weight: normal; } .sp_impl_unknown { color: rgb(170, 170, 170); font-weight: normal; } .sp_impl_summary { font-size: 2em; } .sp_cli { font-family: monospace; background-color: #F5F5F5; }nova-13.0.0/doc/source/gmr.rst0000664000567000056710000000565012701410011017254 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2014 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Guru Meditation Reports ======================= Nova contains a mechanism whereby developers and system administrators can generate a report about the state of a running Nova executable. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- A *GMR* can be generated by sending the *USR2* signal to any Nova process with support (see below). The *GMR* will then be outputted standard error for that particular process. For example, suppose that ``nova-api`` has process id ``8675``, and was run with ``2>/var/log/nova/nova-api-err.log``. Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/nova/nova-api-err.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information Threads Shows stack traces and thread ids for each of the threads within this process Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids) Configuration Lists all the configuration options currently accessible via the CONF object for the current process Adding Support for GMRs to New Executables ------------------------------------------ Adding support for a *GMR* to a given executable is fairly easy. First import the module (currently residing in oslo-incubator), as well as the Nova version module: .. 
code-block:: python from oslo_reports import guru_meditation_report as gmr from nova import version Then, register any additional sections (optional): .. code-block:: python TextGuruMeditation.register_section('Some Special Section', some_section_generator) Finally (under main), before running the "main loop" of the executable (usually ``service.server(server)`` or something similar), register the *GMR* hook: .. code-block:: python TextGuruMeditation.setup_autorun(version) Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the inline documentation under :mod:`oslo.reports` nova-13.0.0/doc/source/feature_classification.rst0000664000567000056710000001506212701407773023220 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================== Feature Classification ====================== This document aims to define how we describe features listed in the :doc:`support-matrix`. Aims ==== Our users want the features they rely on to be reliable and always continue to solve for their use case. When things break, users request that we solve their issues quickly. It would be better if we never had those regressions in the first place. We are taking a two-pronged approach: * Tell our users what features are complete, well-documented, and are kept stable by good tests. They will get a good experience if they stick to using those features. 
Please note that the tests are specific to particular combinations of technologies. A deployment's choice of storage, networking and hypervisor makes a big difference to what features will work. * Get help for the features that are not in the above state, and warn our users about the risks of using those features before they are ready. It should make it much clearer how to help improve the feature. Concepts ======== Some definitions to help understand the later part of the document. Users ----- These are the users we will talk about in this document: * application deployer: creates/deletes servers, directly or indirectly via the API * application developer: creates images and apps that run on the cloud * cloud operator: administers the cloud * self service administrator: both runs and uses the cloud Now in reality the picture is way more complex. Specifically, there are likely to be different roles for observer, creator and admin roles for the application developer. Similarly, there are likely to be various levels of cloud operator permissions, some read only, see a subset of tenants, etc. Note: this is not attempting to be an exhaustive set of personas that consider various facets of the different users, but instead aims to be a minimal set of users, such that we use a consistent terminology throughout this document. Feature Group ------------- To reduce the size of the matrix, we organize the features into groups. Each group maps to a set of user stories, that can be validated by a set of scenarios, tests. Typically, this means a set of tempest tests. This list focuses on API concepts like attach and detach volumes, rather than deployment specific concepts like attach iSCSI volume to KVM based VM. Deployment ---------- A deployment maps to a specific test environment. A full description of the environment should be provided, so it's possible to reproduce the test results that are reported for each of the Feature Groups. 
Note: this description includes all aspects of the deployment: the hypervisor, the number of nova-compute services, the storage being used, the network driver being used, the types of images being tested, etc. Feature Group Maturity ----------------------- The Feature Group Maturity rating is specific to the API concepts, rather than specific to a particular deployment. That detail is covered in the deployment rating for each feature group. We are starting out these Feature Group ratings: * Incomplete * Experimental * Complete * Complete and Required * Deprecated (scheduled to be removed in a future release) Incomplete features are those that don't have enough functionality to satisfy real world use cases. Experimental features should be used with extreme caution. They are likely to have little or no upstream testing. With little testing there are likely to be many unknown bugs. For a feature to be considered complete, we must have: * Complete API docs (concept and REST call definition) * Complete Administrator docs * Tempest tests that define if the feature works correctly * Has enough functionality, and works reliably enough to be useful in real world scenarios * Unlikely to ever have a reason to drop support for the feature There are various reasons why a feature, once complete, becomes required, but currently its largely when a feature is supported by all drivers. Note that any new drivers need to prove they support all required features before it would be allowed in upstream Nova. Please note that this list is technically unrelated to the DefCore effort, despite there being obvious parallels that could be drawn. Required features are those that any new technology must support before being allowed into tree. The larger the list, the more features can be expected to be available on all Nova based clouds. Deprecated features are those that are scheduled to be removed in a future major release of Nova. 
If a feature is marked as complete, it should never be deprecated. If a feature is incomplete or experimental for several releases, it runs the risk of being deprecated, and later removed from the code base. Deployment Rating for a Feature Group -------------------------------------- The deployment rating is purely about the state of the tests for each Feature Group on a particular deployment. There will be the following ratings: * unknown * not implemented * implemented: self declare the tempest tests pass * regularly tested: tested by third party CI * checked: Tested as part of the check or gate queue The eventual goal is to automate this list from some third party CI reporting system, but so we can make progress, this will be a manual inspection that is documented by a hand-written ini file. Ideally, this will be reviewed every milestone. Feature Group Definitions ========================= This is a look at features targeted at application developers, and the current state of each feature, independent of the specific deployment. Please note: this is still a work in progress! Key TODOs: * use new API docs as a template for the feature groups, into ini file * add lists of tempest UUIDs for each group * link from hypervisor support matrix into feature group maturity ratings * add maturity rating into the feature groups, with a justification, which is likely to include links to API docs, etc * replace tick and cross in support matrix with "deployment ratings" * eventually generate the tick and cross from live, historical, CI results nova-13.0.0/doc/source/development.environment.rst0000664000567000056710000001457512701410011023362 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================= Development Quickstart ======================= This page describes how to setup and use a working Python development environment that can be used in developing nova on Ubuntu, Fedora or Mac OS X. These instructions assume you're already familiar with git. Following these instructions will allow you to build the documentation and run the nova unit tests. If you want to be able to run nova (i.e., launch VM instances), you will also need to --- either manually or by letting DevStack do it for you --- install libvirt and at least one of the `supported hypervisors`_. Running nova is currently only supported on Linux, although you can run the unit tests on Mac OS X. .. _supported hypervisors: http://wiki.openstack.org/HypervisorSupportMatrix .. note:: For how to contribute to Nova, see HowToContribute_. Nova uses the Gerrit code review system, GerritWorkflow_. .. _GerritWorkflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow .. _HowToContribute: http://docs.openstack.org/infra/manual/developers.html .. _`docs.openstack.org`: http://docs.openstack.org Setup ===== There are two ways to create a development environment: using DevStack, or explicitly installing and cloning just what you need. Using DevStack -------------- See `Devstack`_ Documentation. If you would like to use Vagrant, there is a `Vagrant`_ for DevStack. .. _`Devstack`: http://docs.openstack.org/developer/devstack/ .. _`Vagrant`: https://github.com/openstack-dev/devstack-vagrant/blob/master/README.md .. 
Until the vagrant markdown documents are rendered somewhere on .openstack.org, linking to github Explicit Install/Clone ---------------------- DevStack installs a complete OpenStack environment. Alternatively, you can explicitly install and clone just what you need for Nova development. The first step of this process is to install the system (not Python) packages that are required. Following are instructions on how to do this on Linux and on the Mac. Linux Systems ````````````` .. note:: This section is tested for Nova on Ubuntu (14.04-64) and Fedora-based (RHEL 6.1) distributions. Feel free to add notes and change according to your experiences or operating system. Install the prerequisite packages. On Ubuntu:: sudo apt-get install python-dev libssl-dev python-pip git-core libxml2-dev libxslt-dev pkg-config libffi-dev libpq-dev libmysqlclient-dev graphviz libsqlite3-dev python-tox python3-dev python3 gettext On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux):: sudo yum install python-devel openssl-devel python-pip git gcc libxslt-devel mysql-devel postgresql-devel libffi-devel libvirt-devel graphviz sqlite-devel python3-devel python3 gettext sudo pip-python install tox On openSUSE-based distributions (SLES 12, openSUSE 13.1, Factory or Tumbleweed):: sudo zypper in gcc git libffi-devel libmysqlclient-devel libvirt-devel libxslt-devel postgresql-devel python-devel python-pip python-tox python-virtualenv python3-devel python3 gettext-runtime Mac OS X Systems ```````````````` Install virtualenv:: sudo easy_install virtualenv Check the version of OpenSSL you have installed:: openssl version The stock version of OpenSSL that ships with Mac OS X 10.6 (OpenSSL 0.9.8l) or Mac OS X 10.7 (OpenSSL 0.9.8r) or Mac OS X 10.10.3 (OpenSSL 0.9.8zc) works fine with nova. OpenSSL versions from brew like OpenSSL 1.0.1k work fine as well. Getting the code ```````````````` Once you have the prerequisite system packages installed, the next step is to clone the code. 
Grab the code from git:: git clone https://git.openstack.org/openstack/nova cd nova Building the Documentation ========================== Install the prerequisite packages: graphviz To do a full documentation build, issue the following command while the nova directory is current. .. code-block:: bash tox -edocs That will create a Python virtual environment, install the needed Python prerequisites in that environment, and build all the documentation in that environment. Running unit tests ================== See `Running Python Unit Tests`_. .. _`Running Python Unit Tests`: http://docs.openstack.org/infra/manual/python.html#running-python-unit-tests Using a remote debugger ======================= Some modern IDE such as pycharm (commercial) or Eclipse (open source) support remote debugging. In order to run nova with remote debugging, start the nova process with the following parameters --remote_debug-host --remote_debug-port Before you start your nova process, start the remote debugger using the instructions for that debugger. For pycharm - http://blog.jetbrains.com/pycharm/2010/12/python-remote-debug-with-pycharm/ For Eclipse - http://pydev.org/manual_adv_remote_debugger.html More detailed instructions are located here - http://novaremotedebug.blogspot.com Using fake computes for tests ============================= The number of instances supported by fake computes is not limited by physical constraints. It allows you to perform stress tests on a deployment with few resources (typically a laptop). But you must avoid using scheduler filters limiting the number of instances per compute (like RamFilter, DiskFilter, AggregateCoreFilter), otherwise they will limit the number of instances per compute. 
Fake computes can also be used in multi hypervisor-type deployments in order to take advantage of fake and "real" computes during tests: * create many fake instances for stress tests * create some "real" instances for functional tests Fake computes can be used for testing Nova itself but also applications on top of it. nova-13.0.0/doc/source/support-matrix.rst0000664000567000056710000000361412701407773021530 0ustar jenkinsjenkins00000000000000 Feature Support Matrix ====================== .. warning:: Please note, while this document is still being maintained, this is slowly being updated to re-group and classify features using the definitions described in here: :doc:`feature_classification` When considering which capabilities should be marked as mandatory the following general guiding principles were applied * **Inclusivity** - people have shown ability to make effective use of a wide range of virtualization technologies with broadly varying featuresets. Aiming to keep the requirements as inclusive as possible, avoids second-guessing what a user may wish to use the cloud compute service for. * **Bootstrapping** - a practical use case test is to consider that starting point for the compute deploy is an empty data center with new machines and network connectivity. The look at what are the minimum features required of a compute service, in order to get user instances running and processing work over the network. * **Competition** - an early leader in the cloud compute service space was Amazon EC2. A sanity check for whether a feature should be mandatory is to consider whether it was available in the first public release of EC2. This had quite a narrow featureset, but none the less found very high usage in many use cases. So it serves to illustrate that many features need not be considered mandatory in order to get useful work done. * **Reality** - there are many virt drivers currently shipped with Nova, each with their own supported feature set. 
Any feature which is missing in at least one virt driver that is already in-tree, must by inference be considered optional until all in-tree drivers support it. This does not rule out the possibility of a currently optional feature becoming mandatory at a later date, based on other principles above. .. support_matrix:: support-matrix.ini nova-13.0.0/doc/source/filter_scheduler.rst0000664000567000056710000006621312701407773022041 0ustar jenkinsjenkins00000000000000Filter Scheduler ================ The **Filter Scheduler** supports `filtering` and `weighting` to make informed decisions on where a new instance should be created. This Scheduler supports working with Compute Nodes only. Filtering --------- .. image:: ./images/filteringWorkflow1.png During its work Filter Scheduler iterates over all found compute nodes, evaluating each against a set of filters. The list of resulting hosts is ordered by weighers. The Scheduler then chooses hosts for the requested number of instances, choosing the most weighted hosts. For a specific filter to succeed for a specific host, the filter matches the user request against the state of the host plus some extra magic as defined by each filter (described in more detail below). If the Scheduler cannot find candidates for the next instance, it means that there are no appropriate hosts where that instance can be scheduled. The Filter Scheduler has to be quite flexible to support the required variety of `filtering` and `weighting` strategies. If this flexibility is insufficient you can implement `your own filtering algorithm`. There are many standard filter classes which may be used (:mod:`nova.scheduler.filters`): * |AllHostsFilter| - does no filtering. It passes all the available hosts. * |ImagePropertiesFilter| - filters hosts based on properties defined on the instance's image. It passes hosts that can support the properties specified on the image used by the instance. * |AvailabilityZoneFilter| - filters hosts by availability zone. 
It passes hosts matching the availability zone specified in the instance properties. Use a comma to specify multiple zones. The filter will then ensure it matches any zone specified. * |ComputeCapabilitiesFilter| - checks that the capabilities provided by the host compute service satisfy any extra specifications associated with the instance type. It passes hosts that can create the specified instance type. If an extra specs key contains a colon (:), anything before the colon is treated as a namespace and anything after the colon is treated as the key to be matched. If a namespace is present and is not ``capabilities``, the filter ignores the namespace. For example ``capabilities:cpu_info:features`` is a valid scope format. For backward compatibility, the filter also treats the extra specs key as the key to be matched if no namespace is present; this action is highly discouraged because it conflicts with AggregateInstanceExtraSpecsFilter filter when you enable both filters The extra specifications can have an operator at the beginning of the value string of a key/value pair. If there is no operator specified, then a default operator of ``s==`` is used. Valid operators are: :: * = (equal to or greater than as a number; same as vcpus case) * == (equal to as a number) * != (not equal to as a number) * >= (greater than or equal to as a number) * <= (less than or equal to as a number) * s== (equal to as a string) * s!= (not equal to as a string) * s>= (greater than or equal to as a string) * s> (greater than as a string) * s<= (less than or equal to as a string) * s< (less than as a string) * (substring) * (all elements contained in collection) * (find one of these) Examples are: ">= 5", "s== 2.1.0", " gcc", " aes mmx", and " fpu gpu" * |AggregateInstanceExtraSpecsFilter| - checks that the aggregate metadata satisfies any extra specifications associated with the instance type (that have no scope or are scoped with ``aggregate_instance_extra_specs``). 
It passes hosts that can create the specified instance type. The extra specifications can have the same operators as |ComputeCapabilitiesFilter|. To specify multiple values for the same key use a comma. E.g., "value1,value2" * |ComputeFilter| - passes all hosts that are operational and enabled. * |CoreFilter| - filters based on CPU core utilization. It passes hosts with sufficient number of CPU cores. * |AggregateCoreFilter| - filters hosts by CPU core number with per-aggregate ``cpu_allocation_ratio`` setting. If no per-aggregate value is found, it will fall back to the global default ``cpu_allocation_ratio``. If more than one value is found for a host (meaning the host is in two different aggregates with different ratio settings), the minimum value will be used. * |IsolatedHostsFilter| - filter based on ``image_isolated``, ``host_isolated`` and ``restrict_isolated_hosts_to_isolated_images`` flags. * |JsonFilter| - allows simple JSON-based grammar for selecting hosts. * |RamFilter| - filters hosts by their RAM. Only hosts with sufficient RAM to host the instance are passed. * |AggregateRamFilter| - filters hosts by RAM with per-aggregate ``ram_allocation_ratio`` setting. If no per-aggregate value is found, it will fall back to the global default ``ram_allocation_ratio``. If more than one value is found for a host (meaning the host is in two different aggregates with different ratio settings), the minimum value will be used. * |DiskFilter| - filters hosts by their disk allocation. Only hosts with sufficient disk space to host the instance are passed. ``disk_allocation_ratio`` setting. The virtual disk to physical disk allocation ratio, 1.0 by default. The total allowed allocated disk size will be physical disk multiplied this ratio. * |AggregateDiskFilter| - filters hosts by disk allocation with per-aggregate ``disk_allocation_ratio`` setting. If no per-aggregate value is found, it will fall back to the global default ``disk_allocation_ratio``. 
If more than one value is found for a host (meaning the host is in two or more different aggregates with different ratio settings), the minimum value will be used. * |NumInstancesFilter| - filters compute nodes by number of running instances. Nodes with too many instances will be filtered. ``max_instances_per_host`` setting. Maximum number of instances allowed to run on this host. The host will be ignored by the scheduler if more than ``max_instances_per_host`` already exist on the host. * |AggregateNumInstancesFilter| - filters hosts by number of instances with per-aggregate ``max_instances_per_host`` setting. If no per-aggregate value is found, it will fall back to the global default ``max_instances_per_host``. If more than one value is found for a host (meaning the host is in two or more different aggregates with different max instances per host settings), the minimum value will be used. * |IoOpsFilter| - filters hosts by concurrent I/O operations on it. hosts with too many concurrent I/O operations will be filtered. ``max_io_ops_per_host`` setting. Maximum number of I/O intensive instances allowed to run on this host, the host will be ignored by scheduler if more than ``max_io_ops_per_host`` instances such as build/resize/snapshot etc are running on it. * |AggregateIoOpsFilter| - filters hosts by I/O operations with per-aggregate ``max_io_ops_per_host`` setting. If no per-aggregate value is found, it will fall back to the global default ``max_io_ops_per_host``. If more than one value is found for a host (meaning the host is in two or more different aggregates with different max io operations settings), the minimum value will be used. * |PciPassthroughFilter| - Filter that schedules instances on a host if the host has devices to meet the device requests in the 'extra_specs' for the flavor. * |SimpleCIDRAffinityFilter| - allows a new instance on a host within the same IP block. 
* |DifferentHostFilter| - allows the instance on a different host from a set of instances. * |SameHostFilter| - puts the instance on the same host as another instance in a set of instances. * |RetryFilter| - filters hosts that have been attempted for scheduling. Only passes hosts that have not been previously attempted. * |TrustedFilter| (EXPERIMENTAL) - filters hosts based on their trust. Only passes hosts that meet the trust requirements specified in the instance properties. * |TypeAffinityFilter| - Only passes hosts that are not already running an instance of the requested type. * |AggregateTypeAffinityFilter| - limits instance_type by aggregate. This filter passes hosts if no instance_type key is set or the instance_type aggregate metadata value contains the name of the instance_type requested. The value of the instance_type metadata entry is a string that may contain either a single instance_type name or a comma separated list of instance_type names. e.g. 'm1.nano' or "m1.nano,m1.small" * |ServerGroupAntiAffinityFilter| - This filter implements anti-affinity for a server group. First you must create a server group with a policy of 'anti-affinity' via the server groups API. Then, when you boot a new server, provide a scheduler hint of 'group=' where is the UUID of the server group you created. This will result in the server getting added to the group. When the server gets scheduled, anti-affinity will be enforced among all servers in that group. * |ServerGroupAffinityFilter| - This filter works the same way as ServerGroupAntiAffinityFilter. The difference is that when you create the server group, you should specify a policy of 'affinity'. * |AggregateMultiTenancyIsolation| - isolate tenants in specific aggregates. To specify multiple tenants use a comma. Eg. "tenant1,tenant2" * |AggregateImagePropertiesIsolation| - isolates hosts based on image properties and aggregate metadata. Use a comma to specify multiple values for the same property. 
The filter will then ensure at least one value matches. * |MetricsFilter| - filters hosts based on metrics weight_setting. Only hosts with the available metrics are passed. * |NUMATopologyFilter| - filters hosts based on the NUMA topology requested by the instance, if any. Now we can focus on these standard filter classes in some detail. We'll skip the simplest ones, such as |AllHostsFilter|, |CoreFilter| and |RamFilter|, because their functionality is relatively simple and can be understood from the code. For example class |RamFilter| has the next realization: :: class RamFilter(filters.BaseHostFilter): """Ram Filter with over subscription flag""" def host_passes(self, host_state, filter_properties): """Only return hosts with sufficient available RAM.""" instance_type = filter_properties.get('instance_type') requested_ram = instance_type['memory_mb'] free_ram_mb = host_state.free_ram_mb total_usable_ram_mb = host_state.total_usable_ram_mb used_ram_mb = total_usable_ram_mb - free_ram_mb return total_usable_ram_mb * FLAGS.ram_allocation_ratio - used_ram_mb >= requested_ram Here ``ram_allocation_ratio`` means the virtual RAM to physical RAM allocation ratio (it is ``1.5`` by default). The |AvailabilityZoneFilter| looks at the availability zone of compute node and availability zone from the properties of the request. Each compute service has its own availability zone. So deployment engineers have an option to run scheduler with availability zones support and can configure availability zones on each compute host. This class's method ``host_passes`` returns ``True`` if availability zone mentioned in request is the same on the current compute host. The |ImagePropertiesFilter| filters hosts based on the architecture, hypervisor type and virtual machine mode specified in the instance. For example, an instance might require a host that supports the ARM architecture on a qemu compute host. The |ImagePropertiesFilter| will only pass hosts that can satisfy this request. 
These instance properties are populated from properties defined on the instance's image. E.g. an image can be decorated with these properties using ``glance image-update img-uuid --property architecture=arm --property hypervisor_type=qemu`` Only hosts that satisfy these requirements will pass the |ImagePropertiesFilter|. |ComputeCapabilitiesFilter| checks if the host satisfies any ``extra_specs`` specified on the instance type. The ``extra_specs`` can contain key/value pairs. The key for the filter is either non-scope format (i.e. no ``:`` contained), or scope format in capabilities scope (i.e. ``capabilities:xxx:yyy``). One example of capabilities scope is ``capabilities:cpu_info:features``, which will match host's cpu features capabilities. The |ComputeCapabilitiesFilter| will only pass hosts whose capabilities satisfy the requested specifications. All hosts are passed if no ``extra_specs`` are specified. |ComputeFilter| is quite simple and passes any host whose compute service is enabled and operational. Now we are going to |IsolatedHostsFilter|. There can be some special hosts reserved for specific images. These hosts are called **isolated**. So the images to run on the isolated hosts are also called isolated. The filter checks if ``image_isolated`` flag named in instance specifications is the same as the host. Isolated hosts can run non isolated images if the flag ``restrict_isolated_hosts_to_isolated_images`` is set to false. |DifferentHostFilter| - method ``host_passes`` returns ``True`` if the host to place an instance on is different from all the hosts used by a set of instances. |SameHostFilter| does the opposite to what |DifferentHostFilter| does. ``host_passes`` returns ``True`` if the host we want to place an instance on is one of the hosts used by a set of instances. |SimpleCIDRAffinityFilter| looks at the subnet mask and investigates if the network address of the current host is in the same sub network as it was defined in the request. 
|JsonFilter| - this filter provides the opportunity to write complicated queries for the hosts capabilities filtering, based on simple JSON-like syntax. There can be used the following operations for the host states properties: ``=``, ``<``, ``>``, ``in``, ``<=``, ``>=``, that can be combined with the following logical operations: ``not``, ``or``, ``and``. For example, the following query can be found in tests: :: ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024] ] This query will filter all hosts with free RAM greater or equal than 1024 MB and at the same time with free disk space greater or equal than 200 GB. Many filters use data from ``scheduler_hints``, that is defined in the moment of creation of the new server for the user. The only exception for this rule is |JsonFilter|, that takes data from the schedulers ``HostState`` data structure directly. Variable naming, such as the ``$free_ram_mb`` example above, should be based on those attributes. The |RetryFilter| filters hosts that have already been attempted for scheduling. It only passes hosts that have not been previously attempted. If a compute node is raising an exception when spawning an instance, then the compute manager will reschedule it by adding the failing host to a retry dictionary so that the RetryFilter will not accept it as a possible destination. That means that if all of your compute nodes are failing, then the RetryFilter will return 0 hosts and the scheduler will raise a NoValidHost exception even if the problem is related to 1:N compute nodes. If you see that case in the scheduler logs, then your problem is most likely related to a compute problem and you should check the compute logs. The |TrustedFilter| filters hosts based on their trust. Only passes hosts that match the trust requested in the ``extra_specs`` for the flavor. 
The key for this filter must be scope format as ``trust:trusted_host``, where ``trust`` is the scope of the key and ``trusted_host`` is the actual key value. The value of this pair (``trusted``/``untrusted``) must match the integrity of a host (obtained from the Attestation service) before it is passed by the |TrustedFilter|. The |NUMATopologyFilter| considers the NUMA topology that was specified for the instance through the use of flavor extra_specs in combination with the image properties, as described in detail in the related nova-spec document: * http://git.openstack.org/cgit/openstack/nova-specs/tree/specs/juno/virt-driver-numa-placement.rst and try to match it with the topology exposed by the host, accounting for the ``ram_allocation_ratio`` and ``cpu_allocation_ratio`` for over-subscription. The filtering is done in the following manner: * Filter will attempt to pack instance cells onto host cells. * It will consider the standard over-subscription limits for each host NUMA cell, and provide limits to the compute host accordingly (as mentioned above). * If instance has no topology defined, it will be considered for any host. * If instance has a topology defined, it will be considered only for NUMA capable hosts. To use filters you specify two settings: * ``scheduler_available_filters`` - Defines filter classes made available to the scheduler. This setting can be used multiple times. * ``scheduler_default_filters`` - Of the available filters, defines those that the scheduler uses by default. 
The default values for these settings in nova.conf are: :: --scheduler_available_filters=nova.scheduler.filters.all_filters --scheduler_default_filters=RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter' With this configuration, all filters in ``nova.scheduler.filters`` would be available, and by default the |RamFilter|, |ComputeFilter|, |AvailabilityZoneFilter|, |ComputeCapabilitiesFilter|, |ImagePropertiesFilter|, |ServerGroupAntiAffinityFilter|, and |ServerGroupAffinityFilter| would be used. To create **your own filter** you must inherit from |BaseHostFilter| and implement one method: ``host_passes``. This method should return ``True`` if a host passes the filter. It takes ``host_state`` (describing the host) and ``filter_properties`` dictionary as the parameters. As an example, nova.conf could contain the following scheduler-related settings: :: --scheduler_driver=nova.scheduler.FilterScheduler --scheduler_available_filters=nova.scheduler.filters.all_filters --scheduler_available_filters=myfilter.MyFilter --scheduler_default_filters=RamFilter,ComputeFilter,MyFilter With these settings, nova will use the ``FilterScheduler`` for the scheduler driver. The standard nova filters and MyFilter are available to the FilterScheduler. The RamFilter, ComputeFilter, and MyFilter are used by default when no filters are specified in the request. Each filter selects hosts in a different way and has different costs. The order of ``scheduler_default_filters`` affects scheduling performance. The general suggestion is to filter out invalid hosts as soon as possible to avoid unnecessary costs. We can sort ``scheduler_default_filters`` items by their costs in reverse order. For example, ComputeFilter is better before any resource calculating filters like RamFilter, CoreFilter. 
In medium/large environments having AvailabilityZoneFilter before any capability or resource calculating filters can be useful. Weights ------- Filter Scheduler uses the so-called **weights** during its work. A weigher is a way to select the best suitable host from a group of valid hosts by giving weights to all the hosts in the list. In order to prioritize one weigher against another, all the weighers have to define a multiplier that will be applied before computing the weight for a node. All the weights are normalized beforehand so that the multiplier can be applied easily. Therefore the final weight for the object will be:: weight = w1_multiplier * norm(w1) + w2_multiplier * norm(w2) + ... A weigher should be a subclass of ``weights.BaseHostWeigher`` and they can implement both the ``weight_multiplier`` and ``_weight_object`` methods or just implement the ``weight_objects`` method. ``weight_objects`` method is overridden only if you need access to all objects in order to calculate weights, and it just return a list of weights, and not modify the weight of the object directly, since final weights are normalized and computed by ``weight.BaseWeightHandler``. The Filter Scheduler weighs hosts based on the config option `scheduler_weight_classes`, this defaults to `nova.scheduler.weights.all_weighers`, which selects the following weighers: * |RAMWeigher| Compute weight based on available RAM on the compute node. Sort with the largest weight winning. If the multiplier is negative, the host with least RAM available will win (useful for stacking hosts, instead of spreading). * |DiskWeigher| Hosts are weighted and sorted by free disk space with the largest weight winning. If the multiplier is negative, the host with less disk space available will win (useful for stacking hosts, instead of spreading). * |MetricsWeigher| This weigher can compute the weight based on the compute node host's various metrics. 
The metrics to be weighed and their weighing ratios are specified in the configuration file as follows::
|AvailabilityZoneFilter| replace:: :class:`AvailabilityZoneFilter ` .. |BaseHostFilter| replace:: :class:`BaseHostFilter ` .. |ComputeCapabilitiesFilter| replace:: :class:`ComputeCapabilitiesFilter ` .. |ComputeFilter| replace:: :class:`ComputeFilter ` .. |CoreFilter| replace:: :class:`CoreFilter ` .. |AggregateCoreFilter| replace:: :class:`AggregateCoreFilter ` .. |IsolatedHostsFilter| replace:: :class:`IsolatedHostsFilter ` .. |JsonFilter| replace:: :class:`JsonFilter ` .. |RamFilter| replace:: :class:`RamFilter ` .. |AggregateRamFilter| replace:: :class:`AggregateRamFilter ` .. |DiskFilter| replace:: :class:`DiskFilter ` .. |AggregateDiskFilter| replace:: :class:`AggregateDiskFilter ` .. |NumInstancesFilter| replace:: :class:`NumInstancesFilter ` .. |AggregateNumInstancesFilter| replace:: :class:`AggregateNumInstancesFilter ` .. |IoOpsFilter| replace:: :class:`IoOpsFilter ` .. |AggregateIoOpsFilter| replace:: :class:`AggregateIoOpsFilter ` .. |PciPassthroughFilter| replace:: :class:`PciPassthroughFilter ` .. |SimpleCIDRAffinityFilter| replace:: :class:`SimpleCIDRAffinityFilter ` .. |DifferentHostFilter| replace:: :class:`DifferentHostFilter ` .. |SameHostFilter| replace:: :class:`SameHostFilter ` .. |RetryFilter| replace:: :class:`RetryFilter ` .. |TrustedFilter| replace:: :class:`TrustedFilter ` .. |TypeAffinityFilter| replace:: :class:`TypeAffinityFilter ` .. |AggregateTypeAffinityFilter| replace:: :class:`AggregateTypeAffinityFilter ` .. |ServerGroupAntiAffinityFilter| replace:: :class:`ServerGroupAntiAffinityFilter ` .. |ServerGroupAffinityFilter| replace:: :class:`ServerGroupAffinityFilter ` .. |AggregateInstanceExtraSpecsFilter| replace:: :class:`AggregateInstanceExtraSpecsFilter ` .. |AggregateMultiTenancyIsolation| replace:: :class:`AggregateMultiTenancyIsolation ` .. |NUMATopologyFilter| replace:: :class:`NUMATopologyFilter ` .. |RAMWeigher| replace:: :class:`RAMWeigher ` .. 
|AggregateImagePropertiesIsolation| replace:: :class:`AggregateImagePropertiesIsolation ` .. |MetricsFilter| replace:: :class:`MetricsFilter ` .. |MetricsWeigher| replace:: :class:`MetricsWeigher ` .. |IoOpsWeigher| replace:: :class:`IoOpsWeigher ` .. |ServerGroupSoftAffinityWeigher| replace:: :class:`ServerGroupSoftAffinityWeigher ` .. |ServerGroupSoftAntiAffinityWeigher| replace:: :class:`ServerGroupSoftAntiAffinityWeigher ` .. |DiskWeigher| replace:: :class:`DiskWeigher ` nova-13.0.0/doc/source/images/0000775000567000056710000000000012701410205017201 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/source/images/PowerStates1.png0000664000567000056710000052741512701407773022306 0ustar jenkinsjenkins00000000000000‰PNG  IHDR§}¾ç½{ pHYsœœ&Í:4®¿IDATxÚì] \LÛ?¥E›J5Ê^Ñ‚’¶²dMv%[Ö(¤Å³d©dI¶GgÉßö’5;/e§BÖ(„ •J ÷îÜév›™¦™ifjê|?¹Î=sî9÷žïýžížs~Mp2‹&( ˆ?ÄâñW«((ÀKJ0ÁÃGE/_¾¼^ñ‡aXµ sÄñxÓÈÊú¶ôô*ÜÁÁÁ‚\BÑÀ`ü1™8é&$ÖÝÎñ³ÖªùM6jŒë´ÀD~@úÓIDBx ± 8Ýg¿ûŸ@sˆ4_1//áô ƒ´Á¿[ýHŸÇw/;_ÍdVzá¢\1¯ã_êgù õç‚a$y%(•û»GÜØ6 ›ó¯ˆ]~5Hlýe½cèµ"}œ‡÷#ýMf\§ô÷£¨ÖîP²ü‘…g\yªDó„ ɼðä€õ‚˜Ìã–_Èî¯Î‘SDÞ¥]ÍœùÇ0 ëõŒ¶§&žçï@ŠíÆåÉ“ï‹8¨š©½î˜g4NÅ$¯‰ÀN­ÊK8 çÔX•z&îŸÒž®ÚD<ŽaÌø™l¿°òb¦žŽn&¬Òú«xž_¿5ä3®ÖJ¡À{zÐúåî+¬£Cÿj_+k0 œ<¢lÜ;‡3Ä·Ï1qñnn£sJ€–r1À•cb¢­,z*Ê-ýÜŒGšm{}||ùÁó7S¶æ¬_qÝWÌ¥=tŒ´Áš¿A Œºsx·Ð!Ãüï–ÇÓY9Fž¤N]½b·„Em]–³Ät° Ï?I¸¤ô‡aíH~84 «%¶ËŸJ¢?õë¢àh¢½ˆÇ;÷æàë‹nnKIé\ýwŸglãÌCÄ»?ö/0›Ó¶†qÄL:ŽÝd;rrJ´´”*^5`Å‹<âéîT* yDÍ=w%ëÁWK¼üÜæÖÇßRO²µBBßn…^Tûþ´mÓ¼9>›+|¾Å‡:X1P¯ËíÞl˜@øžÀ,úO°(ƒý>‘ÐÉ«Óõßœ˜·å =s5èòg¼/»}\ÊÝ.í:j£]Kö…˜¢cµ’­ª¹,HG…çåæææ)))U…W5[*BFð¼I??¿uëÖñ¼÷j¯µ··OHH^û…ÊʸÏÒO9~åã#5Lo®Ã€ŽG7.{Œ¬¾I²ù¯(ûîÊF>~Sù‘cÔšßàÚP—)>¤ÛLWQ²ú+{µS®Ý lüÿ@Épr*7Ii¦&¬Þ}gžoXnf<’äAt´°>ož=þG¯ªSè5È úœÛ»90ÀŸî¿/æD7Ûþ$yêÚºä…ÜÙGú“äAtíÝú¼Hº1Æ¥/Ï»-(ùöú+€äQ>…Mµà%<‰Ùuàp÷¾Ã(ò¨äjÎ"ïõG*Ht·m®õúÐÚìÕdl:'î.-=‹‹Á …‚¿m?þ¨U€”bg2…aQcÛý=L\}R¼²KɺA¹!y­¦Ü@·wøizJI\ú¸³×Ü{ÎûWOõÛAîåM,z’ÈãéîU%MüJÇñº˜”dWJÝmpÿ”îö´ ÃYDÙS‘À#Uÿu騾«-±ú~·3Tô)ò6x½CbæÒϵdbTÞ‡Çw[šôä7@e\‘}·ÎÅô*¯3ve•Àêeñ¼¹á›·R|¼‰Õƒm-ù·2ÅŸ1;Šh£%%ZÚñKÝ´ÈÖÝ´3ý6NU~Õü(¬I!Z'Æ_\ú:ðÏk:fMv§†U ¼ç7/™¹ 4šåÁ"ÂÕÅYðÔÙø’ šW9Ž£KpvïæAžóª ܺE}?ƒÏ\U&r÷ª ܨ´h,<Áã*u³Kǰ°Ažó 
à&ñ¾~{c#—ÕñOø0Woh¶·äñàO›¶ls5¥r`EžDç&íöåÑÇðŒð之­Í+ R-¹&MªÌŸ ¢¯’ô¾°qy˜g7/så=ä6Ø®;ÇmpÉ<}ìz ˜¬ç|3*ÜëÕ6ÐYضÔW!ÆÒ ÔÔÔÌ\‡qâ  ïbššZm%Ý€ø#URϪñ¾fÍÄŸ¬B@ò L»uëöæÍ£ýùó'Ÿfľl ÕœX°`àä9ØDˆ<ûš·†Å_óæÍ‰Î7ß¹b»wï>zôhûöíÍÍÍ«pêÔ©0<*?¥ââb’¹ùóçGFFò 9jÔ(ú/²Qý'%hii}ûFô ù·oß¾I“& BžÍÍ›7QûEz É#±gϞɓ'CG@@@XXÅ"jÞÿþûï?ˆ]F/_¾Ü·/ù‚“§¡¡‘——‡ø'Hò8@‘Ç`0è»MäAä‘+ÊZ¶lùáášB7n”y Ž?úÖ¦'N$cÇŽåØ*ÄØØøñãÇ00©rE$¯E‹Ÿ?æ“Ä•+WúôaO{„ä¡òSœà®Øìííé9òرcÔ;w&QQQjjjtòúõëG®~ruu%=)òPý' P{ R{@ò8ê9²½êåUiwçÑ£GC’îÅ‹×Öý78þ”••yîLÆs/ooï-[¶p·WMLL(òÈ0ˆ?éõ)wZZZ‡ø¦ˆqvv¾xñ"åO‘G…Ù¶mÛœ9s¤³g$*?ÙàO… .ÐÉã Hä5Ä_ž>} {ßùùù†ïߟmç@¨¡jÄŸ¤ðû÷oÁÉ#‘››«©©Y×Èã͟צMQÕ5ÄE6¡Së€u•°[ÊCò<|øð'NÔ‘‘Ã?èHÞ6lÚîÔ¤¤tzÏ ÇK±v°K¤/»úÍÀÕ«Wœœ$¿8ËOrœÂ|ö Ý݆þ¡öþ þê(Ó]xa'ß»wv̼ŠÁ`´k×îåË—µÃ}zu\RÝ‚4*'»…'|öŠèÑ£‡àSSSaµ_Ä^Æ1›Ó=¡ ¹w{⾊ÉĹ=IË9þšš&>MšàÚÚ˜dù­`Üþ‚ùÛ6ˆ½¶ÝÃÒéöDzŽ]WŠÝÒâb¹Æ×ASgÊ'ïîJõKëøC``ª°â(Q|}°JÕŒ°d÷ùóoÉò7ç,›³Ñcá±íœó¯·±–½«Wj7˳vú£kT£ç2¯ëüiÁ¶ôÝTÒ¼ÍÙbÎEÊN*tdeáU4Cˆ 9p4nâh—JÜ];=oÀ€áôÀ9¬Tà%Ϙxg†#ek—Áè.¥úÎ ¹ì»QÃß§.CWß`y ½æÌ'IbÈ#iX³*Æ»ÒlZ“ÄO™Âi&W«ÜLgg„/[ºrQ ë—$Ô~©)°-âj™.`AÊ30ŸØž!ı‹¼eˆ¬ð—™Y €Ôå㛸Sà«®¸Rº¢ADÄ•·‘G7½!®µwh\ûåçÜc™“®SêægooÜph¢’ºz2üA±%øÁÝä"Ö?,\yЮ»iÊãlÒ_;#öƒ7´xœ?»?D÷ý‘φÀ—½íËåKxß~µÍ½`ÄNÀM\öÇ‚í5ƒéÁN[Á÷!þœûÏ"ë³Òè(üœÈWOÄ:¹‡Ê hqöí7£Îµ_dkª¼èàÎ*GÔ;Ú±'J³'tTnÆÝÉ«CM˜&ÕµÍHScƒˆ«§,VÉǃ ff<–½¯ ûè [ulu‰?öBê¹§R¶ÆÁÓm›æÍñÙ h_,—mؼÀ]Ö),+Û)'WéìÅ òÎý=x†öXö­¤@Qéˆ Œ¿<¥lÝI¹»ŽÚh¯oM¸ÐÈ<@NnWÕÈ/¼¢’Z­°%ŠþxyÞG½~Ù¨ÿˆ?„ªø«÷};¤?Äâñ‡øC@üI¼v«ãî}&dœ¿¯×€ªÿk"ì1ß\B ¡¦Š+)³G“ƒƒƒ¹dùòå”{ÙšåK‚•ø†ÏÈÌ64Ðù>é¬×îÃÅŸ<ûÌy¸t€k |täú±~¶ä¯¾Òz¿¼¼– {É®m+—. 
>þD ‡á¬ýàCÖø'm ?ÆÚýÊ {k°âK(3>àN^ßôGáÒzUþè--ê ᢭u£ö‹“ä˜2eLÑ:ø¿ÛËÿغ-œµu±•\?ŠÈ%¦”rݤéˆm¶Íæs”+æ‹×aþØ…'$HhÛ:,àÄ”™²—ëZ;,†‚¸ªãÃL‰€>°ðäÅ=<öàðqs+Ù¼y'-XiE‰Qúe<šOÅgoÖÑŽ+±?½G­»áªM½m¹÷ÿÔ쾚 íø'†1ó“û»µ§aê©%¦C +»Ì´ Œýé¯ìzglÑE\Šú+Ž/éG—àzÿ£Ùÿ]…ÿ>mbü¡nmò-vKXU1VSì[ÇÄøBþx÷\ˆ9¶Ëmä´˜˜h77wúksê\¿ž†jº?–]®JrŒS Ò1oÙòÍ+ƒY‘¬^æe¯ÏV5oXŒ÷#_2¹&Máq…5€ä±ß<ýAÌ»q#m°c·>ÔÃŽ‰»bóKÅäÛ Ö¾Â~Ûã³+Vs.2çPÖX´öÜ¢µ‚&Q¹àõ¼4Ê%ÙÑ.î¿yó< ˜Ëª'qKÏ,éç6ç"S÷>ý bø&DÆ+£X;àUo\¨Ñú‰ÜpC£ÇnâðòcLIêÏÜÊ’‡a³BìvÌÚ@ìªûä'0aÍ*±co[~âèþc&±JQ¨‡uvçd&ó(wY ¦²³¯NU`bã*)ä"¬Ýw%rþpËò±g4’䱋% k'™æ2Æçr%%¥’’±gÏû¤t©ê©ÕôÒà¯Â<ívÕ¹Z+\§¯DoÄ`˜¶¶6[–¨ûäÕ2u™¼ŠoýúõÂnEÒ€ÊOÒh{]ŸtÈ“Uþ$JÞ‘#GÆŒX{– µí|m¿èêê~üø±îäIÒf@Ãå¯.·xñâððpºÏ”)S¤¹|Cì?¸»»GGG×$†™3gîØ±:8È"Ù¨ëâ2ÌŸPäq+ ‚$ãÆ;tèÐYYÖÅe»ÿnccsóæMABîß¿Ÿ›¿ª y¨ü’¡  ê#ÊøYÇŽ%ù²eËV®\‰ø“ ŒŒŒ$ym‘×€øc0†„-OØy@ågÝB‹- Šø«sPUU¼‡Ú/uÊ,Ëç‚ÀÒÒñW#<~üØÄÄD¼qÊËË ²sçΈ¿!))Iìü1™L±×”u‘?jy ÇœŸå ¹Áöšä|È;›œ­|.R“ºœ‚-"·ýå0Ò³[ÿÑ>~+Ê|fÛÓ‚Oà°ðu<æ’îj@IQ‘g°i¯ª®ùÀ±0Îë§þ7×kZUþµ{/—Qv£¦ÁXÞ»n;T{à Oߪ6'øN‰?;yìHþS²Š1¬t\:åç;_¢ús€¬ à,‹¡Ä|b·Ol8{H’7hýCß5¿o¥e/¾ü„äQ>Í :Ã|4ÓåÁÊ®‡»÷F‘‘V@À0Huj;tô1Öl"'Wi1ÃÏ_øãìï<Ê×hUUê1ÃfQs¶XÍAŸä«qSÆ»U˜$¢ß8/èsñÀ_‹-?,IK­Î²=JX³­89• sÎÏŒT|åbðøšóÉ«*›82ñÁË·³H cF/2v_ÿdr_ÃÅ7–:·â(f\0lMV ën‰cÝ^¾ù°%WO®s2 9«¡'l"Qü‘È+_©ÅÌx©¡ÛRQI)=ƒid¤–œ¯l(×L"ú#nº,’Çâ’èü®xƒ¯`7Xâžï$®×¹¨°H©™ÎO¢yFììk®7/!ksŒ—õä¨Û{\9XÊ)(®Š<3üü(Ï¡Ãú’¹£ÝsGøwmæówÊ‘é@‘™‘!;NröbaAõ~|=XE¾´9°Ç¹•ÅüË»l¸n¹TÈ“WMÝÛ\O ž4oš’ZBÆÃqA|Æ·•½5+’è¸$%~ÉŸ¸à.ŠJ’á¤bê])óÌûRKü/ç†÷Ób?Áw±Y|µ17uŒJE#›`’GcK&WûÿALbëݽYÿ©˜PYŸB.5Å£(ŽÃqz}9/òÒ¼HV¥ ¯º¶Ý×|žl¦[ÉŽÉCÅ4R!yðx:Âê8Qü©™r6RL×Ò›0îë¤_ÿ5WSͤ­›7},¾þù§¢Œý„E¨¢DïážÝøäXAÞ‹¤-{WîVz-¦yN”Œþ¤ŲÌoU®.çhþß¿}€{‹ˆÍïÄ\1gê×núªÔU|–JÅŸ( :XÑ'¼™€©CxMvçs£ÊÓ—¾|?üïŒÙ¸Q2ÏTÕþýÛGñøjcªÍ9n°xá?ÿµp\m^ÃkS³K¹ƒÛ·%Ð1uª§£½!ÂyÂLî°§_©U±ÒUGQÀÀK‹'ÕŒ¼:4þéãMÔ97^~TVa×TÏo];‚·)¥)ãGqôÛâþ‰XÄ™Aü;t"®³µÛëw™CÕŒF‰Å ‰ ‰îeæÉɳ§É¼ypsø€>|n˜ˆ9æxgÛ|”Jÿ)$4lÐäù•‹¡œ¯[ã×½ÛWŒu™¨Æ=¿Ì¸È«hò 'š [¶lñööæŸe= ˆf¬……Err²_òØ1»üÑ‚¤1~]_áà è ­6@ü víÚ RQQ‚Í×Ç;ñ'4””” )øL'Ý»w¿_ÐM455‘þDÄŽ;fΜYm°øøx¡¢œ<ˆ~ýú!þDlŸ²²ò“'OŒQýW·Y0¤¬Úþ¥¥¥ü'2}ÿþ\)Ñ·oßË—/‹1éž={Þ½{ñW#@òœœœ®^½ºhÑ¢õë×s 
É2d$OCC#//O,é:::J‚<Ð÷äÖþðøîÝ»V­Zq‡9}ú4iÍnXågAAšç—\HåÖÖÖ5jÔöíÛcbb8ºóŠŠŠô­+ccc]]]I÷ºuëüÊçä¡ö§D@'ïÎ;VVV¤û?þøï¿ÿ #;;›ôY¸pᆠè×rì;š““C:RRRüh“9PÿA ª·   ’<:èäñ\eïåÅÞãšœêïææFIñ'qPm“ ª?7™?K‡¾~G‡°7ã'§€²&òºÝ2«?Øáª5kÖÔÍÇá]~úŒéï1d,5ùšƒE{ àúmB>µ¡¡azzº€¥¶ã=oûcàý1D“&M$µ?Å û˜L&ÎáÃm²%88˜Ãndff·MêB’3d99}ÏË(ÏvÂ, S‘uNTõ6€ÿÝ%ìm{ü}®)1™n±9X›LüZ€2{ÛX˜´âc¯NÁÀ&.»>ÓIÒ }}°JÕl)•¨LŽVðwöä~ÖåËÞÁr‡×ÑìYS©Ö¦ á¿'%²ñ’&ßɰ°2\3oÍpd2ã×ÝÎñ³Ö" kò¿ðüùѯ†E÷7>hµài¤q↧si+ûùƒŠ ;³¤ŸTõW¾T̃þ·ý1«•|Œ•d£9£Ûªm¹ýÛ$†•c`äɽ{·zz†5Ù¶«iD—Á°ZŲ}LzBòBb9w“äÁئ»uÝ ëÀà5—Hÿ¨ ó¼nFí1€nÿX% ò@eó| C²Ì$c;༕þ“DÉÈþQýi¿ÔWp´Ýi0/_¾\¨f¶I þDG‡ÒÒÒ„½J__ÿýû÷ÕkÙ²%ÒŸ!š2¿ Í©‹|Så¯_¿ ¾W,âOüúõ«qãÆB]ÉkÕªÕ»wïø“З}Ä_%PäµoßþåË—Õ†'Íìð'Éd2 HÞ—/_š7oŽø“(ò¼¼¼¢¢¢x†ÉÍÍ­Ę̂XVÏž&*ý)0 ½þëß¿ÿ… ¸É>|¸‡‡‡««+}ÅìÛ·o¡gLLŒ¶¶6éC’Ê÷|Fó—¤ HÏzëĉ”r ¦¦¦©©©×®]#ý!UPmTx±oøŒøü Àox’‡ï$¹„nÓJÒ'MòçÓ~©»ý¿ºcLÜð"ù`±E§n5¤&…!þk*Ö¸.ñ‡µ Þþ[oÚ)ÜØ¸ñÓ‚:U³[%å܈‹ûéâÒ¸Žéïí¿”óÄÑý#ÆL¬ý >YÌWs]ºÒAKÖ¹\°`éϲÒ&r{…áAá[:..äÿ‚öÿ,»cÉìf&LÀ=fÌØd¯¹XÖ96‘“¾ÓÆÞªW¬ÿ(n’îW"iÿΠ 3«hüñ‡øCþÐî¨øD@@òC@@@òC@@òC@@@òk((ÀKJ$8­(**˜{Ÿ)$?$?ˆï×0EGÖ÷‰7—ɋǸô„ÞWNîßwìòþýû¡ûìÙ³ƒ ¢;"VøüÝzíÊÙÐýäúáý—^«ÿ”è€ãñbî-Å%5U\I“tê<·8§ïÁ|g›óÖ®°Ì¬/ ½æ§ó½-ÕÈ0ð8rSÚ–±ºFÐí›áZii7± í°r+n³O¥™™ã:-°ZaPrÇK¯öÃ0exŒ>vl ŽçßXÝw8±Ùlί.gÿàââB~?$Ô4ìðUsBì@P"1Æÿt*íDÁÏÇÓñrzp‰-ÖÔ(ºv!7m82}®±”®»ý•ÔØDv€‰G¹²H)$Vä°¼;ù(se°ñ¹ÿÅ7ÓçlûZç‚ÿöz%#Òs±9Oa¹z…&øaX¹ ›Iúy¨Š‚tl¹“:¢cÈélOã—Éï:ï-—+¯+X[’Á0°óå“¿bâJ´¦›ñüÅí”KãÇÍ9´Ëmk#¬8æßX77÷œw/´ZudËx‘ýæA'§1YwÏ*wì×JMüÌ;xôÌøñîUSÔÒª U}í`/½6c»š9˜²}¨Æ…¼Ÿµ–+ÀÖÑ+–µyw<$+€¥^ +5P• (klím"üÕÄ;NvVkr–Øk‘yU‘uŽa -ÐÅÿÜßóPžë{bÞƒ«,wÆÅ¥¶ž«W iíµó y˜¼ÑÔb±7LnÛX튛Fé€|=‚5Áíâ>J”'Ï›”}ù)8ðXšÒñ >õ uÆ1ufm2¾–vZ‹k®Æ&ÉðÌúRfÜ‹Éô® æeÅ~ðZåL‡øØ·cø°IXæl:‘ µnm´asØ­[¯ž_?6oÞ¤õ·µ;4‘Àø‚¦Ž¶¶(ÍÇŒû §1­q°u«ÇÍ›RÍ%†i03µÊAR{DûeŒµ——§µ`3¿0 XKZÓɧ¸óˆÊ.U}pà.»jṚ¬{½\DÒ”ÝF$“ƒ×~}°Š.B*XVÚêïn­L×u“#¥b/ERòóp±>›wëÅ ë’¿< k.F}RùN:ŽûÛ–»ó:)V®1ÚÏ£N”{mJÚDt]EK—Îz§ÞL&kÍÙ† ¬wËQ— æÍWÕFßÂcM'wn¼ $û ýØZâ„îˆyófÄÄDAÏÓËôøÃD×q=|SC®–$;)m~pðJ£q}´ˆÊ|…5‹+½zÞÒ 6SfSAžë<ë&aEZñtM4ØŽvÜ5 YÇ’U+GÞV ‹9IUo“Ö4xÝ7w-ü£ûTºIYl|Þ s¶¼Ű¿Âï3†éZXd$'kF÷²¼ÍÏ5·ÜöùœJ¸†š,wNÿ{yòŠ6ÖýºßN&N£aÿ³o såìP÷n_|* ¶ŸÛ10“WNûmÁ'û,€¬Ò‰Ðì}øéŸ×AÁ§š–tñ¨ó&Nùµ2ƒ=ˆKåEî5£±‡2Îí ßô¤¡öØ},ïîJn“âÉÃquÖ 
(¥(J½”´nçáVßbzè­PÖ¯yìSZ Ò×¢vdX­ªýü»lø bç0ŸVsö¡—÷+/phÂAì2}Öi»Joæ>þ×…™"†Ìd0L&³†(B9rd̘1õM~Œp|ûDÕ!ý¬ìÍÖð@¶„–Vtç*w ­.â2+oRŽÇññ ªMÊc&”••KJ¤j¬°ª}tª…š¦¦ô†¬Øïêê+ ©o`¡¦ÆîCèèè|úô ÉOö_i’'LóL ’8~ü¸tò°V¸«ÝÝÅ‘üÄ)ÔµR¬…„„H'9--­œœ4|‚ä'4z÷î-å©éx…ŠŠŠÔž¨¶´çëëä'Û¸ÿ~÷îÝ¥–œ›››Æ{`$5íI--Ô–]7$?fQ:=@éX*ONN¶°°3fÌ‘#G¤OÜÔ©S‘üd ƒ>sæŒt’SW—Æ~Å:u’B*P{ð8}úté³&ÍÎ-’ŸdµwðàÁñãÇK'¹.]º<}úT¢IJ-÷úöí+e¾`áòüùóúô6ôå¶P{“&MÚ·oŸÒ‚ÚKOO722’­,š5kÖ_ýÅáùáÃé4t)899Õ3í!ù µ·fÍ¡Œ¤Š¨½±cÇ>|¸.gHDD„••yÊ­½óçÏ0@j÷ÓªU«wïÞ]½zµþ½{H~lPÚ300ÈÌÌ”\BuM{‘‘‘òòò°Š£||}}ù_"5í•••ÉÉÉÕº-h$?éA¢Ú£#77WSSü6ïÞ½Û³gO>ÂÂÂ*+ÌŸ?¿ÎrµW¿_6$?~ •Pä’ÐÄÂ… ¹ýõôô²X{ªÐµW!Òb$¿ú¨½§OŸvéÒEBñÇÆÆºººŠ7Îääd&“É`0²j¶ŸQ‡ÒÒÒ$áݺuk8ÚCò«P{ªªª_¿~•Däb×DçÎ)·©©ijj*µä¢&‚ö >|Ø Þ.$¿ê!!í‘ûÜ‘ŽÙÛ:Á Ôž å³½½}BB’'Ú´ióæÍIÄ,öIÒúúúb¿arRÒ™Üд‡ä'($¤=ˆ]»v‰7B=ÖÎ â½aww÷:þ­É¡N yóæ&&&?cœ°y,iù1B: …‘ü$ˆfÍš‰W{666’¾í””Tû!È<”””ħ™™™¤o{ôèÑH~UB:ËÕÅÀhñsj`` éÛ¶³³Cò3Œ ->N²Rª9õþý{™¸ÏgÏž‰})ö°Ð«³VˆjW~´äR1¬+}]Ö>¹·0EvÇ !ÓÀòp;ì¦Îö,-…í!ΨH<Õáç’×y'L aݦÈ~c½è>=‡º?øøîóêînC…®¯>xÙ\WŸOü÷¯vjT”*´Î'ÞÑmßµR52jÇ}^:åç#ÄÄΰðu<æVêû¹Næˆ3ëÉÝ}DÎÞ ›·ö]iÑz}cŽ$¾3_X[Ýè½pï‰N+~‹KŠ¾Ú´Ó‘±Úy~aË¡<òïV/¼¼ˆWÓ}zØ]!· ­UIýıÕeC÷¬x±rN‘>‡?°e¶ç>n¯a@ŽkÔÒ¶­tä–¼)&nƒC{<Ñ®çð)+ýÞÃ@£ÚÀ›¶nwt› üµGä^£Æé…~‹Ùã3wvUÁž¾UmÞ‚C{<Ÿ…|³Tp>ª.þöãeÁoèàÐïÊʸ'Œó[qa/£‚g/3§àóO ‡öxBÑ&ñåã{'³öÕ^ê2™Øó›¿öˆ¾q3U27ÎíÝà_×å·qãFP¾vDD„¯ïR¼t®eŸ‰‹—­Ó×,úï‹Ð‰k*É ÇIÏuhÆq€æ­!!_ÂÖ,¨­”aàÒÐЭêå Õ"NÅß1èÔUØ«ää oS‡ t®* @jO(ÀKà…fºŠþÿÝ} aÐjOØÓ ±OO÷ïÉc“›ãW®·5¶6BEex‡ŸžÝëÿ‡}µ¯àxKÒ4^>û£6Õíûïº^gvÓ+ýkjîë)›?lla;`ÇíÜþ{S®-„ݱÿ}5ÓY+9ÒÕbáÕ zJ“<ôצ9÷ìÌç­’WPp6 2yWyv`\y¦½ò‡L]èÔÀ+¦SZ¨Pþê/)¿Äì—ªâ‡øïØ|u›PB Y%é% § ÌìÙÙ€ÍY%Þ»]MH‰›}ÿ«{wUꪘ“g܆ f·S®^oÙÅÒ ‹i³L'ìO“˜v39²/Œöñ¶¡&sNù{y8g¹ ™}øôö±dró/2ø‹ >aÿAƒµZüïïí¬îÛÓô”`†˜›-NyΖqVI|ÌAÇ!ú…Míacî°¤-%cÞqëëNW]è^8H­ÚSùL¤\Œ¥î™ðaåv…ÆÙ¬)™Würf!,ì`†÷:A–j?X˜%|.³Ón²Í­5GÙWsV©Xœ“K:¹{}úðs¹ÿtrýè¿#ëŽüþaKµa TÕäTÍNG˜Ý=´òâí.Eg7oÙââíÌ^ÖØíM{> ˜¼ðêâ‰#V¦iÉ-]WxvgçzÆ[·Þ^ð½äìý“ûfº<>‹ÅE­#_Ð[Ö¥|Ì^mݹ,wG‚áLS0Ï{ib̆·{–Z€åª„s!æ=\Áûóðw°'jwÌ/’nt0·†·QõŒ‰‡b¤ÿ³;«D oéôñ¤ƒð¹v|M#s£‡w“£çØ ^òò޵Ȍü(½Í‰y;‡æCwâøNš§¹ÏEœÕ+ŽÛOx›‡EqDRG𣤤)k¾Uõ· ­×Saf¬d¿ÄáسW…W¼Ö÷Ï3óœÈ­ßBŸª:'Cõ”N±ÄC–ý|împŸ í=¹~Áض?[9P],Lôö8€Ú#ni3{÷DòWÕÚøoùJ½²Òï ‘›·þ!|W^hÆ÷  p 
öŒ‰M¡¢mô»¬+C•®=vŸõ"ŠðÎñ/#Â7D8O˜)ʳW§=*dHXØ Ïù¢$À?À¹;©z­Û —½?¿wÕרEí!ùqÂwñÅ9ÿkÑ›’ÆÕ~rcÄ@A§wMt%†:ö:Òí¡ÕŽÛ³i ñ…F‘ÿ;Gâ.u´¬þ›Ê£Ö­ø—‹ãYŸ²?áͪðãóäŽB[‰ bíòôìåë*ºÕN¿Ÿ0rpA{ïVÄpî†È-}ÆL¸.U¬õ÷ ÉÔU›™©Vòyþêõ—ü‚¶­ZêéhW` ÐS+'ã4‰ü$íÕÏŸ?M:¶oܸBðf´¯£ü1Æ¥ýôÕ›L¿E —üÙӼǡ€Âä˜|y÷Á£ÉÆ?×Áˆö1CWt mÛ·åð¹“ô0õqª££}»Ös»ÍÖ……ó½é§ß¾¿“üÇñn&]š«©Ò²·®ìõ†ä':µk+¡˜;´clíÚø/ZÈ¥½¡§Y×?¾WÒžØ;Þ–ÝàŸØ£UTPp´±ªËï’_}ƒ$– +¶H~¡uëÖ***………bŒ³¸¸X¢÷¼k×®iÓ¦I"æfÍš!ù!H¶¶¶°?)Æu·Ë–-ûøñ£®®®„nXBÚƒ¨ËÚCò«Ÿ8wîŸýg››7oJîngÏž½}ûvIÄìåå…ä‡P €Úspp¸víZ ã¡´÷ðáÃnÝÄ<@²{÷n i4K†ú~µR{Í›7ÿòå‹h1899‘†õ455sssÅxoä,| j‡U™0K†äWáëëAºÇGiïùóç§ŒZÒµëÕšŒ…–””())a’ÙÜ€¼7eee™` ɯ¢mÛŠ¯”‡‚Ǹ¸8J{C‡=uêÿH.\¸Ð¿îú ”‡ðððØ¿¿P7vùòå¾}ûJb+DJÕ²õɯbÞ¼y>P{¤#11ÑÖÖ–C{‘‘‘Ÿ>} ¡|JKK9´Xë9é§”ö`œVVVòòòÜw£Ý±cÇòåËÉS±oÁ¦®®žŸŸOº%¤j$?¡‘™™ÉssNŽí4áiLL eàJVzÐAjiÙ²e°òLJJRUUå3ªAÅ +ÕÞ½{þüÙÔÔÔÓÓúèèèPÚ«9þúë/x3t«”ödH~õP{°RªvïZ&;;ûåË—íÛ·'œð8lØ0{{û… ®d¾°°J‹’ë÷ïßaM [¶;wîç¢E‹xjONNàðܲeKáɯ>jïñãÇ&&•v„úiÓ¦ G³c…Ûì‡Íê'N >œ’ásP`éééÜ ×çÏŸCyÓµÝð'CCÆC’_=Ç£G(ùÅÇÇ;::Bí‘§ôOyÂŽaRÚ °A T§V‰G¥ÀÖ&¬E”öüê?ÆO:nÞ¼ µGÿI¼ŸÑ!ž>}Ú¥Kž?‘Úãºú)S¦üóÏ?H~õ¤>(9XéI"~oooŽ>›••Õ;wDˆŠÔ¹‰^M¦ì ù!Ô>^½zennN.A’ök¼ÄÏÏïüùó©©©ðôúõë¢iÙ¥tss322¢÷‘üd 7nïò¿ª°ŽP³Y¦ˆŠŠêÞ½ûéÓ§‡ Ò å÷ý¦èXóm9#챉8ô eÄÇÇGFFRë$Š~ýú]ºtI¼ÍÅû÷ï÷èÑ£¡Ê¯䜣ò:0Ôw¹aÓûî×Îî¼ ÷-^Lb‚‰†Ä‰D$„ZÉšÏÚµk™L¦Ò»©F*Z99¹RÂ^dCm|zyyul©ô,²ˆÓN­pºðmÓ (<Ò˜ð)Ó-ø$…Ú€¥¥eRRR“&M$­=@›“#‰hë™ö“Ÿ‚C¥=äw’[ȇ’?îÜÍ]Ôñ?E&`Ë ¾»Ü3K$§@yyù‘#GŠ=ZÉ ÉLí'äYZvOJJG:YYÕ`2qþa`Õ"È6.ÁÁÁ^^ÕÏŸ~û¶´Ú[ÄŽlÂ=ÅãǹüÃ4i‚kkc2,¿”Mξ—ؕկԔWºæµé–Üï„9[f@¨ÅV{’M¡Â„ÜbõbÎÐõºÛÎ=•±e}¹ Ý@<BƒE%ù•~§ O765ïÈêÑõ ÃoúŸ 0…î¼»+¸DŠç$Ž«³E+‰³ƒ’K{Fû#þðð ¶T4_Y¿æ•Ÿf`˜¢¡æ˜ÈÀ0‰ìe00XÝÁ#Q1ñÔ¦ ˆqkr†}töZÝÚiÆ­;ÈL¦(ýöµiñŒŽd$Ч«©Å£Ôd*iÒA?vm”Æ\‰ïTU´ãçFŒßæÇÄ»–ÇðõÁª}v‡^yKÅ“Ìzè~~v¥ªÙÒõØÆË@ä©[ò³ ¸…€ˆóœ¾¾ÿÔuã–ŠP!Ùi·a÷}Ó¾‹x˜^Ù›kY‹27nÜH¬æWèJd'­l'„¬N–~Hê;xº»Ïß'¯ç=ùü“2ÝÎrh”fß'Gjݤ{=ñ"bïM§¸°zƒáxf »‘'/L¯º~ÉÁ¶_¿;Skø¾Rz¨H”%ËG&îw94‹ežÿ“„¨nû_%uµLüÎÆÞô{óŽ~ãMÄÌ.åAÓJ±lLõa2#HÁד¾ŸïŠHß§Ú¬¡HH·\ß@Pa‡]ƒtG†í¤@–µòTŽaq-9‰3.7ÝÎþUË’Œ¶vMº× Ë ׇÚc©"žòó³&644î[󊂌;ºO+-¥Š`¿Á¶Þ†µQ,¸ÁáC‹9é 1S"d·êhÖ ’‚è¨Ö¦‡p‘!à¶¢mIXíS(**~ûÆß¤!†ä‡P!. 
’`mB¡   öhQí‡ {Â,K#ôMÁ$ƒ¦M›þøñCWWWŒiéèè())•””H­AòC¨÷uïÞ½’N+33ÓÞÞ~÷îÝäÆMâ§OŸPßA†[ƒäÎ’FB‚˜çöΜ9sÇŽyyyH~2 Ø£ïÑ»wï7nÔå&·¥¨½‚‚‚z©=$¿R{mÚ´yóæ ]{°b´²²zøðaM"766~òä ‡§°–ÉZ´hñùógÒMmK£­­ŸõÔ. 8þ^ =ƒLƒî’ì³–ø­Øbe¢ÃØ-¹ž°º÷Ù³g „t" ¬ZµbéR¥:Ê?.캧o"q„Ú«ðñíC0?uíþ'Òk†våw‰´ÿ¹Âð-×Á¹Ð }@ÛžxÆTèˆ{½Õ¥í\Žû™2¹YC¯ýèûÌgßø³…-1M!::ô ·BnL³VÅX£ö±†žÖª“xÅö½¨…m¾Â#/ð÷Ê…ÓáUÊ-:ÁŸŽ;FƸÆ[ƒl³HY›xâÝp3VC4iøhKÊ?ÿN¬º•+)a“øõ›Ó'¡$Ê óŠÁÃ×jvâì¶Ã¤1ì.t½]Ì¡=Öi“zlŽ ‰`T\E{´*¯ Ù³„ð›þ%ÏÔ•»¨qt‹Ÿî¨ð”ççÜMP{`à²mçVΡ¯è"-ñµ_ýÛZ§î`Ù²ä0# ¨=(ƒ×Gz =*bû5ùÏésVL¹R€YbMCK$ÅG)÷¥G±ñ³ï‚*“˜5k = "EÜ®\]T—’ÒÛhkåÑ\"?t¨x„,?Jîûß¹ï¯ä¡Ô–‹3ȯ”Ë“Ol‘Ïæˆ“G bã'ß:´lg¿ú¾³ ö2+¦¬:ë×”Ÿ€¾¯¨PÝwÿ$BC³õD¾Ã×ê&x¡¡—*1ÃÃ-*úØüeÛTd¾ –¦¿ºmÔî‘€áÍi/Ëöí¹³gW/ŒÀÀå7néÝ»@„Û _÷q±ŸWý¦ Fòã0#Ìs/Ô3ô€]•‘F§ÃÃ#9 Œ!éÖŒÚY`ý×_¡³f j¼òêÕ&NNSf ü9 wï1€ø±jÑB/9v¬täȹ‹ýêþ×H~·ÎXÙ¹@GÉÃuÊf‹IQ‘ÇÛy¸Õ÷ƒ˜Þ„À<ˆåjOèmíÚMð”\>ó…FËNHµŽY³Ùö¢œøk]\87¨ÎÌTIKûÙ·¯;t;9‰’Ä¢…lc)9Ù¯¥žur’ãðòe³ÏŸå{÷vƒnqÛÆ­§ò#µGtöºùá¸U•×fãq|aÝe>uwr´µ—<úX`˜÷˜v`A"±Ÿ’ˈÄîÔþÄ”點ý™ƒ –†h8ÆÄOï[6#p!¶´ø‰N8iàS¦ôa¾þ4u$ÃxFã?¶†\ YÂ:uàØQRxÖþ‰±ómC˜Ì$ñ‡5|Ðù qdÙ¡±ÝS±ÝS—íM¤ X»˜EØW"rE/Gê×Ðk_‰0ïþ‚Ç1kߢÀ†ýÚ C&­œ£ªãäè0@Uéýˆçhjf å÷™òÇ¿³þ+䈲媌ÉÏ7g™-Á0åàIlS*»|@™ÅùۀěTHmØÄP±snËlƒœtž¦èZN3-–Ó9ø¿‹Ëÿ¨ ­:ÈÐ#J.Ó¡àw§)–„;$–éÙ«ô']Y |~-’¬Ó¨š-ë;ËdFî2w~±³©ÚD{1°·,Gͨ Ç0f|RZäçaѕۢ¤£í¸ƒÂFƒµò”Ïœ³ø\ £o€-/55r8¥se9ê-${D”{$»À™G¥*7rš:B¹2Lƒ™×3Lº`Äd¾"_¾·L\ÐRãg(+)SlYòñÅ¥»nCB¿ó§£;tb¨§ž‘™oh ž‘ñÂÐö8xh×øqÓÈ#<=m3È]K©êÈq}º*Üx…û|¹ƒÉ¬´KçmVÄL]Îúé~y˜ûÕêêm‡PR3%e ]kBäÉM-¤``ˆ¾]¼ÝZ™ ƒ‘ñ|¹Å0±=g0Oì€bi¦#v_Ø6…çB…."[Žèr:{@ኈñÛü˜ø)dÌ0˜ÿH,ú ®"oòñ1?gïõW'†O±•yù±ë1ßÏí)Àiíî%)ɽÿ'å¾Ò•CzUª¤DˆvŸØÓl µÇ~#±Ïáé¸"ïÁCžøúxë¾_ófjEÝ’÷rÚö²B†\ó܉ƒ‡k뱾ʶ¶˜Y_\ÃRbF2‡1OlI*¢“¸³­·Öœu굘ÛñƒXI¥M‰ËGå>éÖÀ+–p¸F^¿½ÖŽxн‰`i»H1¼°¿B†$)<þwäëÙ•P{Z4RBúTÜÕµN'™Ç†&&&ÐoÒÙûYÈ‚)¸ÌËO–P†¥ÿŠ•kÄ&M)wgç¡ðØ®]?&3ž[¨_t¶èÄL» ®à,l»ËÛRíùá»Á‘áÁÄY:Ô^y =¶RGn¦¬µ|—õÞÕË©D3¯W;xÜ6…ó¶%Š-Θ÷E~O´3%h'Y#æÈ¥C[ý†„¯°®Ü½ÀjY‚è³û—9Ù5¹C= ÃDo¢ðA%R~æ1 šC·©!‘{vvö•¯nAY՟Ƨl ™Ã`æ4xäÕ3Lj²3²ÒóÖ„Æl[Ãd®†…f/²]ÔÔ±3é° …jY°b++çþ°ˆ]â´æ&Õ`Æ…à‰gn®vdÍù"n.«B€ÕH•Ÿ[·^^Þ¿ö2ž¿H{y×~ˆ;l¢–ä¾>zúª§çTzû¶$÷ùÙ›Ï݆ —SÒJK †7ìÔ‘lîîÝ»{‚çTXÉgdfß¾~~üxwØîmZÅXxÿn½bD]x…¯h‡-[ºkã%æÂEÐÒò0íØŸ:"AHÔ¶­+æÒ ,‹9Ëü|Ÿ³\Ø0¿ f+HqIŠÓØ­ã–/KµÛ•5¹ÔMðïÞCÑA 
=êþ—Nù1Ÿ$´4q°°””'…äañrÞG]œ­M·ªûuÙ¼$þñ ÃñŠ,òšCyzÎ4Qvx ¨=Öå¯HÿŽ.[©Øˆ#¬Ô?k¢@?îo ü‰Ÿ†?ä¿­S'ª{ã´!ívÈTŽöíô+Šÿ=|ñŸþYÀÌ*"ÃÛŽÜ>m‚މ ì<µnAEÂX8ÝK€lܾ“ªFV.ªÈ:¯¹+£\1ê1IÏ vÖÅWzdá¹ó^ÉÎ^LQ¤l^Ly²æ=Læ¸IÏqî2VûaX|òË·#fXKͲ¬¼Tž39ù½¬Wís[Ç̽¶´Ù@¼^´öíˆÏŒ-ë/äCùEhöêr8h|ÿUJD;aL* ž²k!6\j|3£>¡qK1ËjïŽ3þЇÉ>£|”§†%pF; ÇÂÀƒ :{ö,G˜<?íyDKõ9uu[Ê:U[ß ¢kË/”ôË ×*Ú·žgud273Ý™bfn^]ü:ãFíú.f2+Ë«yÃFI«øŸŒ*F>)á•âxñ½U=—‘jLÌÃí@B¢Ž¿"+Ì;aÎÖÿ2ûð<æl"”_> ð‘»2LÜñ4zFg Cßâ…k›Í0Àt“¹ª¢}ûx[“îs4ü*Eo"HI~^‡2£Æz+æÕø„xuS{.o}Žó;a½¬oC½MŸ>½ÜÏ@±©Ø>Å »hÍÝÝ=:Z<•¯¼&Hêêêêùùùuÿm*'åååKKK¥–‡ôñ!»E˜lȯ4í |‡Ö/o`pµ?…ÈSêZK»Is+ÇAXœžÜ>irí<ózôè1oÞ<©¥XPPP“ËëfeUÆ‚œœ@‹üäÚÃñqÜ-OÎSº[;ö|4ö©UÀEœ5ÎŒûÐ/ÝGR8-ÝJAAAR“ßı'OžKºbÿõëWãÆ¥ðP$ÊÊÊb©™\·nÝ>|XŸõ™™™ðX\,½iǶ¶ÄúÀɤ%OOÏóçÏüøQ Õ¨Q#²”2}M›“(=zT?û~õ­[·¦Tqýúu©¥ûîÝ;I'±oß>ÔœCò«ÓþT˜"lvÂÆ§ÔºdRèÁ‡jÞ¼ù—/_êwf"ùÕH:‰Ù³gSÕ;“É”ÂC1¢V2óåË—¨öC;w–tÛYæC1ŒZÉÌ=z ù!ž={Ö¿‡ª­ñ777$?ÙFll¬«««Ô’4hP=ËÀ¬¬¬Ç×JÒ>>>H~²   iÊOUUµže lÕJÒõi¢b•Ÿ……j×ïß¿zôH„Ų'Ož6l˜ò¿>m¦„ä'  ö $·3'Ô^ii©¼¼¼4JKK+''GäM"¤£=ww÷¢=$?~ô®¸&Lø÷ߥó,¤Ô¡öDŽ¡]»v¯^½’­J.’_Ezzº‘‘‘„"—šö‚‚‚BBBjɹsç¤p«²b‹ÉO€Ú«•b„¿¿¿XfÛ´oß^ wÛ ´‡äW=BCCk^uTÒü¨änþË—/R˜é&.\¿~´‡ä‡À†ä´Gä~ÉæóæÍÅÏ•+Wúôé#…’...É¡îÝ»'¡•?.¹Ûã'rŸÔÔTIçsCÓ’Ÿ@5jÔ›7o$³Dg{‹ñ¹Ôf¥#ù!pâõë×2wÏô•é5ÇŒ3$}ð€kÓ¦ ’'dq[eñšœ9s¦¤oxèСõÆh&’_ƒFbb¢l@ZZZ¨ñ‰À_¿~•ÐB›´´´:ˆ7Î+V\¹rE¶rX¢#ÌH~² ??¿;wJ"æÓ§O/\¸P¼qÊœö€`“Å‘ü(:$!ùݸqCìòC@ò«W055•PÌÏž=o„8Ž×'\H~ÀÙÙYB1¿ÿ^¼Ž?ÖÕˆ2$¿ú''' ÅܨQ#ñFxæÌÄ’_½‚䦋kZ&…1cÆ ¾üê$×›RWWo„nnnˆ/$? 
¬¬,ÞÅ»Ë5’_}†¢¢¢x#TPP@¹Šä‘giÙ=) ™ƒãK@T"ùU £/ÆÆq¼Ú>=Lrrý§Çe¸1‘à Ó§>O±Âçaíz€tè_w²æ]vþùsçz8§{â¿ÅíÙüç’ šÄ|4š¦~».к'öÁÇoÐQ”Ÿû0ñœéSEˆ0lÝz‡‘“•šÑ=oÆý;qôHUQ·_ŠK±ƒ% ›©kö2Æ|vïæ #Ü{$ÖÌaà÷9Ü?Ù¸ŒÊ( ø[‹FEŒÚFø!;ïóOy kÄ¡=ù¦Šƒ<çÃ;|z'~ü0ÑÍ»?}ó¡T¡9Ì^íMqeç 3aIWÎL0Jؘ·ïÚc3x¬qwáŒùܾ-þ‹eI~”œ‚ûibØÌ×8Þ†í9T]þ½U=—•+°ŒÃ“ĵòê.ƒøµ¢–{¬`“GY$—Ö…Y±©ß¸ê—·™v‡\–¼{nÓÃ\À˜ž¾…Ú«6Ø Ïy0f3Ýjú„Ò3+kCíUáçßÍþ·)Òϧz»(aáëxÌ­v”·‹•#¼ÃFyïºvz²xÄæmŒžRm0Ë>ƒaÚ °¥n A³÷ɨ½jƒ œäô¾ÐR_EfäG6í–\'O—͘_ ///Øb"}Ô{L€ÞåÁyz²QÆÑ;¼»R#ì •ÖÒhåªUC§/<¼R«NÇãÎpPmHø2©6o!xÌüxåÎÍÖ­ßX¯Çã&Žpá&88xØ !ª…ß­.Å_ïç(ÄÑC§.¢= Ù@%÷Yš "'²WSGÀh7i"HW‡†^pü†u«‰æetT^v“Ó'g!§ä‹æôÍ05à¿'%ÀD™s‡Ì8جi;¶bKõKa=êºù(ö«õ¼X&”öH´µt(-+“çklùú‹ÍÔ„þ¼^Õ+ò Ç…Ò “^N¡këjw…„† ¥=Ú,þü)àèQXø†³…MмÚ0·Ò?ÃV«¸²·ÉoúÖ ¶j,g§CC#`M¸ÂÇÝÒÉ‹ìé•fß·îk‰ãÅVÑÅ'<í 'Q©Y–ºQ'Â×—ý]˜4Ý£ ôîöæú„-µ™ßË~ºLÑlÝÓÜŸfºUÊoÃæ­}FO-æþwtÊN«ƒ²Š7eóØîâYRüü…õÙç”™éV/¿Ü´Gâþ»¯Ý[U¹Øríúý'Î-æõ›"ù̯»ò>g5øèËZ±)zE¹§œ–eRRlEÒ¯*÷¾¾œCXtÒMFÛÆvR`­nñxëY¦F =‘/ÿßÑØ £]yþ$²ö K€EŸ!ž'/'´6}Ûµ{or{·×åð¼ö襦^+‘㮼5™ &ùëÀdi¡D:3½â¿~x9z¤F¹eíÚ5w³‹þƸÏúLS˜?¯¦TMhqa"çc–„¬|;«|< •=ìžëý¡Õ'ªë ¸@–IÖÌ-Šî·2·ŒÆ0ÀÒwÏ Ã=ô# ¹æ¼µÒB>ÙEŒ*"ÅEE‚BBY™oä ŸŒLš’"`…ŸðË@ƒ^¡Kºs«&ý¬Woñsž#>d‚ƒƒí·Èm´Ò0 ?ðs^ìtöé·À¡¡‹od‰ÂÜ鱨'hŸ×ª ðçõö ¬ý|"Z²÷b%ݺ©o1¯Zñ_I•ü/Y¢Rò~ZB«ïUc-óY›eB¤7…ýÙ+ž'dÁŒ\òé3ZøCÍN IÈsÑïÅFNÒA_ÉÕ”]Xv‡z½6ú`@;ÄL*órðª>d4Ì#Ôìg¬Uø;WZèHvQè<åƒ×>ìNý©%ÍÇHÀàÎ(=‘š}†9>Ÿ2i_ñûA…ît½¤eÿ HŸçW€>Ì·‘žöbö„á )e¸‹~"àX»,úVʠЦ[?@I2“x9¤øi{]ÍÜ2æã¸H§‹áw2ŒÖïú»âñ}÷!?77 ÷œ¥…ü.­ë®¶RGIÜzf—•éLÝC½èýðçVï½ÔØå+¬Xz”t8l «ôÙcäT­ƒö‘>>t‡ÕR¢÷eÁb,ÈêµU½ù_só$ʼnÊÅÜoÕ¢þ}žeU¨OüD¥¦¬]ìôãLG¬ß®=‘\U-ܵ5–ß ss]ÏÜyˆ>=ëM_¦ÅJ³.•ûRß_]ô™l_±ÿ)3˜‹v¯K¯ê(LooYçž°w'ÐÙ3q¹ºA0x½–‘û;½ÚšZ.òfóH¾.ÝÈ·1ߦτÀ×ÜÙšRù$…kŸ·uo‚ZÐ]‚•®£˜Y­ž.Z•/E™a¸“{DÅÅáÇþõ8²î=‹¿=¯èõQWƒŽyÃ{œì¯Ìz្âFÝhjÈ=–*Z!Àkœ).ʘØE¨Zõþ“£/Þš«šYVËßXÌžyñy*ÖU€izßî­² (ãLÈ?—Ö0=øžSmGøÒºžÝÀ}ã–#{wROØ-–v!ÇQoÃbkù¯/¯õÕÕ¨¯¯HKݘZj¬—‰Iˆ‘ {>:„„»A ãxó{?CåøK ¹®1„oR¾IA¢ ^u°8”'³]/Å!Ô$òmœ&Ä%™­Ñô#aC.ö>l·á9Š%ûsá>‡qLõcÚò ǰ-<<çu3pü¤ $µÿ²ŽKÞBá|ážDÑàL¯þA@îùy,>ÿúAq÷!ÎÖs}¡?Vè±ØŽzë(øùz­³“OhØfã·'/=11Æ0¢TÔyÛÒo¹å½Å…²^=¦TÜâìZ«8Un± <´Ú·°Ä<%A•ø¢¬2ð¯óâ9û‚_¹÷Ó’Ô×I ;ØÃÚÂaûI%q–"ÿ5’âÂ_‚Ý~ªKõŸk½ðÈálõ±=²讲Ò9 ínžöTŸjî!t/„Å€TU¯—Þ„r~ÉR—Ò`njñ2 
Á%Þônáîz³¥…4À„=yðè`Äæ;ôK®­Ñ¸(´mÍ27«ØZ<5#Õ)m­Ëæ}DÊKwï¿+Ut5_ÒpÁ À_òz­÷%ßÅ´çç*f-é)=ùšò¨¦+@C¦Ï%?2zòÜ&ßb•W­s|Ó&*oê‡+ù)"%‰è×ö˜jbtõá ‰>}›píƒKÿª-°©íì?‹4™0Ãd9Ì“xr3vØXó¦å¶Eó9L8œe1éæÓ̦ »ƒ¼R³¯À£ýâ{™yMzö*ùššÅ¤ÚÎÎ27jòëUSlÛ\‡èW£áƒ›ð!c;±´dp6BÄ:7Á¶Ì¼Sm2‡AUógÏØ¶#|ìŒ…Í˜ÛÆUl³§\<áhßÐÁzZнšp‹kÇ÷º»Ö3.¬þ#ëÙØ˜?Â==º «u± ÷û ¨g·–NåeC¤…Õ¦ÔS½„š{åzŸaõVMŽ=ö¿Å Hí;ÂÇ4@W_%Ç7–{Ì[ì>¥mZÿ:.9/6„{$ú+Ýëw^~ì.ZÏ6þ+")¤ÆÜCôã wbx~mòÒ ƒϱÿÙ-lBÕÅ\dyyù™+7hrh-øýõƒ¸°€r¿†ÚŸæ‰™>á»÷Ž®e ™û—þµ_`£¶¸ÕT×¥D/RXÄ>ÝZZJHÁW³˜Òä×»d!a'ï<<Âxzͳ±FÛñÿ-Z¤½’•Þ ÙœÜü‡o²¤ûrèÈ)-øÆ_^8Te0÷ä4D¿ºä…t?õF@€_mÈ Òg¨Ý¦¿q>>+3#FŸWðñógqq19 ½ÙCj`"tdQ¡¤Ô?%%ýûõ…õ1BjjoªN•íÛ·ô×]ºhk#ߨ5Xðë†-£Èøð)ûí»÷]»ve.Z5lÑ‚&G+!.j".JºŸ§¿16áòƇ“’ç¶<†èW?„…Gªm‰˜%z‰ÀÍáè/»Ö@Pz÷†ÿZôõÊõ‘†ÿZ"æ!Êýÿü)aš\D?^ÃÍ›7ÇŽÛ¾ÒüäÉ“aƵDÌÅÅÅÜüàˆ~¼__ßvG¿Ó§O·ý444ýZä"W͈ϟ?ËÈÈ´hšCBB6nÜØ1»¸¸ ú!´âmî=[âââìììZ4ÍK–,i¡˜§M›†è‡ÐzðññiÞcbb0 [¼xqË¥900°ƒ–•(¿òttt¢¢¢lllš+Âëׯ'''·(ý:nU½ëW¯”••›1Â… †††¶\‚]]]·oßÞ1ËÉÉ}øðÑ¡õðòåË”””‘#›m†îÎ;[4ÁóçÏo¡˜_¿~Íå Ñ×ðôéÓ™3g&%%=º¹âôõõ}óæÍ¡C‡Z"Áêêê-ô*¸ŸmD?^CZZÚéÓ§¡cîܹGŽi–8¿~ýÚ©5kÖñãÇ["f…÷ïß#ú!´*>þ {öìùãG3,á²yóæ+V899 ˆˆ4ç¹ÒÒÒâ÷sÑ¡¥E¬uÖ,܃¸ŸX)Ôßßþ–””4oRZè%ØØØDEE!ú!´6,-Û'Θ1ãßÿýËØXc’’222ºzõj³¤sÊ”)çÎk‰7°cÇŽvÁ=D?Ä?ÿüÃdŽÏÚµk½ïnüøñêÍ›7ýû3v¹ÈÊÊ’——o.îµèö;K—.m/ Ñ—¹WTTÍ6Ƚ~ýú½}û¶Q—?zôˆI?“/^4KªÜÞwƒDôCh ÷Š‹‹Iî-\¸ðÀ ¹2dÖ¬ªõ›‹{0;ÿ.&&†è‡À]€Üsqq !öd ¹×>‰oß¾IJV[ šjÐ`cŠ(4ž ÔÔT(Â-ñŒ½{÷† Fê‡ÐöØ´iÓªU«X} ÷,--O:E’Ü“‘‘!{)jÊ6O6²ÁÃáÇ?|ø°Þô¬\¹r¯%öëׯ0µí‘{ˆ~¼‰k×®±Ñ‚ä+嘎îÝ»ÿúUµw$UMî‘õØI“&]¸péCrOJJêË—/S"//Ÿ••¹×ù‘öû¥ýx7nܨíI9‘‚‚¦'“{¡¡¡^^^………µ]ÎÊ=&Hî)**fff’>ÖÖÖzzz{-ñ€JJJ²t ¦®CÝ#THîåææB[.))‰éß§O&÷`MMÍùóç¯[× ­àâããÅÅÅaœÍ5à¦&DEEsrrøøø ÷xã3!úñ&®\¹2sæÌºÃ@¶0¹‡aäëÜpÈ^¶\¾fÍš;wîÞ½ÛÊÊêèÑ£sæÌYºté³gÏnÞ¼ Ïöë×r° ¸±°°000puý«ý˜ôõõ¡ÕêìL,ú˜ŸŸÏcŸ Ñ7¹uLXX¸ÞdE”Ù«‹PôH"±Á××·sçÎÞÞÞ¥{Ðä‹ŽŽ†ÕQ¨'Cœ=[µsÓ˜1c i™Cêd/aX. 
J,î˜Àß ÑgͼÔ oß¾ïÞ½c5ÉÆ&÷ Ð]»v é«W¯( ôÙ@Óäƒ`Ù\ZZjjjÊÏÏéÒ%Vÿ[·n1݃ º~ý:!‰¯_¿B²‘“*`Ý•µˆ·èdz¨ƒ{¤ÖAîq<[VV&&&9°„’i“'O>þ<[H(€l>±¬>[¶l9xðàóçÏ™>/_¾$Æ ƒ\õ÷÷—””l¡ Mˆ~m¶ëׯg*+JJJÌÍÍ!y v±]iFrOGGçîÝ»l–——×±¼š;¤›F£©©©=yòšŽË—/‡Ò¿åZkýÚ 5¹T“{ä<†®]»² WM܃Æ!ëj¢{ tùÃCîÁ*è‹/0 c¶ÖpìëGôCàxxxÔ$^cç1܃UÊ {¡4„{‰‰‰&&&« ¤ ’Ü;{ö¬……¢¯!77W\\œµ™äo&Aî±vß×!€äS]]ÝÚ$‘µþ ¹'**Ê{} ˆ~Ì5?wìØÁœgffvñâÅ¿Ÿl¹Çd]mÜëÕ«W^^^½Q‘Ü#¦BîiiiÝ»wÑ¡}ÃÏϤs!÷æÕÈ: ‰œœ@oP%m9&kÎAîÅÆÆš››Cîqÿ*ˆ~õ}àÊ6I~~~ø ©Òìõ:È=r–=z°6ö@5¡)^è{vvv{÷îEôCh¯ Ç‘uëÖí÷ïßÐ#eªÙAnÄÖp u¬ÉÂÂVGy›{ˆ~¼rä'äÙÒr7211aérâĉzÖž={‚ÊIºˆ~í***¤£Sc› VîÁªã_r æ¢_B‹rs–“¿¿§)Õ 111Ú¦ó"ú!p;lmmÏž=Û £ºÌÌ̼¼¼ÌÍÍ›‹{ ²/¾ãªŸƒ««•Ç–ñ”¿%j‹.íˆPwµ°uFT&%%Aî½zõjùòåÍmkÒð>ý"BB¤¦­Oé…òq;Å_NxmnÞ¼ Ùæ¥ä^xx¸££cÇ­|ZbØiÈdö®*j€ÌT ½rk¿€ewžz«|GC‡‡‡KPPHÍ~X„Öër- ¨T/^löh׬YÓ¡éwšþ;Úy}Fˆ7Ã+35áÑ+=ÛôXCúë|ûó‡p¯O F‘uNÄÃÖó·NÍŸ¼ùÛŒ·cFË{ÆKCéG!o£˜²Yˆú$š  Æ´¾xé¡j>Îχkz ÷e=µ9%w…¶8/ÐZÔé~®›o_e]óÂN3€­äaeÿŽù‡ZÀÄÊÇi6‹ŽôØ´u+Ë¥¬ñlZi‰8Ó¬0 ÑnYvìÚµyY%Xa¡ÑÞ’Ù”ùËàç÷dŠŠé@‹ÙÝDª÷òyÞã Õo\“¦Ñ.’1ßËÆ]¥1šœFÖ‡ÔØÕFŽñ$7ôͱð»ZoÑÀHÞ³»¬i#<;ЮíöKÈ]­/Óϯ®vɘõnÎN¼ ~ó`ý“ÛµÏÉ&’~ó‚o®ì£±ñ^Ø Ýçé¯Zp°jTO/¼‘¦ùðéãÇw®ZŠÑOqйfôH‹ê5 ,ßzó½ ¹#‰\«¢ÜÛ:ºiwÒr¹sÖS—¢^`=ÇàÆ5  ùž¾-ÇA2äO ± hjøfxȽÎ ÝM-‚uÅÌ,)˜iƒnc‚–»«zòùºXº9å'/Ð/ Ç£Êò5µ GÛyfDvš¥é?×ÅÉ*èVRÍøä¶nÝD* `·eÓYæÙ“·âcçyyÏÒØ÷/Ê´qqµ« ³iÓV]¥ž 5h á¿÷6”GIÄœfÁ@YÙ¾§éòB¡t!òe·êçqQȽé:˜’Ç{‹–ýÕPlcc㉤ ¿X!Å,œîl›&‘¢P-ØéÇG)”áA~¬­/Kˆ½æXÒÝËCèþoÉ«éVÙ7q OÊú±ð÷2 —˜¼Rž¶ƒ6m{#Þ âU ieÚ2{^MêVžÝ®¿¢¢¢¢BCQïPc''§°°°V¸hðœt¶ñÖõFÎÜ„ Ñ¡=AVVöÓ§OÍB¿"88¸y—Z‚xúô)Çuý¸{ðwìØ±7oÞlé{©ªªúnÒÍK¿~ýúVY&Ñ¡E@®ÑÒxöìYKD›‘‘è ×#ú!´? 
UTT+¦´Î›qU²ïß¿óóówêÔéÏŸ?Ð-&&†è‡Ðni ##óèÑ£Và¼×Ë—/ ÔŒqB¾•––úúú¶ò’ˆ~Írç“V@ór„››ÛÞ½{ýÚ%.\™›°·4FŒñàÁƒæŠ-$$dÛ¶m¶¶¶<ùiý:Þ¼y¹×½{wÖÍŸ›düÍann®‹ 1^|ذaˆ~íiiiýû÷‡ÜÐ××OHHh¡»Ü7n\³ÄvèСùó‰Obbb¦NŠè‡Ð^1mÚ4(€$÷Ž;6{v3Ï’’bn‚Ù³gÏ¿ÜÒL[[;%%…t+**òêwAôë(€Ücº!÷ÊËËýüüjþj`l¬Ðþ ÷DEEóóó™Ü»zõª‘‘¢o}x>>’{&&&[¶l2dH“£âØ£ÈÜÝ¥¸qãdš¿¿?ä«?sѯ#bñâÅûöíc^ºt‰tÈÈÈ$&&ª««7<699¹>Ôô‡Ü«¨¨èܹsÝ—8Ö„544ÆÑQ/«ýÚ7HîÍš5ëøñã¬þ"""……U+V<}útúôé#Gޝ^½âx-o(è‡HîCõã¸ÚêСCß¼yÃæ¹wï^///Ȩׯ_CîÕ} ___h^úøøÜ†êª¤¤TßXK²#pѯ£ãòåËðŠÞË—/‡^wàììì©S§ÚÙÙ±ù/Z´(-- 2SWWwÏž=sæÌ!ý×ÐÁ¬Ù’”†¢Z[üæææ±±±dÈŽb£,ˆ ,, ¹·lÙ²­Õ–E®eeåôôt6OXó;v,9#Töûijj®[·Žã.{µÍý­›™ˆ~<È=r€š§"##Ù¸7a„øøx999&÷˜`.TyçÎX³Ý´iëYÈ=hþÅÅűzЉ‰}ÿþ½¾sD?„*@î½ÿ^AAÕSUUÖ-Iwqq1ä›ŠŠ ä^½±éÑQ“]{ûöí[¼x1y9ß1¹‡è‡ÀÈ=Öz&4 ™Ü311j¹×Ø8Iv 0àõëפ“{§OŸ¶´´DôC@ ÀZϤR©ººŒ¤™=„MäÞ‹/LÓ :,÷ý8ÃÓÓ“œ¸Àä^³€É=RK“““IEEôC@¨9Á§EƒÜƒ¿ãÇïÈïÑȾ;GGǾ}û¾{÷®Ùãg|swwGôC@à€¹t´DÌ{eeeüüüü #ú!pFllììÙ³[nvüÖ­[{õêÅڊ臀ÀÀúõë[te OOÏ‘#G"ú! pÀÇ[úõÚFôCè PRRjé[Ô;Ñ¡ƒ¢´´´¥oÁq|)¢`zÛR™Ñÿ ^^ŒöÒÓ—À˜“~þüS>~¿¿ÅæÍW¬"Ý¿~3oqâd¾Õtw¬ƒUGýlßæãê&Éä&Oî @äçO¥2}œš|‹½{7ÙÙ‰3¹Ç†™V¢ìñ²xð D?„…ȽzÉôêñä‰ü°a=^°ý{õaj"ú!p5˜Ä8wî ü2…Ã.Bšëß?Ú PÓ_X¨ó—/éRRÊ ¼EЧ@IqaWAaD?^CÌÙà©ÝI·Î”aD'¸Žï¿)ß"JÏÒ·‹‡‡Ýºv†>t nÝ!…ãÈ«~ý¼Y7ý"öøÙ;HüM"ïß?¨oà„è‡Àk6¬ª¯7`[¯@‚mÝj¥ÝÛî=±Œ8WR^I¼Â­ás¾U­kÖ@=}t66½þ2‘ú¼ýꡟ†Um§f‰Sÿ­ÇÌÆ0Ç+ò1LŒyÈ6ˆ%XÝ‘ ´ú*vaºóñ="ðµ[&Î#vrøÚ·‚—‹­zß)•ªŠSÇNjDe²«`'ôžÿŠ~ÝX¨Yq§è‰€û¶Þ¸ÿmODýd‘åô%§NGÅÅŘš2÷@,²™çÀηŠGßp26Ú딕¾;÷GE‘3.ý½]Þtݳ½jcTêõ³Y¿0^ÝW‘«@îûŽŸÒ€XÙåÑEÆÖÞzÂÞz}Á†=äáÊ d½ðó§4™>ª ¾Ï7(´ž ^'Ì5µcûxþ/hÈ[m›«Û®f«BâòH™%©^•m**xx(L#+Ÿ¤ˆÍϧŽ!5 Ä/¦ÊÉ‚Ð3!zа_^ö›ÁΗ¡2ƒ]ÈE]Éß½·ß»ô§Ué^ÞU ÃiH¹9¹´†Óï¤} UDpPHÑŽÍ 3M€k©›`µVÄÌ8Ì”øÖö'üjÜ"£·ä€M?’]Ök÷é1ö{:$ª~ˆ¬¶Àÿ¦{âø' ëÝô¡O½¾Už5!4ÈÖꨂ ™EŒ;ŠOˆH+Fù»Õpï}©–ahTŠaå§<ëѵû*×ñ=9…’½å~‹™{[E ožKd›`ï gé!|?(÷þGß¼Àqô+ö‘1<̽†Òª†öY½q½@(bc$kx~öF‘ÇT!µ:·Á½p¾hžM¿Iä¥KÀÄ©ÏÁh‚Û`kFåþ»gÃ(ø~Xëô¥~ê¾Å<÷ÆŽçfƒñ„E¨òÙˆ"±jw Ëqrjè¨6éûšÝKRºw½W|ñ\ÙÄqgŸtWÖѯVå>–PŠxÆ#”›ÛœœVÿþ½«[·¦dqIéé æ¹r]Ó&U@¨ ›š^jGEš°ÄÐÂR<ùÄ:²¿Î Ãtï{êZ=X2Ƨƒ´‘;Ñ­ÛÿšP?üøq”¬lCwîìЄ[üü1¹GO€èW;ʈMÛÒ2iã­7âÖÈ NGЋÞFJî`Jt©óéÀ`ŠËï¾Û:ºòJ3…aôMõâ>ã8ñ -a_Zº[@ ¡ľ}#+ÛØ¾8{ßa ½Eáïé=zöê¯þ/è×Õ 5n†²L¥m@èÛºÛ¹ð7ÿsú©¸;~¤øi{]Í ÖÁ¶ÂŽ !; !÷òKq”ÿÛK6Ök¤%ÞÐÕ³í-Ù”[`Xƒnqñ"nfæ 
Ü­£¼ù¿Q¿¢ãwE™]ï+½ïŒÒöJþvvvì†xàvæ/ àïi/*€©¯NLõ8ж€Fü X²D´¦©¶#4o©³—®^3ÜbëV77‰šJ¸yË×îkÍÌ:Ökÿ ú'ùZù2Žè¿ ÄÇo3•éšzó#Çã<± !€¥‡Ï=‰O¶·'&¶èiÊ¢ÜÏ%pt\ÉÑ©s³ÝbÙ²µýWtÈÆþ‚~=ŒØT˜‡¸kµ€¸7ù×¥fà={ö LÐaºÝýýýýýýýx€~h.ª{" ñC@@@@@@⇀€€€€€Ä‰?$~Hüø! ñC@à.””àÛõ#¬_¿}G$~ÀŸ?ÀÞ¾‹GDÄôø! ñk··T;ð®sjŽ þáúÀ­„òo;ýn=z „…ÇŒŸ³ÔÖº…îC¡`ŽÇsVë‹×rÇÌ?Û›–†6ZHüJnc‚càßu·s7è÷jóG*ýö$5þíÞŽ>Cv6ïd©ž=p!a¬¡« ÏËŸóA¤¥r:Kói´È¥ä1VH‘ÆT\îÄ{êÒu1ÿ|LìïR0aê9»è¬†%8j÷„K ;¾$BÛáøÝHy[ …hiX¶~ŸÝÂE4™K)”.*¶—ã7̓÷ÂGõ» O)*IÁCE©¾5­:9ÛË)›C·'‹4Ú­Sö˜s,`D˜Ë ¬«« èæ¨èà êÑð9³U%Öô§]XyÆEÙû»?íÖÊÿ({§g-g/¿ãaàpeÔã€Ä¯yQ©|Ÿóã("B¬§ÂMÊ—þYo ¥áqFÛÈÚEb“F§Q"ÝÓË»ú)ÊWŠãü,ª‰€Ðzõnhöͯ²Þ>_¥Œ˜øÓ.ÁÃg·GbœRP©Å@£S­Q}ˆ4¶«°ª40ºÒ'êÄJƨ9k`-_Ò1€üI÷Dmœr€•Õ#6`üí>‚° ¡…޾¿fÔ'ê¥ SQá*ßÙ‡ñ£sãpûÜG†ÚÃõ•eX®PÂñ·uÇ™ûþéppp ööö„!ù"³kã³õÈC½G_¡õ@^‰/K阙ÏïÁ_9~ «&qú=H¦9üÂúa{† %D›ü6ú}þ¬¿Rä *˜¿–.e„íj£S~iEþªBà”ç0è“ó5qôÚI¤^Øñ´XÍÖJ½ „6¿’dLPþ=ô(ÇFƒà§·¶!€c{Áѹ¤M¦¾è0þv.Q&\ZÓ—03 [gš¸s•°ÛŒ7àœ11ÖXÝΞ_&>:Th¥î[HKÜ"£·^ôÝc¤rpYÜâ¡^øSÿ¸ ÞQ¾?·(}Ǿ¤á=«—³Z.wÎ’IÜÊFNT&¬¡ÑÖÔ<5iÓIää‚®šl—èfÀôTœàC£ùn Çxš##ˆeàË@7=Ìâ¶?ƒÛ×â WNE9ܺaXGÁö+Òsäå­n‰÷»\´`¨`¾ü‡½¿û#ñCh ñëÚoëÖ­ðïå^ ë(hùå0dôÖM¹˜`÷þ½‰+{M‚>¯SœWm4›q à⩟ŠÔe蔓°ÅqÛ}.»þ70ý|èÈÙgƒÂâ3ËJŠ{tkãÀoÆÊÆì)Ybùi¢Þð4 /Ϥ()0ƒF;9[uÜ{‹Ë k vƒ \]\ ÿþ´[+‰˜;ЮíÅß(J’@b%í‰?y;"01š£[Íÿïñó±²é:Òý”†»ÒÓ T @axŠë‰ƒñNZ¶áðP€," +¶H9½—«Z¡òU DV°†Âh+ïEvzÝ Ÿ`áGT¹†*€§ï¶çõ3.ãP¡P$´Hî¾þ‡7y ñãeWh˪r£Û™œÚD}75|‚¹ßU˜=ˆá¦ÏÀË,ÚÃÎÚØ©Dž‰˜†y§³•1{§DºÊ¯:ùáÊ\•èÚÃCE_o÷S&¦œKçsØc ´z£Ÿ_¶S&EJIuöþ+Á ŸY €ÍmMV>ÛÓq$]e¾5HL Sì«Â‘)_Å1lùpåi¡ªÈŠœÙƒËñM‚Ï‘” ¦¯»¾cɸÔãKÍ—…mI,š£(ˆòd›[~2nnnÕH2DO”>ç°‚2ÞÛ#¼9ŸãºÊ<˜·lÛi‘ÚQ§â8^¹xeÈbfǧ!ããA’:NÆmdÊ¡/qqXÝàW|{-¨Ÿ¡Ñt¶óîª:ZýÔzïèj­9~ÊBWF9[CE!O ¢TÅpçÐk~#²ÄI ×ÊwàäEð»*\ê^‹™C1xêñõóD Y¿ÐðqZ,ßùçØÎ¾Äµz2Ê/³3—é#¤ Û+8ñ¬cT>ëà˜q¢ÄŠîZ;OµÇ5P‘R –žÁÃYËó/;°p¹;tNßþz‡UÎá/eøË* ’'òŒêXZÊîÊ3Pùˆl¹=kÕI, ìþr~º¡L‹¢P¢A®X0— •°zƒn9G‹¥ÿü‡—çƒFö´fNa¹€tP·ÈºG¦p|Pù¼.¨ƒz…ê®JZ ·NoºrA!w]“9´[(OrS³'B3¢+ÑÁ3hRæàc¬ül¢Y÷uQ^èY†Žýk<U×:Ìê¤3;ÅøøÄ¼”ôb E/îo0ʵo1w³²hŒÑC¯öN PæÑh {¬/õÈB5·"-òâ7½–1!!¯@²±®)yhlŒ“U¡I&U¶ûÝŸfíwV±‹Y¯,«UàK@Yµã’ðgܘ)ÆŒ) SiÓ‚x;C†}ügS.î”N‘¤írýŒç8À'j<{9mörX8íw`‡ÑAy÷v:Ð&Zã#éÆ4Ä©ÃǤ”ËÑ ™Î)’Xj6YÄßfĉåßÁŠªò–Àï*Ý$¼3cý>ËÏ­-!òê#¸š[ÙíI(_sOk—`yð´F\ÙE‡#SNâü*ü'¹,¥EBG¢«ŸI 
¬ù&§šƒ±Um-;V¸L]³Hüxd÷Lñç-ë’Ÿ¥‹õQwv÷¥ÑÂÈ“ëWùI d˜ÓÒúðy]ˆõàMžÓß·M Ñ#Í)O}»ðnÃgx…ÒðQažžë\|7†Ð22œ]Ls€íš}gƒ·<þO ðõ^º*:r‹çµ»)@¨÷Ô4Z<×çg¦O\$Ø[ÒpòrÛã›ÿyËÿ0²æa©çPXªÏ°°ÂAÑëCýÆØ2=§¯½%øôë[”c(/Â>Øt’hAÅcIJY¢/e9È.)ÿ)Ó²?R¤«n±é ÍV[ºiiO?4wœ×QÂ¥ Þ­¬WÞâªB€lpf6½’vÇÆÛuZؾÐKdíÖûßc‹¢é­Ö ²•›¬”džY¬ë´~²åM~”~Vr<ëw¾`ÁðždS!ÓŒ&/5Þ$ùú%«É-„ðuÑð²Ÿï6yØ<|“+Ô è™Ø3‹cèØâ1oú´Õ£&- ¥Æ9I†åüU‰±av«ãò~ã9D`2¸Æ¶ô¤(ß-Ǹ\e«;ŒÊþò¡uîçJ ;x†è©£Á‡ÏÙá5©rr.ìå¬ÔôÒñ¥e{ÙYh K™Ún&òmq3·õ œyä^ßÉÖÓ €‚ÿÁC뽂ûõæ«tÕÙ'À‘)_ü-úúÈÙÚô;¦Ú_¡y ©õMFà–lñœ~—Ö©—¨ŒO@ˆ”*[Xüj›6ÀœrÐΑaD·PB>®'­i”q߸§¦·ƒSU³0¿¤ƒ“A•ñÀª@ö©{˜îÙN³‘+†FT¶$k.ת `ëh[ãŽRC¦¾2­å´Çp(í¾U·áL£9³ä9âGh@µI ° µ% Ùj#;8»ñ>ÍÕsI¦Ì3T²‰ýÆÐh·l£ñá÷¯%Ý¿Ÿ÷ãÇ£ð Ó®v;¬nql¼½JŸÅª=BoÓÉËʲtË#ûk´´dZ9Pz½™x.s ?ó íĨ|ÐŽ¿·uôS–;¾:1wìd‘ŒC².5\‡5Udwï߀¿GßUAQO¹EWeÎ¥F¤C×Ü þ«Q¬”(¶9<Ú†ÍÛxþFãêßVÃÒI£êH`©ý2¦î=Ãì"¢Õ¹QväÌÃgfV>β:HÇÆ¾Ú˜ÂñUIj†ŽkÈ›$üÑ|æÖ¿¼ }€%P]‰?õgzß ˜ ïuŠb>ŽÓõâ†I²^w-³l|_"Ú;Tô½Ÿ sg³(}p&Äñ´¢qE{{k¹‡aÉ9Žô¶8M #òrïÀ­záËN ÏâøA9§Ê¹íßp¼²1¢ÄYoZ9‰¾2É gçßµºBžõ8ú&pv?Òm£ˆE¿c\uñý;Þ4ÚÏÉ<ûû64I¯|ÀFÂ#Ï AÉjm¼Mfùpvì"]´BgÈNù?ðe÷¦E–ƒþs–î–xâ´#Wh‹¿ÊRà.ó’œÏïk&£ T«¼fpÁ›©6ô ÅÏßb2£8eQ>½•ñxe—§D¥P !$,ECE~XñÄñ[š¦37Ñ!<'âᦘÓ%0KULÙ ïmŸ ívÕi”Ę2|°Ö©w ¾yêDáø)†Å)ŒaaŠñ¨®fkG½1Œ°8+gS8_Í 1ìU´FT׆$¥‘Ï×õ?–ãsn®é=Î/hNÿ®2ÄÒk•†¬&Ž?„Âg éðÍ ²²Ò¾$l¯¡L”cd³ôÛ!Ñ. 
ööVdÍÆÛSëu÷&1=Ï>%´Ð2äƒ3½ÝÒVYš³ÞÚÀÊíhlN6Íݦ°´ +‘õž?¿ Õ%½´“¼îaÌ0óTAtÚ©óßø±@i ¸ó¹î‹I僘M†Ï§¨8ž#;´™=,qîÄ¢Žgn9Ñ—ÍùAË%«Ñûò´ªÉÑ@‹nÁ¥øi{g5ºHþFq^Œ¾_Е±ZF¨‘x(kƒLµ¤Ø ì ÿHŒõ€Ù_Äh’ºÍ°ÿ-tªnA¶Zgs™ïß¿‹‰‰µô]<<×5<0F_‰‘ ‰Ñ #S×8\®av³ù0rk6ÞZnH´ä” hœæð‘P6ßL£mf ¿4¾rL&yUí0;²åÏŸ?+**DEE[(þÆîS?w‹r0--MUUµEs,Úš±ÙÄÏjϧ™iÌ1g¬ƒâtLh 'x]/`EÒ“ô¤+¨ZK©°ÏaÃ$¥÷;:`X£…§^0úˆ7L߀µò}–Â\òZ!ãÂï())ùåË®Êüü˜dïæ‰êÀ‹-úö후„Dë?Hµµ4ÚZ+Z.[ÂÜÈÇÇWVVÆ%OÊLJµ(áóÞ¸qÖ>‘ä´ñc¨KùgŒ¿Ï|M ÖjjÊ·2- âÂ=8¾çG&2”u\ ³Ï”þ©]¥%ÈÈï•cïÈ?%ŒyZŒæšÒ?´j—².ÞóÄ®R•áï¡´¢bU¡hz£¨ÇÕ\‘êñ0QRÊx"úU·Ék÷¦<§‰áéŸJôDºòðgŽŒŒ„¿_¿~íÖ­Ûïß¿yò¡òÁßáÇgeeµ’uØ š-h¶–—Wðñuæ½/E²¯¼¼\WW711‘ç YòyóóóŸ?>dH™„ÜÎÅ~^¦ž=Uk 2ÚÇ}YêÆÕBö0b=ô¢â^,×ê­†¯¯õíŽ68^5þ¬z<5³.Æ;Äð ŽÂÂB;;»½{÷òØŠˆ0šÎ?|ø ''[á¦Ý»cÝ[rã, ã·´´ü÷ßÛo{CMôéSµ¯KRRÏSOAAé633{ÿþ=Rv"~í¬Å ľ}ûBBB„„xg¢FûñãóðãÇW®\™8q"˜ 111ÐBâããžž>}úóçj# „……a…ŒW©÷öí[Övèž:u*ü¦¨PBâ‡ÐÐ××ïNDZcÇÜÝÝ¡ãÙ³g#GŽä™”‘‘Z^ZZ uBPPJ…±±q»žŠJnZèmƒ²²²Ù¼²Ù±®®®££cnn.€¡C‡þúõ+''‡‡©WRRräÈ‘Ÿ?nÞ¼ÙÆÆæ7¨DBâ‡ÐJ8zô(é e[…•7ÀÔ9mmí””x"Övé/_¾L™2åܹs<ð\’’’aaÄE:::wïÞåyê©ÐkÖ¬A£1‘ø!´`]›·°_¿~<ðÐ(ïÖ­iöñóócvåÊ^úLÓ¦MëPÔDË¥#ñCh;XYYñöª©©ñÀSÜ¿ŸtxzzòÞg2448p`‡¢Þøñã?„¶‚‡‡GQQ/ uaØ1cxéqxò35êÆŠzÌN\$~mÞ›çÀ9ÉO^^ž7§K—.¼÷ÈþK²]·ƒ>/|jTø ñChKÄÄÄð°øúè///$~\ WWWøË3”†`íÚµHüø!´1Ž?ÎÛxìØ1ž?Þk VVVNOO‡SSÓŽC:Tò ñChcŒ?ÞÒÒòÔ©S¼ú€¼4cZXX˜—>˜˜sqË¥K—vÆåææÆÅÅ¡’‰BÛƒB¡ððÓ­ZµŠgž¥G<ó,’’’LåÛµk×ÿþ÷¿B·!C†|ýú;HüÚ¡¡¡<3iº&-Z3uêTÞ0•xã£ôêÕ+//y¸iÓ¦Ž#~gΜAe?n/­É[[Û‚‚x6Ù›©y?ÄÉ“'Y•bܸq„hÐvÿùó'*pø!p †jnnË“OÁ"))Ù®Ó/**šŸŸÏ6Å-55õСCeQQQHùø!p òÉÊÊ~üø‘÷ÍÊÊÊÀÀàöíÛíýAÄÅÅKKKÛãTw Ãp‡ÊW󔞞oãÀDyyyß¾}Q9ƒÄ•W×Þ´io<Èëׯɕ‘Û***(Êׯ_kÛRãׯ__¾|é䊎Žfn¢‰€Äë•ïéÓ§%%%¼´ÃÄèÑ£™SÊÚ5à#´ ñ“’’zñâ…˜˜XÝ#µµµŸ?ÎÛœºÿ¾´´4R>$~Ür«‡@#ƒ—ž ÊFÏž=Y7¹mxüø±¥¥%w¦-22rÛ¶mOž<ô}—ê ¯¨¨˜™™ÉÛl"»9Q©‚ġ݀T¾Q£FÅÇÇwïÞ7 *ŸŒŒ ÛÖáí T*•{SQQ¡¯¯¯ªªº{÷n@U ÑÀk‡ ÂÛÊ7pà@XßBʇÄ¡]"99þÒh´±cǾzõŠž*_ll,,¯Ûéb’äi+œ9sfÁ‚+V¬X³f <ìܹsRRRâa]Û…W­=Þà ?„ …B2ùÞ½{nnní}DŒ¹¹9üi“ÿäääZí^;wîôôôôòòb®’3Ž¿‰¾óK—.ñ¤ò-[¶lܸq0w!k‰¯AKK‹T¾³gÏ&&&·ßg¥0´h?~ܾ–Tž7o^ Å|úôihÕùùù1—Ùü‡Žf¼Å¬Y³Ž?>{ölã…¤¤äׯ_·nÝŠŠ$~< :}æYnnnûµh!Ú×ìFh^TTTtîÜù:µ Öf”••áát:Z.å<9‹tôèÑIIIh­N$~¤ò‘S˜Ûé#À¹…¯:22rÑ¢EM»|çÎQQQ)))¤Ñ:ÉÖÑÑá1å#›Í›Ö剀ÄG•¯´´444ÔÝݽ=¦*Ÿ¶¶6) ÜøøøÆŠŸ¾¾¾··÷¸qãš½%³!¸KÏäv˜Õ¡ÑÌ«Å" 
ñCø[@åëÝ»÷·oßÚcúaþíÛ·ýúõãþ¤>}ú´!¯_¿~éÒ¥-[¶$$$´a‚}||`2x#Ÿ§§§÷êÕËÆÆQ‰B òµÓ‰ä;w¶°°HKKãþ¤~øð¡Þ0¶¶¶ãéhÛÔnÛ¶g”¯¨¨š}<°·?„æT>%%¥ŒŒŒv—r¨|×®]344äòt Õq6&&¦ÿþ‘‘‘\’Úððp777ÞÈÛK–,‰ŠŠBG⇀ÀPù8°páÂv—ò)S¦pÿâââµâÂjG||räÈܹsÛQš»ví3uêT®M!sžß•+W¸ùMòÆðÈ‚‚‚Õ«W#.#ñC@h¶mÛÖ¾ÄÐWãfñôý²ÔKâÅ‹<ƒƒƒ}||‘‘ø! 4\^@s•JårÛþ:::rùkl¿}°âܹsHüø! 4FFFí.Í\¾G—.]Nœ8áééÉ寱OŸ><Ѫ.Hüš“v—æ/_¾psòøùù—/_ÎÍ3xIüTTT‹‘ø! 4í.Í×ã"ŠòñIKKsÿk”’’B⇀Ä¡ƒBVVöëׯ\.'låæäuîÜÙÒÒ’û_#÷/Ðôïß±‰BSðúõëö%~=zôàrñãæÁ¨LÔ\ƒ´=BAAQ‰BS@£ÑÚW‚………¹9y¿~ýj;Îsùkl ÐÖµHüšˆß¿·¯óóóssòÚËö<{™ëé ñ«–Àñ„š%Ný÷/#Ç0 þz\Í 4¬ÙaÄj÷ ù¸ž‡«@‰G¬þL7˜}?Z5)[ Ã2NMˆòG½€/óöƒÇoÒ_æfúï¿ A¡‚¼ü -ÛJŠ‹JHõí7@_kxîÜe¼Î¢=¢R?geÿú!ÔCd¨úðm;ÂKŠ qü¿ÞùÁCTt4UÛ0yeå剞f¼y•—ý§¤0/7w“@YéអÒ2***šCrÃk|øâÍ‹gÏr¾|þŸÇ'Ð%//×××O «¸¤´ÒÀþzÃÕùøø¸?÷Þú"'ûSѯüÅEEññWº IP((ëŽPã¶4çüø™ü€úé}f~ÎWøù*ÊKñÿþë!&N‘S©1TŽ"Ê¥Ö¿n,Ùˆé骅POAaU¦û¶^Lx,Ø•²ÜoÝ !–8м]]Ÿ},1¶vZl12&&N@ t„éTfëƒùp¦òù{z½¤•N[dcÑ¿ö ýÔE ÷[©[ÆžÔíHÇ7f:•”@F8fý¢òƒ†Ëª7ye¥%7ÿ;Þh„Z‹Q¼ÿüÍõسcf,bPƒO¬ÏPýZ§7<Ë-ƒ d°ïÖ%Á.ü³,&µ}î½GMM¾c0݆Ÿ¿ #/HöU“ì[陼£”ËgdúÈL6ך .¯ÀÃ÷DÈ Òwˆ&³©BNe$üÇ1üwø¯2Í9Ÿ³\=çäèÄmµO?Žè¯(¨ô•>XM®ù8Uð8R´7ñ%R q-! 
‰a©ôsÓ Lå~3 2¨š" 0?ðÓ…’2!³@Oê7)oµ@4?qµ¨®5x,m N¡›‰nW #òBvU‹~FÆÅû‘f#mUø1R¹ûN ‡¿×vN1üçÊlØ®m6CP¸»˜Ü@#¹&ZƒµÆ0 ”kÇ÷:ý³¤k‹µ•½xOK¾›¤9κÇÏ\Ü´Hº ™Ì_ú‡žæüœìÌ')‹¬ç4W¡´9Èd¾†u ©×´HøºN˜»„|¥E¿Þ‹;éæ¼´y_ãŽÝûÔ &vèվЦE¢>Æ„ùÝ/Gít_æÒÊFáæmÛõ§Ìƒ_SL^ÙP^¹I­PF™X’OÿWq)*|ÕÊ\vàe-ñNÂÃ)DëÚ4›¦E"!#ojëLÖ>ß=Ô½3f8f4*Êš_üªé°^»/z#±^~NÒšJ¿Cvˆ@Û0¿kOêÙ¦ä¦{ÐÇ?aXU]R¨jüX>3¤^_!Ù 2Úw½šß†Ç@ë‡ão H¥”^h^m*R÷þóím¦[ž 'Ú7¬¾éöå”-ªÚX ~Ÿ‹n8e¬ÕÂæÙp–]ú÷ŠÂ__¾¾¢N3oΩñÛÃvLŸu%•¯¹ *!­ih˾›'÷»9;59žëÉÔÿø»Hôé ¦fLžP·c­Ãä¥ÞŒ›5ÕTˆ¾"h“QPX|ôø ³™zS›y¥Vc› QX÷©,ç“ÑÝͽY_ro'%mh4Û¡£Å:u†ß¾êìwodDº4 ùsoQRùš ¤áÓ²jåJT²5§ø1›=Én³Ã>‹7.‚V—p7ÆVœ …¸ž{‹ùgUè/m‚cE÷ÖVg„ ‰÷Ò&ê_Ôo*¾Ðã ‡•®d¯$Nó¯lŽG`Ø¥j—èæe´m¿{(W0L“ˆÈh³Û•„lîÞSqø˜¤74¾_ß´4þ¶eï¡£Ã'LcÙ²[ôµZ ‘+Ñ»”Þ;Lt×hm“i-{Mm]`î½êµÒtl4ãCŽGÛÈÚD’Á0»ƒo"lW¼;÷µï”`}a¬†X ¾Ø-r¢EZNéƒK§ìÌkøUQ§b†êN„Ê×:‰„Å# (x¥ÇòfŒvÇî½ð òµÎSP•¿ý6YN`…ãÄùË£L[o)[çÄôÏ}Eøe¤z#ñk".r•yQçEU*LÞ‚ã[8kÚ gm‘ȯ­3¯FȆ@¦¶•æYý%Æãx0óPke<Þ±[üMšµE®0šmúòéÆP÷³ðÅçïÃô'¶r‚;wê,,;(â@¤ýBÛºCî;|Ü´-^éËMþ«¼”¡‚6Û8µ~"mþ9~îÒ¬)ÍÓôMV2Zÿ) evˆZ²°ÑýsY_ri…ÿ‘Ýá­‰n"b9P/_376\:¹_m„-ÛCÛDùH(©:zæÂœiXV^>¬g·Ï•‡ºn'wx˜·t:[ÝÔº¶¥ac™Î«­^©éÿÀ /Oºƒ‡†µ‰ò‘PÖsèÄéù3§ÿe<~þf \Úê)´Mg„îÜãüO#êè…Å¥ Jº‰´ÙÊp}ÔF'ÝO=RtVÊÄ%зðjô`q€ºsá£jl„¥_} 4´ªn×WM·´¬L€Ódùía;[º§~£dþÒ„”úÚ#j ðàùëñV‹Ú6‘à L7üÍXŒs×n·¡ò‘Пf³kïÿÙ5ttXô‰“ÚÆÓÛ6ÍÅXdùµ,ŠrŸ¹-óŽˆ>M(j¸X/Ú¾«ä.<¸—¬;I¡Í“1ÑÚ±¬¬¬! ¯$>J³ÙùÀf'á.Éû±qúÁ“Oc¾Õ ¶ÑÖi]äÕ@vä" ðã@Ã…aºAIË(ÿ•*üuÑð±""ÙŸ^¤ÑüšÓGßäà/û÷„LÆŸaX~ì)´û#qÕì¢ÀÀÀµkÖ°ÝÖäÔ ¸b§Ì÷Yuˆß½¤;º“ëšÂQöõÞ‰s÷­í›sEüÑÈBn––9ÏÓÞ5šk8Û¡¼¼¼É³ ~üøÉ ¯z€‘©î {X¬æ8¿®á¬±×Ú\ù ÄeƒCB—»8w´â®Uį" ãJ8äg–âBüàúáu†óC6:`†ã±ðŒ†Å°én±×¨®õÅ–Œñé^Ù9š{PV^>Ê„}„ iý0-¤f¿/¼©1U™’Ÿ?êè‰Eóëvq#.¦WÎI§Ïœƒ£F(‰•~ÿ=Åp *‚- &S/®P§O¯x•Ï÷d²zŸ3±»“ýºb˜jõ6÷,1ON‰¯§&À´øgA¡súÅ>Vó^ÝÕaÈ»ÿM)˜œBX~÷‹˜-\Úã9´µ‹¹8xÔøzè‡POp6¸sî)SŸK`دM~ÉjcÌrò $z‰pþî¦V„«Ó{uÉÁ}L‚. 
·–ç¨ËÊ“½$ûfðú ?¬þÔ§ÖðLØï:¼äqÀ“•ù£ŽŸ\ØÈ™”ô¢CóÅ»“ƒFÀ:SΫ;fFæ-”W‚^2ò¯Þx¶tY^@Ýâ÷,õ~Ÿ!U5’ÉÒB+ÝŒÖûò÷gïv²Ð—kiÒ)ôŒ,¿*V¿3Yÿ¦eÒ´RÆ[oÄ­7Vž®ä²JGpUõåÇLMMãââ˜õhulÕãJŠaXßeI™Á:ÌÀ.AA!$#HMežÒ7ÕOˆc,Äö'g¾#TÃûOÄ9p !»¨;ÝQòØ[]z45;‰®ˆêæ†ÿÅ^{¾Çúkø©±ËIRÁð{¤…Ž`i3õTTÌœèŒF‚°4”Õ²PøröN½T*¡ª÷­7ïŸ÷Ñ;9&æËç I³´|¿v÷©«‡*Z/ÝÉÏRdFÏG*K@juÇa@X~аüüúHÃ}•|®ùT¿¤»ÚJjöJ€3}àK`ïB“è;ðç¯B¶¥4²^¿¨[üàëZ—X|JW0$aš‹~¯ÊÚ†º¦õÑ{|;bÂûAÚ§lƒwÑs|¡º”ù9J^l5~õ ýÅÖŽ{rÚpŸÚK‘¾@ N$þÓ%{i“´TåŸ[„êÒÓ¨Ùgr¨&˜:-½ðeáð0µgéÍ˰(Ÿ<Ï÷qIÞ#æ%dcrTj®ª´ÐAkåÐë`†!w*úñjUIÑ@_ †4óiïß5)óv{L}1ˆþª)*ã¶Ù«¸E<£Sý°úp{óuq>ÿóìø²yn»×%åY( /yð"êõ 8G]Qôv§>ÙHÖóîf•¿¿¤?jº²kÜ1×Nê ÆÌüï,-tg€õöòx—ž'?í¾Ÿ­%ÛsË4¡#ÉôVwzÕaМ]Gƒç_Þ¸íõ ŠÜ€ªUôàÛûÈÖz?·,ŒBýÆÿ–k<¶M œþ9¶“H˜ç2µzæsîÄ%*O'gÀœ}Ì'MŒ½pÅëJžÕ°? ']ŸC‘øµ º@Ý¢^ŠÐ0uÐV–©F'§ó©;&1ÕnÝíÜ ô"úÐ^ÝJ¼—û³ÔÔHÉÉmô,À‹Š ˜ íu•iùùëW)ߟ?Lý»¸>!oƒ~Õ<_¨|낣ܖÎáG2W›åWÆÑ_ÔYºmj¿šíÍ4céí„'RòæB[ýWµK\ž>2|:T> ¥O%ˆúñÞY²b»âdưc£ÁÔCƒÌ@Ð6ã’AüN š Ö©ñóØHXŸ-jöáüeåìµ¼R[´ˆO Y4ØfjäLYóE2ïöÓE7‰FØÛFžÕ)ð¤‹SÂ¥uúôúT>XL×|±›­”j»]i9çad¥¥•þx•ÚÑ^Ý?èërêÚ“êõ¢ÉWB}!NIמ-®ÃùNÂUƒÊÕQ•þ‰N•rßþ|ðP¨K7~~þÎBD‡ó‚N|Mû ·}׿ vù–’_ÔZêâŽÎÃí‰ÖŦ±•5í£§Zd_¹xÌÓlv º4ñ¶-½ö¯vž þ$õ<¢ÈP0a<òÎëC僿KFV-¡¹$à®[wb‚0T>øÛ[Ål:¸}º¾DwfyXÇóTGz“¥õ>zá¶_+wx V—b|ç5žêRòÀ³ªM`þ„˜ÁüR«‰Š‘g©ö¾†“®3G,[Çò+º—róÌÝjƒYŠžlVó †M~¿W`«¢ŒÒöJŽ”||ª´Ž’âW7·³9˜ utWHðªå6—‘ÕW'¦ú¢e~Ø¡2 _'ÿ„äçÝY?æk¢È8uÿåÅU^ûñ8­«‰x½4-ÓòacZôÈáÓͽö;[élž:HÇ ˜{%xÀDߘGÊØ3Ë)óMSRPÙ<~FªËË'»8ϤjÐj„9Ÿ³š÷%ü‚V¬ÉMjö•Æ^X’ÿµ× ¶Œ Ä¥ekmòÚ;éÈ=æVoHQ—¢^K=BOÏRÌXdç Ëbx|0ô¸”‹ Ó]¢JØ(Ë®äÁbÚ:ŠýÅÖ‘B%EEŽþƒ(åÓ~ÒB§* PÊÀ‘«£“Œ¼1utE<¸õj‰6¡‚'|¯q °ÇÂkIvÙ¬ ÆŒRG"™ ‡6»lëA_`öÙÿMÑ÷Iüi®Ä7q¸s¡J¼º»O\ëÊs®ÜýÀ´±2öNR—^DÍ. 
¤ñpúfkåÊH=ê'búï›" AWŠ;ÕîàÃŒJÌæ—:…w5¸ß$´k¥eååëYO¢s³?2× LÚ4²fëý/<5dKšÙ6¿ôÆÉ•N*ýFyÉ"e¡øg|T ò1VïSŸíw#lî¸ñ«Ì·§Y›Îh8é~dg9ÑŽVⵊø'ië­ðA¾ZìY Œⱚ`›µy¨-Zm^¼¦ÞüÈãûAÉïÊfÏáÀhž€¯¢[ÐvœgoQ%{y0ñÉä¥ööš¦š ŽÞ£§)‹¤®&d¤Äã® Ô¨V-¨Ù’Æ?–ÄÈ@çèôÊ.ò1¤jHÊ‚jW¥°µ:¢fÓ»»R³]á_ÊKØ0ѤAãD i“–¼Ø5jür#ë®Þ jv¨‡´ÐUæÎ›z$:†0·‡·®jbï·›:ÙìKy-U » TÖà]µÉWÄbfy°6Æ2ýâ«:Aw=ö[ ò²^×â¼€Ž„xS­fÉ*J '݃[ñµe$~‡FõNa €»ÖÔû.î]ëµQ{öTè5©z´{ÂÕ?¿¸b¼\Òù£Žö yo5ÉôD Á®ÿªñD/]ù}É(÷¹ZYd8Ì!,¿&¤Yg4‡6@)ñ^QÁÛȵ§ÛéÔu”n…ß«lþ‘“–Àì5I=ë¥Uuë¥+˜n‡ }UþŒN3÷p3æµRC|"β7;WÆÀóÝØcÿ,nô²S‹Ì=½{ƒ1aƒOulŽCÐÑš3ï¬Ü}­ÜÙ=•ÇÍ W}¸ BUâ5«fà ÊEÇ×(Å„ƒÎÜ‹ qœ¦ͰzëPKØ\¾—&%ß”ŠÙk¥¸H“ø@Óa¶µu,îÐf¶U°™9­í祕ü\dÛˆ¹”ûÊ7vÔï¦5Úó|SNÝ›q~§ïq%>¢}jÒl¿=Þf&›`ù]<°}õ*Η»:'<ÏíÝ–[¬=¹uÑqI]Ó7Ì™A. Ö¦5¯ß¶sgÿMË:?Lר â÷—¬FMU,ÿ™@¿¶eý•û=ÜÝø!tt¸:ýsðøéV[‘½ŠW’Fís4lPÿs—¯Ë«é4üÇäà%Ç€cAWvD‡×¦|€Þ´5HºÇÇâ~®mòJi/ΟUÿ¢‘K—ØE¾Ð¢Ë@×ùÝÿã/ý)$ø·kCÏ·œ|þÖ=9å¶»ˆýù­¢Ø§Q—˜‰ø?{gWC÷Æñ3(*ÚKEÚPiQY*o‹—²”½ì•µBÑ¢E¡Å’·D"kxQe/»WвT–,¡(\» …ÊþgîÜn·Uh¹Ýžï‡Û™3gΜ3sÎüæ9s–£ú·n®úÿïºúO(âð83&Y­[×°«ÀÔ‡Ò‚·ª]$…e~õÀÑÃßy𨤓L»vMÝi­>³ãËHIŠ–”$Ýy"ÕU©‰“wóLÜìiõµçì¬F6‹ÝÿãË%)aqY•‰mÔ@ýû5ìjVõ!?/ËXOó7Fè;̰=zꜢ®QÓ×ô3Ñ›[­òø5ãéæzøä)¥>&ц{T¤èeq6lŽ4ÛTmwß> þøZÏ4c[Ö¼o¯ UÿX4áü[©ñ1sfýš’a»ïÁÃ&M7+Íé=‹¼<6ÎYSÆoØ´Íxœm“åâÉ3ÜüÇNœíe0°É^8$Û·ñöpG­? 
fèfG;z5ê‰RNîw˜1M§!V›?×þcAáÞ˜C ¾+'ÄÿJ¯&ÄÎýõ®¾>‹ð3nÅŠ}Im ñ÷÷×™õ;6ÜÔ ãšæ¾§=2mʆV>VI˜çÐ4¹¸¹ÐuÎ/Û¶mÛ)c†gÜ}÷¾PA½wã%¸ÍÿJ¯Ÿ=6{º-jõ€øuŸ¡ø÷hÂY©.å•RBÈÿ%ìŽÀ–“Îì†\#^\TdþœÙXc‚W¯¶˜ÞÀfVöÍK=TT°•ÙûwÓŒŸqô%]2ÌΩa k²8?íòü\ÓažâÏïûÞØÃr=u¶·NÜŽßtì¦4Méݼ}gï–‚…´ôþ8½ÅÛÓCǵ!§ÄÔÕR×Eècá§=QÑ Þþœ“–¨¬¨ ­¡® ÊâÔ“1ChÇ© ÿ1^¿é3ø÷×v/xþèfò%梵½m-i¬1t3VÁÐ5úìÄ;ÿæ”vÄÿ~ü·Ë|è0žÝuF4X{ {¡ÕkÖi þ½þî4O3’?|œ6y"BºÊ ù\›jÍšãôàÑ“¥ÿ#5ü~w˜o^^Š?B·pjû4é‚™l}çž½ÂR²Ýu ;ªü¼¬[©In ¨q½==)Áâ"®ÎÔ¤ÿX·mÙ2hÒlþö¿ýªqùH´‘±‘ކºŽå0x”ø¿ÉðÁ³ÝeeeGãÏ<¸›ÑCS¯KõN2U—zóìÑ£ÌÛ--†w“c2½öëÝd Æ*ȹ”Ý£ìggÏûQVª¤®-©Ð]°cåI8ÊÊ ^ç>{üàåÓ'úFÆf&¬>½ç7âBwž +Æ´>y–—p ›ƒ*š½¥•Ôª÷ßùñýËëì‡÷3Ò$:K5JT˜šEgx£wîŸ0¦¢ÿHñׯ‡ŽýâyOí¾UÔk°¨ÊÊÞç=~t/£äÛWú¥yß»hx6oéiWÑþ± ðȱù?ôÐÖ“QéÕ¾Cµ9öÊJ_?{ðøÎ-¢M ånÌyetþîßtƒÁ± .â(½ÿ]¹–rù?ÙnÊ jšâ]jè<õõ㛼Ç÷gfhêöc¿°jϱ‡ˆРE§];ëÑ–h´eíArgËAÆÜ“æž*Š=Uê|tÕD}5›+yÝ»U_ 500п¢S)H5ï%`Z™Uéׯß7Ê/£žÙ®^U\T¤ÆeCòòòºuëÆºÔ]õ‡ésOšÿþKÿ«â‰S‹ÓÌÚQ4쥈F[ ÄZ:šššœëœp-¥¥¥-ýRãë\®|-†OŸ>AñDAAaÑ¢EÁÁÁ\žÎwïÞµôK½ÿþ)S¦´¬4KHH@ñ¤cÇŽ›7oæ~ñc0-ýRoݺµÅ‰ßèÑ£¡Ž€ø‚_íùe¾·fáêÕ«222-ýR·8ý&IrùòåPG@ü€‘’’0`À¼yó6mÚĵ‰Œ‹‹+eÂ×’—E=þ|ËJðøñãccc¡Ž€øo²~ýúèèhnNááDZyjbb’’’ÒB/r=?~ܲÒ\TTµÄx–œœœµk׆††pg EDDÔÔÔ”••[îEÞ¿ËJð®]»N:µÄx–‡&%%ihhôêÕëþýû\˜ÂÀÀ@>>> ‹qãÆa+°Å]aIIÉ÷ïß·¬4ãë<}út¨ ~ÀËâ7oÞ]]ÝŒŒ ¨ ~ÀËܺu ÿbå[¸p!V>n3S<==iñ[ºtééÓ§±…zïÞ½–rmÝÝÝ[\?—íÛ·ƒòøï“™™I;V¬XñàÁ¬|ÇŽÓ××ç’Ñ´6c^¼x±ò‰ŠŠríçINÆŽ{äÈ‘–U’““íìì R€øï#&&F;„„„ =´™L@YYÙW¯^Ñn¶`åÿøñ#7_UîOau"""œ¡F€ø@«`È!lw·nÝJJJöïß?yòdZù‚ I²YöéÓ§‡²7{ôèaffF·"b]QTT|öì^ϼ¼¼´´´§|]»v¥mkÄZ3gVZ;—ŸŸ+_çÎß¼yƒ˜Ó|à_¬4=jâ¹`,,,’““9} Øn¬|—/_Ææi¯^½¸çbÊËË?þ¼eM`ýáÇÿý”ÄZýúõ;vìX•Y±ò=zTUUU]]VúwÔ¨QwîÜi‚TÕØl¸bÅ ssósçÎÑ›&&&ˆkЯ XùZÖݧ–=<< "€ø@«cùòåÕ§03f þ0a¾}ûÚµkGÛ´òÝ¿ìØ±YYY”žÞ½{×Öl¸fÍš*>ÍÛ<{áÂ…øøøµk×Ò†r BNNŽÁ`°?© ~Ðêªm×Áƒñïܹs544hÏ^½z±•ÏÒÒ[‡¡¡¡ • ‰>Ô¶W[[{РA/^¬âO+ý@o‚+†WGGçÅ‹ƒ™´ {$%%eooÏ e€øðGœ;wîëׯµؼy3þ-++ëܹseÂvÛíïï––&**ú{)±°°¨Cùh°òIKK¿}û¶ú.úž YöŽÄÛÛ;''çСC’’’-ë#Ù–-[ð]Ž‹‹óõõ…â???ÝSã'õ¹];Z™JJJ°üäææŠˆˆpdÂùÀõôô ž7oÞOÓ@Ï*’PŸcåSSSãì Ê‰ŠŠ ­|……… 8_:uúí‹3{öì§OŸ^¸p»¹áCNŠ‹‹»uë†SÞ»wï9L 
¨ƒøP‰3gÎü’X²‡™9r?U±=T½í´ú7))iþüù|||›6mêׯíi``’’ò«³Š`åûï¿ÿºtéÒ³gÏÚÂ`mf§óòåËtoŸvÅ´³³ÃÆë¥K—¤¤¤sÆ“tׯ_tëÖ-AAÁ7³ˆMJ¯^½°‘„¹_=p,ö¦££#–™+W®ˆ‹‹WlllÌž±åãÇFFF÷ïßOMMÝ»wïêÕ«±PáÃ-ZDD}Ný÷ßÓv^vvöO›˜˜pÎ ƒÕŸÈ××7::‹Üîݻ٢¸gÏž–rמ""¢C‡Üvw®^½êååõêÕ+œkkkÚ³{÷îܹâ- ¬|õùøW:uêtòäÉׯ_«©©ÑMŽÃ‡7559sæ¡C‡°­\¹²n;Ï…Ia°I7a¬999ôâGáááØe°`RwRk´™®]»æííýôéSŸ¦ùf¶eË–°°0|^üÞÀö0`@•!ÿˆ V¾™—²¨¨¨K—.©©©Xöddd8›w2©rÈ7<==±q³fÍÚLä„$É 6lÞ¼;fÌ˜áææÆžqFTTtv`¬©ØŠ6mg Xƒ£¢¢°_¿~ÝÈÈë%P÷´5úúú—.]ªîóåëëûìÙ³åË—×f³ÖÁƒpjýüü8Ó=S@üh°òa¹ÂRñÇ.^¼¸¸¸Û.BBB¿ºöB¿~ý°Òܾ}ÛyNNNXÞ\]]ŸÖ°´´ÔÑÑñøñãÿþûïÈ‘#iO,äXeñ5ÁV)í©®®~ìØ1(i ~pXù°ñÄ–ŸrõêUlÉmß¾Uÿ³`#oìØ±XØ¢££ÙýMz÷îÍiluïÞ=%%…ó(ºßæîÝ»«ÏJSZùê? 6NÆ?LjÜ{ïÞ=l¨?~Þ¼y+V¬hÛ¶-ç^¬mØÜl×®{¡ Lzz:–:Nå@üàâÚÛ®^õ700pÆŒ˜ü4ðêÕ«CBBRSSUTTsbIìþÕ„Ué·‰åKΆ ê8+_ll¬™™Ùo¾§ÑÐЈ‹‹ãôÁö¨]hh¨¯¯¯¹¹9¶ù–-[Æ`ïÞ½lwII‰‚‚ÂŽ;~úñ yðöö¶¶¶ÆšQ[l½xñÂßß¿îx´µµ±<ÐS'ÿ´Õñ7`¯ó÷ùóg,-µ}­¤{Hbœ?þžñÝ»w={öÄfk‡FŒAŸ±ÊâØ9r¤——¾ŒlOlã²çÒ|ðàKXKÄ®[*µí¢'cÏöY…]»va‹‡^~¡iV@Ìž¥´½ÿÞØØ«Kõ0Xù"##ííí#þû÷ïO:5##CJJ*??¿îÀØ朧­ú{uuuZù°b“¿I@yñ€+ÀFUqq±  `•§Öüï¿ÿ°´ìÛ·o:“æJ¶¤¤$­|]ºtyùòe•½¦¦¦ß¾}û¥‘|t÷Ÿ^½zýê44l8›jsrrôõõñ¤ÇxÈÊÊÒʧ££“––Vå;"â@SƒÄsæÌ©2ãIïÞ½«Ï¨‚å[03ážôÓÊW¥çjÏž= êù­‘^)é÷:¾Ö†²²ò»wïh·¼¼I2§|HêøD/I²äÐ,¥ Ô’›G^c¨IŠ Bî+Ъøøñ£——×¼yóx,_“'Oöóó›8qâ¸?µ§NRWW§Ý¥¥¥°@R«?²ø!¨½l Çg÷¨ j §Ò™’Æ0‚cï3°_õÓ8׬¡bLeeó¤Â9ÚÍT>Œ`¤²ƒ; ð>œ#Áé÷θ¸¸qãÆµô¬aå(++£ÝOž<9xð b®>qûöm.¿ô]ùÕ…‚–.~o7®‹5QA‰OX­—ë†è¹SåïN’O¢¶o´²0öUZ¡d±ÔLŒÞ¥²Ö1;¼$ÂÝx’$ÙuMLL,::š'³&++Ë`0¸9µEEEìÔ¶oßÖÂmâ'l^inY‘>uL5›]û.·Ø\7ÎíiUBv©-ZÅáKHrI¥¨àÆ­ zÁÚ‰m¹<}úTII ;¸\ùª¨u3ξ4­øðüѺÉÉwÅÄÚäçÿïOâÁm¢Ár”Oþ¯!ÖZVèÜYfëÖ}?6ÌÒ Â¨];¢ñî¾ ø^ôèÑ÷OâiÓ w" ^€øóõëŸ>é òë×?Š)~ –£ðõËü$ªŒ j‘‡ïß&a+Wúûûsù½h׎D Z4·øÜ?,¦aUÅ32ñÙlžÈ~>Aˆã?—óIcQ( ~¨” øiWf©!X¡ö¦Šö£ÑÑ×—aÁë3r^ ˆ)/puÕPbÉH)#ýÜ­×%%%cÆŒ9°ÉïÐÅûFÎsŸ6ï:º=(útºÆÀ ÎXïwâ_}'Jøǘõ puÈ|ñqnÀ–Áš’x×*o—´ì—㬚hÒƒã uRqYå¹ —¨J·«ÏöU^§Órú œàS~&Å«¼}0¾™›e7¶”¿ ÂLXÊW¹›‰ç&ã¸s—ѱCyÌ?ÙG‚ÖMÕTÛJ3çìùôÊFKæðsLÖÓ»“œ´âââN§6ÓŸ>³_Œ7—ÍŸH÷öÜb?bÛSfPmǬ©OÇ{™Û}ž>ZëÛÓoNb%¦|û€@òÊžU&„Zç@v:+âÁ¾Œ]å«„ô?1þ-i!…úD:sßåœc™/´Ùð«”¾IÜy,ÓÑÁé—×vÔ®mkÕ‡ÍÑï&¶ùÅïÛçŸL+2ÄÊçÿZ¸îv×2üîÓ£ÐRZQ‘S>ÉWq„,Õ‚J«ùõ=#èÝd,ŠÔº"D‰–%y›ší,Þe އy,5WPê*sŸså§ÌJX¯j±]õ'ˆŠæû„p´ñ0'?Ë.ïuíÁ ©6´ò¥+ #$B’/¢+€{°‘#.¢> 
ÆÕRjŒOéòYšÛÏ?s‹yçi"Ù¢ó•““%,¯úàâž7í´¬ëbŸ¤ÿ<ýYËh´–¼TE°ÌËé_w“6ؘíYø<ó\rªx×>ƒŒ©K¿¾|þꋲ²*gÌ’ôl*eùÇžüR‚†Œ±•ldùúêr`€_=ů³©£ƒiGù,tÓ¶ñã ñóÉxàKPSõÄ@΢·Ûu³g*?Ÿ_n¦,BÈ›vnd*V͈ÖgÎë/ô ,‹5†(Ÿ#¥©Ïʧ7«1žÊ´g•³ð³>Ö”^ÞØ %à>:"ÄÏœ‹Ùž§Û到hOW99ü§Â`ü+ÓoË*/Od_ÏÈ=¦oD=Waå£^ÇãÈ9Ð?.?ŠXò'¯Nñµí·”$—Vlw0­ôù°ò¦Ûe’sdºþ¢³ä¢Z6%FÖ1ˆ™‘n5é4KýÔIÒî:ÀݘÒÝJ|¿„•,·ðho!ÉŠ6CÔ†ÕXò!+ñ¸ƒ±•Þ´‘# ×Ük´òÜ™=Ú­6pzˆ•NÎá)FÎû±å·Œdepèâã©A;ÑJù¡î±gÖZ>ŠQ89èbñ¸¿Œ:÷° ©µC±Þ½ËŸ'}è÷õæ}¿·cä)­u]7:˜U©©ö§=¯&>DFرkå¹Ú¢ÕÑD3}b>×þw•Ê‚‰°¹IühJl‚BI•v´ÈȺc5ÎT@Pzþ²Hly9ú­XµlÉÞkJ£lÆ™"AÅUÿîö÷YÓ]º„²iðõ /»´'¹â]Õ¼ÓŠ¢e™fŠÃéÝ~ÇnJ 9z‡kÉ5Y¾8[ü<Î’ånážÓŒi”kÜ>Ƹ}ˆ¬ž¬8P¤ç$cÓ¹”Á`½U[ß¶®á#o üýÔ Èšøû®¤Ý| 5^·v¢zøj«}½k?ÏùãÛ/'ÒJúÈòÑÉõ²µ·Øp¤ûúŒ³©7IŸ5tÌ´_„y/‡5•’’ö^wˆºq"$”?h¥ ›¾°Ö}´âWÌXëÂ2ìäûOŒ><‘v;Îw/¯ÙÒ¾!{jˆÚ²aÓà2ÿªгiÝ×MPÑÒq>õÍ%2ªê4Â!Ql·ã|s*®¦óç›Ò1Ó*†®¦gΰŽÕbß8Ä@üÄx‚mÛÿ0†&IÝ“^6}¦hBCCçÎ+$$÷ñžâŸt×®]ÃOÛ+V”2Æó@ŽØü÷ß8k«W¯&I²E¤œeee¥©© Ä€FÄÀÀÿ–••a󨨨ˆ—²6hÐ Ú!,,üéÓ'.O-{¸³‰‰ ï­­âÉvwìØñË—/¼‘5Ω3>þÜ·oß›7o¶ˆÔæç燇‡»¸¸@ùñ QpppàÜÄ–Ÿ€€–ŠvíZvÕ®>«TZZZtt´M‹H-ˆˆÈ£G¢¢¢fÍšpøðáÂÂBÞÈɋН¯oXXØ×¯_±¢wêÔ‰kS‹¨YFpR{öì¹{÷nîiÄx„=z,[¶ ;nݺ…•OTT´  €7²†•ïÈ‘#AAAeeex“k•ÆÉÉ)99¹ÿþׯ__°`”L?š‚ŒŒ üðÅÊçèè¸eË–ê q-‘ùóç;6$$$&&fÒ¤I\›Niié·oßbG÷îÝ¡(‚øФ`ðìÙ³[·nå|·\°„¿xñ‚v/_¾œ;ÅïùóççΣ/uxxø¾}û ‚øФ`哸ðávÓc11±üüüšyyy¶ûÞ½{nnnaaaÜ“¼¼¼¼•+WâW™3gÒ>?†Bâ@3€•ONNŽÁ`Л´òݸqÃÝÝ=))©e$44ÔÃÃÓgÑ¢E/_¾ìÒ¥K³§MEE%55µ[·n´‘M#""Â3½@ühy`åÓÐÐÀ¦Û§_¿~´òýøñ‹ÇîÝ»‡ʵé?uêTß¾}«(¦sçÎÏŸ?ð຺zÓ§êãÇÊÊÊÏž=ÍÎή²{‚òøÐÌÐÊGD•YÁÚ¶mûúõköæÂ… ãââ.^¼ˆëÜlœZÛ·o×€nÅ©ÍÉÉi‚ôDDD,[¶ §GVVV\\¼¶n´cƌᙶ ~´xhå“––~ñâ??õk˜p†wwwŒŒtpp êСC“%õèÑ£ÉÉÉ¡¡¡u(¬|t§Ö4`V­Z<{ölœ ÚÇ™I‡Ð“®áÄCañ€»`÷ù444=zô¢E‹j ‰-Å0&Õw½gÏžuDþêÕ«Í›7:tèãÇC† ™)**¾{÷®mÛ¶¿!–Š}ûöõîÝ[YY¹Jg!!¡ÙLªB+bii©——VbìÞ¾}{=ÜÝÝ­¬¬–1©çÙ±qÆ^Q' ##ûüRúéñ‘ x ~´ „……iåÀ¿ØÆ:sæLý½ÊÄÃÃs Eu>|ˆM7ƒ={ö°WŸ˜Ì„&ˆIõcwìØãÇ!7mÚTŸ$ÑýBˆÅUVVö§áé5Aó@üh¥ˆ‹‹cå#IÒÞÞau~ÿþ}tt´««k±Y©¢¢²~ýú)S¦àM55µß«0‹ §OHHÈÚµk333%%%k;êÒ¥KègóÝŒ?þСC<¶0âÀï@V>kkëØØØÚÂèèèܺu +_mmí5kÖ˜››‹ˆˆ`ul¤z1a«¬‚‚BVVVçΫ‡ÄÊgdd”œœ\}W—.]^¾| ·Ä ¬|ñññC‡­¾FnusÊÐÐ0&&‹Ð;wš8XeÙŸ‡ ²råÊ~ýúqÀʧ¨¨øìÙ3NÏaÆòøÔ€¥¥åºu몘wúúúœÊGO+Ê9¦¢9{ö,í‘‘áœ×+§ý‡MÒ#GŽÀýñ¨¬|RRRïÞ½£7>|xõêUÚ-..þñãGö„Ú\­|’’’ì¦WÎzºººÏŸ?‡› âP+XùöíÛG÷[;vìƒòòò°æ±‡p-Xù° 
¦¦¦Ž3¦_¿~ššš™™™Ø§n+ˆÀO Äâwÿþ}¬|ÁÁÁÞÞÞݺuk)—‘‘Áʧ¢¢’=pà@ìcjjZ÷¨ Ä€âúõëøwøðákÖ¬ÁÊ×âÒ•îÛ™››Ë¹^.âP+"""sçÎíÝ»7{ÊìV>l¿bñ[¸p!ÜP?€z±wï^))©ãÇ·è\œ8qbçÎp7Aüê…¼¼üï­À=øûû‡„„À­ñ¨/¢¢¢ÿüóOKÏEqq1ÜJ?€úÂ^¢E#((·Ä f<ˆM¾ò¨øËžj |âí’“ç}ü°éÓ—oÞ”åå–em“ôôš¡¯¿§§ôº°®n  ÙÙÇ”•_É 6C‡{{‘U«}|ü¡,øÐBl¾²âÂ/—ºÈUÒ ‚p´u1§Œ©Òðm‰T0rëO£*-ýŽøÚóýb\ݤƒ‚}}ÿH9Š>?è*ÿŠ Ú4×eôñ‘ _éâ²Jˆ-€mÛÃçÔÐé˺ñ´%´@AHsv¬Òqô-D>XqULo7–C¬‘EäVA¦X®MG \ŽZ”¨DÇ`☤C’s))ôß3[.Ê%Àn½ IÚV9—¯¯ìµkGõõÇüv.Î]81fŒhó^I©¼¼Ûݺõ†Bâ×”ä«(›‰ŠlJÊÖoüÆ¢»¨žH¯é;£æCAZ0y¹çkT>6ÏîRš˜B›}¾·‘×­„S÷‰OXPÌ4¯J™b± ïí«,p³â°4Bi[>¹5€i•áÿ“T?%œÊ—6>~XÍ'zñ"W_ÿ7sqíÚ®fW>š'ãæÁ3¡UŠŸ%A$Ô¾—$É?‰œ üë—ø>Ф†¾U9OÓÑSTRRu0}TÃ&)e£t$4ü ” EsìXêü5÷Š,bÖ$Å ÓÉ C±éÿnI®"ÖôÏçÃ)ƒ^K„¼ð½øö#@Á1¹ºsï[rëÛÄqÒ&at° Ë«-j:ÝØ±í”•¶mÇ÷¹8{ö™¾¾\\.œßº›ÇŽ–†BÕÚ-¿Ê¢r— ´ñÿsÍ% nmÓS)ôèMäѹ”ã[ !0 zl—óIcQ„ë VV3m¦ò¦¢~-%ß UäS… r*‡É)¨D]‡]"ðV•ÈúäÍ*^Œÿ–t´;ìwÝß6MÊЂøñ?²–ZÃùˆ—enââUýU6n1M½`ÚàãÀd(I­;%mÚ´¹÷0I£× ßÈE÷îêP|í„AIºÐv¹`Û+Ï€‰‘ l›Š¾Ë$¤7d8A¸ÐM¸*„ãìÔõo „ë %3Æ•>¾Æßs'Î齨÷øºO$×åùŸ‹‰Ã¢¸­Xüj²·L±ò•>a)ßS’Td‹ÇæS„É}XÊ7zyt&þ[pc¹X?öññ‚ZÊR¾ZBÖŸ»E‹ŠÊ¤¤¨–Ÿ¤€&)“”ù&R'ʧ•ÏxvØéM®‚5½J 2XÙH^ñƒÒ´8êliòÄ 6 ÷´©Äpƒ“A©ë™/Í™T¥!‰HÆK¬|=:})Œ@¨–·¤ƒ—L&îGHÆGûŽñ|h/foÇø£ ðQšÎw/Ïg¾üQb€Vaù1N/ì2|-UÚ'_^ñÕ–’ÏZîÙÏÑQ€éèêàà€_Íþ÷!V+bÐ"kÚ!Úo*XSôõY-çS”CÙÈÎyRÿÜã);Åp¶ØÛº mwcûn»ûÕ^“õš·xË÷ªÿoœš>¾ºôæÚÚµ KIÒ›µ]˜Iˆn¨³Ý¯¬ ˜lK7“ZHþÂ7…ÒR>uUãßËÅãÇ_q…­#€.B¾ |™n=?lâJO·“ªÚ¥á¤Þ‹úWÓŽUýMŒGŠŠÈÌ,¬Šk"èPw³gn®€‚BW(W­ZüXÃÖ¤=A¨'­4"VRm†¢ý–Ò:µ-jB,{IðQeEÉé4»øúŠ ûHꊄJ-W„UDjÖ².Öžb9²“vgòÐ˨ؕ±ÎPÏ-;òIR”ã…nsLš=SÅÙŠe'oõïÍ,%5 ·,Т°7¡äÚöê»»»¬w$lo’Q}©* º!‹ÙóJ×Mæ—?#ì(¸vJ¬¼uq $ù.鈴ÉiZ-pliÕñ]îàÁ‚©SÛþ^.FŽìл:¬#·®«ä¡H’#9„D¶ÓIÛéœ!¶>ݺµê ýÏ{ÄÅ=sw‡bÕ*ÅÏ>bY ñí«0‡Ÿ6¡V……asiÕª07Ê(,Íð\ÐÇ„šÜ/æ\ÆD3¶½Xú>mŒ•ýl³>ÖsVdAÕfYÒ!íŒé¬St®c„k‡îk×Rfè@-Ö¿l’Ì89{Yt=Ó=q$úìzŒGH¬«wÀêàÒ%‹œmiòôuQG!™ˆ˜ NY$Ö®\‹œ¡2vÛîy>ÒíÈ¿—žÝ&æì&Ç¥h1HËôZ·.ÎÕµÖ‰]Ö=ÛN8†-êã®9'(u}ÏZºn^.°«ÒX‚”ñØÈq§ «4‡Ãۼ·ÕÝ#EKSó·s¡£3vß¾¦Loöç§íT(T­TüÆ8ÕüÚããæV¹µE,`]T@í0’}âÓË·òëj±©²Ú)j ‹[µ0ºcìÓÆØW¼öŸ;‰½i<Úÿ«‘›o¥xDuǺéBáZ$®®K³²Ö«ªÖÚg„χøè·G¨}]7KXŠW†Ð7ÚYÊü·u3á¸Íh<9¸®HÁÁ¯½½þ$ã­] ?ínÛŒWrÇŽ³f)C‰j¥â×Јýáè~ŠŠ²ÃÛ7;¤;óÕ¡l·Tµ®›å}A‘¨ñXº¾Òᙟ ³Â2ÉìgÊ÷§ŸÌùøJ †–t8ËÏß<“¼„†¾ñðX e Ä€–Síù:Hwv:w~¹Y§ŒVßÝüÙ°M›>ü¹òÑHJ+ÿ(›•™¹ISS 
i¯WÊâ÷›xë!5ø§‘ºÌö’‚+KÄŒVz{löóÕCèÎ)`5@ý17[¸cÇêY³šh½õÛ·D»we‡˜K†°T­¸J_˜"’Ä* HæVH ÒÂÿrB"TªBðs f·$É“,gÙS‚¯R3}|N±…’ 2kµ.O`` ¿¿lãåq–ѶKoƒFŠßÍ}Iä"%E@Muà°a0žÄïí½@¹¤#¤Ê1>To¸Ýé%>RÌ7*¬v´1t©ÐÇT˜=Ì‚sïÃÍ$h£Pˆ Þ’dÀ3rø?C |α-¿GÑ“UmcPÅ …‡¡Žc{I’r(ŸV>öx¢{Ì*Ÿö>«ƒþ­j…Œ´S$ñXO¯CÃEL„¬~ãéîÓCµm“åâüùèî=òÚ7àC28ø…··Ÿ¡!”¿† ³‰²4ûÊùö{¤ù÷p*æÁìDÖUîÖ›K®çˆ¤Æ¦‡5kch‡Xå™bžl›Ô=óàBÍ k8wåD†ïÙ¶ŠÐÊÑí3!ü½|q7zï—Ò~§/IÊÕö³ÞϘ±»½š|ÁW33Úñ0+)!þ’‹‹tÛ¶¿3˹sm ò¿ŸàŒÝÞÞP:@üötØ—ß–KµC|b³¢¿g&9ÇQóã–r„|õ²!‰íެyÓiÛΑ ¶Ñ¢YSäsìÌ·eP† »MÕÒÎÑL_{ ±â»G—¥}BhKº¾$ˆ®œmª»öÌu\LFkœD‘œÓñÙ¬8µx¨Ass»–.Â"--g[ZÂýšUü(D´êîœétˆtâØ¬ØŽ$÷Ô¾¿¼Ö¹®íjÒv5Üx?ñ?ñ?ñ?àñƒ•0û”@ù”@ù”@ù”@ù”@ù”åP>åP>åP>åP>åî$00°E§ßÏÏ ¸(¿€ƒƒËMü·o¤€ÜC”@ù”@ù”¯9ÈWQ6+Ø””­/÷h"ÒÿÛ³9&æã»"ñ®ºs]ë)K5ÆY>Ý\¢6j%ƒAþ<(‘/'+~‡AJ½€ÆP>S‚¸Ìt$É YÊyšŽž¢’„Zˆò¶è.‚œlÛèïïÿKyçÚ¼Ô?#rrÔØƒAvþÒŠ[2"6‰flšZ[øä mÒðTÿ¿½¾¶¯oHRìâÅ»’­²Xþj!ä:øYåûS:ÂUþî|âàGI}ƒ~9‹.æ‘jÌÊä±0x—-á{Ô•+_NúåÔ¬G2Ê}éëâÍÂ×OÃ"îŠLfää‰ÿjkéݹ›NÆ¿AúDÄs¤ eš»ÐžöSf„‘…v(h+çÞÉñXÀôUÐC¹é &RùFFj¤ÓÁ‘R{Ë{‘6½à– | I¥Y!G…,sWšy(g‡5Þ Ð%oq¼V‡‡ Ô‡$o"”?v¬%öñK|h"EŽXZR»¼5Ÿ ÄÙFEo«ØU˜Bˆ`ûOƒ[ 41‡`ÅÛh«¸!­AvKÜ÷a½–źtc ­‚Cƒ/ž9L^–#Š\’ŽxqF“ã\¸ÍC9õ¤Bd,R‚eÏrù½ÈY”V “#îprài,‚>ÝX®6ÚÏçpÁ|l€ÊõZˆHå4v›t(u­5C¼×d³îÊ×`ÜXN;^‘ØÔ»=ßÀ6âéÎñÅ;HA”OË^dâ³Ù& ůR„ä°b¥­Mÿæ®÷“h-Ëe©v,´½àÑ»O¹ì]Î)0–ùBv…› 41) ²ðEÚR_ûØó{&^܃}¹ö½ÿÐsì¾xúh ŠúDw7| òTc$ª÷2¬ÂNÄn¿›õ}/À>üøÿÊ>œ;…e¢-6±òûc¦¾ kã_;æF§~øç–LÑJ‘‡û[ÓŽqèNê¸e(_ƒRÂ2øvŸHö™h¤o³´ÙP%Èž¨#S ]e 9zÄ|­3Òâæ›s™ÅÒÃÌõÜÎò´BÒé £w+áš/’}™X?¸¯@#ÒµÏú=éë™no"jݘB/²{7”’‡zÐaTrskýÆ–?ûoûõqv·”“/·Ù¨ÏèÝÊçãï$TŸÄðÃý€¦T>Ñ¿Zо“Œ}'1½-‹žžd~é£Ã$mwÚîVn¨+ýtêÀ/ôŸè Ñ38ý³Ù»‚±^iEûÍD”h:Nlós xÈ`dû_½Õõ¯¼2Ôžzµë“’r³úQD‡ª>¾”ìgÅÓæ¡ï¢ÕÌwBª„?(d™wOï¥Áî³ù4I–8Z Þv*‰åñ,^ˆ ŠHRÙV¹=ÀÞ>p;;´‰²à¶»_í5«Æòògƒ _¹ÃÒÁ¡ Û÷ë‡5%à;ÜT )Qj“ŠÐ9›åÑÑKm˜šVdØõ/üW­rÙ¸xû¨•»ÒÞOï#Iw`‘wL¾æÿ—8B^@.FÕ";Dÿ‘“Q§Þ ïÕ74ÅŽ‰Ïe¤oÆ18F=‡ ^™’­Û"lÜaì.мÊG ÕÖ„Ë[™®G·©v¤Þ‹Ÿ–MT¢Î5; ÿCÌNžBÒT—3‡%ûìΤ¼q=1{¸ŠÜ_=^ãåþ[—ô«ìWÌz_?ëc@™}7öò̽¡º3H0îVtúÿ…Ì@S¡9ûìú·ãDØÊm¶e{^|@â÷5‰¾+‚ÆŸô)åËòîƒeÿÑŒb/ÈÉpÞʘ«ûL¦û‚Z­ÎˆóÔ ³üóîÚ¾åúS–ÊÉmÁþÎ6¡ÑTè2Ž7¼Rê3b)§°÷Tóùþ¥¢åWyxvé é+8}Ò_‘2Í[‰¢À€…“ܡͩ|å£Ú雨§© í/)Ö.c¡ž~;Fù$)Š ”*½KWS•}ø)ÏþÓŠ¿÷â슘®c%-íÿn )UÑ}T…$Ÿ”?6ÆßÛAjóÒG>j²Då‘ËõÈ 4!Ö¾‡­}kÞ5=üÖôðjžÃ*^'1“Ø›¦²%e 
ƒ±„íïë……û®`0ÊE¨ã %ÅXîê&óÏ’ó/“mr°ìu–ºÃ•)½oåºuÖ“%à= åC‰_o}J«¼â¥Ê`¬u®)Èò«¼æµÔ[ð[dGÕh¤'Mµ[8¸Úl[Í“Vö€hGµÿHsû–){E÷·õšC½z U^c“§¿ò¸ê¡e½&ÚcˆMèYÚ˜`0¨W +9"¥ç*…G>t_ KïS‘.ðãù• úãé1_Ha࢔}«ªÚ”ß/É)ýMOv꨻öëÕÃÆïTÈ*†Ì8Ï!óCYž+’Bf5x¦rr²„åU\Üó¦–õ`jÐwÒž¿þ¬e4ZK¾¢cHNæåô‡¯;ŠÉlLÛ3Ì!á¬÷§÷ϳ>‘•»‰¾ÎúP"®,úy_üEñ®}†ëbû vß~þΪ£WÄVy˜9P+¥Eøç -{Ô“CúÉõ«¯Û©P–ßZ­¡™œƒ7œbÞ-6‘Ì<¹tˆKž9¦DoAßR䔜<¾~Ä(ViÌeôÇŒ [¥ˆ ÏhÏi%Ʋ|ˆx+'ÛùZJœ¾¡íÏžŒ¶Œ1î[Ï7F^ÙYàÌ]m•vÞv Âa"bÖ¦kJ —_Ñ·çt†Ðž³V_[>µmW» åˆÜòä±/МÊg±Æ¬„øöU˜ÚèЇú˜æuìÄ—Ï„Œ’†»ï²ÁzЬ÷Q’L:¾-êé뢎B2œ:Mdõð¶ÝCjZEX»‡‰vQÛ¾?V·ókµnØÒûÆ:G[5|lT˜ÿº¨Kˆø2b†w óz ºcÉ»¾c¬ì_!¬ç¬ð±¾*,¬Byú¶ñ RlBE¨ y·–Ùô i…óûˆ 5@)©>š‹Ž¦,½ËUÁ7xø]'R‹¯Ëý¶^øì°ú«ùѶlzU·)ù˜ ªXö\Ë7–½¸ÑÆfÙp÷¹¤Ì‹]¸2[ù]Ø0gPzÌüîÆ:‹§(7𪦮%¬S>ÉõˆƒÌt=¨Ü F<û‘Çë}í)ÿíª‘ÑßÔmæ¡®újû¬bœXt`žZÕÄ€Ñ3üD•7Eϵµ•ïÜÉ™ƒ¦1»PMÃÌá‰Pû£‚ªRêrÄ3Oµ¨-ØÕP™ÞÕ^?Û+½vòSóñ³Þjå… 3}xtXk ÕÒóþËÍ©[ŒeïN.)Y–#§¢¢ 7Á8xÂ¥[Ä…çt©ó³"&öág>Ü©Ç;–=*0A™˜Ú#ÿÁwÖOŸ(WÁ9¹Æjág¿:—ç®¶J‡=±ìùïL5í¥Š^ÖTST¾Õt)„)ÙèÏØ@U4Oý!#H¡¯dyAeÎKP$'ÛQALm.P¾1NUÙg»â56mÿÕ¸Kw´söhçò-%77·*lÝmÝj’É>ñ‰éìM7·Vrór™ý%zMChúÅówç÷aÙa‘ FSMjër}þ“»åæ~áã£:Ä‹(ŽÃ¿µÄí˜Õ[H€ <È)šáH:cB¦¸2SFù¤ †î3ç\ZÔ™¢«4³ª³Þ©Ñ—D¬‚IT>âø¶d3Ä4!þ‘ãàjQ|®Û†!ĪÌDZ¯­ýklèn„vÕ<̼–Áv"¥s¯_Rè?pâPÖŒkƒ¬ü£7Ô}?5*C¢ç¸òëLÝAÿ3’XÔø”:kMˆ :õ8z®ï’„e»—Å‘Ûåˆu©…ž†Ô믕R‘´&Z•v¡EÛŸ#Íé§™ÆnÊVCÇæ¯tªŽÃ¨ÙæB‡ÔTS«_Šç'gSÅo_]ÑÖw³-,(­ñ”Ä[ÊÎf¾Û‘BŒGgjœ¬hÎÖN I-¿š½M+=zHJåVÏÔd7%a?ýX[œ‚½<ò¡MkcŸä7•EÐ-ê54‘ÝÄDñˆz5xŽðã¯b£³ñéÓG©Ç[G*1[]7ž«C=Pd‰Yn.NÑg™í´ŒÚã³ ÿü5Úež£Ÿꃆ¡Ð«ØñKÃ̪¼u5e=µ³R#VÌ‹Š ”‹ ¬Õø ÅÜ ÐZOýhO¦€ÅdY›°¦¶éÕõÐÖc‹"¼h÷µpc9Ž¢Wï"C ìf=£ÚsL<ÒÊ€•¤Î}¸¡Òéc½Õ\SÈEÕ/Å»gyœ1X¤¦iL¯ñ”ßî㟉*•¾æÜ)è€òñÔðþG•êBÞôzWi°sñ÷’×—<°ì­?óÖZK q|#¡èT«Õ•|fû„öFêĵW$s¢ñœCÖš‚7ëOÐNÁöXÿøŒï~fÛÜw„9³Úꊢ¨Ž}¿4Ìœh¸u‹äU ‚£Òƒóãä4¬ÓkŸ…Âã0‰/nνóžöæ &©^ßô,d¬0gCâ×âBv`ö mä×óóo³™Ÿ¹®ÎÖTSª_ «: g%˜}wÏä‘Zð´åk 0?ÚzOwòd2Äÿh=ŽK`?éðO_C5Æ]j®GZöлÔf/wj ¸4–é–u4’õê}fOô-é ÞÃŒ†Îf0fc™ÜpîÕä(åä!ö 6xºŒY*/Ò¸_Ù5§Ÿ>4´š7ÿ²m——Qy >x¨XLj¡z¦”VzôÔ‹:†™·¾v‚Ÿã-GDUþú!›t+'ˆ %9ÞÚÜeíÍJ Y½/xûVe ³¸«¤=o]ÈXªëõö¸ûúÌ)³÷­d¿4Íb ÆÒÕý<êHjôº}!£©žY™QAt¡®&>DTKæ.Žéâ~J5EðõÕê—beÊ4´òêÃ2D/ñ+š¦×µ ²5\IÔq þùgÏ­½3uè*¼+þíôQƒá 
ÊÇ›8&äml¢É›êúébJõê÷äç_¶å亯߰hÁ|ê{êbÉbIs´òœ¡Í«^Ek#¨³©áƒO›fMðš8n«f›ôíYÍŒBoŽÄ…½™jç8¼wLÀBìãn.+cþflg\9ƒÖì9±Ð.¡îáòÙØâ®‰Öîö>ˆ2P>=Ú­6púd— SŒœ÷³¾ó‘”iÛ­·2ê¤ïo˜ÐëÓým)õ>EmÃÌ/Á9³„»©–=O½˜ž)¯eb¬Y±–HÍ Uí¡Ê|í¸xämÑiÅþúZªXÚs²_°û¬R_eT%™¶Ÿçd!ayyæÆ‰ýŸQ5] =59vH|TìþíªæÓ´$ùJ?<Ü{⊸¼æ(3Í?¹#Á¯žGÉÊã2  ia®Óõ\ô6fWÃiø^È ™ŽULN Ï„Þ«ÖP3¸u@| £¶EÉ] rM¿¸ó"BN{X ›ñ^ rƒº}º¼6u›Žß½ÐÄ5 /¹þ–ë݆/XH »XîéT[Jv:t¹-`¨ý+}¡ûT¹Ujø¦?C„RÂýÚÝ;ì›ò+ZGÖPS†Õt)ødrÔpvŠXK‰9êÚ_0¿†+‰ßö PÀÝqÞ£ÔŠ}WnÀolÓGÝ„'$(Ï‚_±¯‰Ü}äÃAŸ•çÏOû ÈšøûV¼/c·¬Aù°H½°‹>â+BcY:nŒ¤:O öòL>$ê±ålÖë.Tóà·‡Þ~a³ßÎw¿²µ£ß®Ó‰*ƒŒË"ƒp%Ó´;rWÿ\àš­'Në=Ëïðò@AJj¥qJB½­Ž'—°vIÿgLãÌ™ñêËxó8Kž§UŸ‰ÖØÔ£mÜ>mçýz²­µ©+G!Y"çªÇìoO}PI}ƒ*Ù.ýþªâåcÀkfþ{ buLeu%¥»Ö²ðP =T‰‡ô¼-tïS*äëFFêtWé³ÝFFÓÑÐŒ©9ôÔ,Cîmúû¡B?ª»ÿ Cºû+b/ND§ÄéðÑô`ýé¡©h£ì9xoÈ®8þä“[vÊyõ¼ßTïmóiucNŒ-f~íhŒÏ?›®e\Ë%¶¬í.M¢}yo‚|=N?$޵ߙ´d-YAºó ƒ\çm•U@øo¹ì8Š~aOFö{‡)g.%Ìr‹`É)ä°F¥\ÒúNÙà3’š~XÀó#Z+ýö^yÕa]nµú%,—v/Û”«§ íÆYn=x·ŽJG9LËkYcM©ñRP÷+6Òk_B’¥÷¦øj»’Ì—àQ÷;¹¯<_Ð9(üüôñ`ð5¾òmóúgãêìlìTqñsZèÆ3CIV™¾”©cJ’—¸6‘úCíñ¿ªmVM+z²8:sŒ£þòMí/»è¿ì8Ã+ÿeø¯òH:¨‡Ðsì ‘ËF:°¼çÛ³"‘P5_¿Í¼zJ<‚ã[æ97O×Ôu¢FÏÈÊžô‡;Î!Ûúîwå9Ç€×8Ìüϳu`êßn» Ñ®×5.<$ZCU[ųW dœ}ÎJÀÖï¾9«Wìªé”÷™Yͤ۫ýl{™Èjà¢Ë`0‹.sœÜ¾ì²)Ý™˜„1b\™ŠÊ‡p$ý5!âêfÐh„3þWÝ_¾ÿÄèÃY¥q~ùµn'í²§z`üvã[½, t‰ŒJ¬Ò²=Ÿcr=kgöš.úV‹ÏXU-· ‹GHTEýšo^G¥«Tûj«)µ\ kûkûz\I„d4ÆÅŠÕÊ—±c–ÞìÙáËÜñ¿q«“ã<þâÌwàÉ[Ú Ju3½£x]£ª“—#·}T\ 1Ûi«ôP-}™NÛ‚Aá SÇ?½oú UKCЬ] O Ð‹/=u)Fèy,5#¼<‰r2ð.ŸÆ¥5àPlê”ETÈ[]Ù©ú{)ËmäŠ"Àm€º”¯4o?[öl=üg›§Çï¶s§æ?ìi”0´‚«ÇuØl{3u„ëÐ,¼Ê-ï‹HR·@ˆ¿–…‡jì¡ÚeRî]I6Ú¾.¾.ˆË8kEø*3DlÄÛcÔä°-{àNÙU·cÈdKÕŒ´8 (Ìê4É9\Ó¤¿û¼ÍH±ôôÐà^å3P˜B;ž’¤"Ó¡Ñ3ÄÖ-„ž–Ìr`0yÏ›z» °™X1I´ò0×ìSa´›é—òu™!{²ª]Q… rX›åo~K$â¿Eä!‚5ûбóÁËL‰òM¥Ñ›rŽÎ¥Ý6-0sªXöÏfÅ©¨ÅÃ*ÇS"DTÔóŠ…_#䆔{[F çµÛ)() “Ö7ßÕ/ŸEš9«¨­¬qá!¦Uí¡zÒ•OB+îUP‹_Æwï7‚9?È„\’¯Pë§ãÛ4ùn¥ZÌÖ¯f5ˆKßgh݉:|>ͱ5H±ôã]n¸H22] ¤Ü«|¬cZ‹«¼´U,'‹t‰ÊSK£œÓë"‰ž®š†Cö0‰•çó̦×UgÏ$Ï–=LRÄ"¢"èÓcó®ÏÕDïN-,—==”Ó½d¸îàwŽx*½Þ†ád ~‘.­=”ï| Ð`äý;ºt%5¡bШ¿sÁôu”wÕ…‡¤^ͪÞCu[þOP‚š\¦‡ei#4ÁÚ Å Š&ø„øšÍR@ÈZžª¶Îò(b×°Ò êŒÉ©‚ýϼþUÆÀ1û(FÞT´&}©Å-?”"˜óå«ÅŸUŒG¨ýØ—,Ù½‰dZc×Ö1p;†PZÂKdÑ¥ŠXk„5½V-§<· ÍHX»<ìx’[Ža©²  
~‡"4’y2súÔºðPõª•×bÃÙ+µzÆOãùŸr¥ÍõÿÀm€:”ÕKâæÍ'hR÷CdÝ`©†t…Ÿýçõ늉yŒ•§ÔõR¬#5ª8ãQV«&éŸ!&>?™U¡Ÿ2«Ûf_stŠXJQ%UZÒÚÖ ]ŽãŠÛ°m[`cŸ"))ÉØØ˜ ‹`›¶ 3ë<ù?’hC@•nÅrÏž=vvv­$³˜ëׯ÷ïßJ÷+ëS@Æšá(´Òƒ‰^{6âdÆÔ~Ød¢D¥!QÖNV©ŒL#¶"²d¯ëò9õ埣¿ÌOx÷ìyŸ(î=ÿ&8 A§OŸn׎»æ.hÀ¼««÷rvvvrrj®„•ðÖ£¡Q‹e@@@Ó{nÈlvvöæÍ›ãããAo¸_ùPæÆÑšNǘOÌ$y’ö ›Ö6ôÖ~â´Ã!jhÑ Y±9”ÝÛÁzè8Oî×h f‰«î[æß»9õ>rԤѾITŽ. j¥\TÜzš:?}ú„GŽyêÏöêyøð¡——WC)߯ë¥Õyyyü»bÅŠ%K–´†üêéé}ÿþ$ÉÊýû®T>yGõœfÏøê7Œ–:„¶!ôtçxbg=•´ôl–Yhшý–YíŸkÿꓦ›ž˜Áò¾¿ÈÛ‹ ©k¥.yÛSËÍÄ++«¸¸¸Ös›ûô¡sÉÌÌäílòóóC•æ~^¼x—-[ÖJ”~ïœ;wî–-[àîs»òaÒH²Êp=ÊØšúOz´7íÞJ’²ãcópX–ì9’I•îî—ò™_Ó>|¯´Y>³"M•ö¢o•·_0£zš°XÉ‚š=){¦$¹ ¨.!«}ØÊÇ>®¤ÒâmÒO–+YPv*SöTÖg»S³—%òüm~òä ýÄÉÍÍUPPའҟ0 ðÃMWÚ©÷˦y«ÊïÞ½¬ççþýûAùZ†òa&Dãu8”PË.Α´ŽÖ¾)V%p›ŠÃWäŠÚöV9ÐØ?“ähÀW¾„$+½fºµ²û=hРììlÞËWrr2íX¿~}“)_£~ÊËËSWW/**â±;¥­ÍzTZZzúôéaÆñv›1cíhÓ¦ èM‹Q>€°´´¬°ªKJx;³?~üàŒôíÛWLLŒ÷n<¶{Ò¤IØLçíÉÎ/ÎiXX˜›[k{Ù嚉„„¶ûŋǎ=z4/ep„ lwqq±½½}dddKÏÔ»wïðïáÇÇãú'NœÈ¹É{mæÍ›Ç¹¹dÉP>P> y°±±ùüù3/åèСCœ›GméÊÇ`°¾OϘ1ƒ—”ïàÁƒœ›eee·nÝÒÑÑáÕº¶yófÎMž4âAùndåÊ•U|Ú·oÏÛY.,,ÄTn¹øKèêêÒŽŽygå º‹cÆÿøñãVR_¾|Ù>m‚òÍϘ1cž>>~~~¬yØ œþ¼™™YKÏ~GáÜ´··ç åÃSóšæææšššÞ¾}›çkŸµµ5»4G |@S£®®ÎóyÄÆä¢Êw/^jðd[B¼—©ºQVV†G(Ð ðññ=~ü¸G<œGº?dKçõëל› #&&fÒ¤I Ùøûï¿y{0;e÷l¾É“'çååݾ}›&™«?œ d |@3°ÿ~???^Í––(7CÏ]yÿþýÖ£|Ÿ?ååš™½{÷ò°òõë×”k‰Ýµkje½üwîÜéââOP> 9aÏ“óRvx¬oçìÙ³éµ ZU/ÿ£G‚òòÍÿôááÜuêÔéÊ•+ýõØ|ÜFqqqbb"ínUÊwãÆ xì€òÍÌ¢E‹ ylqZNþûï?žQ>^²ùTTTØó ðññµž7vìXxì€òÍ ??¿““O΃Lsþüù%K–ðF^JJJx&#QQQìMž\!¹ŽwMxì€òÍOLL +/5.ñŒÍ'--]PPÀÞä±Ï±u°yóæ¹sçÂ3”€—ÐÆ…—†ôñ†ò¥¦¦>yò„ÓgäÈ‘­¤®ƒòò\ÁâÅ‹Oœ8Á«O^ú¬òíÛ7È…§§gRR{333SWW·•Ô5UUUxà€òÜÂôéÓylrg6ÿgï<à©üÂ8þÞD\…ì‘M)+#d¥•’TZhIš’–æ_)%)¤H*R4¨(™ MQHd”‘‘ÑýŸ{_Ýl×]Î÷#½Þ{Þ÷>ïyÏ9¿óœikk }>ÚADD¤ÝzÍW¯^=|øð`Èe âráÂXÚ@åƒÐ <ÁHHHÈßßÕªUÐç£:...YYYíNFDD å³¶¶ …¥ T>­°o߾ݻw3jtæÌÆP¾ß¿Ó¯ñ/_¾TVVfggow¾²²r䲎ªÊ¡2ááጪ|?þdŒ¡ßY aaa‡êø‘¥¥å`È_………qqq°œÊ¡-^¿~ Ä!§Ùw–1ƒÁУÙùùù>>>Ö«êëë÷îÝ;ò—ŠŠJII ,g òAhìÅêààÀÊgcc5sæLzVVVº³8:ñññ]5'€JÉñãÇCþrww‡… T>-òåË—={ötÚ$Eï¬Y³¦  €ÞŸ‹ÅÒ—Ák×®]²dÉþýû» àëë;”o̘1?~„% T>B×c(º>ÀðáÃéÈZ~~þ¢¢"&&¦nÂlÙ²…áóT]]Ý©S§`Ù•B»;vŒ››ûÇ ö\>>>7oÞ477§ë§ —…ÅCBB._¾Üc·VDD„³³3Ãç)ÆËPPù ŒÈ¥ZZZÏž=c°çZ¹r%½+¨”и…õõõ‚‚‚……… .ì1ð²eË~JÊ+ ìAåƒÐ<ذaƒ——#=ÔË—/¿}û&,,L¿ÀÏÏO³¶áp8 à[·^Šº{Ð 
Ù䬬¬`y•Bprrnß¾ÝÔÔôöíÛ óP \æââ"½\¦A€;EƒV(UTT<~üx^^éWM˜0±÷h­®®¾uëV7£{ Pù 4‡˜˜ØåË— iú-ðùnܸ1oÞ<:µ_\\œ¦ì¹yóæªU«ÒÒÒú0nvåÊ• œ}@m`õêÕׯ_‡% T>ÁÁÁd‘¼`±Ø‹/Ò¯òÉÈÈЂ555:::ÊÊÊ}ë:åççgàiݹ¹¹ÇŽƒ²•BÇÙÓ××?{ö¬¼¼<<ÎÝ»wyxxètcŠqãÆQñÛ›››çÏŸŠõ{÷î½|ù²Ï÷Õ©W¯^1j~ñððºò ,: òA蛄„PÒ1LÇ ð6¬­­/]ºDw–‹ŠŠ–––òññQòKëëë-ZôñãÇ›7o†‡‡÷ÿ†ˆeÈœj‡ñññ¼¼¼°Ð€ÊaÆdOVVöÑ£Gbbbtý,LLL7nLNNÖÑÑ¡;ãÁ[ Ì¬üŠŠ ssóêêj x亭  `qq1ãej$eee°¬€Êa4²³³`ØÚÚÞ¿Ÿ®ø¯«W¯¦GåKKKPå+//Ÿ3g¨ܺu‹ìžÙµk×r§žÔÔT///({Pù P {ôÛUFÄÏÏTÒKKKéËì”””¸mss3<àçݽ{7))i€Œîã¢E‹,GìÙ³ÇØØ˜áç&Båƒ@ðžðüÖ¯_¯¬¬L¿OdoöìÙwîÜ¡#›ß¿OÞž:uêèÑ£ 64o@-———÷îƒe yW¯^¥ýµu Pù äÁ××hFrròºuëè÷)–,YR__OG»ÿ|ýú•\·RWWWSSóññ±··h³›šš\\\, èëëGGGÓãÖQ¨|¾¦ŒŒ à4P è /^¼mÛ6†IÞ 6B•B6èwÃúúzº°³¬¬¬›í2hj'Ḹ8CCCÆHØ7n<}ú4ÌàPù N033›7oÞ7èÎòõë×Ó…@غR>QQQšjŽ;þ¼c$숈¨|Pù .¡ÓÙNk×®¥‹±©]i›››­¿`¤éÞ~~~0kCåƒ@ºäÎ;=öEÑ ÌÌÌÛ·o§q;+++;žÄápuuuììì4e*…·8\\\vîÜ ³6T>¤K„„„ÌÍÍoÞ¼Iw–‡……ѾòUWWw<ÉÍÍMƒóÌŒŒŒ#I=z*T>¤ž={Ff¿}û–öüõëW»3·nÝ¢ü"Ô¤0}útÆHÏ #á¨|äÀôh¶††íYSSÓîÌþýûçÌ™CkvÖ××Ϙ1ƒ1Òó‘#G`¦†Êô€MRR’®®.}™=eÊÚ7²®®®õŸ®®®/_¾¤A;cbbfϞ͉ùüùó =ÃL •é™Ã‡ÓÝÐ>cccÚ7²¶¶¶õŸ×®]Û±c Ú ª>Œ¡|¾¾¾Pù òA ¤|tg³¦¦fyy9-ÙzÆ}dddzz:mÚI³†õ–OŸ>Áì •! 
Ú_ ¬SâââæÍ›GËþþý›x¼hÑ¢N‡zÒYYYŒ‘’¦·•2à¬Y³†Í~öì+_CCñ8$$„fí,**bŒ”¼xñb˜¡òA $¡­­ýâÅ‹ &ЗÙoÞ¼¡q ‰>ßêÕ«iyazܲ£#ååå³fÍ‚Ù*B*W®\¡;åËÎΦq Ñ@$!!ÁiøÆ¶¶¶0/Cåƒ@H%::šîl¦Í)á­!¶vúûûÓ²’’’ †cbb òAåƒ@zÁçÏŸéÎf...ºðùN:eoo•o IKKƒ*Ò ÔÔÔèÎfQQQ·°¹¹!¬’CãÊ'##Ãi¸´´fd¨|H/ »N>„°Ü6]ø|´¿Ðí×!HAZZfd¨|H/˜4iÝÙ,..Nãþùó!¬Õ c’())ÁŒ •éÚÚÚtgó¨Q£h_ù"##MLLhÙHGû1I 0#Cåƒ@z_vv¶¬¬,ÙLû³Ð}||h\ùŠŠŠ„…… +**ÂŒ •éiiiô¥|bbb´ïó=zôˆÆ,..f å?~<ÌÅPù Þñþý{ºóSiÜÂææfyyy7²¤¤„RoUUíx‚@åƒÐ´¿$J;hßç£ñ¦N„°è¤Þ?ÒãødT>•ÉÏϧ/ƒ¹¹¹SÅÄÄDËFÒþ¾w?þd€Ôûùóg¨|Pù ^C­^eee´ìù544¨««ÓxÒìÞI½‚a¶›€@åƒPZEèÎfà¯Ð²òÑ…¨TUU1@êýöíÌÂPù ¾¨ÝÙLã¥vë=Ùi–ÚÚZH½ŒÑ[ Ê¡4tgó¯_¿hÙ¼Ö{²Ó,uuu°Þʤ >œîl®©©¡eóZïÉ•o@©¬¬„Y*ÒkØÙÙéÎf/µ›ššh?éBžé½Ê¡QØØØèÎf/µé¢µ“¸q<]C]ª¨|¨|Œ/-ÌÌÌPù`J€@åƒÐ.,,,tg3—ÚC† ¡ý8D·Ï¥w£Í•Bñô4”þRºT>ŽÃAõFV¾3óÇ_øÌ”^ª€Ë„oÓ \Þa0ÚC•2h”¯*ÃiÐé'ñ_¬õûµGsÆI#U‡˜®2UCÉ«ôtðÿˆvç+’ÔÝ öÅ—9ëóÏ×¾÷dg?]65y¶¾óÓ–ûàpÿzQН`„–¢‡â ¹`ò虼¢²éé_?ªüQ†Á1VAé˜Ç©ßµ¿˜‡± ŒSTTT“CS765Å=Kÿ”õ¡¼èëPæa¬X¶ŸUÇO®©ªàÉ#&%;I{5ß}v~QÆË×Å_¿Tþ(2„IYYùð‘£@Gò ˆKJëMPáàAõh¬¨©M~žžÿùÓÏÒï¸?ÍBÂ"‡ýÓÔÌÁÍ#8J\m¼¢Œ¸(í§Þïå•ÏÒ3¾~É­(ûþœ ©yøÈPàppó‰ˆKj¨(Š ÒZ ãÅ›¬÷™oK‹ k*2³ £ÆÆ†a¬l¼B£F‘ÓVU¤ñEhBùºîÓY6I"'ºÌyOŸï-©£‡ 1½½ŠKg‚à•ïÀ$Gg\ñ¼óLû–ƒi<ÏÓÐ9gÒ’s(¦œh¤Çª¥$<OøýØ÷/_ŒÓœ$1VYFMüt2îÍç·OcY˜‡Y/µ`¥^/`î·Òðë×Fò ÕšÄ'£~º YPÄÅ$~x‘¨¥?ÅPW‹2æ5ãpWoÞ)Êÿ,¯9IXjÌh }ðƒ~d²rsë95rŸó’Ô&êMŸ¬Káh¼Ÿø<-ñ‘¬ÊDiå "ãÔÀO§Á~!ȃ”·oŸÅñ /?‡ÖÊâ[ãß¾x2NCObœŠ¸¢&øi‰ê¶ÁÊäurú»gñ£ÇXÌžIEƒ‹~TÝ eba;A‹_L™¿ËM%Sò~||‘ø=?wî¼£¥ÄaI50Ê÷—#iuNª¬-uê²§,|Úxí1šçŒ‹CO–f¥»{Ÿ+úQ¯9uñzë­¯}—tmÿÉ0,·Ìñ3.o¾,jhÀ03ÑC0íR#éê%$9ÖÉÙ©›ÑÇ'ô-‰x·Aþ)ŸÛÂã\Z—!Rx#ÿ ä–(DZÉÉÁ+"LÒÐØäíë'9NErüDðCR]„OP×t 8HÏÿñøzÀRK+ QŠnƒ“üâmZо¹µáB/‘VT?à ôÞãŠâ¯¶+¬μâU|½5¦™)èLSÐé9<3„h^ø£¤¯Y™ëlW ´´aö¹È+,>ZEËDV‰”KĤÁ8Hx—÷$2ÄÖv-•[Qê›ÎùøJÈ«H(hˆ+hr‰ˆôXð¼x—¹iãFv,EG/'¤¾yÿhò‚eºfKI φ>~¾€­EßÀ+üü|f3¦Á‚k ”¯þW ‚´(3ïDPNáã[Ê &÷oÈ    ËŸ8š æc07ˆù¹þ=œ„ÃÅq©¼·–•¥Z‡t;°«cîÆm!´Ážy[¿^`Uñô£È¸­Ë·\¼ÂýÈz|ûg^ò ìuŽ÷ù‹²ã5 æ-ïÛåØáœ&+ìË›š‚ºlqØÌÆÊ:ÐgæÞ»nh±j²´Bßî z¼‹ðøYMyÑ’ùsÉk^S3î˜û±Ió–Í\nßÇvy5ðó0íýç7©v«–P4ú_—Õ6YØ·ËGò ™¬ØüùW¯¯ËŽm[©5êô9?à0Ì_Ñ·ËD¥f,Ûø²àgâ-ïÛ)`pvþ÷ˆ›×¦-²5Z*Ó·;hãSìÅ›2bâzÕ` F~åcei=å¨$诀Ï]ŒPÙK(iÔ㪆Á¤ƒÌ€±ÀáB‘ò;-b&jË Ý9}W؃¢î°ªÿ¬©AÐ 6Ä£ìpgAÈ…®º´†cúÿ#çõߎ‚‹–êÒÌ6ûo«ª"9À˜éõ[TY#ìà‹’ 
×?{-€É¢Yùßããbµg-êÿ­˜†ú—ü>¯8ë­åÂyg³û©Óz³—Ùëÿ­ÄÇ*#ˆòI/ïÕ+W«Êëaâ ¦ý¿ê]ù]º:ÓÈPDŸŒqXðýç­Ûẳ÷ÿV,ÃØÀ{ðüuCEÉ\“é”L½¯sòSSRôÌ,û+vΑӭׅ=ˆççdÓ×Ò8›Ÿ:­=k!½þßJeÒ îÏ×];wÀ¢ŒÌÊ·k"gG/,²$üÖrBûêì€ìÿÒšž`†j#ÈõF ßµ YCŸiár¡këalÌ\Xf‹? uX×ÒÚVp¶³šöOÄa)rA¼ò?•õ÷Ú†*ñÞ?QËù©ãì][ OÌõ-§=æë{ÁTц‹WÃ$Ô&™‘ñž¼Bbàg€rcm}ãYŸsSInÛ$à.Ä¥¿Çb'ëhöóV®ÇNL]¼š‰¬ÞxAÙåßcbC—/± Ë ¯FDòŠJ“EöˆˆHÅáÆP²>w1HaâdÕÉäÜÔ^FY£©±ñ˜ÇÉm›ÉnpMÝï³>>S­&ã=1˜!3—o 0˜¨!.* Ë4dàf5|ø3²M Ä`εyÁ’ó¾=Æþ=¹]qËèän9_¾¶ˆaO!Qôþ‹@œñ;®…|nZÈó=y׫}MSó¿ ˆóX䛂´4_èÁùm9åí£k¶”iÈ€t#ÜxÌãÔ6{2Þ³èGÕ½ûц䖽–‚[fl]MõÕ‹çõ±€ÃᎺºö¹y³{¸x”'›œòò¶ß°®Ÿ·òöÐ06:”üËÇ ¥°‹Û±Û· tê~ÿÔI C™™§-^sÄÅe×Îd¼í·²Ê’Wöˆ(ëg}ý\\Vª©¢ K6ò”ôÛcÊ\§ò ­æµ•=€š­m«†æÚº’F„•ä±Ù„ìa;~UIGEKÎ`dÝ g¤;«êÈ¡ÿu&Ô@µNÂÑFöΜÔ×^=™¶Øöø©ÓŽöÉr·òêÚß³3–o8ƒÙØGÈjL ¿³pîì>\~àÀ9k¶œy@W&Í_ááéå°iCŸoâåëJ8¦[op=æ¾cÛÖû ggçjBÕÍžŒþki寘¸Ç*8ˆ”o”dEù÷ÔWoÕ•Á ¹úùþVi‹n`„ð7…¸·ûZôññiwUÌ__¯ñ¯3וÇÒ1ä—Lº»…Ê äÙf§gø?W_‰í4X¸½âÜSovýŒ?³G ñ¾¦;׊_ü3²x M×Ãn¨ìëûbJš ORôµ{×Ó\„.‹Q ¬ð ^µli®½¡5“]ÝÆVë®Ùˆ7e¢ø¯G]Ýœvô÷»ê#nÝ!oŸBW ß jò ¾ òfOr·î šÛ"ˆ/8x·?¢pŸ™òpûÄ©nO$ò|z™*¯Ç|ñ-7òÑfÛ#7„á‡Z±(mÁ½>ñèd—o½UH'Üë£Q.=²ã›|AN€t|Í™ÅÏ€1;~9¥Þr,‚@P^çäQÓ FÎN=€~º)÷ãîÔŠØ©çWÚ„ÔÕד>B?…RæÏo¬ÎÔÌìœq²Ò½ºðC~‘ØØñÑÈÙ•lxóÛ`GæÆ½„Ô·ÓÌ(–Yf,ÛxõFøâyýú{ÒÃ,ÃHA@T*öqÔŠÅóA ùûµ|p ¾¼8wÈ›¡ëÄ ßºZ˜ÀÄ—ß̲Ë„0ÈøãÑÍ=[…tÁ`ZÏÉënÔíD×ЮˬÌô· VÞ ^‹ãŽÃ¥¥¦ªLšAÉ/Õ›kòòµÆx¥¾]îJ1]A­èíãGb;íû¼oc5õ)iv8ÇËgi½R>ðÞâ'LCI;5fÌ{÷![^N–\7®qiéèQÒ”| !9•ïe?xGöíroÿŠÉŠÊ䙃|´gÿ”UæÄ ¼_e ×z­æ‘>‘ñ{<&6é‰ 9êyTà×Sà ÏxÓs§þöÊlç]ìÏ^O””?ãw)|‡Ùi¶DC ¾T“àÛX_ÇÑu®/RƒÀ’Y›T%5yå‰3êq¼-5Dßã©¿0ê-aŽ9êñÒƒx鲓^Þ“¬¤tBdf~ñêMß”¯¾±‰o”å#Êp¡MØ­Èùsz76@úaœÖd7wí[H ú¬Ÿ¾¹…dÆHFå;ò6eU!4!\<Ó·nËêºßÒJ(Ÿz­×æ¿~ú|"ä«Ñ3mG·í¦Õœ¹,ræ²Noa³ó” iãڄœ³A¾Ç xÕ:™¿É§¹È¡Õ¸t½Õ[Z/½åää€ bÊÕ¨šRå«'›÷­wÊãøq tïuÞdÀÎÑc˜ÐÈhÊËÞ_OziyE)ËVÖÔ*êN¥Š‘:³_ºf½h~ÿo•UPd¸È†*Oalµ>&.išA¯×“;}Òcæ *Œ0`”ö*BÀÏûÜtëuÔúvö‘¼½½¤¦®A˄ҋ|+ÈÅ/Š(© x%tY·Sè~””P+>ÙØGøœ=µË©çª¥ÏY#K;jÙÉÄ2Œ,÷¹~9ÀdÅfj=EVÖÇÞ*_Yå/ƒùË©e°ÚÔ9’¦êë"ƒÚR¾Ê|ôÿ_P¨B3§nhBEä4ôoÜŽšgÚ‹Ý®Û<AüóIQ퀯«ˆ©0Ñ›QT‹þ5lDw³s$=ךAÍ¡zfK»ßؼwe}j®î(¯=õöý‡¦Óûåt–WÕP¾•¾ójº¤·í‡~ç¼)ß6ÛV­³ òQŸÿ¾àþƒúC=‚BnŽ7˜I]оö*<=¯ô:!üVV{…°ÚBG2ŠvÕÕÖ²a ½É5ÌìøRS“—ó 
Ë#Âlj?ÈûÉ-=nÄ_Qø–›Y‡pIKµŒö®«mdc©ÉùôMLn¤¾<«°´Izl›%ðå&7xVÕþVÒ¥~ê’KzƒgZÒc“ÑÊ´ãaqG9å‹8äàYÿôtüÌrýóö¹6T‚zC;$§¿å‘¡KÞ½L#Qù†aÙGÈ/Î(Â}›tÝjU€óƒŒb·öEÿ†;È!>!ü–Å^ÏËuCQAÉl r:73é’þLE¨e%áÜR³Ñàÿc~™¤AF‰p¥@8b†)UD›=·¦ì«ohè¸ïîËÔ§Óh@ù@ÑöøÉ³©zïþ÷$ýH³ßB}<ù&.¬ÄC.“Jß&%½ù¤4m©4/sô•€–ásçÏú¼ÏÊŸ(­¢C ©÷ir²ºò¯5ØîeRr4-,«ª •o€ÈÅ`Úd­„{7¦Þ» Ëpw…EüÕÏïU‘¼g¤îaâJ¡rñöõK-Ê+¦æÛ·&a!ÎÖçF‘\"ŒâoÞ4ÍÜ3æ&^Þtd|P‘h30õG!‚—#;Úø™óÀu¾&OF«Æ½Âׯ12ü£G’ð¸ãñotOõËξ¯±Ÿe0„õÓq|ÄA.xýHIŸÒawfòìA__^ØÈ&2Û—ûòeWÊ÷ÿÞ[²§©¶ ÕGcÍO]9ƒŸqô¿ªöê“•H*"ë¿>U-£9¶ëìYj4m9øß)ÚÜA‘“ðR@ù„Ä${ïëÅ`8Ú›¿?b·µòÑï†zùµeË–=)Ÿ¤üxê˜Ø6Óqñ }øôENF*™±þ+{ñYÖú²…Z#G¥N$Vè xóSÒï¶T÷0þm±³C­"/%_¿´;“~bªcïˆ.9¿Í’ÌSn«3N›ÌxœQÔf·EaI’¸±©ITmQâ[[• æþ>;« ß9Ø®G&¹!8|sáÖ‹ñL/ kpÌ{…°wÇ­Š vz“öOÐTŽõZ:ß!dÖÉ7F3õwŒ6Ž\”&‡y;β«µW²sšŠìЯ¹M£“å:s²³:*Ÿ d´“÷Ënð-ìf<ê9Ï«v›‡ÙÈ>ž”à¿Y½ïÚYWÛõ{ÿŒ¸d/âC­8¡HŒ;nêànºÆÒB©w£e¯nšœ¤ŸÐòÕÃ7ü õ†£¢º£åÑ„¥zïüìÈ’Æ7—¼¦X¬DkήѺèl6\&Ç~*uš”FI”z ŠËFòÿ[9³4ù¤Ñü[½¡I"òôA“{ÉnaÇL—ù!*ß4£µüodßpÄ%’†û>iþV+ÓyÒ,Èç<„èùI:>Îu7xä½iêúÓÄ;Xº´{:‚üÄ`¸Ñ3»´Ùvýõý7êÛüÛU/0­ÂZµ¥:s~óôÕ§Z•ªºkp‰ç Èu 'wûñ],ÄÕe‘øÞ¯ú’G•'Y,¬gƘÌÉÉcãA«ÿR¢ã!±øy¹Ù3û¿UqëJsòJ…åÆ Ѽ/›˜Gбt²îåHAQR Îúò•i¸(@#ÜΞädã>ý^Sßwãèñ#Õª–±·ü2y»¢Z-߃'÷nåWË(ºŠ øârâQ{‡Mîá+‚š™tÆÇ—Û×íxÆ-ö¦Rd 1ãMGZzkœv9‹bÕ Ú†}«.sÚù¦òÉÅöÛz”~ïd¯X÷°°+䆓©­T­!çÃ'~ÂˆÓÆê Ÿ£«læ[†V.dfn¬­E°Xt”é¿ãÛ)|£º~ï-›ÙþƲÿë~8ÞöÒHQÛrc,ö‡ŠPË[Þw§`®:ÏÛã¬Üß_ðéòÖ­¢ç€~>ÕWq%hÛç*ò“ÿjÂôŒ7«Sÿӟ톆o©`¹¶æ"í½wdóP~1üÌ f®MA. 
aŸGÅ#SµóbSŠjÑØó±Õ:wçu;1%pÝš-MP—ÞÔ*òâ{|3y÷d¼!Òï‰éhßp›‡ÒX•që41Ѓ÷×õhsö—Û½&m7¸–7u»ÜjµŽÑn—ÃÿýÉ­²b… zh±±e Ëþèâ6’v–ÎÙÝhÃúL¼|fd^U§l®EМq`–Õ’»AW¦zöÛ×A>¯5]n0"ü±æÃÙ¹S ÝЊˆ†4ï–åÃŽñ´‹Ø.å‹»ë÷Þ¢uÂÀÏ}º×f+îôÑ­l„\ª£÷Ï»M:jác¤(ôàÀlѹ@ÕXSõ®õ­†± kRj_akì¢.íÒA05x…»!#Ö ©ËR‘¯bp8#î¿Ç70“ç¹¢:ÔBÎ.âÒ.¼° çè´»´G:¤^§ QŠð‰X"ȧ!„ÅÚMýÈÞ¹”"MQÎd¯•œÍl×ÖŽøèdoÍ…§v3”÷ja­ñ!ñN+÷°Åeþ¡„—f¤ùÕOœ8ÞXe¹NFÀ"wMüwid«(ž8¶çµô~–ÿànëeµ¢¹¿€êK·ËÁÅÇNY:ª,8æu›Çzʹ‰¯ðâµ…|žOQ­éÏî#½ÏtÕ•?¡ò‘Ÿø/70øw÷N¯c9ÝÒ s<àÑ–eø¹,N¸).FèÖí¨7ÓÒ!Ã}Bþv:ÚœÚòv€þy$®Òi~á(UTö&ºàžàW_]ƒÁø"ˆ–Œ·'w !¸ü⢵¯<§2úÐù‡Á—çâKU3ÏXgIe6 Íx½±RbSi±]6iíz&;ÓßÎÈ7k«mµ.ïÓnÙ_ì\l±ÑrÔâË@öð'oïÓ7=ÐÍs[qÃX€öí~ÄPJ,aÀÛh/[… ¾‡d7vXëw€6œ8Ѿ¦¶®Ê·ÍÆNùÉÉVYùénh48ã»ßÁ”â­ÍÅ'3|GoömYS<™±øNX6|”êl¸±á‚C[mB d/èHjY#yÜ™ !Aª‡ôñQÿxèØÃáš»TÙ!_Ò@Å;Íco2¾Ü¡¹Ú¨ÈXRøá£¬£-¶îÅ·°µ¹„ßèñ¸ƒ9i†èŸB0í#¶·/5òÏ¿ae›|ŸásHSEÜõK[vÚMÚ|ߌðó ö¶Úð#;§upµ…Kß]=¼Ó>mŒ•‘ñ¢qn6+í)(al2Š< eÄ{|‹T›øÉ¬D4Ç‚b—µ"êéªD|HçD¾ç#Èhó¿…¢ˆÁ”.w,ûú<­uÃfk¦™ã—Æ!˜GõÜ#;¤í$ËòKÛQ.òE?úüâ­êô–7Uñ M\Š_¹ÅiÓE"CŸ/>ÀôÉÄ»•4Uö*Óa0C[qG±Y XŸÈ tkÚ/ï &¡Ûæ±L<…{Ú~µÖD#}‚ Ø3õZ^]:õªý‡6a¸Õ÷ˆO¸¤/l¿™å:Wt¬„ÖÕtrVíàí»m¼º9ËNžH¯›,„CJBU”[4"#˜‘ú ßð…à›kêËßMT˜°~]%òw8±Þü¯Í'7¯³²¸™sxÏ#˜8FRiAŸ¨x.lh[…o‰ÒÚZf–Î7ð‹Üh„ÙØ.²ÄWÛ×,Yì¡Bò§™‹cx¾èaZÚ÷øã4õ£íGï=_ŠñäjNðc®ÔÊu(äÐHKypqÍòõfrØÈbBìduf^‡$ÖPÿ««hÿVX‚¨s4áô¼þ"¶U oüžèhéÑÕåá|¢%%Åèñ§dм7į(-@Læª"Híï_ÊÍŒíU¦ÆŠlÅ%”¯ôÛÇû7f¯Û…®/!¯Ÿó·ÑÉ+î¾EöFÙá ΂ÿ¥1˜Ü®nÍÚÒ±q<µi‹Sg•n|}öÑ­K'ü•ŽÃæf>Ú•„¹ƒê?:;Ûɹ`¯©%fSñ²Wèl°s÷Ù®e²øPÊü›+欹šü¾©è9>…á˜ÕðudËè·nžKdî¾6Ï(—¤³öùoÒœõûùê*J™„{à.>J¨¸‘Ì‘`¤1ÖÿC­*g¯/ì´K¯úgéð.:Øö„~=÷æ´oKRQ°Áå‰ÏF…5SxÔ’_íÓQÎTu˸¦ êoÓSˆW©nAá…Y_R±t±Rù³¬«~UÞû**ª›£üwüóð¾^—]Vl<­FoyÛú£Ú*|cŒ¢Ñ|äг "MxÊ ›Àê^•eˆp_”:º:ºBD^~œµµEÐù3·ÂC¶]O0œc ~@*Æ`ð­¥Â…®#˜ö'D lùÚMÏ(þ¶Ýfëù!P£ŒðÚ”œQœ·×ÆþìùÊŒâ싗ߊ™;^åšvá¨Ó06îˆø~…óEµ‘^Žž®ûDæe<˜G¨Vl·\xòLQJAvÐ¥wí[ÉŠ¾"ãzV>Až‘Ÿ>|ÎÕ—i?‹ó~TÔ´Œ‹Ã?{͇×yâŠãúy"£D;«óIŽé$tÓ—M»\5yÿÕÀŽ¿¸sùnž†]JÚ9Ÿ}ÛõŽ„¯À/{fgµäÖ•è9+OˆˆîƒãsÚåÌ<¶eûïŽÛe]¯OÖÔ@è.ý3fÓDÄóäL•“È,«•¿‹²b&á}¦§]véI‡5DEÓx“…‚§;>÷±ú.€£ñáâ‚åƻýÂdë|A¬ã ï×Ý@½ÄÿC7F– c%úÿøk ¤<õÕ˜h»Íÿõ±UJŽZÈñ=Z6/ÆÔ=|_¨˜&cKnÝ=yçÕÓ¥–3oý‡ßÅbÍ4!¾iÙ{e nÚº}U,Ò´uâ]÷D›Û䆼 m¢Î,¾ A6.Å®pöhH ¾“@%ËðZãw·\’~ 
8yFZ,À÷„ÉY‡Äj>:áî}ÿî8›]çîaÃ×@D2ŠK=·Ùgdä.ÿÏß~ÍbpJÃñ‘눃¡÷+¥'eÅ\ô=6ô¯JÅô¸ÏËÎ.ú“¦"~êŽs¶¿’›ûå§… |8î¨ÇQµnwEdce-úœ!,õo8nÇ!ʪóö©¢õ™Qš-Y¬UµÊÀö²A«áʵ•Ȧ¶©…ôL§¦¬4ØŠ;J´vFâpkflñ½ßî|ú÷FBó²Þ‘ˆ3Ú(”†¼6ýµ[r&¾H%ÈÞ$î4ƒ1nÇœ€òyÍG6„.OCd¦Ë¯Ÿº|A,íF<þüŒ­AQ{ð®Óp8´¥ôFPпÊ>lêìC=ÂFš°äOs‰!?¿Iïµòýzˆü§Ïi8ßÌËUÁQ;èéæˆ¸¹«õ¥Ã£¶¼H[¾“ùË\|´RËš Ñåz:ZåDæØÔAk–­ä–Y@ÏrcKœ k,ð _ð7ÌF¢®{úý»‰Ñ²½Fm7¥n}‡ÖwnlìÃ`ݖͱǫÿTbbûïž|ò¦ç£Ûï´ÙnÝ2îPÛ?ÿ}ºþ:n}Û‹×û$­ïâæ9PçH†gäãûq£ÇkRÝ=ýI¤ú©£{]pÔËCڌ͓.aùŠˆÊõÇàŒäÇÚòô‡VÑÀ›­(Ì1Për eQAþщRJêÔï“ ú|­®úøG/³xú:žlàþÌœFòÂi}-œXÇ­ÍHosÆìÔs³¾šœþ$a*T>È`&+í Õ•ï{öc= [˜š¤æÿìÕbó„í¿¥PŸå¢ùåQ¬ý²yX[ŠK‰‰\»#§Nå?Ÿ=ŒêFù™/¨¯|¥9o§éôkm¼ô¸{ÓÙR÷)>>oFêÞÎfsf•áþP}F˜”ô ,ë òAþ1Ãt.ÕmHM|Dºò133?¹b¸Ð†ôû3KX"ˆíƃ×ö-W2ÒP×=ôìØ¡#ž† Ò×¼uva_jùõÕ–K,ºú4ïê+Ÿ²zµ™©Æ&Tï)ñ1ýT>)9ª?EQÁgÒ‹ E_¹®6…š;8þþQ´`õß>T>5QSëwéÊ#ªé¦©qùнºDAY¥·ß’QTë¾i‰ï±‡‡BÓLôÆ"ˆÒÕLð…cŠÓ}ÉGËörJÃýkUvîèêÓÕ«m>–ÿbçV”~y™lÖÓÖ?ZêÊ®ÞP<“jïýO“¥Õò~ÞdÞ,ã1‰ÒŠTs^qµ¶6«zuÉ„ÊÝ1#B4w9!ƒ¨|6ððpSñÛï_õݱmk¯.16ÐõU3œÝ««¶z¶)'gl°e¯ÙÞíŠÁ5/X¼¸›Üœ±Þ.&+ì©¥eÅŤÃc¡â{¾z~›£\®Ï©¨|Q×Tz©"+–Xܤ^'+¦±ÞÆÖnptPù m07™~.à²ÖôyTɇó-úÒÞ((ÀOµÂ.ðŒS×ÊÊ•«>WWa‡SaÁØÏI6Ë-I ¹È|ö…+aªS¨Ðð…ùó{–©YnµÁÖæFL‚´âÊ?®æÇÚuëúpaù·/ÔR¾ÈË>»œvBåƒ@𨫪4ÿiîÛ ŠýR‘`ßU¤SL¦¸;alµ–Â7V|ßÐvZ^§ ð]ºDó0ÂŒÁ^VRG‘çÈXþ6ÕPg8mìÝŽö}Yxõr+ªÔ5k¾Ùâ¸eЖrPù ”OIþ¨«ÛŒe)ù¥Ÿ=ê›ì¡¬µ[“œùIHœ¢{Ê¿~ž4AŽ$jǶ-þËàÙ«(Õ›¨vÄÅeærŠ6Ì~NOÚ±}o¸Ðl–‹Û±éÖ(ù©ú({(“tt~þ¢h«¨å¼§£<*ò§ÛÏœ¿¨3k…¾ïwµ’¼|nÀ1‚½±¢'*‰¡”«z¯—º2UO'ÿgùˆ‘<”1ïSJÜö^ö˜víÜéwéê#3Ê9¤±FJ|™_ÙÖ-7¢ãǨjSÌõŸ9ø?w++é}þ‚ö¬Å”ó³/ž´íœPù ݱbé¢[û°6X¯«Ÿ?f¤X˜õ×256ôôöÑ7·¦@ä$ܼÔ[U\T8#*š•]ƒ™eØ@›WõíÓÄ *}S”ysLâSÓ$å|j3×ü29ÎzÑ|òjC‡ªËþ\˜Ç+">àOÑXó£(‚œ~?ï³Îf%ÅZâ®_䲕Ò%X6¶)Zj·îÞÔ˜n>€µþ?Mñ·®m\Kž ț֭qvvž³fû€ÆLrDðú56}¸Ðl¦‘ÿ¥`åɳ»Ù'¶ÿÔ–ä‹ ðˆ öírnN%Yñä䇯8J6Ùûsï’÷Îõ¦¤ÅEËÊ_~Ëý(,5fSoSý›§ñK-ÈÓEç´c;Ro|ØÅÍ×Ãò *¤KxyæÏ}'æ²®ñ@ÜŸ©±î]J¹deÿþý§ÏújÏY2@#t…ømÞ°¾Ï t«¬—†ÝŠUÔ603ü~|Éäî—lˆ‰b‡±&¤&QÓ˜êÎï´Ç÷NöP4UÇ¿Ëú”ÿùcçÛeôÿ)~Wå¿C.Ù#¦Þõü]ósÜ wç†Ê!ÁXj6ëØñdÏ_2’ÄDEÍ#—Ò{âql}[¸Ë’ùx¦ÿåõü9&o3ß¿I«©OÞÕy++KðÊú+!>SCÝÓgÎ-%ó|¯¢iÃY†­XJ‰>-ùÑ2üå×B/ë͵$ï?>{¤ 7fÖtò»ÅÀóó»4V{ÊpNrά%8Ùgº¶•Â@IdèPÏ\­@–¶£!šîzíܾìCˆÌ0œT]S{Æëôt2 vNj– K®‚CEaœÒØ1®nÇŒ­Ö3 %ClþUîhOÎÚ +ëvGà@ËOœÌ-8Š,…oT —£ã–lìmׯµ«yœÔžiÁÞÙîÁ½~ЦÆÈ ïí'[½Â*'¯àþ+:³—冟3’™‡` 
ìAåƒô›å–ÇÜëš.âàèã]pÃ/ëMÒßí4à &`Ç Õ£ÄäœO¹3ú>Œ¢<7óuêS‡Md%T”žoÞ'&'é™õÝ#Á4ÖE]9¿ÆÎNÍ~@šÈ€][Wwò”çä+zµ2x#äù½ëãäÇõgâJØæ°¹´¼ÂßßËp‘ 3 kŸSoÂÍK†Ó¦Q`xˆ´¸èúÕ«¯†°rpQëûº¯U_sžÇ?Üæ¸–`Pù }„™™äùú††Ó§½¥ÆK)÷bW‡ÚÒ¸»a&sÌ6®[CI› õtÀORJjr|ÜD“ܤ®ö”7‰êjkV/·2°=\TÇ‚ŸŒ·™÷ïÞ™8{!Woª?ó³Ÿ<Š´µ]ë4Àuy,Û®;ªªkΜñTš8YdŒ"é×6ÕüŒ Ñ™d`»Âšº©8;·o+*-»àwRÝÐD@\–ôkÿ,~t+d–™ù¦õ]Ž`Ùbü’F!áw~þü©nd6t(©¾2¿kÄãÒïßÖÛÚè«+À² *¤¿°²° UÈ×ï³bÆ e&%¯È+&Ë2Œ­Mý÷§¦äk^væçïÕ´&Î0œ¬­HµÔºêà§¹¹ùntìë´çRr Â2c¹Fµ[¯äweyÑ—¬ì·¯¹xxLgÍT¶0§Œy* ãÀ8ˆˆŠ~›ñBF^EXz,Ÿ`Ç⬲8¸ ßòr'êOž¬£9Yƒr[isŒ`G=¶ä‰±1¼B£$Ç*r’ê¸æË者¯Y™^§Ë)(›™«ou Ô+ÄÇ»›°´flâÓ§‰qÂ’2b£y:›üPû£8?ëݧw¯”Ô4M§OÕKµÔ»p.~MÚŠª_7o‡—/­¬*(5¶ã)\ScIÞ‡ìׯ@Ö›a2ÃbÎLXXA僥±£ÁñÏ¢âïå?êë†ÂÍÅ%&"Ä"4FWy ‚Ì¥ƒ™˜˜æÌ˜~Ð? ŠJª««š›ÿ°²± òóá{³ÆŒBŒ§PËB³™Fà=þQY•ÿµðgEå݉ñÉÏy¸GJIˆ² Êé§ò^<:TÀzÜÐØøéK~Yi¹±á¤¨˜Çœ\¢BÂ|£E5G‹"³Œi9õNÑ›~ZD®®.7¯àÇŠyf³BÃossqŠ‹Šr Jj“DÌfшÁ\ÃWZþëöËÿò[ɯ_µ¯^¥khj ðñÈHˆ!£4Œu4`Ñ•B9D„û<‡Œ*033K‰‰ÃiÓBp8<…¨¨(}œ••¥¥¥“T>„Ñ6líÉÊ'##C_CåƒÊ0&·Æ©­­mlld¦à²dÁäÉ“éËàäädkkk˜G òA ŒÆ!C=zdhhHËF><11qÊ”)tÕ3gÒÙìï´´4˜A òA ŒÉ¥K—h\ù888ž„aggonnþþý;-Ï6ÓÐÐøúõ+]Ç3ÝùO UØÙÙÁ •a@XYY333.\G³FNœ8ñ÷ïßtÏÿý÷}ìéééàà3T>„1}¾™3gñ£Y 555‹ŠŠòòòÄÅÅé1’CBB@Ý‚¾löóóƒÊ•aL°Xì¾}ûrrr¤¥¥iÐÂû÷ïÏž=ûÍ›7@?®^½J‘looOwÊÇÁÁsT>„1œŒŒŒ»»»››[II ZøðáC |ïÞ½‹ŒŒ¤ÓH666¦;›?sT>„1áââ¿]Ц…ñññàwffææÍ›é1†ÓÓÓ/\¸@_6ïß¿ßÙÙæ¨|cÂÃÖ±±±QPPxûö-­Y˜þ>xð`ll,ÝÍg722*++£/›Ož< •*° ÛÐøúú­]»–-D§ á·_½zuNN}Åð–-[è.Uxzz¬•aXPEaee:uê»w︹¹üøASZZZ‚ßêêêà÷ôéÓé+z-ZtíÚ5ú²yÅŠ/^„Y*°7SRR¿ø={öŒvö¦)//_¶lòw£ƒ3g΄……ÍŸ?Ÿ^¢—§!¾yóæ ¨|##&&†\½zõÒ¥KÖÖÖ Êÿþ}1ïðáÃ'Nœ ,@Ϭ^½š^”oùòåáááô•nÞ¼ùâÅ ˜/ òA ŒLëíR7oÞ ”È???ÌpbŒ*//SSS«ªªèb¶+++Ý¥‡µkך››Ã|•ad„„„ˆBòþýûÜÜ\))©ï߿ϛ7ïÆT7OCCƒx ”OZZzäÈ‘?þ¤ñˆ•øòå }%†°°0z_*!‰> # ÀÅÅUQQÁ`<>wî\êÚÖz–!± 6..‰ÎD¤M233ïÝ»Gw)á¿ÿþ££>T¨|¤ï¼yó†èZåä䤤¤€?Ç—••…60R˰ݻw>|˜ø'±KRYYUhšR;;»„„úJ¦¦¦48›*Zo½ÍÃ㢢’ŸŸŽÍÌÌ>|HÅÉãžžž­•ÏÆÆ†x\RRˆû¤5øøøJKKé+ ÔÔÔXXXÀ¼•,¼~ýºõŸ@ö,Xpýúup)oØÚ9RÙÙÙ²²²à˜……8U4¨| –@w²‡¶¤»^I¨|¤ïdeeµ;3þ|â^µ³fÍÊÌ̤ü€—S§NuÜ%g÷îÝ¡¡¡è±¿¿?­µyîÝ»÷òåËt—DEE `F€Ê "€Èµ;³páÂ1cÆ|üøýsܸqaaa‚‚‚ÅÅųjÿþýöööíN>xð õŸ@ö,,,ˆZH]:âMXX˜¾ÞþªU« ·•tÈÉÉu< d¯õJf ÈžººúÕ«WÑöÆ¥¾¾Ý¢¡ûöíë¨7W®\Y²d uãeúzõ6l~*ÌPù ÁEWk•Ù?~üË—/‰gRSS“’’,--Ÿ?> &)))ulƒ8::Þ¹sgöìÙÄ3£G~òäIFF†ŠŠ µ"ÐÈÈ( 
€î¼=OOO {h›6*2¸wW©“——÷îñŒ®®.½ &8p`ÆŒdR7û¬\¹²Ý’åË—666¶žöN1(ÜL.vîܹlÙ2({Pù A p¡êêêØØØ:ÉÀC‡wJ[[øU­Ï¿xñ¢¶¶–‹‹+//“““¼öHKKw³‘]Ç“ ^Weeå´iÓ(o@€§L™B²üc໳³³ÃÄ•¬¹tèPà3­ZµªÓOYXX’““………¿}ûÖú<‹­¨¨xýúµÁ—/_ȵfyyy÷[ävìØ~Z2f̘w0÷ññé¨|a‘OàÍt­JF€¿»dÉt§xú",,ìæÍ›Pö òA zSE@TTtÍš5{öì雋/îQöÂB£]m"ÈÌÌ dØ ¬¬LÞXª­­yÿþ==ÊßÇᲜPù H $ŽUñõõ­¬¬ìj#tBô–-[ÂÃÃ?~,!!Aº ¯_¿ªIJH666SSÓnöQ>(Ð'™OŸ>‘%~êëë%%%ƒƒƒéÑa²²²£Ç•e òA dçÎ$†äää’´ 8@çÎë4Ì  €®®îСCoݺÕã(˜††/// ¬$š;ooïuëÖu`ìØ±@öV­Z5bĈ“'Oö9fÀMÔÕÕïÞ½[TTDw¯uß¾}À›…)*i(ÜÏž=»víZã»Å‚«¬­­7mÚÔi,‹6¢–——g100PII©cÈæææåË—_¹r…tƒ1Ì“'OºQ>ð[SSÓÌÌÌÉÉ©Wq²gÏž   W¯^Ñò¦]^eVVÖ£G`Ú†Ê@º+èIW>”ÔÔTTÿLLLœ» ÆÃÇ;::úøølܸñÈ‘#@½À™ºº:##£ÄÄÄÞ|ùòe‡)¢óî§M›&..ÞãÈO Ö¦¦¦À©=D€¾^"ˆLôu€z LÒPù Hôyìª+W®üüùstt433s7¹¹¹@‰ºØ[€ì‘¾ngLL ø}øða ½@Þˆ»ý¡DEE­^½h†¯¯/qÍ6:bûöíÁÁÁ ­—€@åƒ@ ÝôhCŸg‚_¸p!ŒR™2e ´Ý»ww^JJª¬¬ xcrrr¥¥¥ <(¸§N JpÒ¿×ÛÛ{âĉOŸ>%1ün555ÚÚÚÀ•••õóóÛ·oß¶mÛ éî­íÚµ ÄÀÅ‹ÝÀd •ôŽyóæUUUõçJJJè´„S§NíÙ³ç¿ÿþsttì hŒ††Qiøøø|  FFFž={666ÖÈÈÈÎÎnúôéÝ|#//oDDĦM›<==I4ØvôèÑñãÇ»¸¸444Ü»w/>>~ݺuô²¦ ¨(¬X±"33<ò0éBåƒ@ }äúõëÍÍÍdY¹ßž88þ¶¶¶"""­Ãƒóû÷ï×ÕÕíjVbmm-Ð`àÛM˜0áðáÃDÛPÐ-é`Ï;7''çøñãfff´öjp8ÜÞ½{f›››Ÿ;wÄL®Pù 066î~ÍÌ>`ChiiÕÕÕ>}Z[[›ô;hh}æÆ—/_~øð!°vöìÙVVV@Mìñðð”——£a®\¹âåå•••µjÕ* ‹=6ß1::=Þ' “'Ož”‘‘¡ÖëhllÜ·oŸ···ÐoÚgK=ÚTÔ €/(**:ÐñŸ›› D7""béÒ¥Gÿ“%T>2€ªÓõ9ûÌÚµkKKKÔµ>9Ÿz <3àʨªªéÕž·÷ïß×>~üؼyóæ{÷î¡ç/°hÑ"àÞ¹sçÉ“'àæ¦¦¦@Kú ] „I‡¨ ¹»»‹‹‹“+Š€ ëââbÉÞÞÞÁÁ!88¦C¨|„¢ÄÇÇÿøñƒ››»Ÿ÷Ù±cǧOŸ@±Þ}° Ðcggg *@¢<==YXXÚ…|õêPˆÛ·oÏœ9ˆÄto8•º¡<’vbúàÁƒaÆ͚5ËÊÊj„ $>ðƒ‰*ˆÃáöïßüã9sæS¹¸¸z9àIåäävíÚÕÑ……@åƒ@ E^^¾ŸÙܺu ¸q/^¼èí…û „ÅÉÖ¬Y“‘‘D8s‚‚‚7nŠxõêUodOSS3::ºõÂiÅ2%%%(((** ˆÙ‚ €‡JÊZ£ Æ™B>ckkþ®g7Wùúú¯Q]]跘ؠòA Z!22²o½}à*>>¾¯_¿g¨Ïß~ðàAàÍ7nQ77·>L·xþü¹““ÐK…®Â ÃgNŸ>M<œÂ€€€ÇÖ¯_ßãZÞX,–8%ãÎ;@¡µ´´‚ƒƒÑØûöíÛ’%KÊÊÊŽ;Öºû•ÐÚÚÚzzz½]Ql×®]lll}[¸‰[·nòôôÜK€øÑààèÑ£À[ºtéé² .nпv+¶tƒâŸOž<9~üøýû÷ÍÍÍÇÖ}Oälàh'ø^àªffföy…T>BQ†Þ«ð@´€«×[7ÑÕÕ¸t@ç¶oßÞ£ž9@Å xQ$nApöìÙÇÛÙÙñððô­ÐzÆI"""@SõõõÛ.**š4i8þüùåË—ƒ3 öž 8|Ýlü Ê@h‚ððpPŽ=#%0_¯¶»pႽ½ý™3gvè­mèB‘‘‘Ë–-ûðá//o÷áwïÞ­¬¬üêÕ«þGËf豯¯ïþýû555ýüü€¶·5%%%++«uxà:£Nð’%K~ýúuûöm˜´ òA …••uõêÕ¤¬‚Á`p8)÷¬©©ÑÐÐ000š·råÊ~Zhbb<¿ììl™¼¼¼î·²'((X\\LÆ(Bûíttt8P^^^]]ýøñã®ÖA÷`žâ7Ð5¾!Pù 
ÍAJ-""BŠì%$$Ìž=ûéÓ§dßI@VV¶¢¢"))ÉÒÒòË—/Ý„Þá£G ÉõÕJJJ rœœÜúäÎ;/xéÒ¥Y³fu¼m°JùìÙ³¨¨(˜Æ òA ÚbãÆÝ@çÌuæÖ­[ëׯÏÍÍ­¬¬8Suuuì á¶mÛ: ÃÅÅåààðúõëþ……ø¢NoåB,[¶,==è"G»0ûöí¿õõõû3• !õõõ¬¬¬~ºpáÂîe/++K[[»¨¨èëׯ”18[­WïlÐ*WW×>ô,ÉÈÈÎ%)žk`` ø ôXFFÆÙٹ㮿À( ðýûw˜Ø òA š€……åøñãn3¤ÅÊʪ›k»CÆUÐH„™™ØÜ©‹/JKKw põêÕ>+Ÿ¢¢bhhh¯l%$$JJJÀ‘‘8&îÄDtšìúA@@ÀèÑ£a’ƒÊ@¨Ïõë×;U>PLwåW;w.--¼cIz p§._¾üèÑ£Ž3ÇSSSŸ={¦¥¥Õ«ßWRRø¯}6 ÝT€6£»øyòäIDDDHHHëYŒ¨|„:t:Ü]9sBBByyyvvvT·ÜÒÒ277wÛ¶mÇŽkS$ :}úôŠŠ ÒoõâÅ‹   þÈpð{ÆŒÀ†Ö{š™™Mž<œ¼ÿ>LuPù 5QUUíxòôéÓç$ÔÕÕ©««“EÈ…””Ô¦M›¶nÝêîîÞú<:žD€Ì‹ŠŠ’¾á;) ÛJ ¾{÷Ž8¿ž““óÎ;šššÏŸ?‡ *¡S¦Lé¨íN¦¦¦^ºt‰ì3ú­ 6x{{¯[·ŽxrÇŽè+Ý,""2mÚ´°­¸¸xx®®®?FÏ033?yòdÕªUþþþ0íAåƒ@ ´¢|;wîlçðÅÅÅ’×+"#ÇÿòåKëM{T¾ÐÐPà„ œmèÞüüüùùùèZ&&&tE7t©RT>Biôõõ/‚þYSSÓn˜˜ªªª¥K—ÒòSX[[ëêê&%%ÏL:µûKòòòêëëTöˆ”””9rDQQ]íš——WEE%77WJJ ¦@¨|„Ò$>>žèùMž<9%%…øé»wGJ³!Õ²×z¯ù3gÎt¿ ÓúõëIY¹\ìÚµ+33sÞ¼yèF¾&&& Î‘S T>B€“GT¾‘#GÏå8pà@HH½<p〵 .DýªnZû¹1oß7n\hh¨ºº:ºh=QQÑ‚‚˜¡òA Jóöí[ô ¢¢",,Œx^DD„º“öz ;;{PPª|aÁ—N•¸_”—=¢‡ doĈÕÕÕaÜPzzz§Ãk!Pù ÈòéÓ'ô`åÊ•7oÞDµµµéKöPîÞ½kjjŠn¦Ó0õõõÔ5²ªªÊÀÀ ..N@@@SS³ûe¸!Pù ùÉÏÏGˆ[´ƒ3ÞÞÞtú8rrrèÁâÅ‹;~*..ž——G] 1Ìýû÷wìØáêê dïäÉ“Ä!Pù % n9KÜACCƒ>777sssà¼v\h¦¦¦& €ŒdeeÕÓÓËÎΖ•• „Ê•P...„0íU> ô+{(膂¼¼¼QQQ3gÎ$ž—””DW—¦fÍš5eÊ”ØØØŒŒ  Ó@­aR„Ê@(„€€ø}øðáÓ§O#„®²íÛ·Óõ…‡‡_»vmÑ¢E­•Öšpì¡Ã;W®\ •*¡|||aÓ |ÇŽcŒIf«W¯ÊGì¹DË©ÐàjÑÀáKOOðàðS1 LPù %¿Ñ…•Ïž=ÛÕŽçô…««+B˜áGãvòòòŽ?þëׯ ,•˜¡òA  ¾à÷ìÙ³NçöÑ;ëÖ­óõõ•••EÿüðáCdd$mš dÏÃÃab*¡ÐÖN ‹iÓ¦eee1Ìs999¡ëd.\ØéN„4‚···ŸŸLŠƒKù<ìmó0غZV_x¢Ÿ 7øïáOœ!|¿A¡|@ðTUU'OžÌHÏ5}útômÑ¥Y€Kúøñã#GŽìÚµ &ÈÁ¢|=ý¢´¡|-°Àw pssß¿ذagΜa¤çÚ»w/qzF»}Ûi &&&sss, •o)¡"¼¼¼!!!±±±Œô\rrr÷îÝ300puuݱc[ëîî~íÚ5˜¯ò}KòÑ[ÓúL ‡Eš>c˜Ûìh™[;S’ØD¹=¦Ìu*~ˆZEòž‘º‡‘¿sZ‰þõm¼ˆ¦UX«r¢ÇÖR˜ Ïoøå3|©AÇ»wï*++ïÑ®^½jaaáëëKûÊgkkÛzd)_sQöô'©$Äg€v 0¢ìéÏÔOˆÂÏ:2‘ÂâpÄ&|Vb%˰ÖwÍð6Ó_ 4~¾~uXB²LkL N‹Yÿ“=ü %$áK… *¸¸¸rss÷îÝËxö@7»ôÑÀín·³>¾†ž·nÝj³ ÆÞ«W¯ØÓÏZYèÊ—t*,âä?o•ÖF-3Š4¢›=zþªüÍë»Dxx“äqWï`üÑ%lf†Ä!OMúp·‘+]É=Ø¡ášÊ«0ýµTÍÞ…±7n³õÅbÉ¡C‡XZΠwí[`<ÿ*v„Ì 7è$þ/ƒÐ#jk/AìmmÌJ#ïÆÞ¦ºï£¨ÞœÚÿµDÈk8r£ê‚ 1@åeÀ-: sæÌaW>Öjæ,áìÊ'***++ÛfS‹ë÷ìö·¤¤$ÎŽ¯|ì¯qõ·d¯Èîçû$#)2£fpKtvq?ü%29#l™Šå6„ÒƒƒÓé]s=O˜ôÁŽ<òAH±â!+.<ãY›†[²wiÙ_ÃÇF1zÌL\»†z¥çšIº1¡do_êk¡x#÷Y…¶Ü` 
cÃþÀÍÈèP¿=z¼ÿžv·ýîâââ´{ÿþýP2;¾ò¥×í¢â“ïÝDHe‹­¤ES³IÖëêRoÓ.(Ù.¨ñ#ë…´j¬Ë tHΟ?¯¯¯OK`[îÕ«W8StÖ<==Ûxjéõ1ÂÂÂÚÚÚP2;¾òð»ÐÓÓ£×®]ëhϦ®]ë‰J'""bîܹEEEP,AùøŒÑÇó¬]»ÖÇǧ½,½D¿v…9\@ùø dEÅÅp8âÔ:÷åÅÅ?ÔÈ/ €m¬Öi,ýò…l­9D—,ñظqcy9Oyyk¼Â PáæòXVJVVýÐTTFxmöÿÁ{ÑMqt¬†kP>êàí½ÁÆÆýGb£75Iojyù%# ÀÃÝݽUrô¹}þÜjîÂBòóçÖ‰ª ÇWië¶½gÏþ‹ÿ·{€ò@§W¾]fÃ<æÈx£@>=E”ÂR•¯7F¨ŸùølÇÈ|åëÛŒPÝ €òQ4><(?†¨Yo:Žò·­™Qe”ymíŒÝbbäz ;T–]ÍØ6šöL‹=vàï˜r©̲³œÈŠáÔ©S\\\½‡SæÏ³t\]FŠì9$ƉPñ½%+=Ÿ•ñyøîÚ“N@٩ث\¨r¬¡ñãä£nþ‘"2ZA>Ô<ô÷“¢=÷æVذž“-y¬“êηš©Wã]Oi~ªãºmˆYY{Š;ÉG½ƒc{÷ìêáZ÷”…yðsÚÁ’=Œáªh Wbpð©eÖ Ù+Ï&ø”X{ÃÃÃíç¡)oì4Dñ¦‰‰ þ¿.d§Šsm‘¨ˆàço/JŠŽcm^."õ…¨LŒŒ°ÏÎU£ì·\§wouNt©ã‘ÂÜÜáQchVûI­g¡J’¤t±&žcþÆæKN1D{¥¼qe$ÉŒ ¢jôÙà·”síÉõv„±µs²d/çUÕPáWì¶)fÇfƒ•ýÜmçÆ½ñ·ô¾€7±ì)[íÐ}¤c± oL SœsŽ,{Ñ÷ö© ¢WvDzç{6ó•2½àßæÔ®Ýw2eï^%)Çùˆ ¨yl9œy8™–=¯£IZ].èÌ &=riæJ^EïÎ0eOjY°çøª æŒ$|+¡ÁÛOX¤Þ‡ï›7zTÕ«„§slmìàªþ~›fŒN“³¿W==H;|ÓË͉ÂdùU‚—²ÝßyЫåQL_:¡ –ÞÌ·†ûþÂÿU,¶QKR£rBˆ©X·Þ2V–3fÍ^=u¿‹ÑpdDú0Þ8ž;›íª¡iwºrNiiUiÕpŠdt²ø8ý?„&³âQ^zÅÕ\ !-§~žù؃šåú¸óbzoeµÔÑŒ-çͽa¸(ЖøtQBn|=¿‡…$\™¶‡Ûú¥ÎJ–-T>ØNž ÜèQå/=Ö¯åkCÊwåb®‡Žf£»J_=£ Uj–DàÑmx”öÆå´Ckþ¹…Þo,N×ÅÃßÌ5>¼V™5ˆ“sŽªp죺žuçŒX3šv̲àz‰vçå3Ìz_¸BùdBÚ œT{Fxv¥Nw»_Þ‰Uk4@‚(,lçʪ‹=|-ÓWàÈ©èé³ìñ°äå½KW’¿¤Ù”Úžˆ(=ÿÏÉOŸ‘Šæd™Þµ+Ë'ÅG¼ü¤gl!ÎЋGîu—$JÿŒË_?zQ$#3ˆù(#1õÞ}qU=uå6uÚýì4ÊÇ\ 6iSkÙXš´]ËLV•B•-3˜**ëH]óÓá°|ì‡62•«Ç©…Ó u{ë–._ÂÓpðL- ´=ºÒK 顆¹±ä ×½EH¡Ð•ÚnáÌ·nMÛ8çì 쎾Y­C½ÆÎØ®8i[ý AdÕDžZªÏ02¼,úïŒË§=¥Wj÷æü59ú³SËÙ]`Ü,û%D¶Õéw~ûø…ËÕ瘓òŒŸ/«¿wo„rðwà8Jöî>!9‘£a¡ÁŸÅgœúìŒ+8–V¤--´Î”0WåzRHrþ¼¸ž)vP¬zŒeï@:9¡7åi*A˜+.,¼µ‹©v˜çG½¯PúMÉÞôÂÂãØTEðŽ¢÷zètÿÙéÎ<Ñ•º5vgÊ9„V6ˆ×AûÈTTr%³6Çn1Û´AòæKôÄOp®ÿ†ÎuðÅÏGª^7ÐñáÃ?ùøø)=p9ƒ¶q—”6YáëÖ‹zü 2Ï;RÉ OÛêNIøñÝ¢È Â?µd¹†à/Ë—#ÙgC™Œ=›j R‰RR¨×ð"BÒùK¨gFìØ¥§>sÇŽ™Øë·‘'³¶ÍIÒ¦éÈÿ>Q€ÿÇŸ?Åhbâ¦Nà‹~@ùРõ¶Ôñ·^3×Mu3–=ü}þÎl›¦• –Ö$¾PKsH¿IUkz¨î˜.íÈH?ðû•q(VæŸáêG55ÿ£R¹½¶oçÁ?þqõÚÎÃFÉ 6Œ*$CÖ;…ßxˆH‘ÈÓabœH´H/:jjÃx¤µælß>‘µ Nmȧ='’z =t8¬4qçÅGUåÝ¡m‡è—Òn*ª))7Fš›¼nܬLªGB%j±ÍWEUJÞHÑ:[ù±Š}R>e-“ÈØæ-œ=ð§¡¿³›s`.uFŽZ¹²U]ëvuun&*«õVl›æ®;ÌYÖGBkžsÝJ­Õª«U5Sí‡BÚø Ú@ùþÃ&ÜfºƒÄ½c èP5&H.m‚„nbvcŽ<ä%0fÅ„>³ì½g9M—è5â%£Q&fízôçq:ÌÚ­»Ö-·ãü噓ž¼¡]vRw-DÌž8²……) Uǯð(5s“d? 
ºyÈ^í PæE똩¨”Ró&ÏoñÚå{Qd²žø9íZ2eShúÛùª¢Ø_¢wiÛä4wÍVIsS×-ÍgZBù¸Aµ(DïDÙ Í.ÙMÆØm0þ÷wÂ[ºÏËöš´!åà·c âã\Wn*/ÏLyLUøæ-J>´w$¶Ÿ´×qxÝ9é–Rø‚ëê´ÚÌjBOr½ÕoPœD¹±>ò†+$þ0Úá<Ñq©öܸüW «f[aì™q:@eêŸÑÌö¡Œ‰^ 0z¾„³æjº6 ÿwK#wJ$˜/SâR²wÖÃx’û^ ª 7hßr[ë­Ø!¢æé5ý¬Ûd±šúµjkÉ^³×í„„É<_ã¾K]7á3b7¡#ê£]g ÛìKÉ!O£v Ù³/Öÿ™b\¼²Ý8”:<Ü×ûÊ6ö.|~8Éíkÿôް²Ca”ó£kû‚Nß|^RUHn[a‘rýüHëSQ†|AÇ);c_!™|vç¡c±ç³‘‹{ø2۹سûðå…y³­mf_¸û§óÎ_'{øÔjž……ž¬MñŽ……Žl2Àhö‘›WX8¯á± '¨XGZ×nN.ôa^¢€[óZ'Áîn›zë2Ç6uÝÜ]}‡˜9¼ŠZsêÚFs¼ömu¥|…Ǧ:êºewZfqÚ2vïöTg%*Â!|쎔B2p…iÚƒ¤!³B¢f :žÅ”:%¤”­KS;g- žUã–Ñ´öÖd>þ—ù„³ÂØ:Œ¥Z“ìñ§~¼’ûÂà2[{·¯^7[Ʋ÷®7݆ôæ'Íë„a‹=fŸ(‡ÚØtᲃò(€ò(€ò(ÊÀo 8ØãGûömrr²±±qÉ/Úúc9bg=ƒÖŠÍÝÝý§Þ‹;vX[[óòòB©åàûŸÅ_ETTôÝ»w™™me‘e‚ ~~"§OŸ¦økll|êÔ©“5)))ÚñáÇŒŒ •¶œÚ!C†°ß”©S§Báåà§`cS»PùåË—;RÖŠ‹k׸3fLIII[NíÝ»wYnP>P>~¯_¿f¹KKKUUUé6·ö΂ ÚQjûõëǾY]] %”€_DnnnÇÈHhh(ûæ‡þúë¯={ö´ÍÔ>yò„}óéÓ§$I”€VÆÓÓ³žOYY™””Ô³gÏÚu¾Þ¾}ÛÐ3::ºm*_¯^½z.]ºtûöíPDAùhe¼½½kÉ]»òðð|úôICC£½çK___DDÛy]ºtÁ›ÿûßÿ888°ÀJHH´µÔêéé=z”W;XžAAA | |´>¹¹¹:thjjª››[IIIß¾};@¾XCâ,,,vïÞÝ­[·ÊÊJ..®¶™Ú¿`‡¿¿¿ŸŸ_hhh\\œƒƒ”OP>ZIIɵk×b‡œœœ–½6Þò[¹|ù2–½K—.;¶'‹txx8®‚Œa…”€Ÿ‹‚‚ÂÉ“'ÓÓÓŸúÚ‘#GÚ¾òÑMmgöT”:>Û¶m»~ý:–½zý Û5vvvøÿ™3gÚx:õõõãââÞ¾}knnE”€_Daa!bt¯5j–À£›7oZ[[c‡ŒŒL[Nç»wïìííc–Ñ 6@QåàÁêçzìØ±`|8::ÒîââÒ–Ó©¢¢BÛÙû÷ïååà×¥‹ßÊ•+åää®^½úàÁƒvŒ¬Íœ9³Í΄)..þòåKÚ=xð`(‡ |üR6oÞŒ•1&óôõõUWW×ÒÒj§yyóæMll,kÓÉÉ© *Ÿ¤¤$KöH’l³SÌ |Ða‰g{[ºté©S§BBB¬¬¬Úc^ÔÔÔØ»êhjj¶µÊÈÈ<þœµ¹páƒB!åà—¢¢¢ÂÞ·ÓØØ[NC† ùï¿ÿÚ]^–,Y¾yøðá65ªOHHˆ}) Lrr2”@P>~;w¨àáá¡7ÅÄİì 8ðÂ… m¼‡$;ÊÊÊ™™™õ~oß¾ÅÊáááÁî9dÈ,{þþþQQQIIIm9ýýû÷g_t=C[=Qÿe8::–——ß»w¯á®/^@S'(¿õ'ݵ«¬¬l^^þ_o×»wïÞ¼ysNNŽ  `[K¼ššZ£²GƒwáÄ/^¼øW& ŸîñãÇçÎk*À°aÚI3ÊÀ¯ÀÒÒÒ××wöìÙ½{÷nôQŽyúô)6.\¸qãÆ6’l33³† ‰ìpqqa‹¶¤¤ähöÇGŽ9~üx¬µÍ300Ùå M°téR+++l‘4 OŸ>t§üˆˆ;^^^¿1Áòòòiii_•´uëÖ >üÖ­[?/%óçÏOJJºrå 6‹›yôèÑ;wBaå ­oooßüÓy.ì¸zõª££#???V—‰'þʤöêÕëÕ«W- ŒeOQQ1##ƒ““³µpçΜ÷—/_:t(44´%‡à«:tèP,ØPÒ@ùhCè1èѣǃDDDšÚ"ïß¿¿qã¶“=zÄê×<æ h7–¥-[¶>|¸wïÞØsñâÅBBB?žªÛ·o3G>{öìï8<88¸   ÿþ?ná!ïÞ½óóóÃáããsrrÂÚyôèÑo=oee%¶Pa (m#F&''&&&>¼åÇâ½z“$É`£0))ièСS§Nµ´´ìÓ§OË# Ãf6C±$ÿH¦¤¥¥±ìa“´¤¤$<<¼Þ^œNœH¬sñññ8¿sçÎ]´h‘'ƒï>ã¨Q£°ðƒìòÐnÐÒÒúðáCUUÕÈ‘#±µtäȑ ˆ… Ø=± žÅ‘Oœ8ÑÔÔ´žb)Ò××9qâËÖÊýsÚ´iøÔ¼¼¼ØºUWW711Á§˜É UÎ2eÊ”ÀD- 
|´K899SRR°kÕüùó:„uâãÔfPÏ›˜§Nº|ù2=q(>×îÝ»[¥KÈéÓ§±|ÆÅÅqss˜™™á¼`fÆÞ}“&M8pà?ÿü%”€vÏ4ôÃïøñã­nb†„„HHH°BHMMÅ¢uáÂ…‚‚‚1cÆ`›ÌÜܼù4X>CCC/]º¤  €E+èT CâÿÎÎÎoÞ¼‰ˆˆøÁô;99•••={Š ( úáîåå…í³øøøV‰sÞ¼y‚‚‚ Gh0ðõõeùc:/999êêê/^láôÐA$ÙL€¸¸8“½{÷ngðIÂ)9sæ í^´hÑåË—qœ}ûöýêXö¾Úìed”€NMZZšžž^Kú¼`£ªÙ[¹r%ŽäÆ­¸¨:VPÄXÆV^^~Ê”)>>>͇߸qcnnn3c*vïÞ 7”€ÎV¬¯¶aº»»75…تU«îÞ½{úô韔<~~~¬g´ ˆ »˜˜˜¦B*++8ðÁƒî•““»ÿ>ÜnP> ,{³gÏnfÂŽ®]ë?I°…·`Á‚¯.ôÓº&àòåË_¾|Ùp&3,{6lX·n]=ÿ/_¾ÐP>&ššš$I6:Þ¼gÏž —fíׯßõë×™ì±Øºu+b,ìîïﯥ¥Õ0Àùóç*6øòòòà.ƒòÔbgg'))I¯dË·.^¼ÈîóêÕ« &äççÿÆÔÞ¼y[~²²² õ ëñÙ³g'MšÄî9oÞ<¸Å |õÙµkWCϲ/­ùéÓ§ÌÌÌßžZqqq,{8yYYYõ†Îš5ëãǬMŸ†V Ê€ŒuttÙ=ׯ_ÏroÞ¼ÙÈÈHII©í¤ùÁƒ®®®3gÎ6lËÓÛÛ›=Œ§§çLX€òБÁ†û& ÖX‚Ó§OOž€&áàà X0¬¬¬°CXX¸¨¨¨í§›}bbboÞ¼AŒa´g½×–(@}°yG;<== ÃÂÂh-ià¤â4cÍž4iÒÍ›7ÕÔÔè…yP>€&?~üåË— >|ˆË YZZ¶£ôWWWwíÚõàÁƒXùfÍš÷”à+ìÞ½+Ÿ™™Y@@@FFFûJ¼¸¸øË—/ÏŸ?ÿâÅ‹‰' åø .\@Œ“3gÎtrrjwé?räÈ—/_JJJ¢££/^ 7”à+ 0 77·ÿþ í1ýzzz£GVRRJNNååø:cÆŒ9{öì?ÿüÓ~;FvïÞ}ðàÁÍ/T €ò0ÑÕÕˆˆ())i¿Y8zôèáÇï´Ú(ímmmWWWz= v _dd$???ÜMP>€¯#**Š­¥É“'·ë\¤¤¤ôë×î&(@gaêÔ©?†ëÊÐ"þ÷¿ÿµ÷,Ìž=Û××n%(@‹¨ªªjïY7nœ¿¿?ÜJP>€ÁÇÇ×Þ³ÀÁÁÁ¾2;ÊÐ]ºt鹨¨¨€[ ÊЉ”¯´Ù |´>•Ï/ÇGefËÈpõëÏ%Þ‹³[wާO·¼y³ëí»/O+ï߯èÒ…ÐÔT6lj›ÍEéÇûqWb²³?ÊÉq÷íËÕ³W×îII½zµëÝûê'ùU÷î• tÕÖT4x,ÜtP>:#ä—OGŽîââBÆÆB††<††â ‚pˆ‰q–ç7®;cóÕ£¼ÀÈ“EzcÕÔLÚŠU÷ùõ¡ð^½8ŒºM™Â? 
Ãà½CóNœHçâqNNÀÙ³ÅS§h ¢Å”€NAuåÿÀ=ÓŒ…çÌþ¦ed¹W,ÇùæÄ oÁn"ã&XýÆ\”—=ØqdÁQ++Ño:PAÊ;xð_9¹þšš3¡H€òБùûooÞeKÅ$’éÓ±dþ/8hÓDÃÉÒÒJ¿Ü\%ƒ‚½&O\µª÷D³`Bü¶oœ?¾pi( |t4*+žïÝwÐÑ¡gkEhc+VZzíàÁ‹ ,ûe¹øPr;êdŒ­­XkEèìÒëõ«NEÆ&°ž(ˆ'ÿ)*yÔŠ²GÃÏϱ`AwŸ­V,_÷ r‘™¹_¸GÙ‚¢­mÏ^œÆ&è—ååà§sÿ~8_Éðá?kpúŠåâîîî?5—/ŒøƒKP»]çåà§ó8/ªk×b))îŸzw÷Þ~~×þ¤øÓRƒ²Çñ³sáí½aåJ°ü@ùh·|úð_EÕóÁò<¿à\Îν~’l<vQF¶ZPóäbåJq??Ogç5Px@ùh—þ;º;ƒ´D6N 26¶mÍHɪ´´[ÓL…~Y.ÅÒRN«œ 唀vFÐÞM¶‹Ä~ñIÕT«*Ê?ððvo­½}6cAý•Yàà ˆ®O ü€òÐÎ(+}0yªÐ¯?¯”4—··kµy>¿èÐÚýQ[Â#x «'(팀G\]{ÿ–S/_ÞóÉ“Œ¾}U~<ªÈ“ÿ:þåÃ,þK¬ôÓ;~(K |´>—çÿõWvβü¼K·?ÑnN.îÑå[6Ä¡” \H2è›ÎÞ¥K—ãÇÏ._þ£ÊWøì’Ýb±ßu 8¼·ìZ¹ Ì>P>Ú»÷†;;÷b÷É>òñêJ''ì~yÛÈð;œVC¿Ú[’¿¨Èï;à`ß³¬´ˆ_øGrvÝÍ­÷o¼Œ³fƒÁÊ@;aÄøSõ÷7`¸¦Ç:9͹lukâ›ÔÄž#c/™‰Fy±Sª–%ì1º¿sêÀJaaÊæ#ˆÚîš:ëV%xôqÚa½ãÞtÚ·ÚߪO½Sñðv Ùbe½üûó@’3göø½—±OÎ3ÿì›<ÅJ(mšüÇç´4Y¬u«µðÖî(SY1¡,{©EÁêB¤*ak¢áTp͵&ÃÕŠ3ã„åìIr{ Ñ žï ÂuCß7qÑXöžTs¨õü ¡ 3Â=~hø]Jêþ‘#¹ûÅ,xöJ(mKq™Ö.Ü“|„ ŽÔlÈÞ‹ÓÎ`×̓—¯#4´ ?”å1Oàц}FüÏýÖ#I},lì`Ù³9¶I_´‹ßúóýáç‡%oÿ¤|ª]_¥&Là&¿|!8¾sÖ•+WžÙû·_̱c¡Á”ïײËløÇoȧ‡~ö¹ÊnïÐ^x¨¤$ãáCJ Юù_ueXk6™¤Ë0[›c²¼å•z}®$ívœÉ)8ÔÀ€œý~½ó‰A’KN’–¬`Ø(DgÍ 4µÿi‹ ’§†ÙÅÄ:ö–&žŸŸãÆs#FLú¾\ ÂÛ.æÀDáó;’C¡\u2åû@ŽntÏ¡„|K¾?w¦ÿ8çKˆjKiä·ZùúvFþîVÏ¿øÚa­MMÅ©¼:9ÃSó[SRYY’Á8´w†*6ÑmócýM>¶ ú¯ö›$¤ªŒÐY}}ì™¶=${Ä´õ–lw\d»>¨š ìb*ŸŸ¡mÂX]zÓtžš›õ]ÃÄ)Øm©ì·áFã“sﻕo˜"o¹ž·³Ò@ù:Ÿòq5¹gžn¿¼‹o=~ 5 ¿¦6B—Z7·ÅUpËΌ܀F~´•Ÿß£¢jæF?-ÎOx“äÊ‚¬Þ+ÏI+TÙ_R̆ޔ±aõì(q9‡ÿߪ 9‰$­e¬mY†5ñŒyöìã÷e¡º²¨¿Ì¯˜¥³ðôaíëêyÞš óøñ+(TOùjðJ/wUaÎ{[õ6…KlvlgêA^¥=ßÜÏØ¶{ï‹÷ê³ì,'²{'ù¨»$_¾»¶Tå\¾õ¢²’ìfl¤*éF#öÞÁ±½ûvõp­h"%Bšž$éI»"–®ÆÖµ…lI¼…xzÌûk±¾²û®¸ÓaÇ#/—hø“\ìg4q’ªS±—ðó£[Ÿ?´D¡ í…êÊ={5¢ÚIÚÍ 2ˆî²bæcÆN«Z+îÛÒpH_^ ù‰ˆ~g•·ûôíÒ|˜%ýl¹÷ûyëóÑýnè¨§ÂÆ[^Ä{KÉ >F -Ý’s,'`ÆPžªü;\ýwPÛÊcÈŒ™÷‡2OA(1IJ¹oÞTB¹ê¼ÊWñ©!¦òqŠŽ´@(œr&Ð>²ñ¨&dxx¸ý3ܬR‹È‚=8¤¹÷…ïH'Kö°hÆ{JêceM?õK¢Í:R,Ñ¥êŒ*D@&š'ÏgYWÞ°Él ÙÚUUåm'1ÜÜß©ÕUÕ_ ‚ÿ4©_èÅÐTcTL=V†3OºêL€¬Ã,WUîß2Ë¡Ÿ6Õ6{õàeüã70•N}Ÿ;¯±ÏWç²áâîåªó*ŸÛHÁ†VXÌkJB4\éwu‹°ìá¯ôêëD×Q¨B(Úí/:d)£æŒ-çͽٻq ñqÒEï¸ób¦Þìi,dK©¬¬,--­ª¤Œ6 ½5 ›eæ½3–éÎÝ›t¯P{„éßàð]–}èæÓç$É ehopq ´ÄTVþï;sÁý5c±ü!¢GT ä·ã…Uêà;‘a¨/³6¡CÚÜý´­ÉÙ¦–Ó·qG=Þs-Yl_4Hžz2 ’7±TëSœ†ëÿ^™øÞ\Aù’ûžTgr¢½±—}»¡¼»Ïi7«bµBùd6[^þ3¦~-dóp–å +5ºkÎêöç(!Ô‘—¤}&ÎÞ{x{ûðÆ·¬ 
"$$G’‰Ñ”™Rzüx¬í²Â¼‹ÿ"ccÙ¦"$ÛÄâ`mÁr‚p¢F’+[ž˜÷Eßi€ öF¨¹!äÅY-œ8TÖ¹¿Õ¹ƒmoš.wgì,ÁýP‰®öå„$Ó°/^Âöìå7 ÃÐ $b8±B„î 2aè)Ït•s¾š˜¾@¹ê¼Ê·âÒ[oÆ/iŠ ª?'ŸªMí/ ••¿®B<ÝZ#!‹¾#™E„SöV¬ó8@ÌÚ²væ!!Í5•o&h¨%Ö4·ž;âJ !ɇ …Ø4øn”Í`(=@û‚£k×ç…Õ’MõÉæ77Ð 6e¶äO”ôA^«Œ›Œ°´ÈO,N»„•†$¾í×øþ;û†ˆ÷Vûòå6G“­>Bêjeu.ÅõTÛ(Æ\’ñÚ/+4B‚‰É â"3V½øæ¬%ø6.Ô¦Ó± ø7žÿeøl3SðŠ´h®ƒzoq^(WWùxj~Jä‹(¢·)% Èö‰ÌŃ‚êw÷ºTcëUÕsM™q\ Cæs"Ën3ß?^.#õ©âškmY×"UM¨«²d‚uÀ,relH\§íOàSŸ´Re ž@ûãÞ½ÏM+ ›†ŒÔ¢Èüi´U† ¦R×ú“p–eÆñ;U¢¤S+.ûù8§–ùihœeÔz· ô˜}&³æ—q‘ùα~ßÍ­,ߌäô®wjÆ&õÓæSž@’PÃþ¨¼Rõ|ô½7Þ_OŒ 4ufåc¸i6cÇ÷SÏ×K¢Ë+FøàzVLHÆ[+Q?³¾.QOi±ñ:h¹€6%2k{œ“uK¶®dÖæØ-&ß‘4N~æëÐúà ‚i´ù'ÛCg4m­*ÿ‘B5qŒWC”òQ-±ìUSïÇ•>Œ¾\Ä|’ …´/²n—éi²±Å"j£%±¶ MõºS6›Ò+m0 §deJ:›øŠ ²2˜¡ &R–7}ïëȇkU1VÛ¢WÂ[JŠÎGˆÒ3”íGÍ-wÂr £jÉâÞØs°EhvD8þáHù¬ÐA>‰(Ów á‘2KÅêïÌýs‰ýsY‡ôŸÑ0š ¿±ŒÉe…ÝÛo9ú4í ‘Íw驃ЬבV=Mö}Šy33ÂĦÞ$œQ?î!Úl†c)®2F¥§úÒ‹°"-tÜòcÞ¸ÑfV£¼}Û]IÑà»sñìY›E—Å«¨Ô UçS>žÛ·Sóµ–go ç¼sšÍ6éyµ¶dW¬i±‡6xP?¼áSö¬ç«m… C¶8í9‘,Þè®}aÑS —¤ÆOÅ )Ðkè¡Ãa¥‰;/>ªª(ïÞLê,wúTìaH²t‰¥ÙƒwŸD†P½W>[š>CüSÌÍ”†³·®wõ8“”Žý‡jlpßÔO˜º>üb:T6+˜£ç•—\ô*÷ãáAÏÏF¡A3  íˆñã°)–ÜL€s&ü*k‘Á,ì6%o£“p6=i‰ºËb´Ô Ÿ„>¶™³œ¿ðbîœïÏÅØ±Ãúý“§\¸ôXQ ÊTg´ù$úÊÚÈÖýI΋1œ×hV«¬VµèduBNµÿêdyæv. 
üøüÃbÙ·ýâj7x%×û¯ohåõÓuvÖe÷quu†¢´Gzйx1vܸ&«Œ|ÊŠ&úÿmºo#jtÎæ‡Ç†zá·VgÝW~Òú¡apÇOÍÎöWTäû½sø00ø:©òÐÎÈò•ÙkcùŒv”EZõ¤j±Võ'á,®¬¦×c ¡š±OÍ´ŸAÏm‚%ƒO{4' Eï…ÍgÌÿÁ\ÄÆ|ø½Ê÷ßÝ fBqåkJžÒߟàÆÀOâÏËŸìí#ÝdOÃ?2 v³Þ$œBÚ&¬ù‹˜}&±Ï¦ÏØ „þh¾×fPðÝU«¦ÿ`.þú˦¬ìßo›BåÒż!C 4òµëóÉõpOàgÂѵ뱣ï—/oõ˜iÓðq³ƒ>²µ™ðãçê.(î³õõŠŸ‹–ðâ…ˆÝ|(K |´\œ]srv+(´òÀ²K4øûç®\Ù:ýÂþZ´èõ›=Å~ÃÀÚèèû‹sAAå ™}œwî …_}Þ[·»/[º°µbëÖ­ç¡°{»_½RØù ÄâÅ®PŠ@ù¾“S!^÷ìÏÈ ÖrЙhºÎ'P_¡7Üø˜›;mݺaù/l-üRÍû¹Œ—£kk>sìíÜ‚ƒ6ÙØþºµ—>–ËËÉ@ùåû>DipÏEœ‹BhkbzR•–¬W|m°Ö&ÖÜ¡´ç%«._40èökNød‰óšVvÖl«œœ£­ÞrÛ”µ|#½ROO (ß÷`Y#{‡î[ê ¬*~®!,ÅXùPRq¨¶®¦´<¶9Z›ð!~~¸ð ?~N®j³Ó3Žªªüôߎ÷–—+W­û1wëÖ«{w'O“ûöáþÙ¹ðó{áüÄè,Êwù½Ë¢â“L'_éš-³˜b*Ë…Ö÷#ª$Y÷.’¡iIqáá!Ÿ?P+KR&#Y§Y¿‚ çdªY²¶ì¿%K–àï›ôæ‹-mæfÎvD- ¡3ÑÂÓsMB¬#À¬\µ.åúÉâO&NøQ³‰$y¼½ó—8Í”•ý¥ÕJŽ®]—/_wþüÁnÝ?hŽúÑ>/UUÝ·ûÞ_¶laëöG:±òQðÅdÒ_óï$ê+è2Ú?×È2¥þâÎI[Æé0LÀ¯S‘ËtÜ ¸]§U$i]³j|â¹psÔZµ+NÞ÷6EFŽ¢Ö¤=v,PDôúߣ_ªv<šd¤·j•åïÊÅ„ ÔŠfl2„[Cã{º½”•v ¼oa¡¹rÌÌ Ê×z¼)¼wþTÔäÅnôü~ý†êäÕ4Z¢'/†gÊžÔ"²`þ–%ˆGMEÍ#OûÞ¬vQåh¬6J™q§Ã¶oX›ñ»}¦Éy·`ìtÌÍðÿää©i¹ÆÆ¢d¿¾%IrÄÇ‘YY/-çsq™Ýr±pá2üÿÒ¥°Ü{&S…¥¤¿þ¬ûRÍsþ|iþÓâù–¦ y |­MEBOÉÑ”Ãî>۲毙ß g»e6`*ϰ`|g7*{/ž—"$‚§7—ªÙ»”LÆm¶¾X,9tèKËá!»NG[~"Qª%þ ”Nj”#$eØÐÒš®¥E9îÞMHK»ùòeiß¾½zñvëÆÙ•£Ke%ùîýçgÏ>½{W¡¤$©£3Iß —¾A›ËÅØ±–cÇRŽÛ·/Ýø÷Vñ‡ i)ž=yp&˜¹xú´äÓ§/Ç÷ÑÖ26šoýAù~<º†1VÉ;D‡êíL½AuK©¬`.×@)ÓXZðPævMÕt匄LfÐÿV­\Azû0WÛ7»oº/Š»IƬT0òÎAh/AÜ0Õ}•@vÚÿÅBçomŸP?aja!ÂK3—\Ù€¦å€v¯|$¬Ô Э>¸`õ`õ`õ`õ`õ`õ€Õ€Õ€Õ€Õ€Õ€Õ€ÕVVVVVVVðû()!ýý7ØØ¸Ã¥ø-{¬X±Ž——€KVVVVV´ª^%>þqŸ°@½_+ÅZ™|ë}%·Ðˆ3´ùà*íÉê«~ì·ts.ÁÇKm”•—õ˜±ÞS_,Rš¢Í:=Ü’X›º—‹®ê Áeie<<<à"´MÖ¬YÇÁñS&$Iòógü®qK!Äɉ~ÒíȽ¸vÁ|Ï'MP4Ùuh÷bñï½_eÏR.g£-/ök.Wù‹D·õë°ÃYɼլ>¢4ÉÇaû3„DÖOúMV<- îî?wÎÛêjrÓ¦ p;ö]Àêk VßÓË;öŲyˆÏ]®/!·‡†§î&\‘ŸÌ#ß öø\øøJäUÕÈÛÖøæ;Òúõ’.։ˆ0œ}&úÙ¾DF] ^Ÿßa±pÛÅìh;•h»MçË(ñ~»±T´ßqÔæT¤¾"í—Y}?RxÙ¿ä2xZv&ºp{÷üt{ì$ÜÙß ''¹kXÝ@g°ú€NKUéç>=EHnó…ýKëËTÏ .ÎHI}Ü¥ßÀþâ"uL¾œ¨åÖÛ¾!œë™´i¡'ÃÞÛ6¢õrŒAšº„å°;ùfµ†h“Iúò(Ômž[xrƒ²n=²nΜì~ÙÛl”¶_®p´ûq›ú ûîú2E3_ìp9ž!yÂx鉧ì{õœâ"Vj÷š¼pçÅ:‡MôË8°„ù’“•‘õYÙîTˆ×ÛFô¢|ä6§’si_÷¤Š&ûïZ( Å ¬¾¶Oar°µ¶mlÓ¬·Åû/S¿¯KõãeY\hò0-Û˜0?Ãþ¬:DÑú~=<ت+.½õ6¨óʱøÚa­M¬ÍþK¯<Ú6º^ý%i¿Ë|«?¥Ñ¡ôÖ*õk5èÅ%KÍqá “¿“K *XˆzII0¬¾û®ƒ%\5æx9[ÎR•ïÇÇfZñIÔ“ª{ÔóP‰ h§éº8ÏEzÔƒ•(Íø{Õ$—k´#Ö 
mÉe³e/ÇŠ.™—‚‚müE÷1UU•œœMv›({°c€®mã¹½l«Ó6·ÎûÎ\¸íâþåêû—Ï‹*ÏÐÏ—íUæïbVÝvæülUʼªzŸ¾ÕZmÆìú1sò1þÛg¨ µUg³×ªˆð¡ŠÜm³oOEñúx§áÞø×SäŨ3nÂà5tÎYÅÝ=Ë´‡»ãë@Y}Ü5—‡“ésßU}$Ò[r*Ëyª('õŽ4rõXÇð¬ìè?•ž¾¾{f• ”4°úÚ._ÒͺªEÕõSÑÑ >>KÈdYVû–éí[†Vœyá=IœöÉÜm¬bwºÎa2*:òŸ^>ËȨ9.9ÈH&õ^õ¼p³ÃC°BlVOÃZ7û–OßFN:ÜlÅ8ÉÔ¨€ÄË9 p<ý|VÉ*m0ûÀêk³gIJ›|ë®–{è²s+Ûe6xû£~ÊýûJ è-Tø_gSE×ϲ[_ÎEävö PÊnïP¾Cjâð¾RR½{w¿û¸\¢?ï&õ~Ĭ:&ßÈ-E×W2OêíP®-18˜¹/Fc€+mjæŸXT×äS~vÓX’-ƒÓùí#¡ ÐÁá”Òu Êt ªÙ®.*È»w'ãÚ•ØÃáq™ñQøƒ½MýïΈ*þ»•C‡K0—mzv™ûßiíäD­n´ãh}ª?¾§rf#{×ÛÇ%¯ªŠš°úz÷êQ7ó$S{u¯3] gwf_×U•}E¦¤¤»×ÓtNîš®"0 ¬¾6 ¿@OöÍ £y™c]ûëZLm0Ú`ìŽÛv g´äæ®ÓÝÓO˜ð£]:-ðA£'¤W: q¶bJ‹¢‚Öñ(ûoóÊ%Ÿ?³„n²ï}±%(q™‡N—¿°{ÏÝÎfòaøì¯E€Î¥NÂÒƒ4ðg¬¥ÞŒGì„¡fYE-Y½`òq•/•åÌpÓ/<=®ØŠRF<óúCz'ÃXS\qØsÛˆ¸ÊP n·8¶ÀU3%¯ÞH°îü¢•Ká6`õµÎÁ¶dÙ¨•”|êÙ=ÂwàÛüÑŠ«î¥o–cr|V‘äÈËføÖ‹0ñ\8þ°O“´âä}o“-LÉÓøŽŠÜüOu}n‡ùÜn.ª…EIˆõ“FIµÁ2ƒ{ÖÇ3ÂÂ%FAY #Ò%w¥ø`ºÇƒ†SÜÉ•z†z—w7‹v‰ÉJð!DŽž5 ¥œÅÛ'¶„Ý:¼pxÐ_²W…tÓž¢¯«#-X§y¯¬äýWÒSñð?ú‘,·¹®É‡î[À2ù*é¯nƒG* ”„^ùÿ°zµ[ðÿ]ß¾éó)Ž8á>`õ} ^EïÒ›vWÝÏIOKù79>ùrÔ¹:3¦doĕ¾–ÝÐéÛHrí..¼—~#íßëÉÉ—/ÇfÔ™5ÅgšÜ¥ÕÉžš8öŠu .®úR²ñDòÈ÷«;çŠïÍjUޝå­èM~þ?/!ºKVTÜ“¯­ñåQäþ£¸òH é/œ¡ÞT…®êUÊáãW>HDm¶™F?¸lÐÿ“÷~Q e"½9¥2§3A} G —ÂÏò/rsâÙzZλ²™1›%—Ù¾WïMz­OEWÖ(K¬A¦+§ŒŠÞß8³)0…{øÚô›Òt÷Î*æ»ÁìЩãŸZç{ÝÝÈwõÔ!=̇è£ø8jr%‰H ;#)AôàJxd »p$˜Ï[´mî±ÆË"ŽD ŸS¸k¦Ø.¤jáb*K¼¸˜’_úLB¶ƒ†V‹®F·tg«_g$¿v==ëéƒ÷o¯=ùù{Ë)©¨iêŒW‘ƒ²ˆ¢À±=¨—¬Y:«Õ÷:éÔù¬»yYwïä^]t+jÙ09eü±XÌ ‘é?NÅùRMøgÏ_•#!Þ7÷Ï'fç=¼“+žZz\]bþTü±¬ ™®K¨±^f†Ä½ñÔC=õÌÇ¢[¬ØÛÈ•¦ä¹šÍüì5\S›Hªð„icÝ’j]ªf?‰íXLÜfëíw»(ôî-ÞGL¤›˜¢þ4eIá)3§º%ÕŽÜ·ÀÊܘ}ö²¹Ð½³ ÖfžžZ¿šš]®ËÌêM *qÙm3µF³º“X}Ð$¤”ÃIÒ›f§%Æ$&þûßÃ'ϼ§šÒøGÌY9_Yc”ÖhÅ>u;ó“=mN’6½¼s2"ìpÊ­Kÿçñ¡ÒfN^Ú“'鋳÷óUé¿åÔ½Š¼d9×°½„›HŒðüprþÇ쿃#¯Ý{ÿúÎó»„„²CôÖê2T]ÛwôôÍDææÞ•þcœLŸY)…³Êž%ìß±ãìí·Ï$=/å2+dý¬?¿$OLJ 0P ãæí­ãîFÍÝWwP“r 4_¿IÛ²Òãëzù©² ôPPÓ‘m¬ž@”äf?¸ÿ€Šös’WÑlЬ^ù¶à1#æ^2}ê‹­9iý]eïr³oß-xÿ‰›}Fݤû –W$ØT6³²s=ùTÅÈš¬‚’bý¬@C8»÷ךd¯5éÛŽ:m™÷´¯‡ã1[lÖÂH»)ÎZ|ô5ŸÐLj¬—WÃ]lKr_ô±6´©;ϤaxVŒþç“sGÔ_ªÛýd¡­FY&¿¾¾9ü‡¢ùf¹FAƹõv\´Wg.˜A¡¤¨R’ÁV’—¤½`¬~ñPô)a®ÜèÚŽ¢®^¯ñ—3Y?®cY1ÚtÒˆ¢c‹å××íúƒ“‘•]ÛTkäsgß\fâ‚‹ËÕço«Ù#;R_®4ÎzQ»º—ßtƒ¾Ié ÌxâÎVýMii©À5:9…•´>*èimH#ÏÌ}ŒqÂß”å#X}?Ÿîc±‘Öä^AUï˜|ïoUÙbkžÅÖo?NÒ9ò‰s“{7—TÆávAÉvAßvÊ~×äš&w'’ÎPT;uWÖB(ÙKiÆÎl\OÚutE 
]Wè*£ "éŠÅøýO.¬­ˆwÉvWµ™˜åÊòŪ‚¨"} Óä“=õp›Bœ´R·ßÐÞE[æÐÍêµ|l:5»Êr.Ò&ßÿüÃ3Ø+p•gܧÌë…®|¤ë,ÃWY³:™Âü󽯳G–¶KÓdÓõToýñqV¶uåãêE;vÚ˜#¤ë~À{ì þJ)Qœe§¶ô\ü–Ç¢´¥©&£²W [I 2W Bã#žœÃ‰ØFì dº)Îs®=«È»û'=,M#ãƒ,4‚þ +Üh@YbÌÛÕX/¯»j—ägˆþ H[7ý¼ëÑŵZó=еP¢–1Ð[t<Ëuº('*Ë?i1Ê×o<¦ITžaÔòõÍyᇠ|3ãŽ^Õ5ŸëhÑU WÔ³«­£®¦­¤$'ø# DQ\ë’Œ~âì‚üüèÜ{zÈ—ÅÆÜ·5”û¦GhÕ“à¾#mûê,½H[ƒ¦ÔjбæJ£e]Õ®;ü®ÏÌ©ÛÙ^!º8An|ŽyE`îÌ=ذÁ1ט|ó.< eŸo6ÙKsÆÎë]éØpe=£ó šÒòRQtvÂÐÉŒ–¸égóŽ×®I”›`K>f²ƒcy˜o ãÈV­ŒÆÊPv!Ä(NG”¢gÏÎ1h½{ajÒÿ—‡ÚE2Ú5ã/g;¨j!Õ…älT<Ëøïæñ#¯ž½{ûêc¶äéýI*§`Olêá Ü•%ý$– 3§)Úã54Ôd¥Å&{œŸ\[×Iù»fi²áŠñçOUÖÆÁÍÕ[!ªr“à›á¨§Ò^¬‰Aë³®Ô¾|{ws mò©;%E³ÙÏ|½t7D‘FÛ5M¶]Gæú§NZ®AU\ºaã‘aõñr °ª¹"rÓv¤Rwê‡QÝ›zsJæ†Ì¸Ñ«ãqÝ»ÿ ÉßhÈ´ÏùúM Ž\­hF%Ký÷±ƒêðo^ß¾õ¡Ño^Já<êáó,=1îô¥¸³‘—3³ãÃð‡=˜Þ’SWLýfä’FÿýUzû#iÝ?ͦÑ­¤('*93ââÌÆŽùú#4ûl(sW/m”{þvíŒK€Ku$J¡Þ %ì=ñ¯ö_°Çà^·×p·Ó4PejÞ-)EH±b³vI½%F´\¶éíJÇ?WÙL¾oÕ”––Š´ƒ›éÎFžnu”ˆä§Ç Rþ°ú¨+ðßyYñMÁï•Ö_ YDª¦Zß12¤"{®«¦¢ºÙpEY±žÝ» tç*ð¡=ù¿¯'93¥pfÕ»ìÓïû;&0%2 52€ÝöØv!n¶¢ ê*0 JaôŒŽ\šÔàxß¾²‘—Ÿßž&îSŸ ÏÞ¬ÿ$í_f½!@[" É£®'d# -jRм·Óe·§¢ˆåêËk(ޱµ¶u1Ó‘û±Ô ôªk“qÖÜáî=êÜjN>nÚQö±¼ÌO^ßhTfœ ºñ¢Š[hČڿqH‘ ”êäyø³½™£ìù%ÓqáOQ¼¿qߨÍw¿¹qk²÷ÓÉÞ•¹×Ž9r6a¿þÔîî3+â‚ïÁo|„ò‰ AˆÑéï•¿Çò3 N*Û·/zò$»ÞH¼–,ÉÈœà[çþ óÒ±M€oTºzõÓoÒ”–– NnæãQmXÿÖ«Jw@ùÀꀖ2n¦T§‡ÄÜuš=¸1á&ŠNîaŽU’ìñcçBUõ½r¯D×Tå©VÅ´Ý‹˜ý0=3ë­]V vkû?ûëÌZZ5ˆ˜mWËADÑÌ~þ°UÝbœFLŠAéËÆ ½;Sì0‚Ÿ›)uª¢êöê(T}fÖðŒ6ÞÙ÷ç¯À!³ì$¹ŒvW=º—žvåR䟔+AŽøƒÐÜÝù>Æ}¼á³ñÓÖ7ÚDé̓Ž©ÔàO“Ú¿ôÔÕÏÏJN¿~F<:ï¸zcF>ɱÞׯ=—Ò¤ÞqÝÏ}Z…ðÓWwõì%yMË øÃ–ëøs7E?ý{îà{oîž]"ø-P~æhù†=ô~ôWcž¼œn«Z§ƒ_ÕûìœvSª¾õý0|ß¡)_/ü¢LÓë`Tª­j]õ.;õ¿"a¡‚½e¤© XZœe¾Ž/X}ÐYáp9 æÿìŸH_ɾ¹­×i þ¨}K+8°îjlÚ$í( Ý º¡kmó7õîXo5„ÅÕËO©“…?´ÜL\|Ÿ4å?øëAò/ ¾¢Öö‚㘺¯ pÕec×[o]M¸~‰C¯r ,ÂNŒ¿‚b„W¾¿[%e¸ õKó&R ‚†Î+ '1áÓžÜô‹l³û°B"Q^¤U aÆû:»P…îú ™EóÖÊb\ØÚ’Œ—­mœ+¯½º­5wB)o±eFH«Ÿ|Šf”ÕÍ×ëÈs—fFûÝ$ŠV<œÅ€ìß|Éð›1×­5 ›î;mxÛm±Àâ€Ðkº‰^[-9—]—»Äÿ²'q‡#×ܬU‰ï™~ÅSEùñúèÎÇ#RݧÛé=«ô1a?”Î~ŠZMßÛ'ÏÌ âo=ás:7é+¬ áIÿËæò5Á(òð FZ›˜^ü'Û~Gû·¤µ×KwiµÞuQ|º±éB 3¡5î_@nŃǼý..§ÃŸ³E‡^W“78è}|‡TÇ5Æ<å‘)Æîÿ!EçgO$ê0‰wÕßyNŽ­ÙJ[«irŠ™^û$7ϵaÏ6*óõéÌìq§Vk ôŠà¹·<ÀÛ$Ÿ´[°>-ÜA!Ü¡g‚¹rvM§hj/ªÅþ’€ž‹g…}>e#~0 Ü&¸hŸæÛm?HGâ¸üzDŒ„+T÷ÿ øM×÷Êý§ àªÀ º&áMÃÖÍv>~}±Äõž—ÄãëîRô>ä1ÿúÀõdMÅsã)”¬­=W¦”j$¾¿¢]Ê 
4cv$Álqb™ÑCÁ¾.B*EVzwõêmQWûj·&}¤3oÕcã,ß‚>¢ÑÚuTJlÏ ±™{Ä›{ôïñ‚vÞ©vÞä}Z;»ÒæëÑ•vk>`kAÁV¤!?úæ…И§O££¾p! õ<OÏÕ_¤)Ïßµ*­J«ÂÍ%ñÑ7Ÿ={ùåKrDFÏÀ!4m“û¦yóMåùºî7MÏ VçD\~øêU~Þ›'ß%fŸ¸>_…?)øØËÚ&júi=ºä¦þ¥ßmþ;wê#¶K/‡ƒ§Öî8œõo>ŠˆM]Dø¸~ÜFA_|üP_ªÕÿ|~ÔbñÖ¯ù·lÕouV¯­Ï>)1k=Á¹_qÒi³IÑÇ–Ø}à¿UÍkGtø~ÇS­~µ[ªõƒ=‚È8ݸtˆ£i(y|q“Íþ À½çv¤¤¼h»Ï>à;òfÍÍ÷vvRí…³9×þíì$àÉ6Ïô%ѱ!ž„ÊÞâ'Ï;ÒwöV‡"½!2p¿2áJh|ÊQÿࣻ.ó¸]ºÂ1p¢J$"ȇ=ö;yÓã7Ò7áøÄ9‘ 9nÓÄ õºå­üƒŽʦ°a›ç:éýå[/hîfî:°”à {_dRàÙí£Û¤åÑmÑbúK¿¡mùùi.Ñ.}WçÛºøu½¯ÓÛ¢Ÿbë®ö±>rè0™<û‚ô¦ûÝ»o ï”]R¦§ LO ã!™×®¯ïé»0[³€?v%.bä{åÌ -a›¤À–¼Íù‚ í½Ç a±èST!Q ¦Ý¨ªÝWWÚ]FJ@ù^ýþd0¨¸ûÛí8˜Éʽ&¿„S³êo—ܼJ–ë”v8ýíß|´hF:‚?hͺ¾ºÓ¼ ÍÜ’)Eˆ·õ¿5Iæ±j,½¤KAÁªúz†žC”çÖßq ŽÆ5 È%QDúO•оã7Ëéè:‚âÐqÏ^X°6°3믿ã¬B@kb\[\²o}p¶¼>¸éúßhÓqºM!ß®œ¼ºÐëîãåöîN¾æBTßïo X¾‰ ãÎZw¥œVj]@®¬»‚o:êñv^\àÛ½òíÆë¥[µº¢ }þÁòÉÙEßïj‡F-gz꫊|G'v :ô“öšBåM8ÐO¨)É÷¬ìÆ>Ó¨= ~W„ÒŒÐ]½v4ß“~ÇS¥—Ùšt­Jmé~¤ØËX¸lHÅfdžUËmÚëà±Ø;ÇØí!rßáæ{+;)ºŒK;£Z,vÙº‘šûYÈŽÿ¢vH_rSž¡.äÁ€Ú¦ƒþ+ÍnïIÅlÙ¼_­ˆº¥-âÛZ°¬ºGÍÁYZŠÝú‰«++%Œ˜˜©ÿâóUŒP{ —Æ_¿ò8#¯µo1 ‹e6ºr]oßqá¡‘±Y…ùèïrÈÍ6³6Õ’ìó³â/GÊÎ'&§¤¥9[k2÷¯ç¢÷ôTæ&†‡=ËÊA‘žŽCZMw~/ÇÒµ“Ìø;w"² +<Ÿ˜ˆìÝù‹TQYáò'q~S‰Æ6=ãã#DáfëZýEÞU¡Ó„úöAû¬‰,ö²-M{ÊNúOUH»«¾YUчô·qÏî]=}3.ð-: Ȇ¤ÂBxDùŸcæn­Ý|»³ö—:„:À¶Àäñb±Íu}È­“aÙg5ïnnëÀÈó¡Ÿ®ÎˆŽ©oruÎYôåC‡_ÁöSªïy%ˆJwLQÓ—Š~ùúáGJëãÛõÑG1 ïd t˜·ÂÕ ùà;WÇ}ïxz¸ÍS¡”ßêëâèÜw71Ä·‰Y첬­N&íôõ £Üÿ‹:³dI`eë®n¶2„VaÊs-‘ý­¯•'ÿž}¿FUЏ]Ù„œ Ží¬èªü^"a%Ui}z]Û¶ÀXŒ€í¡_™àRéú*bÃOkþ߇åë€Rþ*¾— wœœ!ž#¶ðÐ ›[ˬpwÏ8m¼Ö±ä³ót®cöwrüMÚݤ¸ÑvŸ÷®pFÿÙ½÷?3Á¤ó[–¾ßµðݶ—¡Òгþ» jãÏhÙ%Úö©žwEÂúvßGË#(ÚÙÎ AîïqF÷¤à~y“L]ÂFM»»D[ºN'„mP9ég:©úÜŠYžDÎ2n¿ÆAãªêÌmSõg œïm<ÃãC¬Û¤V{—sPKܵ#Pß—Óó'#wrNµfÏô„ï°lí^HåÀu/+MÁïÏýí,öåtK’¹bI¬!ÁV—G¨pÎïÖâRpÕ³g‡4EY†ü›–Ä*WfO¼‚¨-Ûi¡Ç‡Íþïü¿Ûëqª\¼Öæphõt‘Çm‘iøCl× ² ÙOB¾?á-—¯:j£¯>kâýýˆ§JÅ]V~ðô¨“ õF„Õ–™¨r"Meùob|ÛƒNÞ³iÁ¿ ïÙ$ç<ãsZÛ2#Ï=yÈÙn›íþßã¯)„ï‚/lêx b$]ÒCJäÍ÷·#_ë1º[h2aKïøu&ÞÁ/×IþÝwsÌØwqå{Ÿ<äÃþ=òFNƪ»…~mí $­j‡& \NÀÀËú¨»MêhI âWqß.iv·|Ç^7lRîp6à AøU+è´;Ö—‹óÝ——yhq ãÜ""Ý÷·8±î&!þä¤CѺڛ Ž/>Þ°V®mç"GðzÓUøÌºìPbøëÊõÅ}“i´s"ѤÀ,-yÏ7]é9°Ãœ¸ý//¡Ëê^À`¾Äœßèìr7çOÒTýf™‘÷níîsĬœæxï|øËšµŸsóˆ§{$µU†iöÛ”Ü<ßÀ-ÉÑØ\‡0É›;zš;þv=&y«Í>V›{Y"6ÓÑuf÷ûO•cò;tèG:éæ8­›Óë"!eë4~·-†WÞd9:üþú ÙÚi ÕÌv¡CÙ¿„~Àõ 
¶Ù–êHjbçtÜ~ï7{f*ýv7Ig¶W):ÚËòw|¦ÅRôë×hñýË toõÞÃ×U ZœZ¤{gÝ}]#¶_QÁ*“°©në>OÜv½™´0/3'sí³eýú›ž~CûçæMH\ÿ]ìÚå†kÆÉÎÎVT˜lnnq5àú8œíÛÝhi)Fh瘶—©uBBÂýû÷]]] 1«€¤Øíæ†m"¿§¥¶öô—IIÖÖË|ý.“Ÿæ»G¼n5†}þl!×÷  OiiiUU=- -™‚›ÔÀHËõ!j;.­Ü)íÓ5ã–2F;*/ÖP ×Õëý–I9´6º÷nÿ)SñI‹‡«{gGPÆúµrÝoÔÆ„3‡3ˆgèÌ•'C…ëƒÜ»Y>¥]ñ)D^roÅv[}xjãÐPw§êêý`‹z·â¾Êä Ï\¸þ‰î* *ª±p sçh¡ãâžÞ¹}ÓÒÒÎìo   pww'µTµ´´ÐÑѱ±±=}ú4>>~\fÙMi)öÍx 3''\Ÿ$ %¹å«>|ˆZ>t$(èªÏ9zzè¶÷÷ BžïA333Ôò :8,»uëœJׇ RšÓ¨ºæ<5lý/¦±lÅ=E)~¤6ÿmìm?¯°_ê3Ί)i‹™Éêô! v²mWY„³<]âÑÇ6ë´=‰q™wöÊ-ØG¼¥èÚh-f²ÌÇp "HW}Ë7W“\fªq#¸²×»Í¦zöˆ.špïuåv]Ö!þ&ýÊ—œCþéšq}1u掌ăm½kÔß=dMÜ£0fصkWqq1:‚þµ··700`ffYÈ UUUWQQñí۷Çoß¾}<†“—®`xAo¨¹sçÆihhTTT²²²@–1C\\\XX:’——÷ôéÓÇÏž=d€aq}ía9ý¿FZ‰˜w Õ5þj_ˆÈÝi,Ü99É&¯gmÄgÒ¯%pËìÀ-½n*~:áõZõÁ–‚ýå¶ìô¶'Îz…ÓÌ;¯Îsžh£ÓgŜלê´Ðzl­zÛòÃzŸw’´«{×Ä ³ªkVú!9†C}oÁˆ@U22§¥¥åÈ‘#“hæFWW799”!#"""RRڟ߿߻wï’%K„……A4ãMMMïß¿¿víÚÒ¥KA™±¾¾~çx]]½½}nn.ÈÃäú¾BØ8„Ð%®âMRllLì󗟾µ/fà••’WÓ˜¥­«-+ÐGj^㨶=”~LŒŽº÷ðyræ—âv Â#«®1Ëx¾¡¦߯Û1piß!ìµ±‘_G¬û1M\rl¿Eûbj>nŸŽiÖúÓ«io)ÔøS@¼k1ÍTÇ£M íeQ+BÖж-nl ™Ý=¤Úäù.Çô’*“×x<® åŠÏ•ð'qµŒ|HaÂ+«¿Än…U{?òkí6Þð:qëuA«Md•R±h¼†ý}zz&©Ñ J½´ï„+{}Åïjø½¸¢Z ‚°ú‹¬¬Ì DØÌÛ§ïäàÚwÞÀÌw™£®®ŽÃuµ5­¨¨(..vuu=pàˆC.XYYORRRΛ7Í¤Ž±Ãdl‹M‚-*û‰——333š¹$ßS0~ZŒjjjÖÔÔô˜éèèˆÞk@æ(((Ô××wNÆáÅÃìúˆü›’Æ"tØ8ØpMR·E‡~o™µÑuVßÇ$j»Éµï­Ù Wl2ì}½¦íf;·ä×X¾³ï˜l¬2ÆeúN3¿òŠ=èÐÇb:QË'iƒEõûôü1I­¿Ë©²b:ô²Hv³,Üc…¸¸¸W¯^õ˜ùýû÷3gΘššN:$"} êêêˆç ¹Õ††† 6 6c,)%%)¶¨ì'Ë–-«¬¬¤¡¡QTT\¸p!\·¤Ì¹sçzm‹Åb/^‘5LOï;ýëׯÞÞÞ...¼¼¼  ·ëào³`Á‚^ç755éëë—––ÂWmçÝ»wÑÑÑ¿ÎÿöíÛ•+W–,Y¢®®*ýuòóóZ[µ———ÛÙÙ•••QQÁk”D),,\³fM¯‹š››ïÞ½‹B ŠL©¬¬tuíý?“¥¥åÓ§OA%×c GGÇŠŠŠ¾\;;û¢E‹þûâ÷4³fõY¡¡¡aþüù%%% Ò_GII©Ó6022¢nÊ‹H ‰ß,Eo+ssó¢¢"ŠL™4iR_‹Š‹‹©¨¨®^½ºlÙ2 ÀõÀ¡¾¾ÞÏïw]>&''ûøø¬\¹ä"MÐSSVVÖ×RÔºÓÐÐXYYƒV‡ÒÒÒÎÉüüüøøøÐÐP333‡Ô••%nîÕ+555[¶l9zô(ÈEv˜ššߌ¿RRRâââbmmMII r¸> ˜˜˜ðóóóððêpâñøªªªòòr|?þDmƒ““š7%Žeèùòõõýu>êôÐüÊ„6jkkŸ°/›Œ2¥¥¥>>>¿Y¡°°ÐÑÑqÑ¢EÔã§¥p}0N™:ujdd$¸>2ELL¬¦¦¦±±‘––Ô ¹'¸>²àܹsÜÜÜsæÌAÇ‹‹‹%$$@“±GFFúœ„h.®Æ)sçΠݱcHA¦ðòò¦§§O›6 ¤ 50 Ôð$ ÜÝÝcbb¶Î6***ÀõIÐS¬¤¤:¸>§XZZnÙ²ÇSPP€䈨¨hff&¸>d„ 8t qV¯^=þ|¤-²?///h2V]ŸŽŽèàú`œÂÎÎ.--jnnj#èéKMMHjjj(ë#q^½z…>ý:Ç¢®õUNŸ> :¸>¿˜™™‚ë#S¦L™: ”””PÖGÊ´´´FFFvÎÉÈÈ””eƱ±±ììì""" €ë€ñ‹³³ó¿ÿþÛÜÜLEw:ù1cÆ Ð¡¦¦†h.¤ 
jù\\\TUU;礧§kkkƒ2ck×®ƒ®Æ5LLL:::^^^[¶l5È99¹ºººÒÒR...Pƒ¤À`0X,t MÖ®]ËÆÆ¶{÷nâ™ï߿߶mˆ3ö¸sç!`àú`\ƒú=GGGp}dЬ¬ìýû÷mll@ ’‚––¶¹¹t AŽ=š––G<‡Ã}ýúuÆŒ Ï#""‚››:(p} :::,,,ׯ__²d ¨AvhjjÆÄÄ€ë#50LSSè@jxzzÞºu+99¹ÇüGMš4‰’’$cxyy999®€VöìÙãêê ®166^¾|9è@jÐÑÑAY©±oß¾»wïþjù¶€Ämü€±A~~~JJ jéA ×@+&&&žžžçÎ[½z5¨A^hhhTVVæä䈉‰¤--mcc#è@:,^¼=)/^¼èu)êúÖ¯_*16nÜèìì :¸>º¸xñ¢ªªª££#ó$;fÏž}ýúuWWW‚t ££ƒh.$z"¤¥¥cZZZÒÒÒÌÌÌ@«±DnnƒƒA ×@’’’vvv yaccãææ®¤ §§×G $&&%''ÿ¦0üæÍ› ÔÔÔ ×XÂÑÑÑÃÃÚj¸>zrôèQ4daaj ,@ó7YYYÒÒÒ é¸>ˆæò×™;w.++kyyùïW 655¹Æ7nÜ(..†Z»®€Þyò䉄„„ŽŽôÿF^XYYy{{Ÿ?¤ ˜™™q8èð· [±bÅóçÏÿø)¤¥¥åÑ£GW¯^ÑÆ X,ÖÉɩװ=€ë ž•¯_¿RPP€ ä‚›››ˆˆÈ¹sç଑ŒŒŒÍå¯ðýûw55µ­[·þøñ£?ë_¸paæÌ™¬¬¬ ݘÁØØxûöí’’’ €ë OæÍ›çîî>eÊ”·o߂䇑‘ÑîÝ»÷ïßjÌÌÌõõõ Ãh’ŸŸ///¿téÒ‚‚‚þouæÌOOOPo̰cÇFFÆ;w‚®€?àààÐÔÔ$''—‘‘j ÞÞÞ¨c‡ $šõüñã;;;H1Ò|úôIGGÇ¢Ÿå{DEEáñx###ÐplàããsïÞ=ød àúè/kÖ¬áááøòå ¸²=_ööö+W®¼|ù2¨A ÐÑÑUWWƒëQ‚‚‚Ö­[çååõýû÷Al¾{÷nwwwqlxäÈ‘?‚®€`ff&##ÃÍ͘˜(%%‚>ÞÞÞüüüÏŸ?Ÿ9s&¨ñ×abb*--)†ââbB]XXØ@Ë÷:¹|ù2ƒ±´´=ǧN:þ|vv6Hàú0ÒÒÒeeeŠŠŠðEœ,¸}û6š.,,„^ªHÁõ¡·è0ŒÔÔÔ¬Zµ*&&ÆßßÿÕ«WCÙUssó†  ÌãØÀÑѱ   33¤p} öþ§¢B_¥>>>üüüP]Ä™6mÚ¦M›ôôôž-ZäââaÉ,+--½~ýz___PÀõ0TV®\ioo¯¡¡¡­­}èÐ!„”Ù±cǧOŸììì ßß…ƒƒ£¸¸t---§N:|ø0š§?{ölTTÔ0îÜÓÓ³®®nïÞ½ 3Y¼qãÆ·oßòðð€®€azPQ%&&æçç ¡ù0Єdñóó³²²rtt„ïß4'ZTT: ˆsçΡ–ŒššµdÚöŸ?þ|NN¨M¾ wÖÔ©S·lÙ·€ë`Døúõ+‹EGäå壢¢ ýiìâ⢫«ûèÑ#PãoÝ,iii ÃïÉÌÌô¾øòå (O¦TUUÍœ9SAA!//Ôp}Œ,ÔÔÔùùù8nýúõ÷îÝ»s玬¬,ÈBjœ>>pY’×'ôú‰E/*‡ àú !\Û@s~¨ñó÷÷ß½{7j6@RàØ±ceee'Nܹs§³³32ïN**ž·oß***ŽÉ¬««»qãÆ­[·ž?.**:þüåË—/jƒt…¦*##ƒ——®Ir!<<õ{+V¬5\¤›Ù=Ú‡sqq Ü¿ÿˆFhú''g~~>êÆÑìojj*d‚GIIÉ—/_Ž ×W^^ŽÞË!!!oÞ¼AÈÔÔÔÆÆÆ¾ ’MóÂ… iiiËÊÊàR$—klÁ‚ÕÕÕ?~üA\äƒ9ÙF}}½µµõ£GüüüŒA™¿ˆƒƒÃòåËUTT´µµ½½½AEII)99ÙÑÑ‘ìR^SSœššª¬¬¼xñb[[Ûõm‹‘‘¹xñ¢‘‘\‡¤Ï•+W6lذwïÞøøxPÀõ@®ÐÓÓ¡#¹¹¹ŠŠŠ aaaPÖôמéTToß¾E3ôè)ÈÌÌäààMFUUÕãÇ“ER“’’.\¸pçÎNNN ''§5m£ì±±±fff>|@.BR¦¥¥eéÒ¥ÉÉÉ111 €ë`Œ ""‚ú täĉÿþûï¥K—,X²ü¬¬¬ŒŒŒ$$$üýýMLL@‘`ÆŒ$[·9--íØ±c·oß–‘‘Yµj•­­­ššÚÐÜÏÏÏÓÓ³¬¬ ºg eš››çÎÛØØøôéS ‚¸>Æ&Û@ ''§»»ûºuë@“ч™™¹¤¤DSS3))iß¾} Ȱ#$$„þýöíÛĉI!Ÿ:¢#GŽ ã[·nµ··¿råÊÜÛÛ=(èžÄ166ŽŠŠjhh ¥¥5\cEEŲ²²”” sçÎÐdô‰‹‹[¼x±““Ó… @aGDDäÍ›7Ëõa±XOOOÔ IJJº¹¹­jc¬J|îܹ÷ïßÃUG²øúú¢×!zG@ ×À¸CYY‡Ãý÷ßììì±±±  É(sëÖ­E‹mÞ¼ùرc 
Æð"**š••5Ê5™Ÿ={F¨YzøðáÝmŒy?þŒrQQ\r¤Icc£¬¬¬‹‹Kaa!¨àú¿,lÃÜܼ¶¶6::eÂÃçNêïïïààj #âââ999£ó[çÏŸßµk—±±ñ‰'233Ç•Î:::¡¡¡Pc4¹ÿ¾µµuVV¨àú—/_²²²¾zõJBBMâââxyyÑܳ˜˜¨1\Lœ81==}D"88ØÙÙÙÒÒÒËËk Wàü èáëéééêêÂõF‚ìØ±}¶@¯‰® ªªª¥¥¥“'OÞ¹s'9vtF¾ÐÑÑ]¾|ÙÈÈ(++ Ô.„……G¨ÚáçÏŸ,XÀÆÆQ^^>nÎÍÍ €N½I+++jjêçÏŸƒ® ' &''ÇÀÀ ==ýäÉ“ Ȩ±hÑ¢K—.íÝ»×ÍÍ Ô†ÝøûûoÙ²ý›‘‘ ¯^½z÷îÝ””” ©aaaÁÎÎ~þüyÀõôɽ{÷lmm—/_>öâË“2—/_Úºu+¨1tøùù«ªª†koÎÎΑ‘‘±±±Ð¥5Ô÷¦¤¤ Ï ‚Ý8X>×ðg¬­­W­ZY‡QƒÝÁÁaãÆ ù°ÀÃÃ3,®oÇŽçÎûøñãéÓ§AÕN<<< ·OääÉ“éééñññ €ëèAAAºººû÷ïßµk¨1:€#$$¤®®1úF##£ Y7PcèÐÓÓ—•• Âõ½}ûVGGu5yyy 㯠fÕ¾MèÓãÖ­[TT{p}‚‚">>^QQ±¨¨››6m²°°×7\®oÍð Ð !4åo¸qãÆŽ;@’ÂÙÙyΜ9¨)\À€qss311‰‰‰5F555Tm===Pcˆ022¨i_jj*šiŽ—••õú¢¬¬ìãÇæææ é<žûp}CeóæÍAAAÖÖÖ Æ(°páÂ+W®€ë:ôôôýw}nnn·oßþñãH÷®]»6sæLФprrrww\À ˜p}£Ã²eËfÍš: ::ºÚÚÚþ¬©««+//ÿöí[íDGGëë냤C|||NNŽ‹‹ Hàú†„‚‚š-Þ½{÷¾}û@‘FJJŠáåË—ªªª ÆP@e¬¯¯ÿý:---’’’îîîË—/ÅúCrròÙ³gAÒÁÍÍ šY¸>€ááØ±c¨ñð𠤤5FMMÍððpp}C„––ö÷e}8NXXØÏÏÏÐÐäêïÞ½£¢¢)H„ììì´´´Çƒ®`àççGsÆ{÷îE¨1Òœ€áÏFO:5 ÀÁÁÔQ£¢¢@‡¡@OOÿk»¾¥K—†„„€8ƒ --M@@t ž={ÆÁÁÁÏÏR¸>€áÇÌÌ,<<\ßH3mÚ´ÜÜ\Ða(PRRöp}NNN .œ1cˆ3Þ½{'!!:wîÜ™={6èàúF„ ìÙ³ti„„„+++YYYAÁAKKÛÜÜÜ9ùýû÷ëׯ———ƒ2ƒ#''gâĉ ‰ðâÅ‹µkׂ®`DfaaILLTWW5F>>¾ŒŒ bpPRRâp¸ÎIGGÇÝ»wSQÁ»u|ýúÕÈÈt ÒÓÓõõõA×0RL›6-&&\ßH#((øñãGp}ƒ†¸¬/---555::d4………ÐI‰€^̬¬¬ €ë)fΜùàÁÐa¤ÊÉÉÍ„ :ËúÜÝÝA“¡PZZ ®Dxñâ…´´4èàúF--­£G‚#Íĉóòò@‡ACIIùóçOt¤²²2&&&,, 4 UUU )ðæÍEEEÐÀõŒ JJJÅÅÅ´´´ ÆÈÁÏÏŸœœ : Åõµ´´ #Ç722¢  MMuu5gbb)H?þóÏ? 
€ëYÄÄÄ^¼xqÃG!!¡’’Ðað/Q**‚ë»v횟Ÿ2 Àò‘_¾|²>×0®/33\߈ÂËË[QQ: jjj<ÿîÝ»ÚÚZmmmd(”––233ƒ$BQQ‘œœèàúF ‰>€#íúª««A‡ACAAÑÜÜ|ãÆ Pcˆ”••ë#rrrXXX0 HàúF))©ÈÈHÐaDáã㫯¯Í„ ðx|ttô¶mÛ@!òãÇVVVÐÈÎÎæçç\Àˆ#$$TXX:Œì;€ŠŠ’’²ººÊXíú~þü™••¥§§j ‘ÊÊJzzzÐÈË˃žú\Àh ,,\ZZ :Œ4tttàúê™ëê긹¹YXX@!R[[ËÈÈ:………\\\ €ëqÄÅÅ!ÐÈ(ÀÀÀPYY ¤ Ž & ®OMM ¤:555`žI„ââb>>>ÐÀõŒ†ùùóg}}=TúQ¨©©ÑÜ6è08(((ššš$%%AŠ¡ƒÞì444 )ðãÇ ÐÀõŒŒŒŒEEEbbb ÅÈAKK[[[ : Ž &`±Xbè466BMc¡ªªŠtp}£š,++×7¢ÐÓÓCÏACIIÙÜܬ  R ¦¦&Ш©©h.®`” ¥¥…ÞäFÁ· ¹mÐaÐàp8h5\®Š ²%$A}}=èàúFÉõAåуÁ€ëŠåCááá)†ELèœDhll„rW×0J ÙŽºº:ÐaD¡¦¦nnnGyy9 .ZZZhiiAR‹Å211®`4 „ÊF : ަ¦&T@Ða¸\ˆ@"477SRR‚®`4 „ÊFÚZÿüùtX,\ßp^‡èÕ:ˆ×àú†æ/'6|OAÿÇöÂ4,<’2STT§*‰r‘‚j•©—·K ãhMxC²àߣ†¢ýT²â »k\ç䬘ŠX]ÖßmPúòÂîS)„ßBúò:±Þ;d×{Yþ™õiª6”7(8îY«Å—{ï¨ÛµlvŽ.±ÑES×îwTçì+©Û·ÅW0ÐuýV}ýO×cD Ù0’àš›«jëêëÑ‘&\sAqiN~! †•‰ž:îðx|MCcM]]]}×R×ДWTúîËWTdfzV&h[Õ ›p¸úFlyeêúÊ+«1ÔT 44Qä‰^ŠÍ?›°¸ÚúÆUµt4TtPÕó/=~ÑBsKK3ú®o¨ª©£§¥†°‹Ü€^ë¸f\ËOôéAKEIGKÊcÛõ}‹9é{wàÛì{|w—Î_L85¶Ô'Чs’wåÿ Eûà¸Ç{ìw9—êºøUÞ¹]sžÓÊæœ2éký¸CæÎ'‰f¬úÐ\®øíƒ€À‡=Vö ¬P¨»©Ö[F:Óí2Ïà_fϲݻA„n`H9¼oÅ?Þ}ü˜—û¥¬0ÃÆÍÏÄÆÎÂÁMÇÄBÏÌJKGOAA‰L`BoÓ7£ÑZƒxâ‘Òj¤¥²¶¡¦ j«*«ÊKk+Ë*KŠh™˜ù„$$$¤$ §Ø)ò§¼¢¬ÙßrË‹ò14´l<|L¬,\t̬ L,jT(„S7ZÃØ¶¡‹ä—77ÔW6ÔT×V•×”—VW”ÿ(Χ¢Âp ‹‹‰M‘‘bfû1Zðø9ù>},üþµ¢¤ˆšŽŽ‰•“™ƒ‘ƒž½JY0´´ ‚Pÿ³¾7"H#*{#¶©[__[ƒ XU]^ZSQ^U^LCÏÈÍ' &&¦(=y<¨×ƒ5u™ï?}ù’SZ”__]ÉÂÎÊÈÂÉEÇÀLÏÂJCÇ@KKG1uË”^ÁQèúߤߌ«Æ654ÔVµ]Ѝ˜Õ•åµÕ¬\<‚ÂÒ“%%E„Àcô±Pô£*ã}öwôÙ[”ml`åæadál},0210³ÒÐÒ£6Å™ù*ÝäK‚Ô5㰵ئ&ôÜ¡WuMeyõòše8\#€°°¬”¤ˆ?ÈÛIÑêÌÙß¾´ŠÜT_ËÂÁÃÄÆÁŒŠÌÄÂÈÄJMOOMC¹ÃK£OÞú¶‡m¬¯ªª¯«ª­h¹² ÛÐÀÆÍ+(,*;Y\|¢h ³ëûÓ§u¹°MµeðÔÔÕÙÏïy‡Æ¯uo÷lг¿äùKÉÄR4M‰‹óŒ¨ ózz-BxiNÇô—Óó[WnRgéåRöe­Ä–OüÊûÓ“~·ó[Vë"rüñå׬h³|ò n`¨èäô÷)ÉIÅüb“Ñ[H”oÒtè®('P2²°£—`/KÓ‹ sRó?gU”‰HJëjjðòpŽ‘›[ð o2Þ¦¼DͰ ¤Œ€ødN¾‰妡à öFGψì<½ää>VáŠÓRò?e•ä}“ÔÖ˜)"42|•uõOŸ¿Ê~—ŠÇÿ—FdæàUœŽýß š“CóÍèÀÄÞû…—]Õ\œþ¦ çCÑ×4+7}ººì$ñ±g-2rò““ s³9ø&ò‹Oâ•bš¤ 4i`¹ è™Þ^–¢ïàÄOÅyŸ³ >¿Ç66M–S˜­1}:ê?>{“Ò²Þ¾~]^ø_tŸè$^a1.q9tÐ~0Ô´èÀÀÄÒû½ƒ OÒs ?ø–‰x¹)S´ÔU©ÇMñ *òëwŸR“_–äå‘à•ä‘ä‘A‡ŠLƒôŒ½‹\ƒ ±™¹Ÿ>äçdM˜@)«0eÖôið• o×ç¶~ËZ­_ Í6îôBÿ~»$.lßé|‚Q)îk•»n\áÓ­K켟åöºgƒ-7¯YÜ‹9+|¸læÜ€/¿.P¹žq­`õäM]~S%¦â¡6&5#7Ñš‚¢<”ýÛ§QTîijÆë2ÑêÑÍQ‹‹36O÷0ÅÿjzKö[üCbÙä?| 
ýrqþAǪêÌDóêýœæ>#Œ‚åû"|ö*íU|,†šZrŠêôføWjZaiEt L~oj|z÷QNúkVN®ùFF¼ÜcÏéÅ&½zý"Žž‘IRIWXBCPbÄßTVS$M˜,kÆ¥<ˆËNKFÓ`h /.LN_£Ëªëî?|œû1ƒOXPXa:Œè/RRQñ‹I¡a‹ÿ÷*;5éçÏŸš³f©N!×®ÞQ§÷êݧO75ÔKN™6q²¢òlã‘þQ¶ÉÊ3Ð0™Q\ýéÁãï3E$L ç±³0ÏgoÛc!åMR<55­¤’*êô¦ŠŽô²qñ¡ƒŒº6aòÅÇüOoK¾çJÉ+èéŒ=sBxÁ¥¼x6aÂôÑ! .¥ªo:Ò?ÊÊÁƒ2jZ„ÉW_J³ß$~Í––7™§K+dçú~›¡ëñýCœ‰¡Ã?5¾6§›Ú}±™ãvÁ¦çÞWã “÷ŽZ°E ޼¼»¥ó¥Ž»¹LÌ2 ¯ç‰‹#Ÿ?#ÈkW¹É­&³«°‹±³6&½€zÛµû"¤–( .`õ²¤}F‰D îèEŸ¿aû€(µ'–í3{¶{&ñ:™þ»[>•ð¼W ˆö¹˜Á9d9X‹«Hw¶&Œûßò«‹?tå•G,[AÜ oqJˆ}ß¹†ú«»Ÿ òøJ˜<¿b·Í³}m²%}óìýëm»xxn¶-Ü'ÀïiÁãc^¼z÷HJy¦ÌôÙè@j)¤c`"äBš›q~׫ÊJÌ› “U[TäûOSžÊ¨ÍRИ‡¤–Bjº)³ Ð50—o„•ä}]df*)"L"É+üQ}ûö†Ú*UýEןô åÊIJê耎G=KÊz¯9[WSm ^Šx<þy껸Q"ÒS¤U5ŦL'µ¢â™&V­¯º¦†Ó¾ñÍ-K›sq²½g/×ý,1ãe¼ÜÌÙ¤ùXàÖµ\Žäü( ¾~’“›×l19Ååj}öƽL}þDZU‹4_pì¼‚Úæ­U½ «*nŸ:ÃÄÌbaº*<¤îúîm1¸·åk©ø¼~äØQ·³òùíEk_Âöokêx;Ñ •¯‰—zn¿²Åx;R|ê"ñ|SïËn• è׆ÅÞ¤Ó~6€ä÷ܧÍù€…Ý÷{3d@û$FêtöyÉU“Ÿ—›í7Èpã"øIÛùľøØë«J¿;·µXŒè®ÿ¼=”Ö·{Èý¾Kåh¹:ué9çÜ>=Þ”W”蓪ú†k×o¡#SçÌ7´“#ýSQaöïš :¢¢:}޶‰§¹²¦>èúu †FY×ÄPj 鋌Ôû¡#%UáGOÈÈËÏÓû‹éILûøønØ-}u3²¸­DeUС‡ ¾ó ²$ÏÎf)‰Ô‘«oć†×UW©˜,_GúJRÓÐi´Ù¿´ïß“ü|´uç’{Q'Å•µ×¯1±q(jΛ(«Bú fbçÔ[∎øz0 ’ޱá$2¨€ÍÀ¦³Ø‰OŸúìþSsÙI¦ëCËÉiµââŸ[+Eÿ1?ÿxóâzz¨oÏßýÀ»Ü áBh¸D„¸ï³å¦üRñ€vš­ò,´ÿiï¹O1é¡ï³ § ¿§rez™î«N.u‘.¸³­µX²ƒ#©›”ÿ”EÀ"˜).1Û}õgf¬ÜxÍÈŸNÏ3¡Ó]G­‚hÍ@ß~ëuä °´š“n GFf YýeÎ õµSTg,0˜CŠ~¯®Éûè!Q9eBÁÙfAæÚ¬Â65¢"ËN™f¾Àp4Ç?My÷üþ&–†vëÉïõŒÁH«j¢#¾—*J 6mÙúKHjqgNzqð *ë™PN ¿È™|B†v.µ•åè¥8CGwŽ–B¶VÖ]:Z\^…‹¬ûÃÄIòèðéˇkW.[Z/—’!I¿×à{î´Ð$Y “¥ä(2¡ýpAÞ×EV Ò“ ¹h.OË<ºEsé¨fI à„Å ŸôÖ"©Î5&ë¢/E¢ Ô½° ëûáUšJs¿w{ŒæT =É4& ÌžÄ>{"ëàç{/¸³Ý]Øú­q{œ!VàÚÅ~îMwÿ5Ûà í…{1Â]íM/rÁ ôB}#îr` ã§mä~,tô­GÑT_ä„—’ÊT=’É Ö54] d`f"SÓТGz¿cÞ§¤ddæŒFý¨¤ôìGQ¡ªsÙo wµÙ´>í£î×þ(stX>ÊMþqÍ×n¶üü9Ïv 9~ß!†‘•½ëjªyÑš­7c*™•û•×Ô_½|‰ƒ"ê`ÉýªæŒž‹¼üoa‡YZY“N ¨ªú†+W˜ØZ? 
’»È\‚¨Èå%…Zh¶XFr¬E ÈÛõýýÚ»ø©^sÕ7vùº•òtÑ'SC×µVy=CÄ­«Ó¿Ä k¯ù,!*Ñ.Š\f™n’8//7''Ÿ¼©Ñ Â6É׸Ûkùþ³Âr!q‡é8?ÛVÅüã>ëýl]½³K“7^ESu4e”ˆÒâ#ŽcôìÞúô#A)2~W€FWC¾övŒ2‡Î¯”†Ûø•;âs²Ò4ÚRRQ™ƒ¢¡§ŸcåT]^|àÐáåvÿüõÿÅ<Ë}Ÿ¡iºŒ U~ãýt-WÔVþ@E¶¶¶±þʪë.úúȨi‘cùÞoPÔš‡Çÿô¹teâD‘ùú£Tc6:.)ãuÒ,³emŽ˜XÐ }yÑ÷ƒ‡¯X±’,Úûáñø °ÈšªJBý½1§ê`×§e¾¹sûöêU+iÿjOå¨È7"”åi.´&÷Ýò¦Ü|¨È9ŸÞEÝñZ³Ú‰¢}‚ë#aÔ6<¨˜¶MãsÂ\¦P\óªHh³7´Ó£Êî¨pvµjóµ’ôµB” –MXs!¬½³‡{¦¨Â–ð™TvÏßÛ®]A/Ÿê±Q ˆ¸¡Ù”¢P¢Ø1é°®¯}š™MùÚ#Í T¹úÎ;Tf}—IëÀíé5ÍöÈ:ÓýÆÊË–>ùDóįGn†r> ?jêýý|'«L'´{0sðڹܽÎÈHge¶è¯¤¡Ó±ŒU‘YÙQ‘=ޤ¤ø¹ÜÊrØ÷õ$á}Ú«Ù–TTc°~:šÕ˜o]ôõÓaÏc«W¯dfb¹ßªihòóõ–Q ´Å{pð é/w¾~ƒŸŸ‘‘>)'õ[QyÐe¿ióÊñM“çBTVIPRúä©3³´g«©(þ•4”W_õ¿0EÛ@JUkLŠ,(!Ã+,yÞ×oÚ45­ª¥×7ê`±EÝg4bûr&»ñej*œs»ŒSâ6Š€¨¼W†èÃÛä5_ÊDÓ¥s…”{W»»,£+ ——©÷œË½ó ÞêÞ>QÃÿÍü|7”КPüسš“J ¹Ùû>C;öé›x§f«,Q€Oßç5j²¸™>½´KʉHe¢"P¥]ñÁK{»©€hЏ· Œ…÷­+>3:ËK5[ŠRõcC`ñ:+çÁ=+G õïvÚ¼EÅßs=¾ÞÅy”?<¿z—ó0y%†šfl‹¬<Û¸¼ðû!Ï#.뜇«›©<þ¼ÿe.BÀÀ1 ¯°×R‘ ~þºsæ*ËH€þôÏß"n]Ó³t¤¦£ÃJ¢.z¦‰Uþç¬cÞ§Ö¯]ME’õî?ù.%5¨c©ôéWÐ7Ë\ëUY‰±ïÞ¿ûÇz´›1Ǿz›;ÇfõXª^ÑKŽƒAŸÞ$œ¿xy•½dlÀõ.Ìs^÷ÖÿAïpüae~u¯ñ)&b°ßÝÇŠ»sX¿Ý'‚<Ãoñ”6<À¤¹Š¦{Þ½e´Ó£úÖöwãƒÈ'/r²2È"dß°À#$¦¹péñcÇÿY±‚‹st~ôΣ¸¯ÙïÇÈ|B³Ìí¼¼¼míì„øx‡¸·ªú†ógÎ)ëès ŠŽõ(©¨æX9½ŒÍË+öÚž÷ã_f¥¾B/űm3:—ffçò óRED=|<,‘Q>Oþò!Ks‘Í8T’š†Ng±½Ï¹uÎ.¤Ð½õµðHŠ ”2jÚãð\°óðOÑ6ð>svƒóZJʬrr÷ac}ƒŠ®ñ8™‰SÍÀÜÛûäè¸k\éÃh´?R´œ¢í;‹ç“gM€qÄë¬O¯“ÆO½—^2‚tôZ‹l|Οߺeó½“b¼ìæl,m3ßä2súTzꦂwq‘N‹ö¶­`ì•þ¯åX·iËHÿêb1Ô´³L—ùûú¢"cØ'÷¿|YnÆl4›8n¯Re]“çw®%½~;Ä0iÙ_ß$Äϱ¿÷;=#ó #Ës.lÙ¸~DÍÆ¹›PUY1ÝÀ|Üž !±&…ê‹×íFªšÉ£¤”¢¼oó—Ž[‘ÑǦüŒÙ.^^»r€ë÷`&iAÇ–Àø¤‹»ÿ_È,s»q®§ÔÔéA·BG"à$*r ÏÕV˧ìù2ʹËñhè/\¹¿ôeÐýO42ŠmÁN(*/-â?™€8…_¥ÞQ&FQ¸ÿXbçLlÊu=Ow*¤ê°ñ¸§Ž8]çÊv>á´7ןùÖ±Š˜Ýù[ëtôÔB‘ÿßÿÖxøGŠ[zäÒV›i„‰Æwç¦ënFTþ p¥:h¶ó]çZüÖþ÷ý”9 -„±ÏÏ,sÞw§s¡æ WŠ>u``a“UÓ ¸qËÞf`9°°èÇLlâã½›U³ûWÏÊHIºw,®9*ôºæ"›qÒ–¯/ظù$ä¦Ü½½ÔÂôo¥ákQù›„§úËÇùU=QzJiþ×Çq ³5§û΋T'ÄDù†¿¸tY^îݘ'†z:í×À8%,2ZT^™‰¤•›ö4ôræûlY)ÉayÆB‹„[o^§lSåÛ†š#óMÆ3Ôµd¥%¹9é¹T­mˆÂkcþú*嘾ÃÑDÁE_œ]B‹YóîæMþ›Ù*´¯vy墥ç_¿ huJ¸üÛ«§Z]^¥{+â„Qå ³Ãè|ã#‰ÿÚ(Œe”Û¼Ý[g]ÛjòÍGƒà°­uP‘×ÿ³Ýµ%äs½x›Ëˆ= »ñTƒ<]È·“â4¹Gyd‚й³<_wnë {À`cÂï/,£ôíCÆÛÌ,EÙþZ¸‚òÊ÷©É†vëàÅ`h”4uo…ÿ7PÛÜIhd´ˆŒ# ;ˆ)>eú“[?~Î$.òWyGi–þØ&ÙOµ £¯œž>M‰Žv˜cG߉ˆP˜©K…Á€ÈòZóî^>9Cu+3#¨®€qGqyUîû #û 9uÇOž 
¯ë+þQMÙ¯p=BQ÷9îñèä°í!lj×Rr‹¸³hj¿¼±¶ö²™n?#Ü^Óf³ŽælåiKß®îX^Iø§¼=fkGÉF`ÁÑÐí¨ÓË»œq@#õÈá¶Ù¦“›}ž¶Žâ©©™å$AŸ¾:*>½³o}ÿ¨½âK*†sS¨«ûPUÔ¤^"¬pÁ¹£÷kjí]¡›“[K ”fĬÄvd¸°T, Êòür3´cŸÄôßõÝ¿ÿPFUcœ—Mu"0IácêËÏ_¿‹ tÛҪꜬ4ÃÖƒŒíù`õYÅLÿ ÕÞ^¦}hÁ5ó‹M†³€´µû•VV‹ˆŽ±X8œMïÒ²¿TWUL“R…‘¶þKäU5#ïEÛXšƒàúÈŠæ/~[V;zßÿãŠbóv\»zP»—=ܸp·††¦©‘f†…•7Õp%lDv #CìóbrÊ C'Â?[š³s¿JŠ›Èñ ¢²"ãÄ5£ƒÓ^ÿÎÞ_ÿÇjã½&BÈãòE½²Q÷Ýù¸¸å7–ÒŸ‘ðôytä%§c{;Úû0å>š%ÓÒŠsK±8G¨„9MLTF’¨oNáÓA‹ÛD ~pÕ!¸>ÒéX¡Ï\FÝ›Ûg.¿jä$„̨«ÃK¯’–7e㢘@™•ýEZòϽ/ü¨®Íÿ’=EÇ.ÎN&)L‹ñB\xÀU‘Ÿ§æÕ-OtDpÕÝwmÒ“ûü˜óÑ®g»ß×k1õ6“ƒSHüí‹'9ßòÅ& ŒpªJþ;øÛUû91ù ï7ŸSí÷;#Ÿðd™IÒ2lôãôª–Ÿ}ùÎhS{P‰Ý5AxúÖfxž/v–ŒµS_^Þö_‰ûÿì<•ßÇïÍÈ&#{“M×ÈV()J{ŠH)¥Òø£ùkÒ.šJCQF%£¬PFvŠŒ¬ŒDö¸ÿsïE’dg|߯çŹÏsžsÏý>çyžó9ã{Ø5ëI’ì¥_ÜZÅÖ*uE„s{ï¦.åòN{¿7ãìžy©ë,•ÖYº‘Pop7f:õ<&­L^µuÀXܱ éVjâ¤g?¶ÂóäB@k¡#«Éb—7÷1oƒx´9ÛF”V…í›ïD5cÙÍ]Tzau &æá™(¿®µ5½$äØÏ¬âiõì=ôº:—OT"19±'ª/:î=—X?;úËã#¿`°õ,rú2|ý+²pR:”°‹HÆ…677÷ÖI*Š3™ÆVðj·©sûGeâ ôTu¥yÉÑοôl;Ða81†¼u ÞÄñï>æð”g“û£'«ÞnUwâ¶žŽÂ­Jõ÷$D%]õÕ¥]µÚîß³¸<Æ×_X2Ö†$’“Spð ¾}Ÿ¢¦ÈOEOÜÅ2¾?Fhhl*Èþ4yšÁc4•¤G½ILý0š<*wÿ¦CRa1ôPÕVWú¨¾‘@åË¿H>…{ùïÿæÔ­"bïCí?_›kgZzl:Kv´_BUCâµÅmÚŒÐø,¨‘¹ªªEBCS¬C£fm~tHxdL\zfa-±ÆÌ)$ÓÒÔÑûµ¯wÉjy_㣂_…Æ·¦Œ§æ”›ª?}šÊ¤n65%ÞDFÇÄÇfg~+«­¡¡¡¡fæQPŸ¦©,Í Eè-ùÅeÍõƒÿéÉ!›ƒè-2gƒjûKä{Ñ{_ŸRx„8Ÿ$UUÿ>Ÿ•‹?>ôÅ@}kAé·¦†zzÁÅO¿à®[ÌuñÙ©C˜×÷Ú¶ž®ŽÂ4x žÉôØ -ÛôKú¸K?µHÿ^¿ú`®¢6±ê¥‡V2kyL¯-#øz©hÛñØ”÷×'Š‚SÀƒ Ôx ß’ë!Âr+çîÙ&ß9‘ÍN7¹HU­¦:â¿¿tõuÜI¯yñÍÕª«÷ªàöþö¼ªoèÆ,,\¼ï‚{bÀ/99ìýs¶Uº]jVXëÜ©¸ÈŸB·gµG?3¦½>¸KéÊèÀ3-÷+]XÓHÅøÏ¦Â¢ú1#3[FVnOdó¯÷{=s‡ûýGàþVÉ×:™óœ ½µ–³Aw¿×Úµ||¿©\‚ê«+J‰}ûþë ~<¯äd¹ Òi¬Í/(¬A^Ñ Ò¯-É)¬h?TU”S^sål2¡d}/Ê)ÊÁÔ`˜yùé;œõísôÛ¨„ŠºñTJZN¾I2ââ\?7U~/Ï-n½$„ÉËrrª*ëÉ:íì2 _bdÈ^½©¡ß^ËL~ŸðNÍ`q§÷å\' ƒ·Wþní¦ŠœÄäY¹ÕSO–”™ü§F·º²ŒÔ÷ Ë«(Æcñôœ‚â¢2¢ô]+ŸŠÌĸ¼ü¢o?I…¥Ä…;¾¾åeW6b1äùù:øoy:TU”‘–ú¾¤¨ºK¸RÂ8qÞ®_4e!_«°èšÖՒדe«)Œs½¤Œ,F–‰d]ùdn̰@q5ÑÖ_<*7æ¸MQÙ,¾1Ðãj˜‘Á_ÞtìÜ|ÙÙ™P} úF5U?~ù›Ÿ‹áâë‹Iý ð·“¿y혳-¬óÞgN+ž9šöá¡Y ]v;ê·.´zúá¬!E/’%Ät])·áv^— þçÔ8÷¦ÂR¥ã34ÿ8‡„¿šFáfzØJ1èµzJNAÑ/UÀî™4›ãöŽT¨ÀV¼²Ñ6½ì»uÓœY/bLCÜÍ]ÿÛu©cçÑ·?~Ê^œX¢«ŽjÍvĈÙ¿²oOªÓÎÆâ°‹Ûw^zß! 
¡»Nmߢ×Z¸ÿæ|’øRf¿`ÁËœ¶ÏÜóÍM{ZG§eeÿ^^:PFÎÍ/l52Ù¤5WÒ{²$6•è¦øÂMÝD`S·‹/´ûk:ò[Ý~ïŒjÓ;LJ[Ÿv7Ÿ“^ññ…ÿë~'…€ÉB“>˜…Žybå·²žôVU|+SRïûÀæ_6$H>ùý;&\5µ¹¿M~³w‘ ÿ¯º¯äí5§ÍVa9?÷,<ä¿sí´º¶ÑŒL¼¹8¡Š½Ù÷åA±“Qy»P7‹šõ³boø—}ˆ]ÒÓÖ؃Û[˜Y'æôJõå2üñ~w÷}½wËt¾ßJ ­Éíš?]à8g-I3òã$±ñ©ÙíGæ{乓:™?\³XAh3úÕ'-‘äkë~R%{b!ÑÞKœæ±ÖȃØáS´V‘¡1ÿÉjEÓv/²¼ª˜ao~¾wû™(2T%_2˜íø³½ÀËÂÈë·F„¶¤” E‘•£¢ìë?{‹¿!Ýþ›µ™æYl9H°izX\Õ.5’N‹;?wÑP­ozÝ™µ™/Ú­-¿ËßmË´¶‚÷á„îN»×^t]¿¥&þ¼0 ÜÒ÷ê·^嘓ºy’Èêp—{d^!”÷ú¨Sˆ-& N¡¾ÛémLéâPU˜õ¤™?kC2ššì•aíóY—’¼”[Ýÿb ö¬:Óž]ÑA/>`0V´ŠdQQ1s×ãC3Þ^RÝ{TnëÕ'\ŽËÓq—1Ñã¡¡–ô]ÜΘ9»¼÷´½¡êΨÎÞƒQ´?·±ÙjÃÑŸ¯ÂiögÜì s¤ÈÍrßÞt ,S㢠þªo„À1ËbfãŸ;çð·=æ…µæëÎÒÖÕP›,..ÈFóûЀ å/Æ`ÂÏZ|&©}ï† ‡ôÙª«h¹‘¬<¯Okõ¬ÃIj^íŸDH,눖ðž¶çÙçssÅ0O³Îö,Yô$xm@=­ãùÇÃÙ’j3ßnÛÌYy&’´ßJ•Éïx‚¿-iE¦oG´:J¾©W_ß\¯Õ:ö¬¦ðÍ5çøV¼Jœ¦4¶v›<w 'Tý¨¢¢é±//ú.")‘¤îZ‚ƒ´öw¤¸­ß+G.â›=ó±ÅBËC¦þþç’"ÕÛ^öÉ™¶™Ÿq`ŠÕB·ªÆþг?g¾Õ¥žQÕ݃Á¸› c¼‹®üæ^ÕÍ‹¤í±Cì#ëQ¯ [? =ØŸ›Ëè]J @ÂEn“|Ë=ò®Úê|åå®XúxúoàÙ‹^p´]?{Åõ–+`îÿÅ£2³& FõÜ\ÓC„&ž­+UD:àˆ·ó總&âÄ W°CÇïQÍKé„ÖÏF q@Ä»CVÕVmÞ‰ÎÏZ(ìq,.A¯mxZ?Ý,÷íMGAËPWýê? úF 4ë=ñKS=V.}”Õ¹ù&ô1Ú®üºSØÚ忉Íê¤7 ºùÙ9ˆ®êZáäaccakm¢°|Ò`ÖØXÝЀilllÀÐr³µ¡ÚýòÅsª™¡m'~ŽM­À2õ(ÙšÛfË;J¾å×sn¯noX°ât„4¿ž¼Mk£Î³ n~X)FhÿµæùúÖm/1v!.¤ji8UÏÄáÏ@éúDKSS/æÅîÓâÜ×Õå·Ò¯ÈÐêm蹦2;âõóÇ÷–W|¯¯ÃÔ‘f¬Å'}*UgíÁ4WZ¢Gù,GG™Y«çÌUQW×=QcäÜZ1zôWç“j-/Ü/Bôž^ÛÖ·@Á=ÏíÙ~Õý~däMb䦦–¡^]ˆÇ5k†}ñCFFÆù{)mi&ë«×õÌ[+ìn}!t=…nŸ€'ô\ÝMý¤$y't‹Õ)·­„ÚÕÛóÄæüYnîö­ƒñLFÎŒÚ'»µUíÊ« Û´#§WzÚÜŠ·Zu6ÔÏŠžàQ+Â0EÜ©çNl˜oî½p—Ú›7:e}u}¯Ninj÷›‹ åO }+U_âßù†úû¾O ¾ƒ¶ŽÑ4lœÚ9§ÓÉòvW:J>„¨®† ú0å%V­Œ»Ý:v@ÞîÀ/’A¥²åôâ0›û(èû0Ƹ¯Ó,ÉÈ(››‡ªàû©Pt×*#1ßÙùÂ6Ò­Šö"J>„´t}ÄsŸe’‚Sƒ!X;ýŒKÒæiè!LÆÈ‚¤2WøV1ÜVŒÂBK=­é * |¼¬zŽOô:h°ëDɇХåª_œPIno€ïÝûàÛVfØÕ±™Ï^½ÿ™Y : YUL,AÊ…¹?|«l¡ØYcŸ•x[!7Ž¢¥¹Y×á¤þjþý6tss3åxÚ®1ëºÖôÄ£r¶­‰‡øò:qŒX[%†8{ù¸ïçþKIa/_}Rž'Ò–ˆ¦›×mÞ)§Ùß³~Çïò&륪Þz¾¶;¨n–ûú¦C¥½¥¹€êIÊOÒÔ3³µ=£±"?99&2ðUHHØ£Ðøßßû.Ö.Ö˜ý¯K´zæ[…‚âsà%+kÿ¬¿ÅüÞ‹V§Ì¤/?»¯áwï®^–™_TCµ Ë쬞ý¬m‡¹Úh¹ÚtŠ={é‘Ý·h ÂðN PŽßÔÔcm3É)ôõöîÃëCI£Ý0ÜóžÞ1S”šš‰úÇË,Ÿ©bv“ ™üÄY@%bb^¿xýü?ó#Ùí/H7É8Ÿlh]ï®®Ó¼²^ô755RQ Œ§ J*ʦ†(l]¹ºî@ÈÉ)šÈÆ÷úá†Ô×B;bð±)GçùOqǦ;Kgn™ÎÙæ›¤ºÏ?dÚqß…·pžq;Ýg_õÝi.¡ÊµÔÍ“Ô×Ñw©½±^CÃø^:SOIÙÜôÇ¢HσÓ[…¶}NvÖ} 
:¹ÀÐ3~ÚdŠßoO€º!yLQµ6|ªkHÿ~”[Vƒ!¨>FÊþE*ÊñCUð§†V¿Ê}öt<£ˆÜRAÕšKª¦ãœÂ_„4þ§ZäÃÄ»ï<ÏÚEÿ&Ãxù1ØÏyÙ¬m•~“§…&e)×¼|/ÆzžGÛ/bæÅ3cY %¾õ~`•d8Ç%Ô,b$Š)>wÊÖï·ã‚¼ü˜¼œÏT”¤+ÕzY§LøÙ`ÕRGN.å€Te())›ºmù«Ge‰f„‚–¨.‹?WtW³$^¾nDzéf¹o:|s¬^ªoCÁÄÓ0F›åÿ:ê±7[µÛ‡>p´pµJ±û« àŠ·Ç'LéäkAxÅ–ü ô¬ ?BWÚ¸ö-›ŒkºfftV[V†)/Æ`mÀLê{ñø½¥±7Ï]ºíîšÙEügww£ eõJRòiá ôˆ‰¬ÌÉ q•Z]ÎGâäM·PwyRM[ñ`ÇN’äk]‡€Ôøû8±Ô¶µÙ[âéø³a2çű“§nâu.œµ›f 6ÅÀŒÐÞјztŠ®c\à›ª­jºu>‰§‘ÔPÁDEÅ9_H·m÷`™eçžžVkkÆaÇÑÓLmˆƒ•%1¶b˜—„ž;8(šjQÕ–žö¯1iè~|+cæàéÝ|{fN¬IO?w|¹x§ƒ¤‘Q7V̈NГ1œƒqôÅ„?àq µ;¤$À\oÕ- Fè\z²zëõG-¡"ÖU÷G‹ØÎ7WŽ^«8£SÄÄg>ý,Šu?°ã°´4CØfÚ€abb*iõÜ‹¡WÚÿQq½èÜXLü> †‡\Òд‰QÜ©Ç=õBDÁ"e`um?ÎùÏö+.x‰‰wœÉñͧh-Ž¥õ./õK(´ÿ5Ù†ÌèðïÌŒ4¸Äù©°5íå¥ó¯øy¨õJѵ^¶ß£veƒÖ«ï·¢ÍYquE#Cã×Äø03Ó„ìœì.s%ôÌ£2žœ¨@kêng­Å†.o|âŽíˆXþsDwÉ Ò”TÍ™RRúŹq]ü RœiÒè‘ÑÖöÔ7Ë}}ÓÕ}/§e`‚ú¨¾Àï’ì\R­e—"‡Qõðõÿœ'wˆŒýc“¾½Ù£æÎ_ÒÇÙ‡ÇüéH ûá«_¿¥»ÜvH–Xëýux¹¦ÓË{{1•Ÿ‚Uaýÿ®¢í—ÇSɇ»‡¶tX·0ÓlïÝeÞki ¼=@Bˆß«´ßÒŒ×ÝĆúB¿òúˬ5Üìà¶Nøgœ±ûöv슓O1g 9Îböm9`èèàßÑÿ$¯ñ_FÀÇïÛåg-ǹ;¸œÑÇu¸,´ÿÄzTCêóIy[Ÿc•ÚvW~ó`I¬ƒ6þÍ,ßKòXÙÌ)®˜ ¯gyisSS—®ä† =up:P|/úÂJß­ÂÁù­¸ wª¯9e§äBsƒñµÃ¿I>—ÁÅsfVW²ì”Wóå¹;§òI^ºdÔÁi*á!}.ÎO=ä±Â"¼˜°¼ÖB»ôA*±_ðk'¥Ë=Ïø,Üê“J˜èjuùÈŒvyØ#w©½§´¸@O[«W§HŠ<õ,Æã[Z—Á çæÃÆØ=ñ¢{VcÓ5[ëüŒ2„­Nqnç*ÒKÓY½/Âüêz¤þŸË›fD»´NbÄVøíœ¹÷VRçØxzÒK5Î;¤jk«/aÓƒkNÝ¿ž‡I¿>o=ÕËû Ú³s~n›?«Ý›{;"±µ"ýµu"×ßoü¼Ó’~¦›îZ”sݘßå &ÜQ w¿ÍßU‹Øúóvw a0ñÛä¥vû¾6Qh²„­دgG¨É½1Ð÷j˜qö±ã|IEË+s­Êχ ßDÒœ 7#†Œa»Ç_‚ûV”¬šSpàœ¶IÔ‡Ø%šTóR䃕uÒÈÌŒ}§8µ k¬J½f®kÕi—ás™Ë—’Wʰââ­ «~ºìÊp7ŸjFŒ/t)=Y™#¿ÒAáàÌXÂðTýƒÜq{‰÷iù׆ºDq~çÖVÅ_1L}½GDyâßv5?pÂì§_zäQƒa%øONo}8ØÞ÷cw00=ÖÙ…2×2·äYÉëïÎ7ÞÏ\!Ã0Pn–ûö¦+ûZÀÎÉ@õ ˜”vx™9wð®‰n<+ fÅŽÓógªŠò°ÓŽÇTË{åëºíxè¯çâŒ5ÿÔÑ·çôõS-HGXQÝâçPÌø›îÑÖêÊ1¥±{(þ×)Ñ7Ïb+ìt™þž,zÌXÞ~zun{ÇJؾ)òÙn/\×’"„ŸàÖÜÑála¿¯ŸôÙ0ñgTåm~z[­sv]Öþ…4lbÓg*b:¬V“Éôô¡@NÎÆÍ[˜ý‰KH¬›hòÛÞÆoëArxZ£+5F]X°ür{Ìâ´Gà^sû/sÒþê|’0ÑÂ!FÏ¡fÉÿœ),":PF&##›ÈÃ_”ó‘[Xbh.ë·øk,~qGÉ£m{ØÅQ¦Í}cœ_>í‘‘'‰NЉ‰–›Ò£K‘Æ wƒºÃÇøöÒ"°ñqÍÆ?—Ië˜ëŽ{ºˆ,¼ø~üâŸå¼w%¶—4|/Å´`¸{Yu£¤ `aç*ÎÍâàoz$¾Á'Þ,ßï íÞsOÂ/¬E[×ÍC³NÛ ·—‘ÖÕ;~ѽ:D?y×mÇøû}0…® ù:Œ”á\ÌU×n=¡u§´Éh¢˜¡”™Žñ Ä`>Òâ$¬½DZ¸ÙßÀwÏ gb/.šr±s{ÐF7os}‘Ÿ6ï2{Î3±(þÂ3 EmÕq”)Á'U•µÕUÔ´m£€ðlk×È´ Ÿ¯ÃyÈ)¢lŽ5“âø¢õ¬çytdï‘Îéâv{yš^‹˜SQÊþ…{¯Dž5>ÛEkR&®òÄ*¦iGâ?›œ˜«~')~ŸK§Û«Ý·è 
žiÍÍi“f¾Ä`|·ÈûnùgΑÛZOW\"Ââ*,†ž0o–mc@Íü··ÎÝžêe¡ãeѹ uœ²H¯éZœz}™¤K æÑùGª?¸åvf)ǮģŠQh^5fR_û¥¸¹ðÍ-UßÊè'ü6»§Ç•Ù¡ ±ú„iâ ôàTM·ôÝ觛徽éò?RS)«Pc[õ!Œ.©ÞrCSjMÇqi·ÛÜ>ÞÍIÂçÞÄv\ AHYƒéà ÷ɦ‰X¢ŸtÉ£_C/Êß6ü™ø—K*ì› Î]²ÚÔþ8}=}aðõΗ„Å»K6ÅŽÅ0¶)ÍNIâ¿¶± ñnë&º­ë"¿ªG¿F¶FÅm}G¿T~½GÛ)Ë'¸ýÑ«± ©{ôAu(ë@Ï‘‘Å%%%v¯úÆ-Í9ÓÍ3À$e'ãâcß …êkwGɹþVº‹ iµŒ”;æÓ7¬”9AëØO§Coä)rRÏ|k~|§¡c„âIâsZ¢˜”tN””–ý˜žØ®úHšÖÀÞÃÀ¾‰t¹¤G7‡¤7ÜßÐuRz…‡­Å2ýi¡á9‡ã;EîMöþ˜gbQ\8WЯÃŒXü/‚âR9é‰â ¿¼²å-ƒã-»hw0qŽ6qîIÓ“C Aw2 C¨p¶5¶7¨æÛPV/Øø[ªlJw -zòý˜Žm|õ_CŸyí°!霣—Âê"ÈÈï¥UuþÉ}Ú͸’åf¹©¦â{yÙd) ªo¤@#¹:¿špÛ”|ŽŠ ›õ±$³øé.ÂÓqp°‰J+Ê«MQQW™ÄÖE×›öI<Þ)úÁéãç>Wÿ CÐNQÕ›ÏÄ"ŠÍq7¯Üô ûAlj),ÂpHÍZ²z½©±WÝrµÍý3§Æ2ÃÌ#(®`"Ëø—d[›—ÄÅáa0Q>~Ï^F¼Mɪæ¦-Ê*æ”Rœ6ÇÈØÇݹ7·î.~Ý]Lm~X€PÈÛä韋ZÐÑrðËK+«Î4ÐÓ˜@¥è-J“#^W–e`žÖ@d'FO’’¡¦Èɱj8™ˆWÁß¾N˜È9¨™¯Šw#­.¥9_4ížË{,á™HII)ÄIÍÁÞó«ZÁѧ Zzid9%•ŒØ7“§Î‚òI¬+7~Ly¿}û¶>œª­¦R]õ–~‘›òNTR–†ú8B›©­uñâE1yÕÖ·cžÔ¸h%eÕMs–Žæ©S§%”´†r€==q‘îÅù¿r³ü!.J~ ¬ÏªodBÁ&¨i„¶•}Ê&öž&]·pRpɯÿÚþp*µàâÝ.‹{Ÿl;L“TV Í¦7ù¥æÖœ·mP”ešîŒ·A†¦` Lc]rltßêÓÝ£3]/2fIKK§>E~Ò_a2ÿäifNž ƒç0Lʩ裌µÎ‘ë‹p×=4õ? U> ¾¿Nûmä:zzu¥^YF\äÓçÏ ¯Ÿåqžãšê_ù>2·°èg:k–/9qò*ŠtŒc·ï47)šrüx-µ)ÿ6+V¬ºyóºÞŠMdÝ:RÝÄùÈÉ+ ñqRúkV­¼xñ¢Þrs ŠñcÖÈ)áÏ…DŤ&‰@…Tcûé@Nn½yÓgg-£cs]~ÊÛ²â¢u«– ÞW‘‘Y[[ž>uZ}î’AlÚ'8~øÐµãÒØÎþ98í_RÞ–-^»²F6ž=㎧wÊ›`©ä›áß‚miôs¿¼|Õ*f¦þzµ¡  °´´:{ÖEgñºŸ>$ÇEé EyÙfkWÿóœpq°/4yrçŠÞ23ì˜~H022ÌÔÖ¼¯@·Ì²å+=î^™±Ìœœ|,.Pž2nÙÜYÓ¡¶ª€0ÿÁÜbÓ¥ ´MÖŽµŠ`駤ìÌ K³õƒýET””¨¶}îüÙ© VŽ5w%ó²>Ylè×Íe ®¹{|ˆ‹“[žÇÉðÍÏîº/\ÌÇ50èi×›™»¹^žnjF9žzLó[NÚÇ´Ä͇I~Ä…jfÎ ö¼¡k²n¬½w2ß½Âb0ƃÞ/ÀË5oþßûn3L7Œ5uó>²¦ºrõÒ%PÏÕÐ 3#ƒ­íö3Îgå¦Îdç#¿:-â%ß<’¯½¶½ÓÖöÌÙó’ÊSÇ΂)áÏɰØ~J>k—›zù=yá5eæXñI€¯©|zÿÚú æì¬ÙR€RÛºe‹³ËY噯ÌóõkÈGTÁà`cݵsGbz†ï Uý…ƒ¸ÖËÐ={뽯:Ëà”þy[[;ÌLŒèËÈÌö¼î‡øÉ)LÙlaö³±Êt1úû<èuBì;Ù#¥–ŒÅ7§E…äf~\¾|¹æî]Ã!KÌŒ »‰ÚïÖíët ä¦Í)óýš*¿¾öÂ+ ¸vÅÒÑQý$Ì¿g—]N^Ç­s|bÒJš#e*ZU~fD ¿²†ÖÖÍVÃ<«üÜ\Hûå}½ã~‘KPTBU{¤¸‰ª)Î}á;YQÙzÓF €ê`e‰Åz‚óýäô ß[7ùÅe'É« Ó1uu•QAþÍÍÍKL© íH¹~BEI¹~õrø˜óØý"· ¨˜’&åøa˜U|õ·È ÿqdd¦‹©ïÜ1Lr5Kw*Úb¹÷àn #¯m0<½´c1˜òÜô7Á/”T4–Ã"í·•8ÿ*:.Á?ไ¢º€”üðt1‚m¬Ž ü^^f²hÑöÑ8‰Ÿ—k×Îèö,øURì;œ¶>¿ÈðÌ*z,„¿ð¡¢¦Y¼p¾¦ÂÎddnމ;‰/‹!a±ÑršzÜÂÃÔÈ5•o‚|±ìb“…jÃæÙ €ê`"->IZœð¦yŸšþ2à-=£¬Ö¬Þ1…%̉úô‚™…ÍpÎlóukF´‘EøI3»22³ýŸø‘QPNÖšEÏÌúos5‹)Ïˈy9‘‹{®>n¸ö 
"ŲiaÍ뢒R_?Ïò²RÜԙá÷o¾!=&üSj’‚ŠºŽ†ŠöÜð/ŠÊò“ц$GTìûÐ@>!)5á0Œ¶±¢82ПŒŒ\ŽÁše£E222²93tÑÖÐØøòUX»ha)yQEõÞ1…ž½ß¾´?foáýN3µ5ÑÖØØÁ/.#¦¤INNñÏ\]œøœô‚#=ßTC„œ¤8iÁ¥Êªê°¨7‰qï&L䘄S²EŸ±-M¥Ù o#ÈÉÈUTÕ•å¤Ô'KŽ2#O˜´Ùjëê"bÞÅFGÑ22‰áTYy†¦ãÛÔX˜•šøö -š†º&NzšâˆYŠƒuýê(ÐÔÔŸó&…ÅäU9…Ćl¤¶þGz|ô§´$^AQí©&ó 0h’C}Š<ÚP¸¸ôÛëðÀ©ÉÜB“Dp*tŒÌC” |Ë¢œ„舚UŠÊªªJ8¥í òŸ@IAa0Cm(œ•—ÿ:$´07GPBVh²òxª!rf‹Å7ÍL}ÿ. eFEM]c²Ôz,ô ’üCáÜ‚ÂW¯"¾|þÄ/&-$7eÈ&8`[šËò2ÞGGb±˜)*êSp2jrÕÀ?„ž¶½ÒÜÜü!+;>.!'ë#ã~A6nA¦‰œPÉn¬ÿV˜[›…Þ¾ä”"bâ*J rj ºj cÁÈÔTTÓµ4ÐFú˜•›ÿ66.ûc%%;¯àD^A&v^òþû®h¨.ýò9?7» ;‹†Ž^T\BEQ^NKe––ʈ¶99¹ºm¤ùE_£ß½û˜–:Žœ‚WH”•[`ßÀô™ÔWå~.ÌýœŸÅÌÊ.-#£$/+g4ƒ¶Ñ;ë#CŒa`jccc\bjbâû₼‰\|ì|‚¬|B´ôDãÕ”}ÉÎĮ́­þ!(*¦¨HXbB'‰ÚâåZfJ ×ÔÖFžOMNú^^Ê% 2‘‡•Gd<Íè@lKÓ÷ÂܼìœOX2,áÙ«  §1e†Æ”±`d>.ΕK¶ù]|RrRRyi1'¿3Ÿ0õ@,l‹„teñ—¼,dä–æÑIâÊSå”åu”塜 úŽ‘‘IŠ £­ãÎêšÚOŸ³ó¾””~ý^^^]õíOMK9žŠŠŠ;nÜ8r2L ¾¥iÆæúÚš†ú†Úš* ÊñôŒ˜™Y&NœÈ/À+ÀÍEÁ+‹™" F&Ôöø¸Ñö‹^klÌÌþ‚øZRò­¬ôGÕ÷ÆúzŠñãÇSQSŒŒ%'£@ni!˜¹®®¦©¾¾¡¡žš†žŽ‰‘……mâD6A~^~>. +FMit[›câü9ú´µ*sŸ²órsóJJ¾–—•ÖT}'#§ ¥g@Et<55y}]}YY)7/ßÒPW‡6dÚªJ2 r&d?6!>öÉü1å1R)((”äÐÖqgÁׯYÙ¹ùù…å¥%H bFCÇ0žŠÝî”ÔÔã°Øâ¢"ZzzZZT‰÷{=28)ÚfBaäããáçgàÅÈŠb03à~ï 4ÔÔ¤µLÚ÷ ÇiNAQvNnQQQyIIeEyss =#5õxJ* **,v\}m-%a-o<:TW[ƒÕ•ßQ˜Ž •kvv~!^j.)Œ‚cFÖR›‚¶ŽFþRTŒ <2r2ò·²æ¦&†ñã‰={±Øúú: JJ,ÓB0rmc}]uUeSS#ºÌ,¬l„Ï+"ÀKÃ)Ž™,ŽÁèCa@õ0R¡¥¡–“’@˜bð ¤ D˜¢o•9Y‰IhûS;;»ÿþûǃ­þ ×ĉhëî@K+%%¶TÈÈȼÜÝÄaggsõÙÈüÜ\hë&އ‡‡²²2˜ ÕÃfff0ÂÒÒÒFÔÕÕ100€ÛÈôôô`T wXYYÁHSSa8P__ÏÈÈvl#ÓÑÑP}0ÜáççGsssùøøÀý„8Ó©ìðÏ),,¤"¦<ðx|ee%777˜ÕÃÒ(8sss°FÿU_mm-ØáŸ“’’jd°ùôé333˜Õà "aaa úúOKKKMM ØáŸ“šš* vT222888À¨>PRR’““GEE)úOssó?Àÿœ·oßâp8°Ã '..v@õÀ€ŠŠŠŒŒ,//¯¢¢‚‰‰ Ògªªª0ÄAž™™™ÂÂÂ`HTT”™™ØaP‰ŽŽž>}:ØÕ#Cõ!¡ÂÀÀpýúu0HŸÉÍÍ¥¡¡7n\xx8¨¾Hyyyaa¡¦¦&˜bPyÿþ½““ØÕ#ZZZÒ¸Ä .€ê묬¬ÕÕÕ/_¾\µjä_qýúõ©S§‚•´´´††F €ê€‘ª½IJJ"áçéé¹páB°IßHOOçççONN~õêXãâáá±cǰàrçÎÞ €ê€‘ó%¢­­ššzôèQP}}æãÇ222ÑÑÑôôô)))RRR`“¡'???33sñâÅ`ŠAÅßßÿÀ`TŒÆ¿hÑ"###iiiggç-[¶€Mú@RR’›››¾¾¾‹‹ËåË—Á&CºàÇe°yûömIIɼyóÀ¨>1PQQ±°°ððð¬ZµÊÊÊjýúõ´´´`–ÞòñãG555]]ÝM›6êzÊË˽¼¼¾}û¦TNœ8±fͰªFHãUTT ±wëÖ-‡Y³f………YzEZZÏ\\\¼¼¼•••RRRW¯^ݰaXf(±²²Bz›’’L1x”••ùùùݼyL€ê€‘==}ii©¥¥¥““ÓéÓ§ß½{gmmíââ–é9/^¼PTTD ‰¤¤$GGG¤¢Aõ %‘‘‘¯_¿ÎÏÏS ¶´Þ¼y3˜Õ# 
&&¦òòr@bïÀÞÞÞRRRwïÞ]ºt)§‡„††jii¡€¬¬ìƒ´µµÙÙÙoÞ¼ K8 Hf_¸pì0¨dgg¿xñ‚ô¸P}0ÂT_qq1 ìÙ³‡——7<<<99YPPœœÜÄÄìÓCÕwüøqPSS#ù6tuuÕÔÔ\¹r%‹û äSWWÿ"ƒ ²ð¹sçÀ¨>y°²²–””ÂÎÎÎæææHõeff !á7þ|0Q÷„……!å,,,ŒÂRRRÕÕÕHEKJJ"ÓYZZBÔ`sïÞ½7oÞ¤¤¤€)•ƒòððÀTŒHØÙÙ¿|ùB #¡ríÚµ888dggËÈÈdeeÙÚÚ‚•ºáÖ­[†††í‘ð B•ã«W¯rpp,[¶L]]¬4H ±gnnÓù›øøø3g΂)P}0"áââŠmÿèííÍÆÆ¶dÉ ‰äääÅ‹#s÷î]0ÔŸxòäÉëׯÛ?"Lê ÐÖÖ...&'‡êÊÀSTT¤¦¦†J)¬52¨ÔÕÕéèèDDDPPP€5P}0"áãã#Íëk}­’“ûøøL›6­  €ŒŒìþýûçÏŸHMM¥¡¡suâÑ£GÜÜÜH!·ï™={¶……),++»{÷î©S§¢3Øj`A…V\\<22’——¬1¨ÈÉÉ={VRRL€ê€‘ŠÐׯ_;îÑÐа··WWWŠŠB---/^Œ¢¹¸¸€—N8;;#ûtÜ£££ƒI~~>Rƒè£­­íÇW¬Xqûöm0×@‘™™)//Ÿœœ ’o°QRR²°°X¾|9˜Õ#QQÑvo.íX[[øðaÁ‚=Â=¾YYY999ÅÆÆÂBØ$êHOO_¿~}§ýZZZwîÜÙ¹s'éãåË— ¶mÛvêÔ)0ZÿyþüùÊ•+?þÌÌÌ ÖlÉgll¼uëV0ªF6HÂ111eff’¼P¶sþüùµk×._¾ÜÝÝ´çܹs•••âââ«V­"­O0ÆÙµk×öíÛß¿téR$ðÚUÂÏÏoΜ9666§OŸ»õ[[Û7oÞtê</""²wïÞ5kÖ€5P}0àææNMMí¤ú×®][·nÝüùó?~LÚÃÀÀ••8a„;wîèëëY£ÅÅŽ{÷Î××÷÷CK–,Ù¸qcAAWûNsÑ¢E0Ô³Ï444ÈÈȬ\¹&I6ÅÅÅOž<ÑÔÔk ú`”€ô^rrrÇåÚqsssttTVVŽŽŽnß9}úôo߾ݽ{‹Å†„„L›6m  éºcÇŽýéè‚ N:uâĉŽ;>|¸ÿþNÆzÂãÇÍÍÍ988Àƒ {fffÙÙÙ ` TŒ$%%‘êûÓQ$TÙØØ>|øÐq&ÕR"7oÞD;Ñß.EãhåüùóãÇ_µjÕŸ"ìܹSCC£“êC =uêTdpNNN({¥ªª ‡Ã-^¼Fuúúútttú¨>%ÈËËûûûw_LOO»}ûö¬Y³:ZE$<<ÉÂmÛ¶íÞ½{Ô›«ººzÏž=Ÿ>}ê&ޏ¸¸¬¬ì•+WÌÌÌ:ÒÕÕÍËËC`ÒT÷X[[#…LEEÖTRSS544Ðs@EE¬€ê€QˆººzFFF÷qXXXJJJJ9räHÇEÉI ú":úýûwmmmè8¥m”1uêÔ“'O"•Û}4'''SSÓßU‚††&77EMLL¤¦¦†BØ ˆˆ°Æ ÒÜÜŒž"""ååå` TŒZ€a``HII‘’’ê>æõë×SSS™™™ÝÝÝwåÂÈÈ‚訵µõªU«Nœ8AFF6šleii)))ùûj ]jéI“&9;;oÙ²¥ËûöíÛ¼y3JmíÚµ( åRÔ‡NOO§CÀ®]»îÝ»ÍÎÎÖ@õÀ(GNN.00ð¯ªCœX^^~ðàA++«„„„.]>,'‚Çã÷ìÙséÒ%T‰·°°V:wîÜÛ·ocbbzÿÚµk222H(’“w]]abbúüùsppð„ |}}‘PË…ÐÆÆæáǯ_¿îr= ``Aw¥½½}JJÊÑ£GÁ¨>hii…„„ü©WêwöîÝ‹ªŒïß¿Gº¥ËuÛ±Xì"vvv—/_FÚÕ5Ñþ‘h"Ó§Ogfföü>>>333$€ïÝ»×M4oß¾]¸paÞ¼y·oßž={ö˜*{ºººžžžÐ¿74zïÌ™3{öìk ú` ahhØÛ 7oþþþÍÍÍšššHõ¡Jd—ÚAAAqŒ _¼xÑÁÁARRòüùó#HòíÞ½»W’ÄñãÇÅÄļ¼¼Œ»¹‰Èµk×XYY½½½544Fw‘ÃãñȤW®\A/-- îÁÁ¦¡¡aáÂ…éééÏŸ?½€ê€±’aãÇÇáp½:‘ŒŒ,22²©© U(?}úôòåËî$° ‚yyy , Y½zõþýû™˜˜†­qΞ={éÒ¥ìì쾌Ì;}útzzú¿F^KÄÇLJ™™ÙÑÑÑÊÊjô¶›7oÚÚÚ"Œ”? 
/ÂÃÃ/^§ÀÍÍ}ãÆ …¿:JmÇÐа¼¼¼¸¸EGGçéé9¬lÒ7.^¼ˆä½¾¾þ… ºYê(JKK‘ÓÓÓ‘ØËÏσ úÀ ŵtéÒ“'Oö'‘­Dòòò&OžLCCƒt]OÖ"G‘Ÿ={F çææš™™=xð ¤g̘ñ¯ ‚Äž¨¨èáÇû/QŒ?}ú¤¦¦Ùó³ØÙÙcccQàîÝ»VVVæææNNN#«¯ ;vx{{oß¾ ûÑáÔg˜SQQìréÒ%???0ª€Ÿ MBII¬££ÓϤxyyPàùóçòòòW¯^íáN>>¾+DH‘`8wî\LLŒžžžÍy¹tuu=pà@JJÊ@ =EâÉH--­ÐÐÐÞž»”?xð ’åÖÖÖ(oÃVþ566¢KväÈiiéS§N]'÷×`“ššºzõê²²² .xxx€AP}t͆ ¨è¿êkgÖ¬Y………(àïïoaaÝ7„……{ž‚‘öÏž=C2((H\\|ÕªUË–-cddX#”””Lž<ÙÎÎnÀÅ!©†4¤””T߯‹b±Ø}D0ÄÑ’( ©©yþüy..®^rªªªNŸ>}öìYQQQ"pC 6555‡F{öìÙÈþ=_S@õÀØÅÖÖöСC999üüü›²¾¾>JÐß9sæ¼yóf÷îÝ[¶l¡  èU:³‰´,++svv~ðàAbb¢‚‚‚‰‰É¢E‹ú<®¹¹Y[[›––öË—/ƒ´¼úÉ8Ž……%==½?SõÚ⤥¥­]»öÝ»wH¦¢ÄÿäFu0@ üÌ™3ÑÑÑóæÍCWs?¸‰›ÊÊJdviii$ù³ úè)HêìØ±cëÖ­^^^ƒôHOúúú’Â(`ooÿýû÷mÛ¶! Ó[ˆ@òi ‘ö=555wïÞEù ¡££ÓÒÒ200@:i¹nÒill”““?~<’£TTTƒjd”¥ììlIII''§Õ«W÷35 ‰çÏŸ“ÂAAAûöíËÊÊ233Û¾}ûÀö‚655yzzÞºu+<<ýKKËN <ÂÂÂNŸ>®/2øþ6À,¨>ú’a|||¨f?ëÅÍ!B ¿zõÊÑÑ1!!ÁÈÈhçÎâââ}K“†††4 ®ãÎŠŠ $Wüýý#""ªªª´µµgÍš¥¨¨˜ŸŸ¯§§',,œ˜˜HN>D• zzú¼¼ª†Ž“'OΘ1ÃÑÑq8LBÊÄ„HûžŠŠŠ .xzz¾}ûVRRÒÈÈIAQQÑ?¥€Çã‘ rppX°`³³³©©i÷ßHAA¡N¤Ë£µµµ!!!HÆÅÅ¥¤¤äää°°° lLž}ª««C¢ðWõ˜‘‘t’vïÞ½ËÍÍE" 6 éÓ§KéùOj_‚Dss32&ÒÁÁÁ(q”s$ZæÌ™ƒpwtjSˆE÷ 2 ÐÓÓ«ªªÎ˜1•@’Àûï¿ÿÀJª†)ïß¿Gʆ¡ðë‹%Õ°‹‹‹wíÚõøñc“ýû÷ûùù¹ººnß¾]DDdÚ´iH· ¿}ðÚsØØØô‰t'''ÉÂäääÔÔT$Þ¬ª®®æááAê ‰ÃY³f!Õ§®®Žr;wîÜ7oÞ ]‡äkMM :*//¯¢¢‚~Å$"6lŒ_AFF6H§ý(Ï>>>H ¢,ÑÒÒ¢œèéé Õ:ºo‡ÊÊJô«ƒ‚‚ÐåHKKãääœ2e º $·@ˆÝ»wÃC@õÀÈUñQÕ¶¢¢bÏž=Ã6“Hé8pàÞ½{H~9rä:Ò!T5owÓ1>’‚HV¡ê;úi :::H·àp¸!Ë3?###¤%""""##ãââµCCC‘,**BfGÚ•ŠŠêÿû_HHˆ””;;{^^ÒUUU(@ÒäYYYjjê!Ë9éK;)œææfô+ž?ŽòŸÀÄÄ„òO²*ß*ðMMMQQQHÝ¡+‚~H}}½œœœªª*ú-šššFDà± ú`´3cÆŒòòò'N Ÿ\•••!¥çî«‹”Þ%"=<ɧµD:íOMMõõõ zûö-ÒQÊÊÊHF¢Z>G²Z[[‹$Dxxø»wï®ûúõ+B222JJJjjjHu¿ðiyÃÒÒR””——R€„ R€III(ñôôô¬¬,¤ZxyyI=‡(þäÉ“‘T¼¥)ÈÈȺœ ‰ÇãÑoGj0,,,>>žžžY]²9sæ ìýÃò“íïï4jlllAA¨¨(ÊØÔ©SQQ祪Æ"/_¾œ;w®µµõà9™ì HÞœ "++{úôésD0}I";wî츳±±ÉŸW¯^}ùòÉ'$ .\ˆbvŒVTT¤’^444H×!E‡ôƒ––Vÿx°²²¢ôQ ¤Z /_¾Lrú‚´œ‘îS¨©©‰ŽŽNHH ‰ÃÌÌL¤?QRÂÂÂH"M(//~ÝÀ.RÅbÕˆtÚßÜÜâç燬Š2ƒÔ)²’ÁôéÓvð-ú±H6“¦>"%¬  €¾hæÌ™ŠŠŠ¦†ªÆ:OŸ>ÕÓÓÛ»wïÁƒ‡ø«‘*@µó´´´Ã‡ï"2”ߎDHû$=RÏUPPÐÖ­[ß¾}KZùiQ$–V­ZebbÒiuøÁ‰–ââb¤š/^Œ$ܳgÏzè2 Ñn“’¨««#õIÆÇǧ¤¤dee!¥$""‚$.‡#Í^ѤÈn]zÄÉËË{ðà’Ù111<<TÛ¾zõ*E¨N?à‰_¿~}ûöíÇŒŒTçææöðáÃ> )²hÑ¢¥K—ZðïBïÒG$]:äêêŠÇã7mÚ„ÔÚ@ùÞ$-µ·jÕªŒŒŒðððþ/òÞ7 
ˆü~¨¸¸ØÇLJ´ìAYYÙäÉ“µµµçΫ¨¨Ø)&2΃.^¼;uêÔuëÖ+++wòÖs˜)ÜÐÐpþüyT2óòòÐÕܹsç¿u¨>Ù,Y²U¯Ïž=‹DË@¥¹uëVoo€òòòÏpLLŒ‹‹‹ŸŸŸ°°°©©éš5kìˆ ½éèééí‰>¾}ûÖÉÉ)44tÙ²e¬¬¬ýLÿæÍ›………7nÜèRzýCØÙÙ×é´?11Ñ›HjjjKK‹   ¹¹ùâÅ‹{ž8%%eGÝ^RR²cÇd dØ3fÀm  úèHòiii ˆêCêëîÝ»gΜ¨ìeff=zôáÇrrr7nDúÁÝÝ}šQIIééÓ§¤0Ò<ëÖ­Cv°%Òç%899‘æ™?>úÉù!¥÷¿ÿý¯´´É<ôÃ;v{FGGß¿ÿÅ‹HÄêêê.]ºÔÐаç6acc;NCtÁŠŠ óóóŸ:uJEEî_ÕÀß‘””DŠåäÉ“¼nôŠ'Ož¬Y³&44ôرcýÏ’ŸŸŸ££ãçÏŸ­¬¬¶lÙr•ÈÈ2)2)Œdð¶mÛp8Ü•+Wú6RññãÇH™KHH 1ùWï—CI||<º:033ûÓŠçÊD:îIOO¿uë’‚ÕÕÕ ,°°°––îÉ7RPPl%‚!δ¶¶Félذ•–¡\ÚP}0òرcÇÆû¦újkkeeeW¬XÑÏñœaaa¶¶¶_¾|AbݺuÃmLcŸYJâââtuuYYY=<:£££ÓÒÒ‚jð¨ÞÜ«QmÛÒÒ2--­oUíšš;;»[·n!͉Ä^ttô(6²¼¼|FF „‡‡+++Ϙ1霞/€d0Š/ ››;ôþ]س··¿xñâîÝ»wìØôù&Žtl»6&‰ÀëׯŸ?>//oÆ èë»Oa!ðññY»v­±±1:ýßÊc@õÀ°cÑ¢EHTôJõ-Y²iÅâââÞ~WAAÁÊ•+?}út¶1ej ¢¢"Ø»wï¥K—rž6mZONœ={¶££#Rïß¿²Ü^»vmûöíÛ¶m;tèP{ïÜ ‚ÔÚ"¤ééé›6mòóó›;w.ÊC÷£d KJJPÀÁÁÁÙÙùöíÛ£¦Ó@õÐ_V¯^­­­ÝóøRRRVVV=?¥¼¼iËììlWW×ÀÀ@°ùA"‘‘‘–––ûöíûë)ëÖ­KNN^¶lÙ;w5oH™Ïš5‹……‰Òµk×þC+‰‹‹·ÿØœœSSÓçÏŸ# 9r„žžþOg‘ÖØHKKVUUEòoXM‰Tü›º5ULLÌ_§T555 ž;wnÞ¼y=L|Ïž=—.]:yòdPP˜ºjjjEEE?~D²äøñãqâÄêÕ«ICCÓØØØÔÔ„Jcmmm`` 9Õc¿.…§  €‰±±q÷ÑBCCŒŒŽ;Fª—=„ŸŸ????::š‰‰ÉÛÛûOóý888¶mÛ¶lÙ2gõÞóçÏKKKGœÑ‘\w¶_‘+Vìڵ믪ï¿ÿþ»ÿ>’4`F Ñ“gtttff&++ëÕ«WI+‘!=ŒÌ›““mªïÿìÝç~Ï9œ³Z,))©²²²œœ111v—;vøùùU À`0”••ÝÜÜÆrýz"""$«())=yòDVV–t111!Á/55U^^¾¡±\\\¢XP@NdK~÷î ÌK–,‰‰‰1b„§§')/ ~4 %@êh™Ú¶mKr;õ8qBFFFCCƒÝ«°°PAAáäÉ“FFF(Ôo\A$‡kii¥§§sqq‘.ýúõ;uꔓ“S½ÃïÚµëÚµk<@éêµzõêyóæµoßžý$O’¢mmm ÂÂÂP¤>€–©]»v)))½{÷¦XÏ÷·¶¶fwÏÏÏ———¿qãFU„ßE__Ñ¢Eƒ&­Ã‡?wî\½©tß³gÏÓ§OQ´/‘‘ÉÎÎ:t¨··÷‘#Gœÿûï?KKKOOO© ’””LOO' 999ÑÑÑì·]ݺu;xð "_3aooáÂ…]»vÍŸ?ߨØxéÒ¥u‡ÉÊÊš5kVRRÊÕW¯^]¾|yß¾}ïܹsàÀeeeR[T© ¥‘’’ÊÈÈ žžžÃ† cßݤ¯¯okkË~è4íÚµ›:uª‚‚‚€€À£GºwïÎ9@¿~ý¼½½%$$P«FZ³fM§NŸ?Ô³gO’œñHO¤>€–†„vêóóó³°° K—.[¸p!ŠÓ¬ÐétggçÙ³g_¾|YMM-<<œ3õ‘µÖ­[·Ñ£G£PßÄÜÜ\PPd¿äädÒleeuøða”©ï/”»g€íÑ\"¢¤9/:«_dâQ:ÊÐBHII=~ü¸¬¬,::zÊ”)ä_¬¬,T¦Z¾|¹œœÜíÛ·{÷î}ÿþýªîééé»wïÎÌÌD‰š`üøñ¤t={öŒOHHPUUEYZbêûb(Ü?”£ƒóõw›µÅêacd>LN¡¨v›ƒAQH}-…ˆˆH^^^```§Nxyy'Ožìíí§Ø7[‹/&ÙÏÒÒ’ýd6+++~~~Ô§i¬­­ÓÒÒÆŒCÊ»páBÔ %¦>^J¨f~^¬ø+ˆ‹‹øð!((HWWwëÖ­ŠŠŠÃ‡GYš-›+V §¤Tü96..îÞ½{œoY„&X¿~}ß¾}IrŽˆˆHJJRVVFMZ\êk”‚g1wîFÜ‹Š}™““]À¤ÓéÒJ]4µõôhË4|î« ëiÄ»÷b¢^$åf”&&­ÒQK¯¿¾N7éZgÄܸ÷’ÁËÊœ 
¥jh¬"Zc€’÷q×o>§XC0Œ¶ªôU„ë›kfLdð­Ð˜Ä¤×…4Šb Hwèah<¨¿nç/,i|øå˾ ¯sˇWRTë9pÔXmŠ*Âv Ðr‰ˆˆ|úôéÁƒ³fͲ³³KLLDMš¹Ù³gŸ:u*''‡ ðòò.]º´¡w÷Á7¹uë–¤¤äÔ©S׬YsäÈàïJ}Ëd×}u°³ÏÄšÀ‘©ÒWj´_ûÕñ´Ž&†™«°šsO-ìÀñŽP×w« j\hšÿì‚ÉXתVý¥á¡kõ8úç4ïaáVïœ6¯©hpȳÑåÈŠï#&ˆô=‡àoM}………Ïž=KHHèׯŸ´´4jÒÌYYYikk·k×îÉ“';w¾qã (Ë÷#šä½'N¢WêË»¿†3òiÌ9ä·kv噽‚»ÇéNwg·¤ž(˜¶/ÿÚ*šs:íç…†nÔï L¦Ÿ^5ÇtÓUl¦-;õ•••>}úøñã(H󧬬ܾ}{ƒñüùsÿ!C†à޾åŸþÙ»w/IÔ'OžœŠQãvö˜C^Gwž1TSF„„;ºŽÙn¦ÙîzÇkSãNÁ/ï *í&i)ÉÐy(ºt·h¦Û^ÐoK3ÎÈgöïKï™ò•m¢ÓwÜî¦0DÓþ:»=Ài¼×ȧæ*ŸOÛÿ¯Fä}ˆqqvÅ{jd'm 2~¨¶+¶T€–ááÇãÆKNN®ÛëÇ$?´jÕŠ‹‹KJJjÏž=DÅšƒÔÔT[[Û„„ó8»Oœ8‘ÝÀ~ú -vvv²²²(Ú7B>>22’ùãÇ™™™3gΜ5kV) {°:Á°Zfêѳܮ¿œãªË—Éú.µR4q]¹xÑŒ~—wŠN[ºÆ6`yU{ØA{ƒƒöµÆ>uÃ’µ ô;üˆ%ý”ôøgû±Y Çf}aø¤ô7ù”JYÒ‹c­_<¡Ö«IEzÏvÖp݃m %PQQIKKûÂá/»áýû÷$Q())=xðEûíöîÝëëëûÕÁŠ‹‹===ÿý÷_mmmÔ­1HîÔ©ÓW£Óé @äh±©¢$íC™öTILÀ¿ûwó «gWVÏ$Ššr,ßg½"..c2—•¼‹:ê¾ßûØÁФzÆ ðYB>¥ìù8΢[£~ޤßOh pëZ]L,-üS_av6•ó–¢Äù[ý—•‹  … ‡­;wîüßÿþ÷åÁxxxùš‰7†‡‡ß¾}û«CÊËË“•‹È×x;v úòim²²2777”  §¾Š_Ã-=ȧFÇ‚gÁÇìZU_Wyb…ï:SÓÕ_ŠG\kîÊäSc´¬§>ëX쬺_.Ér™Ï´‹³é”¨Jß¾Tت!ïßK¦j<Í%ýÌ -"oÍ·Oè¯Yᱬ÷×¾WoÍgzºì¼æ¨[ãt_Þýã›_`Ch9¬­­=<<>|ø…ÈÇˡZ5$õIHH¼{÷î Ãèéé©««“õ‹r}###²GXYY54@ii©……EëÖ­Q+€šúŠ" úr¼½]ëhT ¹¦xe+½³¡AocŠãMžíÅEË¿QŒ[MûȪ®sŽœ&R5š„Ê ¡½¨êÔGitSa‡¯¶R‚œópÒ¦ÝXíí¬!Ræåj0×½áeµñ¾|˜ã¹,a˵5_ºzp¶«5#|«¬>çc•¯d>7– [®ÿ×ö,Ç• ''òÆ/ދܠV¾@þ§™,¹ˆÍ …‰ŠŠ())i(õݹsGXX…jVÒÓÓ«n3«¥sçÎ\\\ûöíC¡šÀÒÒò¿ÿþÛºukÝ^t:½¬¬lÇŽ¨@ËM}ü}B ˜ ôò®ü-9CKbFƒC+M ÈÊvvÑ­§jέ8/sÈLôYC£u˜rìnåÛtì®xEóš{sô¾ºRSreÕ,,íúxº«BmGF•&,ê­Zu^Ì¡9’‡æÔ3dŸ™w±Ó O§™ùOŠ»Î«îûxc7Á EˆÂ+ìþp$œ9sf̘1õüêãç?tèP÷îÝQ¥æ†——766¶[·nõ&“‚‚‚[·n¡JM¶eË–„„„+W®Ôê^RRB2!//Ž~Z@êãïhá¾mPåã:‹ Û ëRy]%¿–“éEQYñ¡—®݉OLMþô‘5$SHJI¡›–î¡ÆšŠµ‚¢1LJ9LJ*L»ætó~܉)oò+b“ ”‚f7>CM†(ŠÖªÏt/æt¯’˜K;¶¾ý꽈ˆ%(¥ÕœµÕT Yª4¥Ÿœê;û0E­;IÖü5ÞeS4sEå=‹ô½pýöýøä|)eÁ7Éo¥:¨õê?bÌØ‘å©õ«BÕŠÉ´*¿ñ ×…€°7ŸÊ¿P;µac§L™0\Q”Š¿ä~9¹„¿²8í°ÙüùF­¥¥U«û?ÿüƒÖ7[jjj$œÔ}';F‹‰‰a?ÉšÌÏϯK—.OŸ>­ê"$$DRßÎ;Q€‘ú(Ù16_BBÍ`.ù|ë„dõG[Ï7ŽÆ£1ÚÖk´m}ë0ÝÁå«ã‹tÖN>öß2Kq­¹‹É§¾ß1£mÕ°©´8<àááá¼b°OŸ>›7oFeš3GGÇ3gÎÜ»w¯úXC@ 00P\\Åù~‰‰‰bbb¹¹²ûüùóôéÓ¹¸¸P€–‘úþF·nÝêׯÅzÕN¿sçjÒüݽ{WHH(?¿â¢Õ«W³W"ü999üüüÅÅÅì}@Múþ`zzzYYYä·*E@ó÷ñãÇV­Z‘†®]»:::¢ ?Ù¸¹¹?þõÅ)6(ÖJCGº}ù{fÃ61Xr½²s”IÿM¹ñ‹8.$Ì<µÒaòªã ÍMi˜çÞ-;ÔXΰUÝ 
VÆWµºFÚ—º‹ê;ÕÛðäãÓnY÷véê,¨SÝ#"mt9Þ©Z¢%Ð?ºªUrSþÛ™z;œK­5¦¾íéÀÝéTæó^¶Þiµúv½÷îEë:Ù¶ hïâA6»ú¦fk<–£svªµ<íVå¿YrmÑȱ›¯ÖÝÙç¿MS:Öîúúº¹Þƒ›\yáÎ+„í©¯Ž»vG׌|é6È4<¼þâkÌÅõöÉ\©ÑnUl˜§©™]=‡ä@·AJn¥uáÕƒ1²•]ùZs޲º@W ™¬NŸÜà¢%Ùö‘¸²%Öß±GEU#e.¤-ªwÌ0÷I‚î N7åÒÿ$i7#óOëTf¸¬€…’ÆÛkeNcc•7þþU_õزáÇ–QÛ"òt…ë_ž·+i >PkóÔN›w¹åF,¨LÔ%Þæ¼æÞu†SV¦Êo¾¼b¢x[?R_]¹Áþ×k„ºQ#dš2ßôEší7sF¾Ñ{s/Zs>Lä®Û]ûK•mQcÛ÷¿òê–1+øi 4¦¨HÎÉ ßrÏß±7«±`± m@͹ޛÑš^1Ù!ºöÕ_!Ài¼Ý>5vÚt£O…†qŽ9±*¼<Ü%ØsA­¯áUh£Éº?®(ÂD ¯uŸ3îÞ :Vªì^Ö5#ßüëïvjËnλ½L´_õ»ÎÝ9=εâ(QTõÕ§L¾/<ït[T\Uä#$¸¸_W;â²Âù‰Ý5´c¶ß›#ò•ï[§ÏöÅP¯¢'Ç\ƒ1 ü˜cÁ{§!ý4⨓ý?ï~‚\µùáÁ“¸¼xSæõÓ àÏM}BmË8ÛõèŠ~‹»„.Rqå‘»¥) ê+o¬"Èàj/N1|t´:Ãú(c~êÂN}¹ò_ÀoC6þ¯ÿÁS-jè±—ÿNãa­¾ÝƒÅ6ıÖÔº µfF¬D‘ýìü*óñgƒ=¦ëzÌñÊX3HšÄï–B }Y³ì¿âê.gu’éKsƒ=þ1[wü“Î!'ÃS ·ô…)ŠOšä8ªÞ“Hu{¥‘é=«bSq Z;ÏH˜õ£O,áà~l™þ±eÔÖð‚©J¿¸¼âÜû»ÖîÚveÿÇâMÚÞvŒ§ï`7)*w§’UçñWŸQ§óòá©{ ë\­UòúÞÊî•ÇG ê†ÊB#bðà úê'3l[Üž¤n6ÕyìÀT…SMŽF1׬¹›&î˜7ÄÁ«:~$xl“diÒ“®7Á’ZîYÕ#c‡Ñœ¾É‡&pŽÈÆ¥Æ-m†¶SzÿÎ}œRÀÙ®1izuä£[Õ‰|%?`¦µO“˜5×t çKØ šáòN€Æ¬ÿ}¦ew¬9ÁnÔ™3œýûïéI[v䓟¹~(Çáfâ")UoŠ:gg1i(+ |ZnÐ9v䳋~½CŠÉÑ+ý¤Ù¬}b] ü¯<³2î\òÒS¡«‡Öá¨ä9†ü2¾sÿÊß´{ÿSné·Ô{—TV>º¹¢êËe?XÆŽ|: Â.,êWÛ®>Ç4Ù®7vëꪙ[ä'Ö£ÔZ·c1J€WH¸ò$RÛÎãvE2wqÌ$Ío;òÉϺ¹®ò4/·¨‘ͱÇ}¦ÞLg´yŸG ‹|ËZÎÜ:‹ù O%ÝÒ¯JvLAÍÉ»3FŒßyhE9ÎÞirk±ðï+ðß¼´*æO¶7™&noÊ[ƒ§ªV…&ŸœÖÏÁ‡¢ŽÔ3ÁÏî'|AÆÙ¹‘ÏdMü9]9ŽÔü†©|„u€ÔW—Úÿ.2ŸÐí<•ãažWfhIÌøÚˆë¯½[2˜}Æ©‹3¢©V¿”Ãiåw¢iO|TkTÙ·þ‘W¦ˆj©Ö¸õ1f»m;¥i¨Rߟ—ž,ÖÕ2¡êˆŽú²MŸ«Úÿ<׸äRý¼œA¢4òCvüøž)çÎEcÃh,ÿ±Ê´/ô¶9ôléðŠÓw*¦Ç3LS¥™qÑ|ÏÌx“™ýþ}yâ¼ç•Ã7å¾!¦`[9’5(ÊMSÚ’3œ3aâƒþÝÕ;‹ËN>v­úaÕýŽT4µÓ§’üVßÔÀ+īՇŠ(¿1dÿ™{úÖÚ-rmé È™/^Þ½Çn¸»S_fgƒcÝ yLéö£˜¢3$½›¨¼=’:æ¤sŒã…Pê¬,¬&tf·f½¨xÀÁ´q†µ&ÕVÓx‚f“½èIl\ůlÓ/lrϽ_¬ÿ›b ÏU‘ï{¶7ÝEG8#¡4uùÜ>É6}ñÑ{»ÿ?Eœðc7M´žÚµæ‘ÚˆõK³O õÕÆÓiJs Uš~jí¢/¼mMcÚÆ};êHÔš ~L÷WαêlÕãa¢ük=X²ŸÕ /÷Zïë£Å5v䢚§ÇÔÎÖì⽌75 pÞçWô±Á^¬ÙfÔøùÌøRßW•K¥aw-EeYãuœ}+#ŸáÑÇæÝž›ÓºW=q39Üks8E ±Ð—mõååùâ"I.‰aN XÛÁx9Gÿ¤sçØyZù@äåNjËI|U¤/Â] –%Ç"H2?‚yé¢âÒJRµï±zzeî‹C•mZ&Óµ:HHµn#ÜšŸéþ]‹À;rSêÈMŒÄÛ'}Ž<èrhùT÷–Ÿrìê¶iä8™Þ–βî3{ë¶ÊÉ·Ît”¨—/“è|Ë3+JŠ+žPûôNC¸”Ï3ÙÍ¥¹ÉO£îÞ¼~ÖgsÄMùäCQf{_l£PBULöQl:¥õƒJ £°¢iâÕÔÓê­fMÞÞ2ÞÖyRS¬òDRðglžBK—P–¯s…,okAü ¤¾/-kºòù4} Âê+ϼ\ù#‰è­e2×6Ø»a“ÙpßÁ 
÷%IiIÃóÕ_Ç\Ñ”¾ŠÃ¿¸À”º“éUï¡Æ—ç«‹¤8|“¹¬ÁÞ¡L{ì_a¨­¥Ó­1öŸB\*"_£öV‰ {·x§Öá«Y¥Ö¡i=óÕäSÕ–ì>ÝlÝ…ÔfªO³üì…*Žœ»Í ¼Æy‰éßJsü jëÒpeùÊÇ3jG©d¿uÿ&ˆ f¢«^þX¸ì§!7#oß{9ÀuÏ0ynŠ[TImùL±ÝDq=0k×;˜¢ŽýÏÍaìÍ3¨uìÉnNœq¤ ÇdÃ7´Ÿ´;½|8ß½f§-(^qÒÄòÓ‰Õ›Äã#µN. õŸ2‚Š(?uf£WìñÙ=kô-{¼{ñÁÖú£È ó4“ò6}{£×îò1!ªâ– Ì| iMØA¾¨ðS:»!+"á=Uó\"#%> ; õÀ79s'¡P]½â„BòµåýfVýá/$ôI¡NŸò^õ†³Ï“ìøßò‘÷öU$ZîYçþó½kÜd”|~j?[Ö=„;¢OÙUß×Ç”U¬xi®|%r+<ÒÕ¡ÛžíqTÜ‘aºïùí™]u¹ã‡gG'õŸÉš®ò©„çúÂÅšàQ°LôßÅØ™¬”¡ò4ª½±í\ãέ™nž>èWq¹ËõÜŽ‘ꊤ¡­‚ÐÕK¯P”·¿'©Ò„¹Ó4•$Ÿ2ãîù­|<‰—syý,ŸMUŸ°Ž¢ŽÉ-Ÿì¤~E¯ž®¼„FÿðE»òkU†ÌìCùD°/1=o8wâP Ú›[çvE¼¨^H~Šâx'x›3¶ÝÊHêæ2 ™e”îøE£úªQ9÷}×í®xPèñÛ‡£È5›u'×Ôí-õßÑãøN¹ã©ü³ÈúñýÙßQÇy3{àÆï "d¸´â2ÎÓÖWÃ÷±ŸS9ÁŠÄ™õ6—¢ð¾>@êøK”0ÞU4}jì „ OÞ91²ïr@ºj(}GŸñ[bÖi‡v1\@U>½pÎɬ5ËýVEXq‘z³ßH~õÐRó< µ²ØR~ š›OŽA•Æù<ï6t9†µ×”®ç*ñËÃv[³Î#1%¯1Íîïš9zÁ£ sº_˜SkHõ±{ŽîýŸ³¥­.iƒ.åwRHªÔêÕ¦§SF†U˜xÚãÜÀ+)TaA»9öîC†ÕW—áˆ_Z2˜TifxàéÐÐ{))÷}ãò™LÁ¶r½V8 5N]ºúÍmû®ÍÈX[ð6Êÿò©Ð¸·ÃèBB&f‹†Žœ>Z_­údœèàsÌ´{§Nõ‹}ž'P€®ØÇ⢛Ùh¹œë§—ÇÉÊõÁ”´<Ï´¤¨7ñçyˆ½~9/‰Nå‹MX°^ä0“R¿ë½¥§æ^׺§æ··„àÓì† Ó· WØ_{>+ÎgXéJW®¸Æî œKX}%v.š6×ՙºœLphGÎykÙ:š¹o-¿r-66“Ý ?ÿ©àï 4$0㛳âäÀŒÉ ô왑1¿V'M‹ õ=2c3g+½óŒÀŒ\ ©ÞóëÌ‹ãH½®.žv†V¶†_Ì…²Ãf¬Ö˜rKöaÛoD£æKo§5Á‚|¾2˜œ¶©£¶ií®í[ý3¸þ5¨6ÎqÓ¸æSÞ6½Ê#î÷oo:3*[núê|¿ƒÔ]Âz—Y®ÿ²ŒŒúï|qqðÆ=@ê¤>@ê¤>@ê¤>¤>€¿õ!7“FûÍË™™)))ù{—Öª™® öÊááivoœHIIîÐA©ÙVìOß/šÃNA´j®ûR@c­X±â·/ÃèÑ£/_¾ÌÏÏŸ››KþýË«Q—€­.Xaa¡¶vGÒ0k֬Ç·° û·üõë×jj2¤aÍš5Ë–-ÃO ¤>€?›¿¿?ù·¨¨HLLŒì £&Í_Õi(Ÿ™3g &?ÊË—/ÙÍ[¶lùí©©¾Ë´iÓJKKÙÍ………222IIIRRR¨Ls¦©©ùéÓ'vsqqñðáÃß¼yÓºukTæû%&&ªªªVµ2ŒÕ«W»ºº¢2H}$&“yæÌÎ.ÊÊÊ="ÿ¢>Í“µµuLL g’LtuuãããQœïD K5g—¢¢¢M›69;;ÿÞ‹Ÿú ‰FŒQRRR«# ~=zôïÙ³'JÔÜøûûïß¿¿VÇÒÒÒׯ_Ϙ1ãèÑ£(Q“ݹsGOO¯nw>>>[[Ûƒ¢DH}ƒÁ¾£¯®üü|ƒË—/÷ïß…j>Þ¿?zôèz{åææ^¿~Âyóæ¡PM4hР†j{êÔ)WWWyyy ©àO2pàÀ/ôýøñã˜1cŽ9BþE­š ’:ªn¬ëõë×ŽŽŽ$¨wéÒµú&¾¾¾£FúÂVVV¨RÀãýû÷ááá_fΜ9999³gÏFÅ~;UUÕ>|yƒÑ·oßÌÌLnn_5Ö‰'¦Núåa²²²¢¢¢nݺ…³ßH}ŒÆ¼òððˆ‰‰‘à—‘‘‡×ÿ^$–$&&~u°’’6`À€°°0­1nß¾Mj+""’——÷å!KKK,XððáC ©àÏK]NN;Ý1™LrPK§ÓI3éÒ»wo%%%…Y³fIKK£\¿Ý®]»ŒŒŒÏ;ÇÇÇÇÅÅEVÙçÏŸ¹¹¹óóóÉŠ+a!CΟ?¿G¨X#ééé‘J’†åË—?}ú4###%%…ì¼¼¼ì3«ì=‚Ô¶  
àÑ£G$%ÖûÄ@êhvØGºœ¼¼¼öïßçΉ'’¤GbªÔ|ˆ‹‹Ïe©ê¢««KÞÔ©SIz‰‰!UúkÖ¬!ÿ¦¦¦vïÞýíÛ·~~~»wˆ@eúZŽéÓ§ÛØØdee9sfòäÉsæÌ9tèÊÒ<‘X?eÊŠu¶êôéÓNNN(Ëwzýúµººú£GÚ´iCR_Cô¤>€?F›0a–-[6oÞ|òäI}}}ÜÖ<íØ±cܸqd•‘fSSÓýû÷#õ}§ÄÄD]]݇²ÏšÞ¼y÷² õ´@k×®UUUÝ´i‰Û·o÷õõ•””|ò䉸¸8ŠÓ¬˜WõX333›ŒŒ T¦iȦnaa‘––ÖºukÒêççG¶ù®]»¢2H}-¬¬ìèÑ£œœ¶nÝJZGŽ™žž®¡¡1jÔ¨õë×£>ÍIJeËÌy# ~kÖ¬Ù·oŠÓ#FŒ aïÍ›7U]víÚEB *€ÔÐ2yzzJJJ.\¸ýèNž¸¸¸àà`QQQ???<½ð·ËËËÛ¹sgZZgÇuë֑ľ}ûv”¨ñnÞ¼9nÜ8òoÏž=«:&%%Ý»wïÚµk¨R@ËDb £Fºÿ~UG##£ÜÜÜmÛ¶;öúõëx1Ào4~üø•+WŠˆˆpv$­3gδ³³óðð@‰ãõë×:::ææædîÕËÞÞ~Ñ¢E(R@K6{öì7náææÆÙ}!‹¯¯oÏž=;6mÚ4Ôê[¿~=Y u{íÙ³G\\|Á‚¸íË222deeçÍ›—ššZ·oppp||üåË—Q(¤>€ÎÇÇG[[ÛÝÝÝÖÖ¶V¯‘#G2™Ìû÷ïËÈÈ>|˜——ûHÞ>xð`rrrC?~œ¬¤¤$Ôª^áááãÆ›5kVÝ—UV™:u*©3j€ÔðW¸wïžšš???ç›Á«ôîÝ;##ƒ4,Y²ÄÃÃcéÒ¥õž€‚åÖ­[õžžª2tèЉ'Ž3æâÅ‹¨X•ÂÂB² Ÿ;w.33ó CŽ1ÂÚÚšlÛ(RÀß">>^OOïÅ‹k×®mh˜ ,%%% ,8zô¨••˜‡‡ÕûNœ8áââòêÕ+nî¯5mܸqÒ¤I¶¶¶îîîyÑJKK—,YrèСE‹?~ü«Ã;88ðóó¯X±ÛRÀßåöíÛ .ìÛ·ï;w¾0‰y;Y(ÖÕ¡$¢ oÞ¼yèС¨áwš;w.É{)))þôéÓ&L ñûï|²Kzzºµµ5Ù\I~ÛÂÒ˜±œŸ¤>€¿Z×®]³³³]\\&OžüðáCö«ü¾ŠŸŸ »5 `ûöí÷îÝ2dˆµµucäßlôèÑ #33³iQyÓ¦MwïÞ‹ŒŒìܹsË«ÏíÛ·ÝÜÜ®^½:|øpWW×M,Mˆ‹ªªªdD333lrH}PþÚÂÊÊêúõë< ‰â›FÎRÕJB §§çÍ›7'Nœ8{öìF†É–ÉdŽ?>)))44TXXø{&¥££“““3a„ׯ_‡‡‡ÿÑ'ZIY=<<È£¦¦F6B’ÓΜ9ó=Ótvvöõõ%¥æççdž€ÔÕØw‹­[·nëÖ­îîîM~k_­XTTtòäɳgÏ’Ãú6mÚŒ9ÒØØ˜N§ÿ%…%ÙÌÐÐPNN.((èN–”´¬¬¬cÇŽ={ö|xÉ’%½{÷&Yè{žøRVV–À.xttôÓ§Oóòò”••»u릥¥EÒ™… Ë/þ¦/^$[ ¼¼¼§§gã_}H}ðƒYØÍ$q¹ººž>>"""³fÍúßÿþ·ƒ¥®š‚‚òÕH–#ߎĹ”””ÏŸ?_¸páÎ; ãíÛ·]»v]¼x1 ϽYšófF¾YÔððð3f¬X±Âßß»Rü;vÜÅÂnÍÊÊZ³f··waa¡¹¹¹Ýo _ ..>€å ±„:_Û´i3lØ0’ùùù Hü#ÝW¯^Ý®];EEEòÅUTTH8ìÞ½ûwž?lŒ„„„ØØXv^%I;55•›››,‰©U‹AU‡å Óyÿþ½«««‡‡GŸ>}6mÚÔLÎæ±%'';;;Ïœ9“,d³:‰ H}Pd¼å,ìÖ7oÞØØØøøøôèу¤A}}ýæ¹Ø$G9::Þ¼ysìØ±$×­ciüè$(Þ¿ÿáÇñññOŸ>%ù0??_NNŽ$î]»jjj’ˆ¥  ÐÐèL&óÑ£GáááÑÑÑqqqIII £C‡ªªª¤nìÑUY¦L™òßTXXx ;¢Ož<900ÐÉÉÉÅÅåw½ýïøñãdH¡$_AFFæ'…m’r#""H‰233±_ õ@ËÔ·o_-HÃ7H¼1bÄþýûö† ƒ0`ÀV­ZõC¦I¾…———ŸŸß›7ozõê5lذ‰'²_:ÿMÓQPP¨zü&§‚‚‚ .\½z5,,ŒdH555SSÓïy+à ž>}š4,^¼˜dÔ¬¬,qqñ8}b_¿~ÚºukìH}ðW é(==dÒó3²ß³gÏ´´´ÜÜÜ^½zõ=ÓÉËË#éÔÇÇ'--¼9sæ 8pËOªNËÂÙ‘Éd‘ÅhÕªÉdI´µµà|7²ØÚÚÞ¼y322òûCÚ† öíÛwïÞ½ß~e/ õÀo@² ‰1¤aĈŠŠŠ[·nýQï|ôè‘‘‘Ñúõë?~üØ„Ñ †‡‡‡»»{aaáÔ©SÿùçŸÅ,¿·\$dá̾¾¾‡ºuëV=¬­­'NœÈÅÅõ3bquuu2Á… 
69*wíÚ•ãÔÔTlêH}ð·óóó+((èÞ½û¤I“Ö®]ûS300““{÷îÝ7•½zõê£Göë×ÏÕÕõ–f^7’G²Tu ß²e &&&K—.USSkÚ”ž?îì쬫«ù­£ß¸qcòäÉÏž=Ãæ €ÔPŽN§“àåå%-- ""Ò„‰<|øpÀ€aaaO;»ví"9“ ïîå.c?vóÇ>Ü·oßÍ›77á}d,Èeee“’’v÷îÝû÷ïÿÖÔ H}ðW0771bDçÎ}||¾õÉ($°yzzfggõÁ‚‚[[Û .8;;/^¼xþüù-²˜­[·ÞÎBš322&Ož|õêUòe]]](YçÏŸoß¾}ZZš€€@cR4‰îñññؘúê'&&–™™©®®niiÙøk,ãââ¾6˜L¦ƒƒÃÑ£G×­[w˜å頻ŒŒLÕ;úH<^µjÕèÑ£÷íÛט3x:::¤h***_½CÏ×××ÍÍ-99›1RÀW<~üX[[»´´ÔÞÞþ«/Y²„ä½ÀÀÀ†ˆŒŒ;vìðáÃ===wìØñ—×v iضmÛš5kH`^¶lÙ—G111±²²6lØŠüîÝ;33³””l½H}rïÞ=eeeYYÙI“&}a°Ã‡ÄÆÆÖÛwË–-7nôññyýú5JZËB–üüü!C†¤¦¦’D§¨¨ØÐÀK—.íß¿ÿ®]»º –dB’¨ñø¤>€o#''§£££  PïIIIu{­[·n÷îÝáááNNN¨ä ^»v4̶víÚ+W®èêêÖ;äùóçååågΜ٦M›Z½ÜÝÝÉtê¾nú¾„¤‹cÇŽ <øÙ³gõ`ll|ôèQ:ÎÙñâÅ‹cÇŽ [ºt)jØxö,vvv¤ª~~~}ûö­5€˜˜Øüùó-,,N:U«—««ëÝ»wQC¤>€o6räHooï%K–lذ¡V¯Õ«WwéÒeôèÑU]>|ø ¦¦æääÄd2Qº¦qcÙºu«©©iLLŒ¸¸8gßuëÖ‰ŠŠ¾|ù’óìëÚµk ;uê„ê õ4…———„„Ä¢E‹8_âWTT´eË–´´´ª.¤õÀIII¼¼¼(Úwrtt\¸p¡AÇŽÿý÷ߪî4ÍÆÆféҥǎ«ê¸oß¾€€ © ‰øùùçÎkooÏ?\\\&L˜P•û÷ﯦ¦ÖÐ… Ð$à………?^RRòñãÇíÚµcwwrr’••---åæ.?ð»xñ¢¸¸x÷îÝQ1¤>€¦[·n„„ÄÞ½{«^~øðáG‘?:vì¸~ýú©S§¢P?ܸqã D*ìåå5lØ0Ò…$mƒΛ7´9rdÊ”)(RÀw¡Óé#GŽÜ²e‹««+i%‘CCCC^^¾¨¨ˆü{ñâź¥M›6™™™$éEGG»¸¸.$`Ÿ8q‚ú‚ƒƒÉê@•ú¾×Ì™3Ø©ÏÇÇÇÔÔ”4¨ªªîÛ·‘ï¸{÷®¶¶677·³³ó„ Øïy ’‘‘©õÄ@êhŠaÆM›6íÅ‹rrr~~~ÆÆÆ$ Ž?Åù5îÝ»§¢¢¢¨¨8iÒ$²‚ƒƒ¯]»†È €ÔðÜ9s¦cÇŽjjjûöí+**Z±bÊò+EGGËÊÊêèèèêê’ÔGr ¹¹9Ê€ÔðÃR_PPЫW¯455W®\™œœŒšüb‚‚‚»ví7n {×®]{úô©¶¶6Ê€Ôðc 8pûöí999¼¼¼ÖÖ֨ɯGòž··÷Ë—/ß¿¯¦¦†š õüÝ»wÏÊÊ*..þøñ£¯¯/ ò»¬\¹ròäÉ$~W½Äú~ YYÙ´´´‘#G¶nÝÕø]ôôôÈŠ ©ORRÕ@êø‘„……322F…Rü^cÆŒyüø±€€J€Ôð#µnݺ¨¨hÒ¤I(Åï5}útWWW¤>¤>€ŒD>>>>„ßNVV–½:P ¤>€œúxxxP‡æ€¬ˆ?¢H}?RII FCš²" ê€ÔðÍÊJ>eç>É|û2''÷ý‡‚……eEEŸKJ>Óé¼bb¢nnk¸¸iüü\\Bt>aAQQ1IÉ’ªÜ<ü(àŒÙÅYYï33_å佟W_PÂ^e¥Ì¶mEùø¸ÉºàáiÅÏßJ@€»uk~aaA11ñvÅÚväâÆ‘!Rüõò?>‹OHÌLO/‘‘áVVæo/Ë#'Ï+)I«|)õ©`g÷¿ú&ó™¢Þ‘ÏçÏwSÓJÓ^1^$3^¾,oËÛEUFMÕ°­„"Jý̲wïÄ?‰NLÌÉÎ.UTäSPà““çi/Ë-#ÛJ¦ü&¾V%Ä9†Ýòú&TJQoȧ¤$$%‰‘žV’”\ôúu©l{þ®ªŠj]…ÄQl¤>hÉJ9]¼‘! 
ÐJSC {wºŽ.MG·í™x«V­äåxÉG¯OU·b&30>¾8&¶ðuCGGY»÷H~6Xå¥)|q÷žÿÝ{ïddx54T»ðrJü‰óðÐ:*ó‘aÿª¬˜Ç`œ½¿0&¶ ¸˜ÒëÛ½G÷a\ܸK©Zféÿ]ðH””ä10ÒêÅ«Õë׽ݛFk¥¦&@>¬¶üœÜc×®ç'>-4 ¡¥aLãâú»VEÙ§ѧ‚ƒÓIAôú rJý²¹óò¶êÝ[|Xm¯_¥y„„~ÊÉ.nlÔ±cì(H}ð煽؇Ǯ^Mí­-hh ´`A»æ°Pb¢Ü£F ¢„™Ìôèè]ÁÁ{õRêo0¥eÇ?fÙÇàc¢óŒŒZ÷ê%л·TsXªör¼Ó¦‰QåéytçÁƒüaÃt{ô‚]©š»Ì7·N —‘æ9²MÏžRÍs!i´VZZ‚äóùó§à›nÑ1cÆ êÔÂÎ81™Ïž¹x9±W/A£‚šéºàá¡ Øš|ŠÉgÎlz›Yf:ÑTBR»R4»Œ‘àsÙ7iÜx‘l%ÿ”¥nÕª;rdeEoÝv£®ªžÞ„?]”„†xp?{挶ÎNRÊRóñ¶š8Q”4<}zåðá¼±cŒ:wé‡ ©š…„'ÇNŸIšg%¾h‘Ôú$$x¶ûô)këÖÕZZ ˜úgæ½Ò}·Br$ ôÛý¡ëBE…Ñb©ŒŒØuë‚Ç3TU5Ä.€Ô¿MFú5ïc‘'Š®X!Ý¾Ž—££Ô»ì¼Í[Vñ‡å¸ÇÞWSæÌ7ìßÖ…Œ ïÒ¥Rÿ=¸iSÈŒ¦RR*ØÝúà×bóqk+Æýçžßkˆx[ng'©¨¨÷=·­,ìyxšù3ŠÒ÷øW¿Ÿ“cK[:ò“ ÌÏÏïãÇÀ)S`·@ê€_$ãÕµc'"--ÄEDZ쀖]]ŸÇ¶þ:Ý{ n¶Ë{$,ü•õ< ZK]#F´ÎÉ.Ý´iõŒ“¤¤º`@ꀟëþýƒ±3%i´V-û›òò¶šÿä… Ò^¥š˜ÌivËÇdúúí`2Ëþ Çç4™X[nggÉýçu{khh Çn€Ô?‹¯ïöV\esÅÿž¯FqñÙ³{&L°ùíëâä©m:óii þ›â¨ÑBgÎd†…Ñןˆ©~Œ¤ÿ.ÇÅgÛüï+÷ñò‰QT&Õoj~˜a­“PaÁú§l œ¶›ý/Ñ»OuŸÒ¬Œ¼Ì\i•—‘à­3Õ¢Œï?•”7 IˆÈˆðUv´ÞcURÆC¯èRPP\ïR‘x8Û >¼Hÿ”OfÈKïÐéNêëó9ó1âι>}ÇÿÆuîÁÃCéõú›7ȉ[ïÞõLN>ZQA»'R|¯²’gÏÅZÍkô½|KH¤ª•¦dô6˜2=F%?ÎÊ£( ¾b‚5O³zJ·RoO½ÞéÇj5Š+1Uã¦^\òé0&¤¼ƒ’êôÁ’i/Îù¿,oU‘þb¤ •¿ÃÈ¡ü\ŸëâU âCô»Qš•oYâÏ MLf5jº.‰Z¥HQù{†;ز¦gÐ{dáÿNßðO)oîµÚkz£Þi>nœÐ–-ñ]»öù=ïÄË˹þÖÉY›å”i¢‡û99ô qq¡H}ð]nÞòÒРÃKþ·ï€DQi1ûV”úðÉfOv¢£7$‘z7œùÖGîY¢S1e·ýTТƒ6w •fø6ž•ñ†O|znPçÊ4——‘[B§×½Ì”®fŬz©zéiËE¦¡åMîÑ;m4ø©ò«Ow—G>Îó'’ÅÐÙímîªÑy—½ßW¿mèP ÿ€3S¦Ìÿ-ëÂïÊ5c“6­ZµÂf)Þ–[]MôfÈI#£i¨R4])#'òn–³ó7½þ›Qü¡ì#UJšx©ìÃW£Ë;=ešv®¢àUö+Vƒ‹®KÝ „§¦—R:.öWøO›,<£B?SÕGIÀn¯ñÆ ß]˜uÃOr°/i0p¶ÙTõn·üÁ¬ózá>‚4ŸºcÝ~ö©1©ÐÐà ÉÎ|ûL²]ç_¼.2ÒÃróJÔÕE±Y² äÙº5ÉР„‹›Õø R_iÊŽ…it¯ È'Ü®SמZÚ½4:H4‡ªåÅY´-B mù‚¾£F¯ÙjÜ¡‘•ÌÝ` æVÕjx#÷ÖÀ/Ý`O•¼<ï´ì*ÕVà[²°0[eäZ‡ ¿¨&÷׈j»Vµê/ ]«‡ àw‰ŽñíÚ•ÎËû-'—z¨ÙÙTýR²±ºCßÁ!}¾o‚öa}×ïÐ]úd¤å‚‹ÝF‹òtÒóYª»ô…©ŒÅÑEs7Ïë$J凟4u¹GF0v_qÅê³9Ïšò§1ÀkK?}’Ùòs""Ò’+~íqQ¬s‰U‚Vl´š}—ÞôP‹¶/¯³/eá%}»âoZL­ˆ£MY¹ÈÏ›—`ûAîÛ„(>ÎxÈG}ºe»øxáÆ®<`,^Ìñ4ƒÑAW›8À߈É|õªXQ±±/èSf¸žÖ•¿½RÝW è¸ØçŽŽÞw9zÔC "ŽË¦ õìFÄ^¯gÇ'íé%*åucÕäRìËõ¼˜û3^ž;õ(ühÐ¥‚ògn¶í¨t*tü$}vòáêk;y½1£ë°òV) ½õÛµù)&E?xÁ1çª5OJšGD%„éAå½ñÞriãùër‚T¯’™UÜÕjßö8‘òŒ¼ÜW"¢íÙªx—ËÇÛªµÐ÷>¹¤¤ ¸¤ÎsMkv,-((£xøè¤¥$¿üy§9e‚RâŠ2µþÂ\š—U˜—OV —¨ 
]B¢ž“YY)™åO_Q”©1@IaÖº¡ËV…QÓ½´g””òð4ñHR©CÙåËï±§ü¥©Ïu£A“föKÜÈ¿©ÿ*+ÌN®ê˜±ãJô Íêßg%¯Cœ&ÏÜú¢Þ)w<í³eb=áìõus½!Þ)u{hŒóɰVq¨Î›Z7r°¯Æäâ|Wûí¸7M“+/Üy¿ñ‰Í<Ò}lúÔîZÔÉñqÎÓ¤®v¶s ê9ßXðòº‹ù¡ L]ÎüÊõýÆ*uþ\šrÐÑÚbçÕ«ŸÕ¯ÆûWä˜CS5çžàì2ßïåNyì?Ç)|üTã/ï”ÐÔ\ÒðƒôEÔ4—Ô¹¸²ó >nƒú44 ]Faº½BoãæÖ1 SÙ¢6F¿QnŠHM_búý¯÷ã—ýKS_vvªXÛï>Üz7Hd7ùe¾=’ãé5•×GîZ¢Ã³g¿¦íãªT5ݪ]¼GëÎLj{ôN{ þg§¨˜F°zªZŽT(z»ÓƒÝ_;º`®†³à¿Ûê½XÇZRÆÓ»´§²=½ÙìYð?æ³KUưºHeŸ<«{ íÞ«SH÷&|!~Z+.*ÿS¶ P[ì°]êûÞZ°Tn-X™ŸŠ¢&ô:W³÷x‹Eí‹oïô g·l$º•¾åž¿c煮xÚ\ÉÔûUí)+SIIåÒM¥ü_GŠzÎî!Tõ·2ºlŠªLRŸÕ#—x›óš{S LóЉâ•_X²ô•íWÅÖè¤d`Ü…zíSÑžæeÒÅ‹ì–{mAU$ŽÙ;FÓæRÍÑ4 º}zó*:º2w‡{˜(yPÒ‹Ó36È|q! žQW›•Ì™À·†û/Ä-?QQaY_Øàç*Èÿø+çXXXÀÇGûΉÄ»U~ÔÑcŒÇ£kâÏD°;Î(ï˜sé+¡õ›˜6¨ârÒý£Vö\²ê!õþSÙ(B}SË;êI ®È:tÛOd} DètfÆ¥ã²cÈôºÞȳ(\ä<¼¨‹– LjuÞ˜²ªCçÑÃŒ °8Oi:›]ÙÔ‰5³É_Ї«°ð=RÀߘúîß¿ÿLJ.?¿2F1y)Ú§×q·»-<]cȳ7NQaŸa{l^3ò¹†¼_eІÝìv”zqlN‡é‡+²Ÿ“ö¢.¯7( Ù‹3ójF>å“ÉÏM;T§•=Æ‚¶U‘¯¦6†!Ìz~Ì‘iÖŒ|Z^=#Ë1͉‚¶gQ1cöÚÔŒ|K2™ë+ï*È\©Ñ®ºïu»¡f«*NæÞñãŒ|ö¹ÌíœçH îR﹫ýðž íÛKK·IH)”©÷Œ?ÉÈÉv”w¾àè8zoîEkìv?õE¨ùíݪí—Î’Æõ™ù“È9ÃJtãmµ8î|ÆýAyG‹2å/Q¼GÒ]yfó,Q™Äª:êw"_}®·sÝÓƒl/và¹Xy8¥íubòôò7päŸÚÊþkö“A"–u—àÅ*ÚûçÞçËÿŸ=¹ÃXåËIÃ6 ð7¦¾ÇáŽ_JË3*È¢òÚμۗjZK9¿Îî|åýmL>*/гïæEGG,’ rOì>ÌÙ}ÜN_Ó?Áè6çoèú ‹_{šfû½9"kšÞ·NŸý¦i6Æÿ.2ÿÇþ©Ÿû,åiÚË7^^ ;QÊ(P±/ë¯Æü;Di;ØMç÷4xPÿaQŒù"_{ÒrØrmÚrΣKNkàE!¿„  TAasŽ}E/>1xéœ÷Œ•ä}È*(ãáá‘ ÿ¼Çù” ¶ùµë¢MaÁw­‹ªð¶`b»ªsk/N_`w´6#‡ÌkG"I‹’¥¡puļu¤üzN%3½ÊŽümÌ™6ÏVÍŠ ¦éc®{ïúÕ^3KY _½gIÿ­c*O9Nú¿Ñ ÊŰ·ü©¯üg“’Rrrù~”•“Ê/Šä$cŸž¾½Öõ„OÃjg(4ƒ'/r)J‚â“P”£ÂÒª:wë)Y{HþÞÓÇS¡ç¿ìµ§©¤úýÓüBL„ûû#RésÏ2ûœqì6i[­¡Þ䳚£‹óùg›ÆvjÜdÏhö\ç*ƒ}à$ÖÒ$|¦Ó¿ý:ÏÒWxÖ”ÿ¦6› S_+õoc^~Χgt‰uŽÿóî^—Õõ#‰%´ÀN_€YÂÛ­â]|JúCÎ^ !ÀüI•ÉÉ-jÛö—>ÝZ¢m§Üœ{ß3£„}³qÃ=±y>=ŒYmÖŸõø€ŠDW˜x`×kÒ:w¶jõú*|¾g×òÿtKužÂç†ô-åLÆ&7ÖêuoÏ[’—å*–}•ªºÅÍ7ɼƒçêM›·îvvc¥oÓ-õÙSC·/Ѥ¨ü[WÊO9*uHŽÏ(é #óo£øø‘‡·•]{+Àߘú\CÞ­ªñH’ÊË,Ù2vÈÒvx>.´èVýú•úu½z ]7FÄ‚Fü°8ëEgûëä\ªÖÓPŠî{[<ûÓlª¢Cá'Í­m×Oè½\¡[õ—ø²´à+™·Œ%°ßütrò))ÅjjßþòîögÓ-µd=£Ù.ÅAºVÿøƒûY‘OêT†7ë,VÁ‡ò'F–?­“Þ¡“H­,ÀzÎ$7Εõâí«œ²ö]d$tF2ò‡”ðpÑy˜$Ф§¾-®Ç˜”C$³¨  öÃ*©ªçUòðñ4õT`V™h«6Âí~åŠS¢hTnN©¨XºDtF2s{Üq#ð~ì&×ÿ”zvÙ¿wÏíÛG 
µÍz”ã“€ñúqýÚHÏå|aý'žòŽ|3úðQÜC˜YñOŽŽ;ºî|éKTVÕ¸5s`åÅSW-f®zä}ß÷ú]Ç+%WYdäÞ^:ŠU5´=êR´,0üÕ{6½Ÿ±z¤ŒbÓïMN¦ÉËក¿5õÕA·ñgör¢k_ë,ÕwÅžû§"Àˆ¨2¦\«ÏhEÚÙœ4ñœÜ±zoüÌgè¬,ÕNJRB\\Z}œIOJtÔäÑ.aÿoïNà¡Ì?€?CÂŒ˜ÁŒ³4”»r†µK—£ƒ´H9Ú”6ë/¶Sw[*»[ŽíÐêZÛv ¢-TR õK¥cP‹$-rÆfþ3¹†¨Øòy¿ž×kç¹¾óÌóôÝ™çû|¿m°EÎñtšÆ?`zC”‹k7›b¾µÌš(—Ô¼³œ•ÔþôÝÛ"ß½ý]E¾§÷RO¥Þd?¸‘}3'.Aörõa#y5˩ܩu°]ÖX’AkáYQgŸ†š¾žã̃·-¶N±e#5øFoO±a–)]9n|Xšš*Ù·î÷$õqÉëŸM±£=‘ðÝŠu¦¼þ!۾ɲNi{ñnø,Ot”«üiòbßS¼åʆv&÷'7݃rÝ·jŸ+/\•gœ ñnëµY{yû·Ä™W 5“˽‹"ÃÎÞâ­¸qyöÔ\¢’þ…ÔÙ•rCàŒ’ëV­ß/ ÷ÏTåý±]'–Ýt3»NCsðÇ¿êRÜ·¶°ëyTYÏ•.žüK¬,VXµ¼¦+ù½vóµ…t-Mÿ•šo| KW+Ë®{J¢+íš÷^ÎÉíÛÿŒÐÖF=@êkc´èL™a(ͬí)±?ütH±ae鯺1‰/=®/=¥µ·—Hçá‘΄Þd7ƒ!UÉ»þhî=²ù†¡Mj™kÓŸµæG¬<¶´­¿“+‰ T¬tŠãøúŽÖE‡.éªL¼¸ýÑ|XT}kb _óNïHôôô*23Ùl¿j¬QÞB ]‡%¶Ïü|[Õ˜Â=~}WM)QîÖþ„vI~¿¸¿ùÖÕðN©ÿZ5Aú\È_©«¿@õøpF˜š˜Z]ý’BéÉHqT Ûìð|mß›KwŽníÔ±6g†ÞQî­\±Â’œ¶|=/ò™ÍªNÛ|oý ¢"[Ÿº#Æm™®j»xö}òÖ –Í[•g4/H]#üìš’á(óßã›bžùcÒŠˆG&*±š;Š, Ï‹|ÖáË{ùBàê•¢oæy}ükah0i÷¾ýÿ)õ}^*ŸS>,vv¶Â©è©¯¾¾¸ý‚ºú.¾{LC8¥FúÒÚ‚ÓåE4RL|Á5k‚²cq8EvØ™ûµn™¸¯}ʲٛ¾ÇÍXšo #(‹ãœÊ´æïu„×”ŠT~Lý]x»n7»Üì¼Ì¸–2#/¯ Ðâ0%§ Îœ*òþO7 sØëlT–ò?Ø—™ÙtNÌÄœÚîR=K­m =6ïyHŠçìeÜ“™ð•Ûæ–áXq1¬×Á%ôØŽà)m72ë_𯭪kh›á] ;óH- ÒB¿$E¿}Ôè1’  ¹9ólrñ”©=lG§åóÍ¡N‘·­F/Ì·“'*Ö™lå}«¸xýÉköY}í¯W÷õ.ÄRH±¯ï~ñ^_BÓ´5 ¼µ—ÿ––›r» ½¯)DdnÜ8nó†ÉƒÎ.ÿuÛC‚°Ÿ{ħçßé—UUT¹-¤èÊÌ¡âUFF~Õµ°žäï:¼çÃyŸ8ñr°"C{Ä'~ŠLGÇöñã¢ø„ ë~;\@ttÉdë1RRLÔM¤¾ÞCÌfÍIæ3Ò@Þ·P}=Gnz€¾FG×®îEÝo¿±¿þº‡#b Õòêº 'UÖ5È©Ë{7u/)DæÔÖÛ¶¼šßúkjêïžLÒsâ=Eh½%x—cÏ¿€“Î΀ñãÝzõ˜<ÙûÏ£ágÏVZZêÿI1ûžëè¨N\úz!UsUœèãŒg¼|ypϞм0ë§Ýz¾ÊÊ¢~.k×›«Šö¼¨øxNCgÚ´y½çÓM›î·ãô©ç'‰÷«œÑÑÏ´5U íPúàƒ05)/›±eË__úÀÞÝ¡ˆ®Çç=Å íáFFÓzÛtpXpñâ¡Ý»sçÌéÏø½x1hûŽN޶C†è¢& õÀÄT1šç­²mû/Ó§K SùŒ?ii©dtômW7y9­Þy„¦¦NC•nnØpÔÛKš&ù9ÿ»w—vüdÎüoæSĤPú>¢ó¢–|ëµíô[7Tžø±ûÖ1:)áЮ„JaáuÂcuÞ×}bZPĤ–üœ´çBÚãÙ³i$Òç7ŠéäÉÆÊÊ¢ÀÀe½ü@GŽØ³EŠÞ8ùslíÉá üuw)s¨Ø’ï~@Õ@êû¨2Ö›]î$à1õ¨¤òÌÜ\þ…¹§×ˬgÎ=žÍß½á°ù̈¦™e#­uïåoW¨X€Ž¬Æ»×T—…ÿô³ŽŽˆ¹ÙçÓ­È;’'NæÌuŸ%MWî+Çì⺸äɽ›N›JUUýlnÀ’RR„²³‹=æÎG@êû¸ž'}ß.òé,¼æ$ßq«ò‹!4³5­³y¿N t.Ý`%•Ÿý™Hýÿ~ujÉf\W“Ï6HVVÖKi˜™«ñuŠ][˜qîÂ¥+™9ìǵ¯úš–”SÖÖµ0ÿÒB­ýM¼îËUS’uù¯ó©YÍ%sD嘣ÆZ[3V}ÃØµ5Oï¦_ʸ’ÅÊg—=«­!“É¢’rjÃôMÇ™iË¡ô7d mo07o„†ÆNœ(ahHéÓ'?_rïÞ[^Þ¦ß|Õçž!£ú}À²œ;©+Wž›í.9TI¸O_‹«WÈ 
‰lÿEóÆŽ•EE@êûj*«Úͳng="ä‡tØŠjÊᄾ¶sÙÑÛÅi—&®vM\Í{a|!5TšwËn“?¿uŠé{üî;¡nËÛ2Êm”WÌ߸quó‹ðôrcþQ€ Wè*®¼þÖS£¿7'ÍMMõ _áæÏþy¶oÄÑ>8œéÆu‰S§sfÎ4X¾ü«>}-Ô5,–/·ÈÏcmØok+¡¥Õç¾”…’Î4Þ¹Sìæ6{ùhT.¤¾OGvÒ·„W\Û‚U¶J«š_ªXØ[NúÂÒlŒŽº:“NzmgÚ¼¸B'‚¸°v’SØÍÖ¥^{®¯±¦WW×S$¸±ò'kŠo"ßNcVÞ=¿L•WXî: •¥-é./|Šqräΰa²ã­fŽŸ@A…@êûäÈž¿sfÝ>àf7+.·Ã*vêÜ)¢ýB¿í{7/0múê#ÓåÉ¡$#KmñLŽ©H§KÑéMsB>Ç꽪ë뉆††z‚¢@oiu©”tú”ÈÄÔ–óX·Ë ;ê;[ãíÂù\v?Œqo½EIsÝzQ[i‚žRÓ|b€Ã>»»nj¼/ q1þ“²/樚Œ£¾²<7Õ’åLÂ29a¨ðŠˆ(ÅÁ7`ÂÓöÏ?%ÙZbô²øWYI;}º8ÿa¹ƒ½Þܹ3>×kA¥)zxð~°3vî:Ã*:q"¥·Å¿'ÅR'ãsëg8Œ_¸Ð5©¯—%?MçßÙÎM¯Ê ³³¯\J>î\Z\jÖkÛ²·û™m÷#–¥”®´x·¾U„„ò’wúûú%ä¾mËŠw?ä*öÍþùýs”öÏyÃöìÂâjBMœ¿^í›ØÖqVZ”¿E”‡­'ÏZºÐœ‰æÀCg¨ÌŸÄ}Q]UúGÜáÜügÆF†£ ¢Qþ…¯g ^Jÿ{Ð ak«¯¾Rî?×BEÅhÉwFÜ%%Ž9VSûÂtŒøÈ‘>͵xQ'–qåEÆ•‚a*Œ ãÇyx8 ¾ õõBT]³éÜÉgK_ôEkÓGžUßFùÞ ¤¿­´ò«›h£¿ïð¿k×…J²âƒ¤Å«RÝü£zv˜Ÿ³°ñöVèjëÚgψž¯Ë,ªi‡ÒPÊÚ¾3fT*»“ícƒ¸÷P#nf{i£…'4£ˆIÛ;ÌozýìY^bÚ™œœ'Ç‹éë‹*)‘>ä¨Ožˆ_»öìÖí§ƒ©ffÆzú#õôûõµ`0†Í™ó]ÓëG_¿–RPX¡­)n`0!óËq8‚ùyƒX™O<(ÓÐ’535³°baÊ€Ô×[½ÉÂoÖútr$LÖîÞ¸M‡ocÒ‹®Šá´6}©ùmy»òuƒ/d†š¶Îæ9ßþ]Þt´|Åò l×P“0_½|Wˆa7R£´¾çŠHîÄ¿°æéÝØ5 ùÆ-d{‡Ä~ýç\2ê¼FJŠ9mÚ¼ÖÙ’’ÙÙWØ Ÿ>­UP$+(ˆ* bÈ4JHt/ VW‘Kž üý¨¶ ðùÇÏÅÅ…UTd4µt˰±´±Á‰ïÄÁ:³fé4§²—/=úß­[×ÙìâꪆÁC(Š äÁC0dþ¥»Õ"T ¢BäI1éÑ£šÂ¢çÕ †¨êpEMÍÑLe¦2Î:R_A5 8ê½m:_ïš¾#D} Â5`«ýD“áŠ2a¢ºìQæù“Q‹7¥¶ßWwºyW7ú–nýÍsì·MkÅ¥ ¢­)fÖÞý~¦F ¢¡”â`°±C¡é‰¬ò@KêÛ‹%šOÌñ_¥§d¶¬Mûa´^~ô騹M]ج`À··J|Ék:‘f¢çß6R…®Çþ¿¢¾n}C2]Íj¢Á7Z½®¶"¼ cØ—_r§×SܳÊ +ÊK*žWÔÖÖÔÕÕ54¼|Ùø’DHB‚‚"¢Âd2™B§Ñ¤©rT9¦ÁdâŒöIPpÈ]îôúªŠòÇeËËžVU=¯}u1^þÛØØø’»JP@PHHPDD”KBœ*AeHI*JHÐ$$U5œT¤¾>nÚ®‚ê…{̵ædò-ŒÙä³é ;©„§³øÇBP6AImëÍg^µ~Ñ\_’ú‹^Œ][á;evòeþ³²ïü-³)V4Þ@~ß'ñ|S±·éRv¬ïjlliwšíÁˆöèäxMÖ—\jnŒª»(=sÐ,=Ï-»¸Ð¢»ìI™é¼?ƒïÎ$@PĤ¸ÓàÁ8Ÿ7Qs'B gúYê#x]¹¸³8îÜ Oó._L»ÎbåÞÊ~Rõj u‚#&+K®m 7f´±©±*½“[_ô/~äpVgÞº)üL^u•…·‹É{ªÔp‡ÓP”¹7bïÑsiUbrÄãbBVkÒLwOç/é¯Úlú¸û Ûz„UÄ;IE¦º¾ãH‰·ÛôÆ‚ê29¢üÞåñ‰I¯ÞÊ­–U¡ç>‘ejŒ³6ÝNW¡c,º±X¢¶0íLÂÙsW³ïçäW7­£È*éi™L´™`1”65©ï³"DgšOãNn= FŽÁ¿;w^²¼žç îÔÅ®¢L§ íNÝ/¶UÕØ•;ùwçxȨzq'üs@ê¤>@ê¤>@ê¤>@ê@ê¤>@ê¤>@ê¤>@ê@ê€>íÿ¢d=6-ŽIEND®B`‚nova-13.0.0/doc/source/images/create_vm_states.svg0000664000567000056710000002420412701407773023274 0ustar jenkinsjenkins00000000000000 blockdiag seqdiag { edge_length = 250; 
span_height = 40; node_width=200; default_note_color = lightblue; // Use note (put note on rightside) api [label="Compute.api"]; manager [label="Compute.manager"]; api -> manager [label = "create_db_entry_for_new_instance", note = "VM: Building Task: Scheduling Power: No State"]; manager -> manager [label="_start_building", note ="VM: Building Task: None"]; manager -> manager [label="_allocate_network", note ="VM: Building Task: Networking"]; manager -> manager [label="_prep_block_device", note ="VM: Building Task: Block_Device_Mapping"]; manager -> manager [label="_spawn", note ="VM: Building Task: Spawning"]; api <-- manager [note ="VM: Active Task: None"]; } Compute.api Compute.manager VM: Building Task: Scheduling Power: No State VM: Building Task: None VM: Building Task: Networking VM: Building Task: Block_Device_Mapping VM: Building Task: Spawning VM: Active Task: None create_db_entry_for_new_instance _start_building _allocate_network _prep_block_device _spawn nova-13.0.0/doc/source/images/filteringWorkflow1.png0000664000567000056710000020266512701407773023541 0ustar jenkinsjenkins00000000000000‰PNG  IHDR&𵫳sRGB®ÎébKGDÿÿÿ ½§“ pHYsÄÄ•+tIMEÜ;Ýo¶ß IDATxÚì½gp\Yvçù{.½7ðŽž,ºbùju«KÕNÞ¶¤PKšÑjV#íNÄFÌîhûm#6b¿(vFóe$…4=…´­ÑŒÔ¶Ô¶Ú—#‹E$áL™‰ôþå3w?…*6YU$îþ"&€Ì‡ûιïÏ=÷E!H$‰D"Ù¨r$‰D"‘Ha"‘H$‰D"…‰D"‘H$)L$‰D"‘H¤0‘H$‰D"…‰D"‘H$‰&‰D"‘H¤0‘H$‰D"‘ÂD"‘H$‰&‰D"‘H$R˜H$‰D"‘ÂD"‘H$‰D ‰D"‘H$R˜H$‰D"‘Ha"‘H$‰D ‰D"‘H$)L$‰D"‘H¤0‘H$‰D"…‰D"‘H$‰&‰D"‘H¤0‘H$‰D"‘ÂD"‘H$‰&‰D"‘H$R˜H$‰D"‘ÂD"‘H$‰D ‰D"‘H$R˜H$‰D"‘Ha"‘H$‰D ‰D"‘H$)L$‰D"‘Ha"‘H$‰D"…‰D"‘H$)L$‰D"‘H¤0‘H$‰D"‘ÂD"‘H$‰&‰D"‘H$ï‹~˜ÿx!BˆÍ¯ß¢( ((o-‘H6q…‹p®‹í8ô, !\ ÝÀÐuTUEQ”;þI$‰&÷#µFƒV³I³Ù¤ÕíÐ1Mz–…í8(Š‚®ix ƒ€×GÐï' …ƒrr•j!_«×)—ËÔ ê­&íNÛ¶×ʆP¨ª‚ßë# ˆ'¤‰MÁ"‘H$÷B?*8€´»]Škk¬•ËTuÚ¦‰â1Ð Ÿ?€f¨šŠªi Àu\×ÁîY˜6®e#,›€×K2'L’N%1tCZä@ã¸kÅ+¹åj•ŽÕCñxð…‚øü<~†Çsïßµm¬®‰ÙíÐn4qLŸnøéO¦Âó¿+‘H¤09xªãP(®±¼š£T¯c!D#Âa ¯õí­šX¹ !@\W¬O°µ:Fƒ ÇC2expd"!W€’ƒ%æ;–2òåµN8L8Ãðù¶ñx{ÛÔ±lZ:R™€®“ŒD&LÊA—H$S˜8ŽCvy™å|žJ«‰/!­‹u{r}]×Åìth”+8í©h”áþ~¤@‘ì[„›‚d¹¸†¥@$™Ä ¢éÛ»ëëº.=Ó¤Y.cµZôÇŒ Ò—NË!‘Har0„‰‚ìÊ 
™•jÝþxŒP,†®ë;ú™¶eѬT0kuR‘(£ÃCô¥ÒR Hö–e1¿°ÀR>‡­iDRIü¡Ð#±cË4i”+X‰$GÆÇ ?¢Ï–H$R˜ìˆ8¨7ÌÌÏS¬×ðDÂD’)4]{¤×a[µµ"n«Í`*Åщ ~¿´0ɞƂµâÓsó4m‹P*E0ÞQ`™&•\­g11<ÌØØ¨Ìã’H¤0Ù_ضÍR6Ë\6ƒ¥ëô o{ÈùAéu»”Wsx]Á©cÇèëÛ¶-$‰d;1{=¦fgY-Q|>RCC»¥BЪÕh•+D gOŸ&Ë›%‘Ha²÷ét»¼59I¥Ó&˜ˆŽÇ÷Ôõ•óyÜV›XœS'N`rå'Ù;Ô›MÞºqƒºÕ#14ˆ/ØS×'„ ·°ˆÏœ˜˜`hpP |‰D “½K¹RáúíÛÔm‹Á#hš¶'¯³×íRÌf‰{|<~á~ŸOZœd×ø+ù<Ó tpßÓ¹µb‘^­A4ʹÇ“âD"‘ÂdﱘÉ0·’ÅÖõ=z¾¯•ßüqŸ“G‘ˆÅ¤ÕIvÍoÏL“Y[lj¦’û"ÁÔ2Mró E£<ñø%™+‘Ha²w&Õ[SS,ò’ Âñø¾™ „TótËf|pÑáa9¹J¹ ^}ë-²•‰ááGvâf»°m›Õ™Y†bq.ž?¾G£¤‰äW&''Y(äIŒŒàí¿ÒðBZÕ*Ýr…#CØâDòhüÇu¹vã:Kkkô™ÀãóíKÛs‡üÂ"é@ gÏʪ±‰&»7©Þžšb¡'>2¼ç’ôPЪÕh—*œe|tTŠÉŽâ87oßf©¸FjlÏ>Ïs®Knq‘¸áåâÙ³øå‘|‰äÀ±§3É\×ejv†¥R‘ØðÐþ%ŠB0Å1“Y"³œå´*’즨Ÿ™a¹\&9:²ïE €¢ª LLPµ{\¹~N§#o´D"…É£ .æX*ˆô÷ãƈ+ áD#ezi‰\>/­P²#þ3»0O¶T$:4€÷EEa`b‚šmquòf¯'o¸D"…ÉÎOª‹Ù,³+Ë’Iü¡àøh2 “³3Ëei‰’íó`>“avy…`*¹ÿ#ïAÿøÓ䯭›ØŽ#o¼D"…ÉΑ/¹½°€7!‹ØÁOôãz=LNOÑj·¤5J¶…\¡ÀÌÒ"þxŒ`$rp'/U%=6J¡ÑàÖíÛr[T"‘Âdgèt»LÍÏ£…ÄA§ÑôÈ`òö®ëJ‹”<f¯ÇìÒz(D4•<ð¯®ë$††Èט[X ‘Ha²ýLÞ¾MËuH š›¡ÐlðÖõëÒ"%ŵ7hX&ñþ¾Có7{ü>‚é4K¹åjUD"…Éö „àêµk,WÊô®dªªÒ76Êr¹ÄÔô´´JÉ–üçÍ«W×ýçCÅ¢àó2·°€#óM$)L¶cR]ÊfY.—èŸßõÁ»áõ’b¥´FE®ú$Èòê*«µ*ƒGJÿQ…h…fƒ«×¯É|‰D “‡£Óí²°œ%”Ná=Äîü¡®Çí©),Û–Ö)¹?ÿét˜]ZÄŸˆcx½‡w2SUúÇÇ(Ôj,e2RœH$R˜l Ûq¸zý: Û"(Ä)w;ܼ}[&ÃJ>Çq¸vs’¦mM&}%aÍ0§Ó,®,Óî´¥H$R˜<B–2ª–ÉðÑ£²<;ë!éÁ#”š ÖJ%i¡’÷õŸìÊ2•n—Á#²÷ÒÛcQšŽÃµ“²¾‰D²ÙÕÍèz£Á|6K8DQï­‘\ÇEpˆB²T]Gx<Üœš"‰à;ÄáyÉ{Ólµ˜YXÄŸŒ£¾G·]«§Ðm¨& Æ\¡¸°ÈÊêªìæ-‘Har¸®ËÍ™iº*$£÷.¢Ö®›Ì_ŸÂl)ëOìŽí8t:Î>{‘ÔЫssÌ/,púäI9±Jî|ü ÁÔܦ¦’L$îù3ݦÊÂj×1¾ÿØ=L³ÈÙe`¢_4ÊÌÂÉD‚à­~+‘Ha²d–—©´Û ™¸ç÷[U“…ºÕAb±óþF˜f‡Bé5jõB¬‡Ÿƒƒ,¯¬’J¥H'“ÒZ%›dWVÈW+¤'ÆïùýNCcñF†fÑG2~E9ØQ“n·J>•fÃÄÚhŠÇX­T˜ŸçÂÙ³Òh$)LÞ›V§Ãlf 4‚vt³b²xs³ž$;sà£N“|þ2år ç×ï§îõ0»°@"»çXI=Ëb.›Á`x†ªl»i·KärרV;¨Ú;…E!=2L.“%Ë1xˆŠ6J$û™]YF-,.ÒS6ÙÝ%Jz,MæèÕâD§üJ¯ÝnË]¦\.#Ä0(w>DT;m–²i­æhÙÖ=[6t:‹“Ë4‹ áði4ÍsÀýgÕÕ·¨V»q÷x^/z0ÈÂrV&ÂJ$R˜Ü›J­Jv­@4¾+áµUí±ts³#>yVz r¹7©T*1À½Xš®LÄÉärtMSZì!§Ùn‘)䉦Ө?â?ëÛ7YšE…Hä ºî;àþ³F.wZ­‡)àÞ‘Õh_šZ§C&#ŽD"…É „`~q Åçà Þ9ÉÔ-'—1kQ¡ƒ/JÖ·o®P­–¢0ÞógC±Ça^6);ôÌÌÎa©*pøŽ×»Mu=§¤¤ž>ТD±±}sjµ‡É÷%š¦L$Èæó˜RÜK$R˜¼›|q\¥L¬/}GÞH§ÑcþÚ2¾-qÈNÈ-ä‡OR¯Ïpÿ±©×—©Tz‘Æ;­ç ¨Ø9n߾ͅóçem ‰ä0 €ùÅEB‰šþÎÃØuz´ªÏ“† ÓlS¯Wp݉žT KQ¸19ÉÓO>)'ÖC‚ëºÜ¼uÅkÜÑ9X¸ 
ͪ‰ëöÑnüêÀBZ­öúéµ-øŠBjd˜Z~žeáõx¤qI$‡U˜¬är¬5Œ ß]ÁTQ ãÈ!yÀÔÉ­¿ÁF]†ÊR†f«E8’|¨ÖjT{&}÷ì'¥àõN`‡ÀLåöÖDÉÛž×KÃì299Éã/Jq/‘ìAv<ÇÄvæ'“wo”lá†é:¶¦rkJv> 8®ËÔÌ ªÏ'ýgP…þñ1ª6ÝnWˆDr…I©\¦aõˆ§Sru²MkßÈõN—V[¶u?èÔêuªÝ©ÁAé?Û„æñжmfççBÈ‘H“0q‡Ùùy<Áà{v–lá¦i=æäÄz€q]—ùÅE„qgn‰äáÅ}jxˆµjEžÐ‘H›0©ÔªÔ{&ñ~9ÒÛ;³’¤X­Ê¨É¦ÞlR¬×HJÿÙv¼×eyuEŠ{‰ä°×u™[\Bñxdó¹Àã÷ÑS9±P„d–—quýŽº%’í#œJ’]]¥×ëÉÁHƒ0©7”uýÛ»Ús¬"ËÙWéØö»gqìÞÙìè9[}L !p]‡{?çµiV®‘ɾNÏÙýÄÓH*ɲ,³} iw:¬®ˆ¦RÛì?–³¯Òµ;üÇêæÈd_ÁrÜõŸFù*™ìX{À±¦¬•JÒà$’ƒ.L„,çráÝÞZ­Ê®^ù,ÅFõŽI¯¹ö._þKJíîV/š^õ¦oþÛºkRus“ÿ‘×_ûc^¹üWÔº]¿yÁHKQ(‹Ò’ùBGÓ„·÷Hx£ôW®ü'ÊíÆ»^u©¾Ëå7ÿ3•®µeÿé–¿ÏÔ­À¼ë´˜ÀµëÌÞø ^íyõÊßPß#Q =`iyGžp“Hö ;’Q×5Mrk"}é]ûÃ\§E£¾‚åØhF”HdmãTƒpÚ4+ôl M £Ña1û& Ëüɳ öÅP•·çUÚÅËÔ:&ñÁh-^Û37Ð ²¼ºÊðà Ü2; 8ŽÃJ!?Ù=ÿ±4«XŽƒnÄGÞå?-êU,ÛBÓÄÂCh¢ÁRö ‹¹"¡äiÒgÐ7ýGÐ\{ƒZ×">𚙩=3Ö±tŠÒÂÕj•d"!O²«¸BÐjµh·Ût;šÝíN‡žeálD35UAÓ4|^/!¿Ÿ€ÏÏï' âóŒB‹;"L kk˜@âG: >²›Ûçæõ¿%Wo k:®cíÿÎü,º¨2uí/X©TP5 ×6ñ'?ÄéÑ£TK¯S©µYͽE8r”øÛeßð'.q:ú$½êËd÷0‰$”æ(•Ëô¥ÓÒ³kÅ"n—¾¡ÁÝFin^ÿ[ º¦ãØ=bƒ/pþôO¢»n]ûsrÕª¦âÚ&ÁôG818L¹ô•šÅJîáèQ¢oçÆ( äSœI íØa«›cêú_°âñnúO·1CO¬]È|“LÅæâÿŠt$Juõ‹¼víK¤ûž ¥.‘]aôâq´oNý¹JOè8Cƒç¨XgNÿó_ÀOCS)¬­qd|\¡dGBP©VÉ®®’+q4•@,J*Äðx¶ì3Š¢¬/Pu×K0Áu]zÝ.kµ+·nöùîëgp`ß>8å·íÂ$—/ ;Ø K3ŒŸø4£ñ Â¥ºòVKŸ\,ÇAÕ½¼]ÓMQÏF–¾°¨”®3;÷C\‚Á…ÌרÆ1‚#ø£Û—t*Ü­KÓ ÍçceuU “}JvyÝïG7Œ›œ´ýƒ¿s~Ó~’ <ª(D‡>ÅE× »ú µ›xü'¸xò$^ð?ѱ5V ¯QÍ;Þ4'Îýñ~,ãYÆË9Öòëþ“6¢FmR.¾ÅìÂë¸8„‚a K/Q ÃÇŽo—÷ (-Œ-DÆc”KÇ‘Gï%[ƶm^¿|™²Ù!˜H0œLî©ëS…Ä@?ÂuYž›çõko160À‘ñ‰=#N±MõÌÛÝß~åUãcx|÷73ôº].-ƒê~ôлø½^¿ûÏRP”»öý~ç½Þÿ^¿{?˜fRé%,ëp÷ïèzþÑ.üÁȃÖ2›-¬R™{æÙ‰vŸ!„àåþo:…?t÷Þ¶n|/K³4Ša xÿq]“bñ%:á{ŠUÍròq?=wM{ð<×qÈÏÎqéôúú¤QJ˜µr‰¹ÅEòµÃÇmž®ÙËóN»^§–/0qáܹ=!Ê·-b²ºšÃVÀð;zÁïu“ôõ÷3†û}­Lž[iuúÇ:œzbˆ@8¸¥÷0|>JÝÅr‰t2%g‰}D±\¦Þí2¼ÃáÞƒé?M[åè9?'Ÿ8½%Q ª¾Pˆ|¡ …‰äÁD­ÜšºÍr¹Œ 0ròľH¢V…`4Š×ïgyqë­·8yì±],îÛT’^A~mP"Ž"3Ú· Jj Œ·8õäÉÁ¾-O⪦â YYÍÉÆ~û,Z’]^Æ Ê![%9&Îx8õÄI¼þ‡X)ŒÅ¨Ôk8Ž#‡Vr_ôl‹·®_c.·J0"–Nï;?Ö=†Ž¥î:¼ys’åÕÕ]}†lËèµ»]ªí¡HTZé‹’*ãmN=9Brà!WiŠB8‘ R¯Ia²p‡bµJ4•’[p[%§Ÿ:?ôðU.u¯‡V¯'ûOIî‹N·Ë›×®³\­’' í[V5äÐ Âïcrv†…¥¥]{Ž<´0B°¼²‚£(h†.-õDI…ñÎöˆ’ Ÿ—fÏdMN¬û†µR‰¶cáõyå`<€(QÕwDI ¼=E£Uų²Ë+FÉ~%®NÞ ÔiÓ?1Žw.û൭B,Æ›ˆ3“Í0¿¸ˆ» ~ðÐÂÄb½a_2)W{(J'ºœ~jtÛDÉÛ†ed——åĺp… »²²žð*ýçDÉ2GÏœyzûDÉÛþI&(×ävŽäý#%WoÜ 
bšôíèIºÝ'‘D‚@*Éüê ‹‹¸¸ûöC “n·K³Û%—GTï{е*CG-N?5F¢?½íFK§)7êØŽ-{cÛ6åz]nã<(YâøEgž>…?´ý«TÝã¡ë:”Êe9Ü’»°,‹«×¯Sîvé=°G˃Ñ(¾D‚ùÕñ¶ÎCí½!(‹8ªúÉ>‡ce¢(.ªÚaðH˜SOŽKíL7SχéºÔjuR{ì ½äNªµ=án¹ø’ª LsÇ­|IâZ(J…ÓOqò‰3m‡üt=긼²B_:-£d×uyãÊÖ:íÍãÀ™`4B0»œEÓ4ÆFG÷¾0q… W(|ˆ£EXN㟅QëºÂh:ÄÑócDâ;—(¬( ºÏ·ÙÊ]N¬{—|¡€áßÚª_Ó£§ˆ¦Ê@ûpøç"ýã}¨ª²£þO§¨,¯`;ξêÊ*ÙAa,¯¾ö+µ*ã9ð¢dó`ÛÓKKƒý;_™ú¡<Îêõ¨·Z$ŽŒoéáçñù8÷ü‡ÎÀUÙq±M&©‹Ø¶q€ö?=Ë¢X­Þbo)EhJ'š:\57…Îöøý˜®K£Ñ Kc•¢„ÉÛ·É5Œ>vˆÄêzÞU’ªã2»´D4!°Ãõ–jtK• „áß%ÿ¨ºÉÉ'‡„~\᥵5LÓ”ÂäÒ³,®ßºEW¡ÁC?š®Ó?>ÎJ6Kl9ËèðÈŽˆ“-{[§ÝÆ´-b™„ºm“Åë+´Š üþƒo½^ƒ|þ <ÁýãGðnÃ)È@8D­ZÅì™ü2²µ—èv»ÔÛm¢#Ã;&Joä¨ç üþ£(ÊV\] t(¥Y(ÞÆ“} qilµðu»U¥ Ò°¼êGŸÄí;ƒHŸB öƒf<"ÿéRÌ¿I·[`ìLt[„‰¢i8ªJ¹Z% J£=d!˜™›£jv˜—²Çç#H2—ÉD‰î@_- “b¥cG2“{Ý‹7Vh—’D£§…œÂn IDAT|èÌ4”ËóÔë©mzº×‹£@½ÞÂdQo6p6œ|»±LXœ\¡ž÷‰œDÓü3³…–y±ô}ú¯}“tf†x¥B¸ÙD·í»zc  çñPýr"IîäÓ/|åè ¨á!Pw.b×ëu©TnQo8x¼Û{bÀ R*•’§Û+¹™µ±Á÷}Î)˜(Š{(ÆDh„ã1rµ×nÜàÙ§žÚö[&ï„¡·a÷l®gi­%‰(i’Ë]¦Tj"Ä ÝÖ÷7Šå2ý}}rbÝCKet¿oÛï‰mÁüõê«>¢Ñ-ˆ!0 3t—¾ÅÈ·þ3Ùy†——Q? ¸’x{=ú ú NLO1wó¬œû&¹g~ãèÇQôí/¹ßëõÈå.S,7ü§²­ïï‡©æ ˜½>¯lpXèY³KK¡þ÷yÎÊ,½ê÷qz…C"L\L.JŒÁ#äææ˜_\äıcÛ:—mI˜tM“Z»µíah×,ÜX¢‘‹9ðÒ^¯E>…R©ŽCìD¡9_0HµR“dž÷¶cS©×ðmcT.Ì_›¦ºì'lK¢$xíKøßü¯œ¸ù=NNOß!HÞþªˆ’OFq6j‰$« BõªxÇ~uÇáäô4ã ÜX¼ÆÌÏÖÐOþ4ø·¯B´eÙäó¯S*6ügû“|}?u×¥ÓnKarHBp{jЦm186òž?çU'Y›ÿQß’ñÎÁ_L nÝ b))¡Ž N¼€l>O:"Ý>ßÞ’0i6›8BlkÚu ר.‡ˆÅ;¢$—{“R©¶±ÒÛ¼þ¥ünW “=B§Ó¥išÛºmç:0wmšrÆ ‘¸°µHÉõ$õOÂðì$ý…Âæv@¡Ü7Hap˜ÂØ Jcg°"ýxŒ(–b£ÖVÑúç®3°8ÅÄü즠ñZ_yƒ@ëÿbá'–è<óÏÑ¡m%oP,æqÝa`g¶Š„¢€¡Q®ÕˆËcÇ‚µr™l±H|ä½·ï¼Êu*™—‰ú®sîÜ ýlA>ï¡R1‡çéºc›¯{~ʪÂôìO=þø¶0Ü’0)U+h¾í CÛ–ÃÂEªÙ ±ØyTõ`ßåuQr•R©‚ëö;'ÂTCÇÕTê‘pXÎ:{€z£ªŠþõîðŸž`îÚ<•¬‡DâÒ–rJ|3ß!ùòŸstò ±ZmÓ"ë±7Ï_béÙ_À¾€@ bppq„K¶¶ÄíÒ5^ý"#׿ÏѹtÇA‚“·oá5ÿŒ·‚1ì§ÅØú¢æíHÉÚÚ*®;´c¢dó!R­VBÈíÐC-™_Zˆ„ðî—gˆëTW¾AÄwƒÓ§r^”¬®z¹qCÃuçI§ dòï|OQúFF(..±”É01¾=IÂ,L\!¨Öêx·©~IÏ´X¼ž¡ž ‹;ð¢Ä4[äóoR*U7DÉÎטð”+F††ä̳(UÊ~ÿ¶<äz]Áâõ,ՉĹ­%ºV2h“ŸghêÍ;D‰²#cLýÄgœúÞÈÝÞÞÚyÛ‚ÕäI"ñ£F>Lîâ7¨|í/9}õU¢:*0¾¸@ý«ÿ‰>¼ÇEpaÖë™äó—)s¢dçñzê2ÏäPˆ’[ÓS䫆O¿§ªÎMêùo\ãøñº~°;¸¯¬x™œTbŽDbÛ¾Ûþ5]Ç1¿²L*•"´ ¹§ìÕn‡F§M4ùðaMËt˜»:GuÅC,6†m7ôM¶íkk·6rJ(YŸXýÔʲPÔ^Àq]ª¾m(zçØ‚…kÖ–:„籬.–Õ} ÷P­.žébìÕ/0º¼|§`†—æ(¼ñ5VRg!yê>ßÔ G^äæÏ©EÿŠK¯¼LºXD‚ó×Þ 
ý{fµÑwöÁE®C±xk#Ñuç¶oîòŸ—šãЖy&šv·Ër±Hrdøž%ç…9C=ÿMÒÑ·8~| M;¸¢DÈå¼LNj1G<¾ü¾µµÂñ8…Fƒ¹….œ=ûПÿÀO©V³…-ÄC•¡›fU[h )ƒ¬­Íx÷¬ år !FÙÉí›»'VË¢ÝéÈíœÝžüÚmZ¦IÊÿðùYV¯Ka9ÕKS©lí4—¿4ÅÉWÿ Ggg7_3½^<¦‰DëuÿÎq­&“Ïÿ"Nüü¼ù í§~ôyþ;_Ágš¨BpôÖ5fnüwŠŽÐï?òê8.µÚBd¢@Õ4C§Z¯ÉòôW&oݤãØ$ïÑ"ÂjÏÓÌ}“¡äeNœ( àbÊB@>ïåÆ ×%‘XþÀ‚ŸŠ¢ëëc5³ÌP¹L*‘x´Â¤T­¢y½ÛR¿ÄµUT5×óôá0~wXy¤¢@7 \U¡^¯Ka²Ë4 „¢ mK"²@Ó’x‚ÚÒo+¶I´øCFgñZ]_ˆ«—ž¦ežñÅ V¯óÔ¾…áz™úèœô³ ÜçÌì}‚¹çD*9ž|ãRåg.›Ëã¿„“~ê¾þXVM[ÁÞ…ûÞ`Jµ ²ÎÖ¤ÙlRnµ¿»!­Ù\¤™û£}¯pâDá@÷§z[”\¿îÅqfI&³÷ý÷zý~´`€Å¥%’ñøCmUëvÑ‚j­Š/ÜÆp¤Wì4Š‚á÷S©×–㱋”jU ŸoGûãÜ/Zk•XæUF²Ù ™o^¼ÈÕþéÕ9Ä·>ËÄ i4xâû_B·mn¾øo°ûžå>'ª†=ða¦Ÿúe†² æ×3çF–æ¹½ò øE„±÷«ªzü>…5,Ë’§ÛÜ‚ÑåæÔÖÆ"îŽïõrdoýŸ2›R¸}»ÿ@…i òy×#•Zz`M%),.Q®TH>DÔ䄉iõht:D2œ¹ßðÔk5y²`WW#‚Z­†/²7¢V¾Ò$ýK7ðöÖ{QuQVO½€6ð3ûZ¼¡yp¾ùç››E‚í6_y L~üqÒÏßW´C1¢tG>Îü‰¯n “D¹D2s•Úé&Ê>&†ÇCͶiw:D¥09PÔ êf—Á‰»£%~Ï,}±ï01~óPtò^]Æ4s -lÍO¼^<¡3ss$"jò@¤Óî`¹î]ªR²„‰ÏKµ`Òév øýr@vc5ÒëÑêv‰í`ãËûWI.tWHó›Ñ’ÌØkÏ‚' jŒò¹Æº†øúŸrbf=,ÐépáÕ—°u[/þ!"õìýýíÉ3ä'žÄúá·0lÍué[¾E¶:5°÷W¡ša T…F£±#½A$»-™š™Á‚{&¼Z–@UÛŒŽÞ>ãQ©¨¸nõ¡Þ#Ö—&73K©\&•Lné=(ž\¯×Quíž7P²·Ñ GYO¾”ìívûžáâ]±‡v‰Èê5"õúº0QUrC£:öNþH`Ú™ßâõÿS'Nlþn°Óáâ+/qüÛÿJ¯Ý×ç)Š‡Â±“x§!g²°‚kWöŽS?@uc¼$ƒr¥B­Û¡otDÆ6¡jþh”™ÙY\wk=„H˜”ê5<€Ü ؇(ªŠjÔê59»D­^C3ôi|ùàØj9tÛ^_9* ÅÁxÕÔÛ3þ§“7>þL?±Y»$Òlré_æØwþʯßÏl…¡ý®¢Uv£U§·/îŸ/\/Ž'90Ñ’ù¥%„aÈ]€mñ±¾4•n‡b©´³ÂDA³ÑÜ‘Æ}B·•§Óû‘Z Â¥Û\¡mÙìÔÉr!\ÌN‘fc…V«„ãºûÎXü¡•š\ñíÚê¬VÇn‡f#·¹±ºhÎzò¹PTÚ‘4ÝÀ=Öü4N}†Ëÿ}fŽßô±X½Îßÿ"Ǿýï üA‘A¨Õ¡Ø7ºùûš+ð×ó(ûD˜^/m³K×4¥1ªµåfƒDŸŒX{B!æ¶ô¬¼ï=™®iÒ6M»X`H¸5¦¯ÿ)Ýðó<~úãMÄ\»Ìí«‚™þEž<ñ<Ú&~!lŠÙï F/’'îX4 ×$Ÿù Ó ¯bö:(š—DÿsúÄÇñ{öÒö´ry™»K4 |}©ÝÚ¬}ŸË7¾Ë™ ¿ÁcÂEݘ4Ó¡kº¤÷8Ðî ~ú7yÓqÑÄŸ217‹ Äk5žüþAn?ýë,; >³é›o‹’^k–êÔ †Þ©¤º£×„}r2OÓuE¡Ý‘…Ö˹x=ò^îLÔ¤¿Òü"­f“ÈæeÝwĤÝnã( írÚì1-ó®Õ˜ÙÎÓµÍÿ;vÓ¬cš lǾãgïøžm!ØÝe¦?ÏêÚ<Öü¼Ù˜dfæk„ú>ÂSÏþNyŠÕ…/Y[Bì£âšnеÖOH±Ýözt­ÞîO‚¢K»UG e½I  꺈 ƒ®E¯×À4ëô¬Îúëþ*ý&W^øX˜8ÂÛë X­ÆSßÿ"'^þ÷t²ß¥Ý­á¾»+±Ófyþ d²‹øÜw„¼«(غ—ýrÜAÓ4U¥ÕhJƒÞç´ÚmV‹EB²`ÞŽú‹£©,e³›óʶGLоWöÇ?@DUÞàöôשµ(ŠF r†“g~‘¨Ï‹Ù¸ÁÍ[_¡Þ®#_èG‚öêט_¹LDñè‚££O¢o¬úT=Îð±_§oèY‚ƒˆ÷ fg¿Lgã=öKìAÕTÐTÚí6Á@@zÎ#¤ÙlbÁžñUÇ£i¶½^«¤UÁí5>‹¥™¿'[˜Ævšc`ô'96zÅŸâz8AáÜÓ|8²0Êz…Øç.¿B¯Õâþ Ωß&±¹í«I>Aðâ ßûãÍkp4…n( 
ªgÜÄz@µf“QiÒû;Z²ºŠ­*îQåõ¡t¿ËTAwñèw¬oé™*ÂpñhÛÿÌz]•Ý4ñø\´]|@ÅûÒäWs}ÀÓ ÷-LªÞ=‘ø*èÔ§X^N¾k+§B˶Ña噾ù×Ô”Ç8}þ×Э S7ÿ–ÛSž:ÿIVæ¿@Ñ pîÜïâu ,f^§Ú(3܆Xì(ý#Ÿbtèâ¦(Op‚#Á‰M ¨Þ¤ãÄ‹öï«³íª¦¡è:õzt*%g§Ge±BÐl6Q cOV¤˜¿BÕãÅÞJªërdå 7ž/SÊ\åÆÜ[=õË %û),~ž›×ÿ ¡èÿNBÜææâe'ÿG®ô?‰øæŸsln7›¼0y•ˆÿ¸=|7ð(*Šæ#9ðatû%úÖ ›“r+¥@hž}s/ýÁ µJUõ>ÆqrÅ5‚±èö?ÏlÿöÙ1º£~íSüOX³åáïþãâlÏüD}KÏ ! 0Á¯ÿ‘@c=ä/ÿl”¹’¶)D|ý->ý™,OŽÛ»¶xöƒT…ÂÚcc; Lj5|©Ä^0+*Å+ÜnÏßñZ½œ¡ºÍ%ò•c—~’þÄ0Ä`ñUnænе?Ž¢zèõš´Z%‚©œ»pôÖ÷Ì}>|ºñž–QÏ}‰›·¿F¼ÿgJ ²ß25¼µ¦ E?jª:¾€Oäöôš‹,®|iCåC¡¡vEFV—™ìÌP-\F<Æøè³ ßѱùªUAÀ®SéÖ™~ó'\õ/9¹Qç$ØéðÄ/£x‚ÜzñqÓëuN´^ ½2¹™ÓPM  èÑû/o¿0|^Ú½¶ã ô~÷”üÚõN›Ááé¶ža†äPX™c®m]ót þþ¯†™ø´ÅO>~çv¼k,dužû¹ž8ÞÛ\Ä Ù»/æ#a²++Œ ÝwÙûú)Ûq0-‹àž(Ì¥3xäÓQ›çÈõ¯1´²²1e*ÆqÃû¬ùŒªÒ.­VKZÛ§,¯®âFwu[µSðO_O2ñ€îp䱟üx…°vÝÇW¾’bjÉ‹«†×xáÃu¦¾ÓÏW~å¸wÍÌóÏ4064½ÝÓ0u‡Ógœ9jí©ñŽ$k ”ÊeúûîïÔ} “f«IO¸{f\Q4TUG}[(ÚfäBÓ|hŠ ×ëlKt°z5T-„¦*¸"‰³ÿ’£vzùS·ÿ‘™¹ÑÓg6&Ì{‰›Ræ¹1÷&‰‰ßà±cOãÙ§«%Íë¡ÛëaÙ6yvÿ‘ „À완öPÅ]UÕ1SçX;ˉ©[莃a[ O¿Jàȳ#\@CØ-,WÁ«{@ØD>ÁSC£Û^e5ó®Î×ñ?ý«Àç6ÅI¸ÙäÒ¿ŒÐ4žý=Ôò-úVçP7’à*‰~ÖF/"<ûkKQUU4]§ÝnKa²iµÛ”ê5ãc;º0k},.úñm<&zM/M¼€èüýߌq­Úã£-£Õü|ó¥aš¶à·¦Êwþa˜¯MÃO¬„·ãã—c¼žîrv¤Gÿ É‰“5Nh£¿ëòí®†í*\{-ÎôuŒHSçj<6n±ÛëgUÓÐ~V ùí&vE×÷Å Û&RÈ~tðET3C®8O²ïSø•Ù™ÿJKgtøÐo€¦c!Ðаh52´Í1>ߦØé5o35ûm¼ñsdh‚^g {bø<¾ý噊‚£*´Z-<±˜œ©ÍV‹žãìú‰¶»%ÍÚijäû¿ËðÊ púæ KFY6¯³š¿E,N1ó]L}ˆxxFù-¦®14ñ"±@”HdKŸ§|ìÓ¼ö ˆ¿æÄÌÌfWâKßÿ"®ÛÁתptnnó£3£c¢ãhz€ý$ñß.TØh6QÇ}Çj.‡£ªžÌkR¸úƒÕ©øfˆ¹íç¹O@5à•·||ä÷ùÔs°ë4VC|û?ÿ‰¦†©š¤ºªsî áµñ› Ëø©&c çŽÛ…A6`|ÀbéJ’¯;Æïüþ"O³v=å ‹QZÉÑîtî+ ö…‰‚F£î1ö„züI„á¹Ë¼þ4hªw˜ã§~é¹—¹våÖú÷BÏpòØ hZ€Ph€•Åo³¶ú-EA÷ pdâ9|Þ§™Ï|Ãðqúèóx4„C³2C­–ƒÞ÷y­òÃÏ4˜ø9NýÐ6néˆ?⣨*ºÇC»Ý&.…É#&б7üÅK BU@è~ãcéèK ®®¢ A°ÝæÙì ¹#nM–¬DUœ8ó+ F£ˆÞõe¦oü9 ¢(:éñO‘8OÙ[ã{O¯  ÁÑÙwêœ|ø[_Z÷˜*³•p”7†Ç¸V.1ÖjoŸ*ì|±6Ãï£.ó´ö%…R‰P<޲£Iè‚'_(ð/~¡tGÄä³ÿöøú|ÐÔi9.ƒÑ Ý¥?mÒšñÒ´O½X`úïúøë?;B8aròl~´Ìû=ÎN—ù×ÿGƒHÒ$´ !þßÿûßy%ÂùñÒfîî=·ýØ ”Ëe÷Ñáþ¾.·ÒhàÛ'r5ʉs¿0¢wˆUOpêñãéGUTbý?Æã±³˜½(:_ÏFÒMjä§§Ÿ¡g™€Šá‰âõPœºø¯é¶Ð€ò€z,É2N¯—ñÉÉ&B0“HàúóHsɘ’ÅU~Z¶¯(Ptx"‘¯ÌYVOCH$ÐËL„m`ZSËqP±Êv3¨g¤ù1<‘3„OQ£ žÄ™JáH¥°•”ËMÒé`Êïc¢´–©òM¤ ;±}M=˜`>øw´¬8²œ ¬ÖIËöÕ%ºÓI,5šO _215‰%I9Ó¹|-1%£#×oRÖô,ªciî+··€©Ñ±{²ê½žM*"è(^–®:l4ç†yrcl´4F¸ÜIKWê„on:Å—“øÌl>v9½ 
Ûf.™\¶Š6EÕ(¬HÅÏ.ï©*Æ2w±¶0›L¡g2HB $ -I$]N ͬùpÊ08ÿoÉN.ÁŠ0 •‹ƒ«º·ªCÇ´l,ËúƒÉò²:ÎÁøÄN'ï€})Û¥0<² Ü¿EEóIñ IDATT}éBþšÃAÒ0˜½ëX‡{j”eÛ˜–µlí{}…*›­Þ@Û]ƒ¬8ñøV¿ôZw8˜IçÉ2ŠeÛÄS)\>ï²<_Ó5š¶5#ì dpeEÉ~0’„%≎|^ΊaLÎÌâ)çã ƒ•þí(Þߦªåqmií—¬(HšÆøÄ>¯wQÀxO`’Lfá-&Ià æ“/WÅãÓu2FÛ¶ó‹±\Þšm“12øôåkè$+€²‘ ¹Â4]'‘L ó‡=G%•J‘1 ‚n÷}Ÿ7!$ cc°b¦©Ñ?°Põ‹T·=Ь,½-I¿ŸñXŒúÚÚÅï§{= žL"å'ЮOó.ËX"[Κ÷ø–G,ÛÊ2R«¨C1b¥{Íá •JåzËød,[&|ŸÎµi—01··µnˆõÊd4BÕ]Ôuîá N¬¹/ñø¼LööÝ•±W?m*úêUÛ²™Ÿ6ˆW/² Z¢Âù8E×H$ø¼^ò²ÞZ!I«ÖœÐ¶!6'•HoH‚î‚⪕a0dU%•LæÈsÕœ ÁD,†Ã}ÿ# dGu[ÃHäÈØÂ Ë<ó[QUÒ–EljŠ¢ÂÂ/L2éô2‡¾»“¾ }L*hZhÝÓL›¸@ zzߊ|f¶d8ïñ-—Ä ¤UJŒ´-¼4J¤×F‘ÝÙ6òëX 3ÁÄÄyt—Ì.ÿ><¾åÿ½Š®‘J¥±…@É“œ“t:Ít@ÉÅ1Æû˜%¾u®? "‘Óó ¤W˜d+Û†aàt:ó‡>—ôϲ˜›Ãá+X–çËbŒé¡wñëÐÜ1†Ói­ëõŒÇ-NòârÅI]"öEBÑu¦i´&ßÝÐY+Ö|Ë2,®žéarȃÏÛ¹îA‰iD£§‰F‡1Í¢yP²ò¢ª*¦•&Ë,Ò¦¬®€þ˜‚þ QÆûT <딘f𱱓D£±[@É*ì±E&™Î'皤Òiâéô²t]–™ Öÿ>^õš›#ë”$&§Nù˜œŒRZz IZ:Zu:™™ž¾c´Ã]5:É€$!-sR—eZô|z™éa/ÿVdy}÷M±m‹HäccÃXV °zž–¢©XVžŠ^ž}¶1, EYÞ™mÁ@÷8± oA#šV°®×Õ4ÓŒŽ~D4ŶKW ”,VM#•O Ï9I§Ó˜Â^ââ Ä £W>Ä«¾O[Û(N§¹Î×1ÃÉ“Ab±qJK/!ËKËê;=nfãq,ëvpwW«™L¥—9ïÀ2m®¹Ê̈ŸPhûºOÀ´,“Hä8ccÃØv%ËÙÀæ !EA Ýq(ò²4ÀIJ,”edLl.FïWðÔ¡(®uÝPÍ4SD"‰D¢–\è«i™t&àsL¦fg–<ñU"ÎÀ…c„œï°iÓº¾¾:ÃHñÉ'EŒÇ¨¨èF–KíÃêNS‘èyŽw&éô²“ÈÀ(ƒ—&Ð4Áèèáu¯,™ŒA,6“ ²T´-e÷Ú›o²¶Ä—¨ ,ïDÓøt𾋗°2~’‰Øº_SÃH09™ÌP h™L˜ä0™žÁáv/™³+‹ÃWŽr¾ÅŽý”?&™¤¢âÜ’3% ÀÞ¡cÚ‚T:}[ò»ÞŽ™te™+rÒ‰4ÇC¨jñ†P–túB˜9JNSIåcäK.©tyYY@ÛY*Cw=´!ÖÔ0Î#Ä$¹4/GV2ét¾ä>‡Ä‚D"Žî_š\+YŒ1Ñw˜°û›7®{P’Lœ0Q,[`ÛyoÉ€‰eaY6²œkÀ^;Ilnê3¯›LŽ}ÌD"ußc$-c’îOÈ`¤ï3ú#HLàô™’°½TÕ<ŽGIrþì_1<9ÎZ?u’”m¢—Î7YË P’É6Œ>›—¥Ýá$‘HÜ›1±l –}NÎýˆme°…}‡Áµ­ô- DIMÊ$ÝYˆSw.òžŒæát¸HÏ\`¨ÿ8­ü¾b ¨@A|ê,cã}Ô´þµ%•$&Žpìäß1݃¯ª‰µìôɲŒ-ì|«³t–=[hó_ÃL+y»»n’ÇÚSܯϒœÔ‘ýw„KarÄ͉“>"º7CÇÖI65fPÖ€ŽÉšFr6Žif‹&ÃÈY£ 6ÓѹØ_XpaÍ‹ÏàìÔ×®ü»`?[[ö££tŸýK®^ÿ%EþƒŒ½ËÅlïú&f:ŽaÚ”•ï Ðóe5_¡¦¬= @æ «ª j~3ç<ÅÔäu´ÀNÊŠ«×zˆü0É3&Kʘ؀,墙Œö½N2zËl*a2 ¢Ò³—î¡|Ó¿¡µºÉšÂNý1Ãçh¨n%ÆVk))}¯ËCYÅ(hÅ8„À¡PX²ƒPAè6À!I ²l雳Ýo’2 Š«_¢8f=¨+Š‚iåINœîù»kÕÒl™cï”î²Øµé&0±2*ÇÞ*Åx"Ácí÷7ÄL©üôûïå‰-ÉÛtlvÔÃýË*bÔUd¼ä£^~÷êeS¥™óz¦( Ëœæ·8cb˜9 LFfš¹ÙÁP DšŒ‘Ä ¤â#ÄæRÔ·>‚¿ „ŸÒp=熻I[Ï£¹J0'ú8JIQå5Ïâpø ݃hºMVýÜLzŠÙ™AÉ)t½ !ì5ϘdY1 +L–î„Ú6äl(G¥¤r{oaLÆ8ò?Ï{œÓ¤…Ž×_œÕ/Å· 
”Ìô0[§¨òIÆ.¾É‰ãÿ7ÞZÂE]TVT!ŸwÎ4ÂåûØ^ÐL,z’ëCGèm¥±¼~Í÷ÏQÓÌWµå‚¤3idEÉù3e¦®t{éÑŠEUÃ-õél¸Ô¹rÞG- Jkã´Ö¥¸öq!GN(W B[j2Y0ÓqÍqà…6WZÌõûùßþ]-{t:*ÌœgMdYÁöBÉð]C9RÎÆèÂرéy´yÃjQ>™½D °ì–Ð4ç<`QµL3‚aËT4þ –ú FÆNrnè]guÍ¿J¥ï^°…m ‚Ų£h7©™3œúø/¸ÚW϶Ö} ~M^¢d‘U>F¾´Œ‰@ä,¸×\%øý5·MPÙpÔü ±mƒyò¾’'ÙÜÊôÔuÆ£§¹vñ¯ˆ§Ó´UﶤÌ$MÂ…u CƒÕÄFÿ¢—¨)«G_9&Vž1É ‰§Ò9ÑqÙ6e2 y¯f2 õ¶Ä_Á;§ÜTפS*oþ¼ˆ}ßèçë{œø§ ¾ØMC] ÅTyÿ—!}n˜Ò9èŒBaR!•–o&ŽKPÚ:ÅoÖMãtÚ¤Ó£CN,Í$°Ö«/)2–}3×qÑ4M YY›=L4Õƒ&Ù¤’³a’JEQÔ ªœÍÚ.¯ý:Uõ_%9s‰îs? ·ïCŠ;¶Ì›áÏäZ“ÉÑÃôOÐÜúnUÁémÀét0—šÀ•5oYóåŽK(Yöimæh9]…xƒ‰ÈUªA0Æ™˜ìÇíßSN0pí0r ‹ò’‡w`Æ{™œ"SBšÏݤy­°˜~ÈÉËçhëú—T†0“×H¦fpª.ÖC§$EQ°òŒINH2Z’9xÍòñåü¯Ÿ/„[„-ÑÙÉ®'!>ìå—ï…èxé¿}p)©ñWßmàãžz(Åh §ù­ßÀ¯K\½àÅò¦¨pMQú÷…ìz2ÂÃÍ©ÛB9²"p9áÌ%üõß1—éØ7žÎôš¸$y˜Ü‹1±L3K¿˜8¼5”2Ú÷süŽgPR׉^§¬ì¸¥iz¯þ˜YJMÍ#¨V9ˮȚ”f:vž©P9w~C4b?åŠ$SQÖÉìø Fb³T•×/°6kÝãË“¥Ó4‘$yMµ šºÇ¹6ø÷|{¬)³úæGÑdÉx½}‡éuE†dZ¦²¾ §Äïusµû/‘ÍoÒPµU–ARï$v3kØÔ¶O¡ü¬ÿóß9iëœfÓ¦¶T›¤£Ÿw» ;§yQ˜tŸór¾ÇËÉ+3<Ú–ûàäFNÐ$ò»” [H9ʘxüu¨NÏgž@º®!©E4·ÿ&WzÞ¡çÂ@Öð•~…æ¦Ç‘…’Ò]L]û%çÏœG’T\Þ6ZšÇéP©¨|˜ë‘#ô»Šð4ìAWd$ wÓÙ>ɵO8=ò² ²ù[4UoZÒQŠ’AQW¾þ^VäÛÚ߯Øë¨ºG’¤ûÞ[Ó2s3¿DöRßþ[PÐx[ÌYVC4vþ.¶· ™Ú–oSXr…™x d7¾`3^— AsçoR»Èlr$ ¯ž€·Ytlý)žÃ対pHh®¶vý>±‰Ë$4Šæ#lÆãô,iUŽ,Çð‡]èΕ>+ò}—Û' ƒá‘a6zQœ$AiI)žìØjJ{ÕO¨vŽ;§qÍß°éYGR‘Å- eóM-Ó’°t>7È¿éœäÓ³^º/øøóCExy€g6ßýŒ¥æTbq‰ò’»K±k× ò¿·p줗íMiÜj®ï¿„tK? 
Å;¿Ú¹9ÐM’ý4uþsõÛò:d5DËÖ?DÈndIÂéë cKS6î+ÉÈŠe‘y»ÙìšoÁ.eßST$ ›~jÓ@Rœ¨·3IvP\óu +žÅ6’¤ (Ž%ÍüVÕŒâÊÂU9ÚÇÄ´mÎ^¸ÀøÄú˜Â©É [:; ¿ü¨sÛ¶ÉÅ:XIR)*x½r.ô–t|¡|¡;þIq,êâÎU‘pûÛ¨ñ·-z먎BŠËYF+Bq%ÔuÔ¡9¤•^Ø…’û/k7û8uî,þÂB6²ÌNNÒR7ÇæööröLÃ@Ws»¹š?`Te®_u²§nÒ*½ýNBe³øT™s'¼8ÊãŽ÷!þ¿¿¨ç㪠s'ý"Å·¶Ì.„“–ê¬/ßÝ®’ʤïLD— ¯/QÔ¾à$-]ÅT5W¯–+½D-é— ‡Ëµæ‰¦ßœ:W“u JÊMZwÔQTé_µob °äŽ®š†µÚ£Td›‡Œa–%PoùŠnòÈ3#Xu)$ÅæùW©8åãÚ©(Á7÷ޱ¥5‰,Á3/Ž:‘m”†nòäW‡èÚ6‹G—øê7‡øø¢“ùüú VÏòÏ~§“§}ŒN)”µL±ÿ¥6µ¤—°*Gbf¦Õ±<ާªéd2Æ=€‰-”¼a]1P²½„ª¦ªÕ»ˆ%齕¼Ü˶óŒÉJ€%J¸Ô¤eGÅ•ÁUü&ùÎÉ9LL[‚Õì,%)6{ž»óÒuX<úüèMüâ1Øñè;y†3”bïÔ¢'­íá Ú^Ü›*®ólm|ÙܵɩFfÓ/ªÞ´<ÀDU1ç‰|wÆ$oX—ST5…/8MK×ê‚’ÀD䇸-÷œgL– (J”Â’ -Ûk)© ­þ7ÊuX}‡À²²Á´¼ê-9(‰Å™Ë¼L¨úëøÃËÃìgûÝ£\ø~’¸òòŪª§)ðMѼ­ˆªæªÕ?vKüš—[€‰äµgõG#ÎÐÒÕ@iM$JB&¹ wy‡zÉe|¼™¤ýM «_À[XµŒj$-ŒFQïÿóß2‰¦gp{Mš¶RÕ\™#€XÊÕ¥ôÜòŒÉ²‰¢D(ª4nm¢´¶0GÔGʆsòÛ³ºuÁ¹ÊëÞRÉØX)¾MqýsÊWìRïÄ$"‹ï‚3!l“Ù©i,!áöù1âÓ(..Ç—k‚655ÅÅ‹©®®¦¸¸u‘A} ÁêžÀŹ {p¹’–02ZÎm”6îÇí+]qE²çÁ‰z7Ô’ï´Á<¾{„r&''ù·ÿöß222ÂÓO?ÍO:::(,,¼'Hqx‹¨© sáÒUÒû;pXsôõF)oÙÌÜÄ(“ |%N†{¯‘vWP]SÈdï>xï(ãÓ lKÂWÞÀcŸ¦4 sýÌQ_tòò¯>K²ÿ4ï½y˜C"PTŠÊ$Óf=ßúÆfF/žä퉌Ŝaö¾ürïi>9}WÌÆöRï›æÐû}<ùÒ Lœù'¯)Ô”:˜Ÿbv.CÓîìÞÕŠl%9óîë||ê*ŠÓCqq€©Ø•[Ÿã±=Í|™VY'Nœà;ßùÍÍÍ<öØcìÝ»—M›6QXXˆ¦i k¾ÔAIMVúF’6ÆÓ² Š¬Ñœ«3åFO ;€‰mÛ+˜üzo±-HÌ,cƒy t—‰«@]ñ¾c’Éd¸páï¿ÿ>¯½ömmmìÝ»—gžy†]»v …¾ÔE,kBEa‚ÎÛDídŒ³G1ëÝξÍÙç¥F¯q¾û]º»Qß}éÀj|Gß|‡Þ¡ ,Kà.,ç‘瞣ʗáÈk?etÚ‰bÎPܱ—mµ6ï½u”ØtÖãñ•Õ³çÙ§) ¹¾48‘e™wÞy‡?ýÓ?%PYYISS»ví¢¥¥…-[¶PXXˆaäÆ?Ó°ÇÈ$6„úÛFÑœ”Ö–£¬dƒÈ,er‡ÍÌÌ066ÆÈȇ¢´´”M›6±ÿ~{ì1:;;±­;IsQÝÐÀÑ7¯06cRbŒ1KP¿wÃGÞa`d‚šP˜Á+W 5tR¦yï×™rµðä7w£Î ñË×þ‘#ïòµ¯>‚šb|‹•œàÄ»o3ënçÙƒ8Ãû?{ÉPYVŒYæ$ƒ‡ž<ÀÃŽï¿úcNœ¸Ê7öµQ][IhÛ£lïj'Ùó.ãÑ +£û¢MëC/²ëÉçßú1ù„¶ö´ØŽ>Aóþ¯Óµ¹’øèÝ˶=}_ŽÁµ|ÿý÷)**¢££ƒ½{÷²wï^ººº²#$¤¥ÍŽTuªîË£ïQ# æsµîÚÇdµSˆl /M1v=†m­ÿª[ØÌÎŒSV_À¦=[Vœ©¥îÙ IDAT1ÉétzÑ·“É$Édöb7 ƒ3gÎpæÌ¾÷½ïñÐCñÄOðäd2¾Ð`W!°, Ó´oó:æ&F™´Ó0ÿ$gi=~ÑBYŸ“‡öÄÍÙü!§¯JìÿÚ‹”{œxóUÞ}ÍÁË/ïf6rëÑrž~þqêª8÷OΈUÉ ßúJ|„Ó'.2<¥$X}OMA&“¹cMEÁ¶mb±±XŒ3gÎðꫯâñx¨¯¯§©©‰òêj¾úë¿¶ªçÉ2múÎ19äÂ¡× ËKÀ ‘ULD6/é+!‰ì9B^®Õ4SŒÙÄ*ÔVРÂÀ@?ãÃÃ8ç§ãÚ¶Í¥K—n+£££ŒŽŽòÎ;ïPRR®]»¨kjdÿ /|FÊêq¥?f¨/Š*z™³ ©o¬Å¼êe¸g€T­Moÿ4õëIG‡¸ruœêý{°âSXBÇëvqåbñƒ7Û„&§cŒŒÎQÿµmTW–"J¼\;þ!“7Ž¸ì¢¼±ƒ¶öT3NE…—Ó£“Hî:TIÂYàÅãÒI~†-*.+bë¶F T‰ÊÚ~yaŒL&ÍLÿ 
³j˜¶]Û yUºvnæô'ïâÄŽ~x„«×uŠ;vÛZF£Q:Ä¡C‡…BlÛ¶–ÖVûÊóy¶r ;æYÆä.ɯ«]r% ^gìšÇÝ‚ÓY´¾A‰m3:zŠÙ™i¼Ó+ïݦR)¿ù&o¼öÚ]/éîîî;^Çã?~œ3gÎðƒü€'Ÿ{–_ù½ßý¼ÝeìÂq~ôïÇnÉè—¨ìÚǶ.zZ}~’¤á/ !â1._¸Š¯æ1Š<`â&\RÁ™cW‰Íu!I:%µõlÞÑf§PN梳 LÐÐÐÀS/·#«úçÒÈÝ/òþë?¿ÃH=ztÑ¿w»ÝÌÌÌàv»Õmô$„ ÷|“ƒ.üþ6åór,-r…T:Š«ï$ò̦$°æ'p«–@V$+6a·¢”#<…+vA˜¦ÁÄÄqff4¼ǪÜKN§“ׇ{~N”mÛ¸Ýî;Jñý~?.—‹ÊÊJÂá0–i-’´)á*ª¥¦HæÚµk¨VΊ*‚ÁBª+K¹|®‡Ážf{êª0æÎ2Ðöc¦.g÷Ä6¡¨*DÊ·0hÒ†„»`¾‚GÖ†0ru«¸ÝÞl%$¡h*Âþü†êô£Î¯¹¬* ,la“JesNœóCc´‚n¿ã®·’Çí&/ L<Ïkét:ñù| áTÃ0:‡® $FWFØ-/@…Œ]¹|A’éN`"IRvìô*Öä^Š2v5Ç]Ó^÷ $ù”±±A„(FWü;¸œN>]Û}¿··—W_}•‹/¢ª*^¯MÓ¨««£µµ•íÛ·SߨÈL*ùB€Áš&¶x¿~óoáJÜúç÷þ°Ì4ÓãÓD2gx}ô|VYloQ†i¬àrûQ$;ž‘Äëosê¿ç˜­RÚÐÆ®ÏP[ZpÏÏikkã[Ÿ§¸¸ø¶× 8}ú4™LMÓ(**"sðàA¶mÛÆSO=ÅõÁ¦W±B ÷|/±~@ ã@‰ŸÄ9‰2ð þî÷©‰ ŽFq%“èé4ŽL!I¤2ºÎœ×ËDaãÛ˜lÛ\¶¹¸¤åëébY&‘È)¢Ñ(B”¯Šþ ¨¸˜G¶uẘüä'? àp8¨««£««‹íÛ·³uëVÚÛÛ¹64H_äÎÙ*²î¥¶¥šw/œÅV')ß±§®®«E|pˆs§çÐÃuT»† Y9Û¿òmiφe,#…%ë84™ÉBÓÐUA*‘ž×ƒX,ý¥~çÝ9£;/§CCXi+û?͹)’Ówÿ¼ÍÛ¶²µ£3{ÿ,ÂÚÞ(n·{!”³k×.:;;Ù¹s'ÉTŠÓׯ®XƒP‰SC@òéôúÏÊ4M›©iÕí;)?²,Œ„4A&«‰K.EíIãq7lPr†ÑÑl»IZ­qÝYVî:•SÓ餽½ÊÊJºººhiiaÛ¶mÔ××ãr¹’ÄûýBŸ¦iß¶mÑ“Ï3‹Š¢ã+ P¸ó_¢sþ-ƒŒ)ãR’\DBº%œ`âå±—~Ý©9Æ®ç½|‡cSöâ£8îÁjÜPÏ®Iaa!@]×Ù²e {öìá‰'ž ³³sÁCîF˜«ãA]?wñë*~'Šâ¸Ïƒi!>Âùé?¼t˜úËÝø§§Q-kQ?IK$ ‘ 85Eåà æ¹3D޼AoÛVFyQ³%Ô°  Äblì4‘È(¶]F6§UÔ'{b>s–””ÐÑÑÁþýûÙ²e ;wî\ЗɯҼ‡x'2Q©jlÁzóÇ\ V²¹®E’ðVR¤OsáB û¾C ””S_íãò‘(ñìÆ#f8óñI|Íñ榛 ƒ/@I‘‹«'OQ_¤“ê;ÍÐð~^èXF—m&F‰LÔ}A_Y!TY;õŽ~‚cS9ÝŸœcx*Ií}.suu5?ü0]]]tuu±yófÀB•ÜÈØX¶Ÿ ËOøK$˜þ€é5š· át®ï䨙…3g¼ φ¬0ðȲ¨Ñ=I±®,2B0x9Êè•JÎ26Öe…}Õ «°Å¢^Ê )++ãþè…B´µµ‡ï¨È1–‘B•TÂJ%TSHMC%Ÿœ9ÃÕ¦Êü*WN}Ä´ZÁ®®šÛ×8=Éé·_'ám`óÖf¼¡"¼n¦eÝ7ðnlläå—_fÛ¶mìß¿ŸêêjÇ:´ÒÈ^Ø‚Þó½Œ÷ªøý›QU×}=GÀ¸þ J¦³'(œG]¤bËVT„$# dܸ^%!ÐM“Êá!Š£†{Îså¡§ìwp”íX2OÖ²,"‘ÓD"CXV  ¬žþ{Þ Þ~¾óïð•¯|…ÊÊJÜn÷+¾åÿÖÖSßVÉ”\OE؃h?Õ-uôÎDhh¬Î:’®0{žÿ:Gá—ÿýo±x‹kh­(C‘@v…(.v¢º‹ØñäS¼÷öÞþÛ> ËÊ ÖTK© A ¨ëfŸÝ¦HxôM›;9rêû}tV8). #ª'Lq‘çf®‘楤´ð×tòÈ£×9ùÉ{ ^ R\죬"°OöedçÎüà? 
¡¡`0ˆ®ëw°³²$Á #½Á”¸Å4· áõfÖõ]5=­ÐÝí$•ꥸx‰íˆ¸0‘çÉJÚUÛ ^gôjšOý†aJÆÆú1Í0àXõït¯ên·›W^y完ʗÍå%¸ûåäò…ðzt$I!TF‘ PTMEè#>zë(_`ëçI¾õ Žþìï°m ÕégÓþ.\·/óM¤dÝGqY G>>ƵO"Ë2Î@-»ÙŒã>«6öîÝKgg'Á`ð®,¯l‹ÿl¢ëý:~ç}ƒ=zÎÿwÿˆ–îó¸nIüµ•©`ˆ‰âr&JÊIúBUG²L”tG|š’þ‚“1üÓÓH€nÔöõ˜úÎÇF:ð/PëŸåÁ˜Á,SrŠHdp”h«ª;ÙÈÁí̇$I”••QVVvßןâ«ãùßþC,IÇçũt]¿Mëã&ÿü'JkÚyú•ZR©  ÝåÆéÐAÃCÏR¼YÁçRÈøŠy虯STF•m>üáüE>´‚ré×PœÙü+ÕÍ–'^¢ÅÐÑd•mϽDÓÞ$²îÆí€p]¯ßKEøEêÓú<ûXÖ¶‡«,¼~"3G°¦“›'ès"¦{8OÀý¥ñ©ÏçcÏž=ŸcÃæêeT=‰ÓÇñð&--ë”ÌÌ(œ=ëfvvpø*£c›—qmA&‹?ÔE z˜¬cb™‚¡K±,()¨ß‰®ccŸ2660Ï”¬>(á 4[ P"É:ö¤6e߯]A~þLÅ‹Ç-óâo–â‡På_ûçÿ‚TFà †pj2O|ëW™›cZ6ºËKAI’xä…W°•‚l2ž¤Rÿð“”¶ï •6$g—K¿oªWQÂáðçx»òŠÓ°é»0ÌÔ ‹@ EqÞ×sä¹ 2—ÿ‘æwÿmÝÝ(ó,‰©¨Œ•Wq­£‹HË.Òå(¾„·d`HfyjþØUœý§(½p„Æî3„¦²Ù éiº>x ÇÜ$W^è ÏÜ—× `šæ|øfÛ.™gW]–º¯×¸)î÷NþE¶YsºÑœî;̽¬à×A˜I†/žà£OǨïlC‰ry¶<Ü€*ƒî |æy>4ç &ĉ/xóCý¬Ý’^|Ž[ÙMþùǤ“Óty›qÂ46–¹zË_C]UhYB-7ÏÔòèžD‚Ù‘Cxxƒææ¼Þ4ëYfg>ýÔÃÜ\”ÈòòÚ4ižY<”ÃÍÙ+J/Æ»–Äã©Áá(Z×s°lÛbtô‘È0¶]¼êžÞ­ÚJU‘èn/ºûîGÓí¿Ùا°ø&Hõ ¹µùµ¢»ðÞÉ xü…w+?ÈJ6Î^©äqÛôb¢_Âç«%;(òËw¨”4v÷Oi|ë/è8~áÒ@OK;g™DÇ3xjÑœ¾;øŠJÚŠÛ0+wqiÓÓD?ü+ZN¦öúuTÛÆ•N³ùä1lýé}ɇT¾íKßä–e‰\"šO×sG`Eæ‹=áW]´îÚ©ždx$‚-;yäk/±u[å=C¹÷+»Ÿ>À™ÓE7òÔTº—Uï–Có$'>ö>å "x<ƺ%ñ¸ÌéÓ>fg(.îA’V D&²°,Ë‹„rdY’°VÀ°F£\?U "Ë1’÷L~\û’HL‰Ä¢r©·® '‚­‘e9Û{™%•H1t­;SD2yî~oUü£çÙúÆŸÑqáÂmW«¸æ¦`f„™ÙS™/jŒUb›2YØHêðßÒzñ<ªe¡'Ž ~—žG~ËõåºKš¦ÂÔT/BÔä (YПå L–Aþ¶?ñÜ2e |ö¦‘ Öv²¯¶se€×2Mô–DœDä-üÚëÔ×Oàr­ïD×x\æ“OÂÌÌôSVviEõ €¼h°w¥¨èd|D PL*µþ/¬tzpå(™¿œäe,é܈ÀdE¨?ªR†ê~ô¾¡Ï â}‹ŠÁÁ…ðÍ­U 5ýýGÞæ”»…éößÀv¿Øì‚d`š£…ÍÄßú÷ç¤áÚUX'ß×ç$°+:©Âv¢á0EÑ(à™BIŰ,ãKUçäàªÞwŽIÈç#ät‘ÛÐ6¨@’‚ìÔ¨š†eäeí‰eš8ç¥zwÚ{Œ‰&ã#c¹j(”/Ô< ;ÍøðLïJ}Áûôþ¶•ÙyfCa':†ín¦,T¾,‰VZlÛBQ ˜(²ÌÖŽæææÖ¢””Ü_µ˜¦ª£tòtäCÎ^>N[ÓW¨OÍ¢Üâ€ôzK‰+!¼Ü,–$ QøWûd+ÃŽ#oâN$n'’Ìtû¯cÊ2£ÑQJË:P kâ0.º³ÆÇ²pÍŒ`ØÄ&²‘•ûk.,$üÈ#äeiDÓ5 ÓÌ/ÄtŽ…eã¸c¢¨ v" «ˆÓwùǤ‚OSè/_¨$Ö ×»¿Gªâw(ö=z_]B„•¤çìß 略¬qÑ !Ñë¯rüøÃÓüû¾†¾¦]Û–ª<ødcǃÇãÙðJ¦*JVÑ–,D¶´:4=Õ‹!I(Vf˜Ø’DR÷Ò\xoŠçD‚‡¹¸ÿ_!Y¶ô.îdrœˆwþ '$‰~_˜K×#‹[pIê¿]õ0YTqÓÎØ6ŽÄØkû"±,ë¾I^–VºN2LÖ0±m$)k;ï LTE]ã¡Ar®Ÿ¹Ä4BÒpTSàò,¼—š`61\žJ Ü^â1ù~ü2BnÏÏ´ç¸6ð)Š£dÝ”×JYÊMSóÚ±D’ ‹‰¬²åè…%>s~%!ÄÖj#}¯Ñ?|Š´a¡9Ë©¨y–ê’=t?ñ¯v†íŸ¡ ²¥ÄBúS"›v‹&ùô\9MÍ_%ü*Y³Hvê³§XÛ‰ö–e¢8\ùƒŸâv8‰MO­¢ç}ÝP’¤¶Ðºõú ¿Û‡Q>C}€eËó2 
ÝÝ„æ(÷¬½¶"Ë ÑÛHSÕ5œÙl3tí'\ºþ!’âA&†å¡¾ã·i(«`¢ÿg|zå(²êD&:µßÂ1w‰Èä5¼j ±©6ü®V”[<>Ûœåâ¥7ÞM4ºdFÖZÕT¼,Ø“e$¤N€µ˜›¸Œ)+XŠ‚fšÙiÀf ÃH29øKÎt¿KiíÓÔ…Jˆö¿ÉÙO„kÏ¿$àðóó²"âÛc÷é ÜHˆíéáiÀÜò,‰Ò6.×gÎX‚©È*‡{oê”,‘qùA^۠ضlT5Ϙä0q:±&V‘11U~ò½:Ò[Gøƒoãš?Úé9÷çõ_ç¾:}_%¹vFæÝ(ÆÛ>ÅCm©;žéñó¯ñ³Ÿ†9ø¯»ùÝg¦Q×H±¥mÛ(’´ë¸80ÑT„«Œ‰EtðM>Ž_¸ÊiƧF T€¿JïÕ7q—¾ÂÖÖG‘Í(çOý }ýP]ò"ÓÑ0 ìÞñrŠèèql‡âàcø{ŽSÑð2u¥-·!LÆþ‰èœJG×KÌuw¯/m7)´¼,0™¯Ð°m›\\ÕôìUF{΄ÇšßwIªG.Ð×9BÿØq¤‚VP «ø‚‘Ãÿ…Øô>·IofkÅV4vŸ:Lhj hîéAâ N51¬€“Â…ÍLä—Df/°¾g €¡j$}¥ym÷1±,EÉ3޹ ]Ƕ¬U £Ú–Äb£²lûö×§Çub3*È6p†`ÁM2`fB'6­bKašÏ&zÝDZ”ÉUUiʽ7fÌi}?@Ú‘¦­&ÅZ£lËB‘e”{3&"g ‡3„?P¿€mkŽÙh©T”™´ ©´‡æµ„p¨žáþ+¤,·¯Æ?åìÙ¿¦¨¨Pp~o)"më'ËÚgæÆÒ3ç¹>x‰âªoPäq_gl‰$‘ïc²ÔÀ$‡»';¼MÔný}ìá }ê8žDÙ¶©î¡O%‘˜Es¡)* ¡èatÙ"a$pø7ÑÜú×OñZ(D¦³ƒ}çÏS8™'M==Èâ?qB˜L·ý¸J0Rýô^9DhNà‰Çhì©`˜Œ»äµÍÖY–ªæI.ˆ¢©ÛÎ:9ìlþe1?{;HF€dK¨)^xeˆíõ}Ç‹øþ?Í¿'$”‚ Ï%}Ñϱ Ô)¯LQ¼#¾p*.“ÝÏŒâð þã©Ðš¼‡4E¹wމ®i+W‰Œ/¼“–ÖçÑæ)Ûˆ27ü)@Ø&BH(²:o%EòSX¶DIÃ+l+hb,rŽÑþ×¹~U¢¶ùש /~ˆ…g¸ÿ Ʀ'ñ]ãÊ•«ŒO1'JÿP5í mtעض=‘æ›C-Ù ŸÐËà^õ”’ 40ššB*ÆÆðŒ~Ц,#1¬$„Ä Ê*H2%5/®x’øl?×' ) »ÏœY'W{€ÿŸ½÷ ’+Kï;×ßô¶*+Ë( ®=Lws†cz†QCJ"G†¢(i¹±!÷°æAzÙØXmÄúXQ!­´Zy®Ä!9äÇÁ´o˜FÃWå½IonÞ{Ï>T5h Ñ0™•eî?@òÞ<ç|ßùþÿâCá’Ýÿ—©nSÍN0\X#¹¼|÷-fc­”‰`lóÞ7®íxÄd‹@× $±žßEõòè‡ þ­e¢m§ªrsAe`­úùÁµá?0Ïßü¥Uä¼÷/{øÁcŒô,qåÝ$k¾ÿàoÌáw5Þ{7‚# ž{)Cï÷£œúÊ<¯)Þ¦‘AKg»¼=½nÍÆÔ»dò¡f²ª( ¶ç¼Ma(…Â2®X/%.æÑÌvtÅ%ŸÆ=Âç~›žÿ[´ø\ff.`¹ ñŽ·¾PéìÊ¥Ò–U¢V+Rµ 8îöNÜs]w½ƒç1©/1Ù(­Ø ó]ýwݾ‰Õö]ü!QŸ‰»ÉÒêV5ÃÊüT¥8±@+¥ÌÇ\¿öGk¡è µÐ>~Ø:ÄÛG_g-½ûÙ}ccûÑï¹úošÝ<ßuŠý+óø6ú¦Í«{£Æû·}C>×qÐ5/Gk+À4 4$Ü&oØ5‰rI¡X\ÿS*ËÔìõ“¾º`p{EâðËÒI›TO‘ƒ%¦Ç‚dk‚Hk™Êb€üY’Ñy™Ã¯.óÒþ †o}Δ/à`î0\«V1Mó®ü™}L>8e›Yzp/m{™ÿ#ÆÜ%kš¹Õ%:{¾„O*s{ê»ÌUÒí‡QÝ5Š–C Ù‚ªø05ÁâÜi¢Á0©XÛú…­H÷~“tï'Op¸Sõeú÷¼Œ®loµºNLäÑe+MQ¶¨ÇDEÓüH€èd±ûùÐÛDòy$ wô2]û‰)Œ]ýçÜ‘„€îþ_¦=ž@”WqKWøèüEdIE_Ë—IPu޾õ]ŸÕ76ü.:E´Ò ]S?o??Óš$ÓqU nûý®s·ÿ‚‡æÂÐutM[ï"jšM{¡——ùí¿xOòkÞ <¡Ôj2¶+ðûÜuR. 
|>‡ª¥S±áÕ_™†x‚/†øýãHÁ*_ýæ4ÇÚvî¾Y• þxâ-õHŸX|ŽËÖÊé’‰§žÇúôð0I#ž~;FV"ôïÿkègY]¾€|tô}‹Þî—tíùöÄÛ,ͽ’F4ýº{_GסgïW˜\'“&M}æeíK%%¥ênéɲ…ªm®‚û$éÈëÃPÇ«_QÐg –ÜGZó û#Y"³çu&{¾ÇÈÇ!ÝS“=û'8¿ôßÑø¾j”¾‘¿}?¯‹½ƒ¿þŽâzh?ƒ#ûúùý‰ÎþÏ{ …¶½z“WE©­ÐÒ™ÜÔ5uœõ$1/ùµÎUÅÚ‚ÄD’ƒÄ}wÿn'qóù?GÏÄÂù<®\¦ùW\ýB'uœO×%*Z„H4ò Ém}•›¯ý}‚ç>øÉ]ÏI×ôô}¿7:ø·_úuÜà¾:’’â­Ð»¿Ý·yÄÄuÖû/x9Z[@€Bqë–(Ä»K¼¸¿ÆÚF›¶„X ðÓ IDATþ “N-“4~ðÝ.nU*¼þÆ*zY£\„#6šêdn\Š0¶èks[“¹u3@¹¤“s$ìÛA>þØ%Ñ^¢3á°•§p]$¦a~1‘eTeÝcâa3.²"¡X–#Iº{6÷ᮃ¦È(1©¯×DUׇ‰mq¸f’ìž/r{ïŸpøÒ‡h¶Íó?£dª\­Š;õØ-ãÔ)n½®`kGÞý3¢÷””Q&_ÆM¾J}­)Êñ6›Á£{H÷n®•,\EV¼­-„p ÈÌêjÓžß5P ý”Q" :òˆ°@Òküê·&ùÃ?Nòƒo·ƒê²ÿÔ,¿ôæŠ /ŸXaæ{ þ¿Û…¬Úöexók+DC¯œXãGÅx»¥B×W2wsM²‹~þìÛ¹9«Q‘]ò×büË›^úÊ,î3øëùª•®”¨ëzÙµê=9ŸML6\Ñ®ë òk<))Mæ<–¢c_Çæ[^®@W5Ïâ«71Ñ4œbe뿨$a·¼Èù¯üÿ˜C]BÂ…¯þìû˜Å SÏýyÖöþJpd呚Ë(® [*¦IÙ绘Ô4K‡2~èˆP/õ0ãdyD[•Áç{hëŽnúò9Žƒêy·–ÇÄçñíõü¹ÍÞÕæW~ó¨íQ1¿ö7o#Ôõö t¿ø×‹Ø ª >W–Xã·öfp\‰uo @QABðæ_àmÁ½Íº£í~ûïÝzÈ]î¢Õ‰”ÌÏïG ¾J 6T×%«YºªÞ—ÏúðY9²Œ®ªXžÇ¤Á¤$O¬5Ïà±6Ú÷´7…ÇAÕ„j(U‘]—¼ßÀ(,¢®]Å¿p‘äć ^ÿˆÔÂÏ'æV Žçòþ.¤¿’\ùÉ’LWz¾‹Tw¢)Kç:ë0½äñ-${º¾ž·à8›NL$ 4ã!I hÆý÷©¢Š‡çpJ jõ!£dE`(®a4æ¾æ†pÔ7iø2’Ö[_roÕ05íó=&’$¡«ª7 iHÉб4é=é¦Ç®¡™ÞœzÃ4 \{ûx…Áêù—Þ°p”ÁÐõ+h¶ê8´./‘üÙ÷Yýè]fºöP '©éå@W–é(®â/åi¹MËò¾R õžŠ¤l$ÎLJ_åú‰¿‰Ôò:¢-èe9O¢­ÄÐó¤º[š·pŽ‹®ªžÇd A×5TI±×.ÏFJ†°å¯“î{Í?XªYøýþûdè3‰‰¦kØå²·3 !%¢É<ƒM&%µZ Ý+u¬;|†±}<&ŸXÿ4ÅÁ¿Ì…@ŠBäÿfðêD×ÖY’««$7b÷ȇB¸²L(ŸGyHitMÕ™ëìåÚ¡SLý˸ɗA}ö¼Y©K<ÚFkWks·ã`¨šGL¶ ÝÀ¯ëÔªU Ÿé-ȳxJæ‡qÔ¯‘î{#8ØçTJeÉûåø3MM×qò9owê E)ŒÊá›tÓC(v­†iÞÆ4Àc‚p?³Cú„&Ô‰œ˜1ªû¾ÉÇñ=,îùº¯þ„½·®(Qï©2’ànÏ}Ê ¨i«É6F‡25ò&…®“ˆÈ{~ã¾®\#^aàh m½Í—×±Ñ Ÿ ÝR†ŸLÀ`µ\&øê1GJææFpµ¯’îÿ z`°AϸVè§J»ÕG)V·ÖX‹O’À²FqÜÕÝ°ÕØÎ4PžÁ£ýtìkoº2“% Éq1MϪ¨;UU$>q'7¨$•<®s¥þd'îgþÅ7™íïfôÖ;´OMÒ²0Gba@±ˆâ8wûøI¢ªëd£QVÒ,µµ3ÝÓO©ãrt¡<û; ©H d3p$JǾî-AjV =ä]~[ ±H„ù©É¦óÛ®p]˜™9Œd~™ŽÁ_Dõ 5ìYŽe¡ÊÒwÐgjLŸi6ÜêéD7 8ö.I²•:1ý½´tlE&²Ã‹Ã6„ô)²‚ëØÐ bâ ú|¡D£ä4¼Œì¼€[³˜˜™áΛ¸•*r6‹–É ™Z,Ž!‡Ãhƒƒø zt q7Áµ>ï'0PÔˆ$¶¨Yºá…B·¡b£2Gñšß=Á• 15}5ðeº†~Inèóªå AŸÿ=z„ÇÄ\§N „/`àÛë…šǶ‘eÉ‹7‚˜( šª4´û«$A¬uó?C==ðê«!Å"N©„$ÉÈÁÒ=s.v $ÀµíûCyØ0MU–±- Å÷lÉýSS]”Ë;¿@ Tò“ÉtMí¥{ÿë  7ü™V©DKà ˆ‰¦ª ŽãxŒs‡Â¶m¯ëkƒ È2†¦m‹&kONˆ$¤`9ÜÕ{,KÒzYµ—£µå`&ÃÀªT0ž’˜t­äsÂÒίP:ÃÄÒƒ 
îÛ”gÖ*­©ŒšÏ$&’,£lt¯ôˆÉ΄cÛ¨^©ccˆ‰¢`ªy«æ-Æ…ë¸È¬wùõ°õ ƒ` ÀråYšJ¡aÌPÿ®Y7!$6'´/®e ‡øÙgJ”¼añÙµºgìÌCh;˜šî ðkWÁ0MÖÊ¥æ’Ïš„ëJ»hÝ]ÔMJù¨Õ,Å#ö[Ñp„ÙɉgüÁ.Ê!ÚDUáÖjh²‚ÿ!-õQŒÓ§ÔjͳøòkEVf—pœÝauÖjÁ°ŸÎþîMº´j„ ÝëZÙ ¦Í4íù¥¬`úÖ嬵Kä§Š?¢Ów¤ Ýlü™®Yšæõ0Ùª‡BЬÖô>•R¿Ï÷нy¤Çħ딪ÍQj¹•“W¨Z0Í–¿IÅâ ‹‹ˆ§íM#&¶eaÆb^9]ƒà7}8M"öŬ`âÊ,•ŒÓìD’v¶bÎçgX\ÇXôìïBß„|T§fã3 ïÒÛ¢ð™&º¢b•+˜Cž!\‹šµ›F·tCéLʹmÁГI’0}&öÚÚæ+™Õ“WqJi"‘¾¿Õ… kkT*~`óÚ˜[Õ*†×äqÄÄïG4!ùµ”…‰+3T2ÂáAdygç@ärS¬®ÎS©D0ü›×­Úµ,LÝðˆý…aD²ù|Cˆ‰pJ¬NýɺɦÆ@š„rYfv.Ìð‘4ÁÔWž‘—ªÅ-{ö>T~ÔÏÛXÛÚ\I~µÂäÕìbŠpxw’ùùóärU$) LoÚ³mËÂïÍÉiL]Gb}˜Ø&åñ”s2w.ß¡’ ‰ìtR"Èå&™›»A±¨ D؈2gÇD¾«ë¢+2±èÃ'‚?Òé÷ùp7Ñ_­2yu‘Z¡•pxçgBçóËÌÍ­“!Ò›+´›ï÷yĤQeEQ°7A†„€rNeìÒ¥5HäÀŽ&%B2™;ÌÎ^§PÐ"ºéï`U+žÇq‹#‹!;ëy&õ;{.ó·ß"ª}‡ááOJ,ËåÂ…0™Ì ©¶Huð²9i¬öXÄ$àóƒ³>ïc3HÉÔÕj…¡Ð¾/0¹Ü2óóÉç«ѶéÏwj6Š$y¥à &&†¦o 1©äÆ.Ý¢´¦‹B–wî¾ !Èfo37w‹bÑBMyZ¥Šßïû­ Ó4ÑU•J©^Õq‚…±·ˆkßæàÁÙOJj5‡ "¬¬,ÐÖv½nVT1“!‹}¶î|Ôÿ×4 Y¢áŠ5¿RaâÊV!N(´oÇ'ê­“’KäóB¤hF|ÒªVÑuÝKÜk E!èóQ«VúœRVÚð”hÄãGv¼§äRR*™¬·Í—šñ"(HëÆ›‡¦Ãu]nݺÅû|@åžÞ%š¦…(f³uØr›ùÑ3DµÿÄ¡C“†³£×Ô¶m.\ˆ²°°J*uY®ƒÂq]ÜZd2ù™aPùó,>]ש5°2'·Rbâê,v1I(Ô· ª–YX¸D¡PEˆ–ÏÛ‚†¿éóˆI!IŸ«Ò8bRÌÂøÇ“T²~âñ£ÈòÎ{$„ —gnn”RÉhVÒa­Z]oNèy› Çqç?ø?øáYXX¸O§I’D"‘¤Z,>“ç_¸e–ïü˜¸ñ{9ŸG47’,©–ËÞbì²)†¢ ½ÅØd‹EÞÿà}Eah#ѵ§§‡¾¾Gw —$‰tK Ù¥åMéËåáAXÕ ²ëÒÚòù³ï>×c"IÁ@j¹‚îu9ܨ•+DB!˜l1BÁ s óž×q‡ Z© iª—øºÙU¹Ì¹s瘟_àµ×^£³³“?üH$‚ù9w“$Ittt0:3cÛ¨šæ-èfËM¡@,Â÷ÝÆ?×c"I±Hسøv „À®V ynèÍ!&’D0À±¬­•häáél©L0èyK6uÍ«UÎ;ÇÔô4Ç_}•¾¾>ü~?¯¼ò ###õ~ŸˆßO)Ÿ÷´ Z5_ %‘|,ã챊º#Á Ókk[Ââ«”jß½sÍ…K¥´€«·àÓԺ׼áP..òéæÃ¦¿ UÞ^V“»f =ë}“ ž§Õìýw+K|Ê=ûï:eJå,@ª!ç©å(Wïï³"iü†ÛÍeµ«±„w°7KçW*œ9s†Û·osòäIúûûïöÀPŸ°/I*Ù™iBÑ(’×\rÓ`•+È® ¼RÿÇ åøA\ÛF¸Iibÿ 7Ë­Ëÿ”jèe }mƒ¸ö*7.þ¯X-ßàhÿ+÷)ÜÇ¿°mVfÎ Eƹ÷#jå1.¼ûwÔìøº<ðWHG“ÛJ±J€kÙøý^EÁfÁïó¡+ ¶m£7¹Shv鮜eø…¿K[hcðpÈÌýˆó·.ðÜKÿ -~ý)δ Všbnižö®î#ìBT™¹ó{|tým dã³%]_cßëÊö¹ „ëâÖlB^(tSP.—9{ö,·nÝâä©S =S·ê¶TŠÑ©I,Ëò0n"JÙ,ñpø±Â8í1 øýHŽƒ+\dš«Dªå%*fõ¥X-ÍS±»wì Žc#$ E1QõþŸ¹6‚õŸ)Š‚]™åÎÍïàëòGî»@„“§X…=þ*íñ¶»W¼/ØvÖŽ‹©iù™ŠªšH¢ÆêüÛÜ'Ò2HØþùÅ-¬ê2fô%Žþ:êÆ?Ëz]Ù^V«pv­öØ ÖóyJÞ~ûmn޼ɉ'9pà™É`Àï'“Íd0ÚÚp>$.œã¦ ÙWå“ãíÔd.‹ãôdy¾·ÆÓ~ZYA2»2t/\[bb4Èè5Åfp$Çž´MÓú®K­X"õåÜOMLü>¦¦a[ֻΚÂÎÖÎscôÏÈó ©"ûúaSÇ*\åÚõï“)®u2CìëûEг?äÎìyÂ"€¦¸ìë:v×êsޤ 
uá÷G@’‘%e[ZLåRÃ0!¿ÂÁ±‹ÈÆ~‚d@’$IÞvÄ^rŠ/G«‘°,‹wÞ}—kׯóêñã:t¨nŸÝžjcáæ œdòéZÔ?ÒðSøÉvP=:Ë=U|ÄÄ®hüè÷»¨ýBc½Ù§:÷NUá;ÿºƒøKK¼ö\ù>r#\‰K?nç?~/J¤ÕÂ*hœy/Â_ûíIÛœ¦ÊY~-ƒ©ªÆylbë-«å2fÓ-mA9w‹ÙÙ÷î å¬Q´@Ô¸uí_“e˜Á‘_C«Mqóú¿çÆÍ Ç~‰™Ûßa©êcd䯣»‹LL~ÀZn…ŽÔ0Ñè^R_¦«ý¹{\Ñ×.ã¸eÆGÿ)w Io¡¥í8Ým}(Û,ǤZ*“ ‡=Í·É“H(ÌÄÊÖè… „ÅÒüP¢ÕÈ,Ýmw¸2õ]®Œ]dïà¯ÐO±8ñ®}ü Fþkââ:·FB¢ÿoÐÛš&³pŽÅµ[´Ä^' ^ôï}ƒØ½áàØe¬üG\ûh‚RÕ" gÏ >¶G®–+ºá ¿l Çáý÷ßçÊ•+¼òòË9|¸®Ÿß–J™š¢ÍI4'WH¸07`jAÅ¡£·H{ÒA–Ö– ~¦æu\Yê*Ò•²¿˜àƒ Zä©´ÅP‹sWvŠK>¾ÿã('çùÍ?¿‚“ññ³·C(Ž´ÞÇTjš²¡˜É0ÔÕýÈ>3OMLâ‘(«+hvÒ—ÃÚÒy®—Æîû·Üê$©¨&YXËÑ}ä+´%z€Ò+ïqmþ û‹H²†e•(×&9xè94°n#¦¿Ÿªjm5††¢(´´%¿ò]ú§8Òï°/½oûX}BàT«„SmžöÛdƒA„m?ý ±zZ£…q¦Æ¾Íü=¤Úµ×¨¸1Àbméjx˜ž®—ñk2¦úƧÿ72ùYâÀÎS-Îa9¤{…´¨ªJÉôƒä¶ÞWJ+„‹ª%Q4 %ÐOk`•ÙñÿÀjncGƒ€¦n›},‹±o0>üðC.]ºÄ‹/¼À‘#Gêþù²,Ó–Lrkn–p<Þæÿ°ïü4J0b#Ù ¹ªÍW}’/«òÑ÷:øßŽÙÈ®BÑvùâ×gÎú¹>éÃjõ3=«ÓŸ,ß é¬M™,:|q¤ÈA,Éåå_X$ØäÈq­REâ±z—<1‰„BÔæfq]·ÉÖ‚Jzï7yáÀ›÷xL–9ú¿¥ Øv [Șf`M Èc;·±\…ξ_¥dÿ!7ÿc7tÂñƒìÙ÷Ë$±Äq^ÇZñMѤGì=pþüy>üðCŽ9‘#GFÚÓiÆgg(‹øë^ú-qñÇmü£ñ»ù®#sí¶Á¡_€ò|Ÿü …¾/Nð[¿œA.™ü?ÿç^~v.ÆÉC‹L]‹!¥3üý¿;CH–¹|>‰ýûVèþ^„7~i–_x®|7¹E|ÆÇŸ|§ )cR,)„ºs|ë[3ìMºMÛϵÅEÒñ8¾'L4~¢PŽä8¸ŽÓt7¦$©(Šv7Œ"¹?/VEXVyÃ5íP³²ÈrE–"ÆÀÈßb_-Gvõ27o|‡[·£„‡†>Ùã™c(U DL|ÅDU „kÓ\?ÙÂuQ‘(A”‹óHþ I$4=ˆ„»z»8Žƒp‚AO~ .ðî»ï222±cÇšÏè3MÚIfV×@L-mž;’C¿'Çdu|ýܬ­èÌW_8Tħj•Á¾? 
¯¹¤÷eqþ4Ê?ù]…‘y ìë±°–×=ù²Â ­¶-a•àÀá¾öj‘â\˜ÿåêæÇïäè|3{÷=6õžÝuéîìzb‚ùØ;ðû1»VÛÒ]óÌ@;‰P€¥é·h ¼dM3¿|‡DË—ðIy¦GŸ¢ÒKWÇp73@Þ©"Q¨Q,LS²ºñæ:Ý.…å÷¸tåtô‹öd'ù•÷XÌ—iïè»ëµÙ¨Uª˜†¦ªžlb‘Ë¥ÈHÑ–nߺÆüâ R‘KÓç¨*ib¡4…ÕËÜ¿L{ïDq"áîາlâ¸³ä ‹è¡Ô]ÃÁ±V˜ý÷,‹}è“SçÐB„ÍíӨ̵mÖ›z¨§½ärùòeÞ~ûm†÷óâ‹/¢7x®”$I´·µ1³¼DͲê>Ǫc†¯¾¹ŒoCÕVó·N·RWBªºÑ×HUX¶ŒåÂѯOÚ“çÂGA®¼ÓÂ~ÐÊ—¿9Åkýpüñ ƒC%ü>OžžV‡ÙEËi1É,,bªêS%Š?ö ¥( >Ólz¬nÆqUý›L÷%Š‚lt²oð×½ý3._¸†@B¼@ß(ŠŸ@ •ÙÉŸ²4ûc$IBÑ[ÙÛû2¦¡-5ÈÉ?FS †ö¾²^Ê(É„Z^ »}ŠÉÛ¿ÇÄ- IÖié~“½ûëÜ–º±Öc©PðâãÍ$&á03ãÍmT(øüÉ,.iãß‘d’_¦¯T`zô?2…Œ¤˜ô “t$‚¨¶aJ?åæÇÿ $IRiéù-±vjb¿xŸ+W¾Íþ¿Nk$Š(z ]=oP¼ù§\úð<®Ð| ý áºö’p$¥aÌZ¥Šß4~iî&8ŽÃ•«W9÷Ö[  ñÊ˯`l’G1‰…Y[\¤µ³sÓ¾s8T#¤HÌL鸽6X 3sáD™*qûfgŽ_ÉP̘ü»ßÝÃû„9¶'Ôœ®?¢]%’zœñ “ƒEœ’F¶ òÛ¨Mò–Ø¥C}ý(OÑ»é‰LçD´¹ °’¡äo#´è}ž Y3xèïãmÈ’L,u‚ÃѪV !if}ÃKÐÒõ&á–±jUd4=Šaø‘ þ¯è(PŒÚ=ý-ÁÞáß$Ý»‚íØÈЉÏG©kHK ¨+# jú#µr™H[ÚÓ†MB(B8vSguDZŽóâ ûñ#÷–B´ý ¼9JЧ!K­ôíÿ-:ÊkØ®ƒª†ð™áõœ-³“áC¿C¹’Åq]dŇ鋡Ê2FË)^>Ù#dÁÈÏù$n9ÉÑèA*Õâ:11â˜z=+r,4}‘޾8¾`c˜I¹P Ý#öu%%ׯ_ç·ßfhpãÇo)õ$Ø=]]¬]¿¶©‘€pg‘ãÏ9ÿ£4f1 “¦T^ýj†°¬pæíV.-»=’G«êÌ =TÁo:ÄðÁ™$éÐ"Gú«wIG¼£À+ËüôûiòóyJÓAÆÝ*¿q8Ñ„žŽ¹¥eüªFò)¹Â“h$ÂèÜìzžI“:Xú‚é‡1|¡Îû(¤fÄÐŒØÃ|x¾Œú#IÈZ”°ý R¤á4*éM ËtîƒîÁÞ†<Á¶m$×!äÍøhº¤45*É>BᛃɊŸpØÏïŸyÞe5@à!y’¤}†å)I¨zŒ kÀ·ª¡jóôŽÓ÷\?HBP«Tˆn¢e½“áº.·nÝâwÞa__'NœhŠ'*‹õùYž™¥­·§®¾²B-]½Ï+)+._]Æí´‘T‡oüú4ñ³1F'HªÍ¿9ʼnó(|á«‹Èg£Ü¹BR\ž;µÀñSk„L‰_|s·®ø˜_Ðqû~ÞhTñÙ|í7¦þ$µ´ù IDATɵ;~´P™oýæ/Zus º®LÍ "äG÷#¶C)“åðððSyKž˜˜ød!¶Dkí¦/ÒÙ'ÓháD¨1ÄIJÐÕëøÚDèšFÀç£Z®l~@;6Š:ÇÀáGúÑt·9µ²±¯É»5:ʹsçèîîæÄñãM ɲLÿ¾½|på Žm?sÅœ¤:|éWgøwÍ_ã+iêçº Zá _Ÿã 3`Ú |ý/zW¼üÕy^þêßm†-¾üY¾Ü=ƒ¥¥Aôàþhÿ#·°ºJ"¤5ùôÕvOd[ü~‚†U©xÒU'¨ê]ý0p¤q¤ V©ôù·EçÞŒx4JµXô¢>tYž£ÿP„£#%Ÿ{SÕ¼Vôõ %·nqúôiÚÛÛ9qâĦ†o†h8BXÓ™»³žÿåáAR²¸8ˆ-¿Ajïבô$ðŵ ===Ï”G÷DÄD–$‚ ÕRÙÛ­:@Qçè€#}„b¡†>«Z,F½Eo2b‘v¥‚ëºÞb<3)™§ÿPˆÁ£hzc“‰kå á@À#öψÑÑQNŸ>M[*ʼn'¶Ñ“$‰ýÃÃ˜ŠŠcÛÞ&} ËËýTÅWiÙó ´à±G{KVVi‰DH>Aûùg&&ŸX|µRÉc–ÏJJ”9º$õŠ5Ö=,\Q³‰z‰{MG0@Ù°À=<)é;dèùt³±eÒ±yÄþ™066Æé3gH&“œ>Ι³g‰Åbœ:ujËyo%Ibdÿ~‚ºNÙ µ°¶ÖCÑú%Zz¿A06ø¹¿Ÿ[\"=Ѱ¾ÏÂû%>>]£Z© êu¨,iç3Ô»t´yºú í#ÝœD:»\!ä÷cnŽ£»², G˜-ä Åž±BEaã8Ù]±v®›GQçé{®›ÁcýèÆæ4h¨UªèŠŠÏk¬öT¤dfv–³gÏ 9uò$ñ:\\1LÒñ£Ó³}{QvqÔ.Ÿo'_ý:‰î_!”èûÜß/e2h¶ÃÞ={êң鉗^Ó4"Á 
k¥"ð³¹âd„t‡é¿ÓHÊ"]ýQöŒnž÷¢R,Œ%š×ÔËÃ}ˆÇbL.-"\éj[ ŸBK—‹U¹´+ÖÍ/lz¢ýô õ4<§ä>bR.зpÇë­JJ8{æ º®sòäI’É­;I’$úúÈòd––I¤“»nÏ$I¢\IQÏnÿUÂ-ûÇb œÉ²·=]·vOÅ ±8óuè`Ù¾·‹xj÷¸Í=˜¾º¹y N„Uóâã[¡`¨Y5tóé½X¦_càè BH»F‚d6“_’_Ò‘n÷ˆýbiy™³gÏâº.¯½ö­­­[þ×›®usáÚU*?BÈ”Ê-K;¿=†ã ªVˆåµ“¤û¿J<ÝÿXg~unްnÐÕÙU·wy*b ‡Áqp癲Ô5]¯ûŒŸ²öªUdðú—l!øL“ iR)Ÿ‰˜Àº×±Ñ£ v3$!‡X$â-Æ`ee…3§OS*—yýµ×H§·OÇéÖ–:—[™š_A(=\¼þ;ì†|XI¸®KûÀAZ»‡ï?Yªí°o µŽ½ÍžŠUø|>šNµTBõ*=¶4ª…"AŸÓ4½ÅØ"P…x$ÊTfð·{ØŸRU–½þ%O€µµ5NŸ>M.Ÿçõ×^£»»{Û}‡þ¾>VΟÇñHïýUoSFd€ÅéºZZ‰ÇêÛÑù©ˆ‰¦ªDÃaò1ÙÒ( ´¥Ú<7ôC2gba¾áý\\[Þ5ë*)E­Ÿy[)ˆ†Âh^~Éc!“ÉpæÌ2™ 'O¢··w[~]ÓØÓÙɵ;·©†Â>ϰû4Æ'0ìmÀ?1‘$‰–D‚é[7{È—×XœXÂ*×vÅF[V…H‹Ÿ}‡ë£¤…@²º³YÏŽ`0ˆ*I Ö]ÌÔ˜º1I)g³Â=–UÄ0plÓÿìdLb=q<±·Ï#ö|>Ï[o½Åêê*'NždßÞ½ÛzÝ:ÚÓäòy¦æçIõöxgà^Y+)*ö6$)ü©D"á0ŠÔªU´”¡f–V™¹žÁ­ìÁ4S»@¨çYX8Oµ2W7bR-•пç†Þr0 ƒH H>—o1)djL^™§’M ö"I;Ûk’ÍN°< #»Ä¾çëò™®í ¸õú—|> .9wî ‹‹õÕmOJdIf`ß>ò— ̓޳ÇÛhÀ®ÕÈ/-Ó×ÙÕ°&yOML à ìóSÌç‰Ö™˜d–V™¾žATzºvüFçós,.^§TÒ‰R?a®ä DC!Ï ½%•žD2‘`yj²!ž’É+ TsÂá~dygWär,-MP.0üõó •óyü†®{ý¹Nå2çÎcff†ã'NÐ×ׇ,ï "¬i#ÃÃ|pé"+óóÄS©]í9q‡…;tÆbtwv6l-žúô(ŠB2§œ/Ô—”,¯m’î]CJæç/Q(¸Q?ÏкºHK"é¹ ·(±’ãÔu>G1k3qeŽj.D(´;HÉÜÜ  ¨¯õVÎåˆÇbu­6؉¤äôéÓŒñòË/3Ðß¿cHÉ' õõ#Jeª¥ÒîÝlÙùyR¡#4ô^y¦ÔšL‚e!ê0LAn%»¾)wtïø}Îåæ˜›»D>ï"D}küZ ¼þ%[>Ÿ€aPÊçë ?PÊ9ŒÝðc¨*¥gœ- „ ˜-0ss §ÔI0¸HÉ<óó7<%­[_?2Y¦Ïë³…¡©*ÉXœB&ûŒòÕ¢ÃËc”ׂD"C;š”!Èf'˜›»E¡ ×Ÿ”•b CÕú½ùRCµZåìÙ³\»~“'OòÜÁƒ;–”ÀzÁÇÐà 푳££»n q%—£–ÍÑÓÑAx†/>ÓIR•d$Jnyå©'2 !(åŠL]_¡–O öìè BË-0?¡a¤!(çò¤Z[Îl=<ÚZ[q«ÕgŸuR2zé:Å•±ØA$ig“’LfœÙÙ1ŠEƒz‡o>YÔìÒ2±pÅ ã<€Z­Æ[o½Åµk×8yâGŽÙábY’8vô(Ñ8s·ïàÖ!R°à–+TW×èio§£½}sÖúYYd:݆U~ú¸[1[dêÚ2V6E лã79Ÿ_daá<…‚½‘SRB —–¤ç†ÞêŠLå)cו‚ÃØG£”V¢$Ïï‚ê›qææîP*é@cʬ…ÔÊ%Òé´—ŸõéKÊuyï½÷¸rå ¯¾ò GŽÙukpôðaR¡ówÆŸÚ Ø.ªååÚ[Sôlb£¼gžŸ‹Dñ)*ÕrÓÿde©Ål‘Û—Æ(gƒ>*•¹½É•J‘¥¥QŠE!Ò !%°ž´ç×t /Œ³å¡ª*ÉHŒ¹¥%L¿ÿ‰.B«â2zñksÑè¥ÒΖŸj5ÃÂÂ,å² ˜ “Dzðk:q/?ëRòþûïóÑå˼øÒK»ÆSò0ƒüÐÎt‰ù;ã´íéÝ‘ë *UJ+«¤qöönîw|fb¢( ‰H„¥¥ERÝÝOôò«ó«¬ÎÑõ(ÙìÚŽ>ÌB@¹qXÛª’Yª"D™LfǯUµ:M¹,€ÆöåÉ-¯D<ùù”^9þ</^äÈáÃ9|xW‡¹t]çðÈA.|ü1ó㤺»wÐzTóìl–¶D’===›. 
ÏLL$I¢«£“Ù/¯q‚ͱm ]@US»A´±,Xh)Ù0kP]èh÷¦¡nÄ¢ÑÙSe|Á'K¶T”†:²+ÖÉq\`±±RêºXÅ"½{<ù¹.^äƒ?äàÈGõz#¦irdd„Ë×®±49E¢£ux©sË˸ù"½íítwu5… ×å‰ÉD‚ n`U*OåIØÇæ1Ý Ïôº½n#ȲLkÛu]nÞ¼É;ï¼ÃÀÀ'NœðÂ7)Ãíí9HÒô1?v›b6·eåY‚å©)Ê++t¦R/$üÌòlš&‡$›Ëqst”ÌÄ5Y"‘nCÙ$]­Jå\ŽâêŠ+Ø—n§·»]Ó¶l.¢\ïhK&)e³ b†Ësï°˜™ã>[KX,ÏüŒùÜOkƒ¹Në—þ-sc”ö ·ÄäÍõñó¨þN )Çí[ÀBf™­`ó ÇÁ*ikmõ´Á6G[*…!I ¯î`Û•‡œ[Ç®Ü#eŠ…r¹) ¥û½­P,Ì®ÿ¬¸Œã \§ÄÜÌܾý.™Ü4νB$Vç~ÌäÜ8ûüm^{í2Ô»§ºJm $ËK¬)kI$wmÒøíÛ·9sú4‰D‚“'O‡=a¬×%+ËÄ¢Qž?z”CCC$M™©i–'&)¬­5$IVܪEqq‰•ñ ª+«tµ¦xþðaûú0t}KŸõºS­­ŒMOQ.ño‡ª^k‰ù¹Éä—A2Åö“nÛ&K¸öó³ï³–_B Œ ’j 7û§LÌ'("˜ºL{ËÞ»–\­4ÎäìU:÷þû÷A89–—oá3ý hh¿´ÇA9_ `˜ÄvyÒÞN€iš´Äâ,d²øšd½»Ö,£×~¹µ¥uÛERH´‘á¾ã(n±kÿ/³+  I½Ao2ÈüÂY–V+Ü™x Ýü± ‹[¸E–—.£Å^ âØ©®7iWhJóóª…Š€Ö–Ý7[JÁää$§Ïœ!‹qêÔ)O4Š,ňE£ŠE—–˜_ZbyiI×0CA ßúDxUUy\Ó^–$„ëR«Õ°«•b»TF‚X8DªwñXl[(©»Vðù|´ÆÌe2 #&Ž] RÉàlø{\;‹ó‰æ–¸}ýß0±¼J²e?Jm‘[¿M®ü×8°÷ ócÿ™+3S´¶îGq³LŽýUçÏ“U ¡i{¸† ’¿MÁ1hWlîŒý 5¡O>GÀôÓlÒ)ål–ÞTª7 uG J1{õ 5ËBk€2qj&Fÿ¬Ïÿ‰ „ÂÚ%l±~Þ—¦~ÈØü"Ãÿ ©hœ•é?æÒÍ? –&)Ýabòmþ{ZÛ(®^d>[Bö¢µe˜UËehà+„MóçDÇ.SÊÏaI~®]¾C¾° jœž_¥»µ©É̾˜ÉЖˆcÞóλ…”ÌÍÍqöìYBÁ §NòËm†Î–$BaÞ®. 
…+kk¬¬­QÌäp„@V$MEÕuT]C’%$IB„À±ml«†kÕpí®ã «±`doŠh4ŠßçÛ–^À†˜+í©3ËK R¬6 Â[ËçîUµä–ÇIu‚Uezú2ÉÁ¿Ãs{#¹Y®_øŸ™œ}¾ÞA*…;8j;Ý{¾LÈ0)åÇqÔ4AÕÄg|—–ô«¤¢m?'ÂŪ䰊·™ÿ€®P-M19ýûŸû[´ÇSM%'ÕR ÉvHyaœƒDæKkØ®‹®È¸µ –+ãÓL„SD däð+ص5¦¿ÏÇ×¾ÏbÛõ.’I«•U“@¤µê ®È8Ž…@mnEŽ8å2}ý»¦w‰‚••Μ>MµZå7Þð:ÝzØzÆY£>8™LbJùµÍÇn1üªÃÚÊ8®açX]›ÀÀT-æ¦Î°Z’IµŸäÀ¡ß¦§¥•Õ•q,W !¨¹V á>"F‰Åùó!°Ë·É•VñùZï–,7k ‹ s׳ëC×élk#¿²ºÉOÖ‰µÃÍß`bê²™›ŒýˆªÑO<ÚEvù?ügŒM_¢TÎãº.²b"I*²ÀvVY^ÃvîIÝ“C¤ÚŽ"Ö~Âõÿ?{ïõ×yæk>{ïÚ•s@!rI00(Û•l·Ý¶»×ééî3gÎŹ˜µfÍuÿ 37³f­9gæ´ÝÇmÙ–%YJ¤ÄRb H9€*ç°÷ž @)R%Q îçŠDUíðÅß÷~ï÷¾ï]:Éô̇X<=8-?œ¥"c%|_E,ãØ±cd³YöîÝKmm­ÞÙt ‹ ¬&6j¬«ãúÜNï÷çåm°wjdbþ5ΤN#(Ò9™P×>Œ‚D6q•é±÷˜¶W!Q".hèÄltãrÚ˜û=Fñg4×õ#‹" `´¶ÑR?ÀÈä¿“Zþ”es;mu°ŸP,jmÕŽ­CA ¶¦†™… ¹æûµU'Øñx[>×lÉŽÇÓŠ x~BYaná0+s*’)Hÿ†Ÿ´YÐ̨ö]gaòOÌ© ÉN;þ†jo-eÃ&ªæ¯3>þ>²)@ÐåFAÄUý=&ç³›³—¾ž—°ï礀pï×Ë'’t64>2‘Mc±Ç'•N³oï^õަó`ŽÚw?>_(0tæ Öꪻ}œ'|­Y¾7S¢¦•Yž?IÅÒDµ·á¦ƒ¦Xž;FÙ±‰Z·MÍ“ˆL¯€hÁééÂã¬BDC©¤ˆ­ “Ê%«³¿·ƒÙØEca,ž ÔøêosàS•<‰èebÉe$£¯¿§ÕyÏÛTš¦‘Ë]!]BÓ°~¥¤¾ :šqz¿: ݧ«½` JÏúù(tPQ¤£­ IQP*½@nWMšzœ´mhÁlûêþ h°43ƒÛáX÷y¥òù<ÇŽcl|œ;wÒÙÙ©‹]˜¬N¾]Ød™R¡ —øMV}JÚ:7·Ü“(P²9\+Í¡>È<"8ìv­Š¢pæÌ®\fû¶í èÛ7:º0¹ŒF#u£ssXvjZBx«Ë =ª^À`1š¾Þ ª–ÊäIZÚÛõÁæ§½µØ…ód’I=;{GgkG”dä¯-VSrñ8MMëÒ·DUUΞ=Ë¥K—Ø´‰M›6é'öttaòu5†XŠÆHF£xƒA N݉ó«H­¬p¹êqKyL&õUA®ÏÍbu80YÌz¡|ÉÈ2›Úšõ—FÓ4Î;ǹóçØÐ¿dYº¨ó/@¾w%$I4ÕÕQN¥)éA×¾’B&ƒš/ÐØÐ ïëÐØØˆÛj%YÖ ã+(åó¨¹<¡††u—SJUUÎ;Ç'Ÿ|Boo/[¶lY÷±YttaòQS]MÐã!^ÐùrˆôJ”ú`Û­—‡ÎMqßÒØˆšËQÌçõù’þ“\\¢&Àïó­;QrñâEN:E__;¶ïÀd2éu®£ “oЇéÜ& IDAT ´65cRU2‰¤^ _@bq « Ò¤gÕùÁ@u~?ÑpX÷_@&Å"ë®ÿ¨ªÊðð0§N¢¿¿ŸÝ»wë–]˜Üv;- dã1}`½ &AÀP©ÐÞÒ¢::w¥¥© " }KçÌ’„P,ÑÖ܂ż~üp4Mãúõ뜯çѹ˜¾v£ P¬«w»qãCCC477³k×.ÝïLG&÷I’ØÐÛ‹]4K¥ôÚX#±°ˆ èZçù@VU å ]˜ug6¯êÀ¢HOWNƒ¬;“¯õ-Ÿ§­­ “qýôŸññqŽ;†?`Ïž=ØÖ"ÿêè¬G~ðÍIAh¬o`yy…Å©ijZšÙ b°89M•ÛÏëÕ[§Î=a2ééìäâèuйæGtÒ5Å©iNU~ÿº±&LNNrìØ1<{q9_ë÷±xœ±±1=¤ÎƒÕ_%‰–æfªªªîè«‚ö€,±Š¥g/]¢(K¸ïò ëḢ±K6lÐ#¼ê|-4MãÚè(ã‹ ››1‡iH…°I6õ÷¯‹þ£isss9r‡ÃÁ¾ýûðy¿þ±çCC <‰ÝíÖ·~tŒ¶ ¤c16ôõñä“OÞáÀýÀ¸s›ŒFZC!.^'gIcû𫂇|ë?Ýë¢ÿ|º}SÈçÙ»w/Á`Pttaò@ <&zz°!°8½¾Ä‰I2™žÆ.èïîÆ IzKÔ¹¯‚@CC=Í5µb1 ™Ìºy7‹Á@dr›d ¯«iôŸh4Êñ'Èe³ R·¶¥ttÖ0°ÛllìíŦÁÒô̺Ø/7K"SÓX‘ }}zFPï®s "Í¡UA²Ë+³Ù‡¿ÿˆ ãX$‰Mýýë"T"‘`hhˆt*Åàà ¡PHo¼:º0yq:lìíÃ!ŠDçæÑTõ¡-l£ ²89…EÙÔß¿®’‹é< 
\iij¢!PEjaq5õÃC*ðe ''qM lܸ.DI:fhhˆh4ÊîÝ»ijjÒ­Ž>n= ét8ØÐÓ‹K–Yž™CyOëHªÊâÄ.óê ª[Jt¾OqÒÞÚJ¨º†Ld™T,þÐY…r™¥ÉI—‡çQ5|6_ÖÅäÈ©ãg¹qᗆèšFrö§ORª¨¨å"…bùk_[-g¹òñ fRßûZæ¡éÝv›=½\¹6ÂÂÄÁ¦¦‡'XP©ÌÒÌ —› }}ºC›Î÷?± í­­˜Íf&æçˆ—J¸ƒUE02%—'¹°H•ÇM_OﺨUU9uêçΟ£¹©™B¡ÀåË—?_iw©ÇÛ>¿åÿ` ÔÔÔ~½x.jžK‡‘m=@s¨–Oãò©å,çß}ìV;] ^øÖZÎ3ôÊ_±öîaó†FÄo9ôE'Îrø½,]mXŒ‡oQ%eèÕ?q5ëaï“Mdç¯rêøu6ýä'œ÷h5×4´Ï׿Vaìäa}x•®­{¨XòT4jX¸6Äá3N:z›ˆœùˆ…²‹íÀ(Þëí4´b†Øâ"ª£…ºj'ßç¬õP-;ŒF#ý/_fnb_}=f«õè5M£˜J“[‰ôxéï]ƒªÎÃ+Nëë±Ù¬ŒNLŸ ¦¹i5ÐÑÚ’‘b¡H}0HÇ:9üé»ÉF#¡Æªª2<<Œ¢(·Y²TUE½eëZƒÛ¶²5M»ís“ÙÌàààwhNS+D秉,'ÑD#¾ºFªüDA@Sâái–" TÁ€§¦ž`•‹å‘O¸>|»bÇWí¡Þ©žš!-![Ô67a·ÈwLzš¦ŠÌž‚ÑJmK+N‹áfIä®OE©`¤®½—͈€F1evjŽBYÃâöSªÃdÑ4J.ÎÌäùb“ÃK]S=£4•b:ÊÜt˜BIÃUÓ@mµQP‰L]£h¬Ã\ްÍ`´ûµ5 KâíϬi(¥<ó¤rE fuÍMØL"sÃç˜'t÷â¶[8ê—/LcªkÀ´m3“@.a~n‰R¼uMTW94•¹± Žjò+sˆÞVBu޵ûjdf¯suxƒ3@us3MÞ2)­æ¶mløÏ] ªúñÖéî!ª ˳“¬ÄÒFuÍM8¬F”B‚ѱ%êœLO,ÑÜÓM]k®7Jf‰k3iBõnÂÓs”T‰ªæ6üN3OD˜™šG•mÔ†jYŸ¤±§‹,ñM¦gÃÃ8¸nìïÇ|c”ùÈ2Y&Pÿày°‹‚@xr ›(ÑÚÐ@cCƒ>3ê<ø<^¶ntpáÒ%"SÓ¸«ƒX°ˆ©A`zô.“‰®¶öu—ÔR’$öÞ&2¾R˜|Nˆ|ú›Ï ûwR—>x“'/bv8Ë%Ò9Û^ø%»j™<ö6o~x«Óލ•Èaàñƒ¸af‹=sDæÇ;tœÑp›ÅH)ŸÁPÓËû<^³xÛ½&OáÈû'¨ˆ&4MEð·ðì/†—opâ@! 
[ÁÖµ›Ÿÿêy,±‹¼ñû×X*Z±e²É45ƒ<ûܤì‘âÉÿü¿³±ÁNzaœ±p ÎxŒ˜µ‡¿ÿÏc‘Tοõoœ8q‚¾¶gÔ2W=þ-UV8ýïÿ'®Mr`?v³t‹5IÁ qpßOi®¶‘œ8ËË¿;Ä\â16 îcøô0}Ïp`[+3§£ÜX³ïÇOã7dxçè1ÊÁ-üí/ŸÄ$”úÓãÜñãll>€¨0ºxúFÀf¼¥”Dj7l§ïâED¥ŽÇ~4ȹW.±¬Þ>»úim=M¾…½ƒ[)ΜåìéËô¿ô¿ðضʱ1þôÿüžKFlÐJú7óã§¶Bnþ¦UHE4¥Bco?vtP\¸Äÿýþ;331\K˜Í;yñÿ_äÌ«¿çãh”o3åæŽì÷ùرe £ãcÄæÂT º:´@¬e\œ™Aª(Ôùý4‡BÈzÒ,uÕ.Š´67SðsõÚuâ3³l6^Ú÷,¤ ‚å2ñ…E¨Thmn!XU…¤'´üÞF0O H[Fiµî•RšLx5†T&'VéëjÁ$@òÐØ\ÏÕÓSdË|¡„#—yí·yšÛ;hhi¦©!€šL‚ b÷ùih¬bøÝWHM´ÓÜÞFsO/vóícd>b%š£õñLFxBüäo…h41ÿñVg¦P ²AÂá÷" ÊÅá3¸šžÂï¶"ªšÚ±j'‰¤r´6ÔÑTkcèåÿN¸½¦¶:z;0”–˜Ÿ&í03ôÖ«ïº#NÏD#ê|öÕ­"w•ÒT–ŠªŸ ÉäÀí2ríØÛ WÔrŽh"I6[†*ñæä.Šk~A‚€(гIæ§Âäküó @#M‘œM’Ì—A2¬ áµ™ø|WauòÄ/rÞÄµï ˆ¢F4¼ÈR2‹â,‡/€V"™ÊP ÏQjñ`0yhhkÅ`¨|nƒM¶úih¬E’$Lv²¬RÊ¥‰Ì.a© Qå·c0 tlìÂ;4þíÆƒ‡½;™Ífúº{ˆÅbܘ˜ :=…ÑáÀîñÀ÷0¨ k…˜ÆHg³Øe#­¸NÝJ¢óP|§Ãɶ͛YX\drf†h:ÙåÂêvÁw܆%A@-IEc(…"~¯—¦P³É¤÷ŸïY˜øZ{Ø~àfÃj¹WòQæO}DPUU>õ­$I¢¢”©(кÿ ?­kãÆÈ f.Ÿäü±£l~æ%¶u|6y‹–ûþk¯^erbŠOý…“ÎV^ø»ŸRëþÌ*¦i*ªª!‰kοÈFãMç[A6¯m£¬N¸ÂšÅ§RQeÃÍ&+H ¥ŠŠÑÓÄõ÷Œ _efjšã¯~Œ­q+O<Ñ¢Xì6,k T-Í}h3ãs˜™Dƒé6Ëšz»eQÓˆãý¿¾Kpàý½!´Ä ckGó¿¸kªŠª‰XìÖÏîß¾™` —EAÂ`4ߟn¸¶õ'ÊF¬v³Xéß÷ÎúVd1Óªð¼[ ‘d ŸzF¯*"44* É Ý쯣Œhøvs¯a=t)Qñûý¸= aæÂ ¬$S˜\N,NçwvzG+—)¦3R)ì&3­!‚UUë"íºÎ#f=‘$êjk „˜[\`9Çèt`u8‘M÷7fˆ¦(”r9Š©4Kø<ê[Zq¹\ze<€Xmì²ÂòBµÉå<+K¬žj¬²ÊÂä ûžï£˜Špä÷¿åÚÅkô¶ö" QQ5ʹK‘­[÷ѵm7‘ñ³¼ü_ß$ú§MC©T(åódã (•ð8œ4„šVÍÎzhy‡Üzb2i…¨­©aqiiµE§Ìfœn7F«Ñ`@D´{ìI¢ ¬wT”R‰T"I!“Áa2ôx¨Vãr:õ x…I°‘Þ MŒœ|ã…Ä죳iú÷>‰]*qõÂq®G ­§¹’b9£âï`6ZpÙ`ôÌ LÉ ã®`µQ_í&µ8†äôcw:n»—Ñ kC;ÇÏ~È;•eLù®\Yboh æ/š¤#mÛ¸ô—“9ôu>Ó—ÏáhÛ@¨ÞGbê ‡ßø˜Ú¾~‚n3‘É9¬þ._5Ý›7qèÈ'úk™j—ÄôÈ0bõF?Ðs¯Kc\>/V%Ź#2o)[šÇ kÌ_&ìºí۲ɅšpöÄ'lÛ¾ž^Žž9Á{b¿Ucbx[ÛnÛÕxVíìV #W9w¶¾¶.zZÏrñÝW(„;QaƦâl|ú9Z,ßdÐ0PÓÑáòQŽ¿ýu^™ùñI Ò·[œKÿò/ÿò/ëqõçv¹¨­®ÆërA¹D&ž RÊfT Y‘ÄÕcdš¦Ý SÓ4 ƒ `Ð@+•(¥Ó¤#2Ñ(rY¡Êí¦£µ¦ÆFœ‡n%ÑYWnéCYÓÈ'’¤V¢Ó”Bµ\FDXÝŽQUEAUUDm5ó¯P©PÉå)§Òä¢q2Ñ(Z¡ˆÛb¥¥¾öÖ6ªG¹õ‡dllŒT.GMSñn‹'M¡+â¬n ®6€´æ¯ © …| O}õµAš‘µË‹K™Î{Ù¾½ƒ(ã¯rSÉ'Y /’É•©íÞÌö=[±Û¬˜DHÅØë»èl’YYd%C5ºÙ´oÝm5·ÙQ¦ª¾‹T&º¸HIr°qï6õ6P*äÁè&ÔÒ€Ù(¡– ™ºæVêÚÚ x-¤—‰Å3¸»Ùýø>‚n v_«Q#¶0O,GvÖ°õ±4ÕûðÔ7áuˆ-΋¥pÕ·³e÷6|6‰|¾„3Ð@}½(æsHV?M­õ ŸmkX<,F‘ØâeÁJó¶ý´6¸(æ‹8ü¬&#PU^V‡ I˓ΖñÔ†hïíÆi…èÂñxK[woÁaÒ(ʸ«CÔT{ióyÌž uõU(¥v 
~Jù<’ÍOs[.»™R.MY´PÛÒAWwJY–‹ä*Ú¶Ó×׌ («Fê›[pÚh•"ù¢F ¾CBm447a·Ê (5‚m„:Z±U‘%ÑBM—Hx™Îm{¨òÜ}JÓ4"³s4èî¾3ß• ­‡4÷blSUR©Ñh”D:M>—£T)£ H"êÚêNDX * ¬6.‡ŸÏ‡ÓáÐ÷¿u94M#—Ï“N¥He2¤3òù<år™²RAY edIÂ`0`4™qØlØm6œN'N§sÕ‚¢ó½qèÐ!f——Ø¿ƒQwÆ×¹¿d—'¸pn’¦Í;¨ö™:þzš_þ¯ÿ…ÏtWUU¹4Jée"˹ôua¢££££s_'ËØÔþò»?1N'wP )F†N²Ï}íg›¹t„¿üámV’…{ÿ•Räâ[à¿ÿ_ÿ•S'/‘I,3ÉÕ¡“¬$óh•"WOæèÑs”ÕN™è[9:::::k3°F©£P,#"F‹ £,!ÙT ÙbC-æ)UT F3ëg§.”R|¾ˆ&HT*%RÉÔ]#ÝhªB!—£\QDf› ƒ´ÁUUJä³yUC’MX¬fDÔb†LÙ„I(RT$ ‘qN>F¯#ˆÃь٠ ©erÙ<Š¢a0™±˜M«GU5r1G¡PF0QK9’Éü¼¿J!—¥TVD ³Õ†lÈÇ»4‚«e'zH…¯ññû'Øî¯Ãf­Ç( ¨J‰\6ª‚l¶`6b."X œ£"ZpØä›Žáå|–’*a³™r6E¾bÀá´!PÉg)¨6«‘âÚs‰’ŒÕnE$‹Þ»Ñ,µHÂjÙæÒ Ìv•|ÉdµD2‘BU²©$Š &›£,ã“޳ÑUƒ£³‘Îí83d²©$F‹J1Oùnu^Ì“Ë$‹ÕH6SÀît ~K¿w]˜èèèè耦±2~£œ –*‚(âªmãñçŒK]äý?ÿ•’À*åIÄ(7{^|‰Öj;•ì2G_ƒ‰¹8&§—9KE½Û¼_æÆÉ8}n”B©¢LMçfz7²’âÌ»‡¹1OEƒÑNÇÎýlßÒÂõã¯qêZ¯1GIôaWÃ\!ï>‚l”é 99ýÎÛŒN-R.k~{’žöjò‘ >øë;,%ŠØ¼Ìêšæ¹Ëë«ÌžûˆC—È+ˆ’L°}€½{70qú4W¯Í¢•ý\ü°‚exdíÑä§iõ œ:ôsQEÃì²ýégh©ùøÐ+̬ȘÔîöA|ãj–­Ìä©÷9· òãŸ=‹]ÈqôÿÊxÖÉOÿé7øÌ çßyEªhª¸zñÙ| D3¡Ý îéÇPŠráè(Õ{xr£Gqòôu0Û© zɦâ{ž ¿ 4µÈµ£ïp6±D<‘Ãߺ‰û{9u”+WG(ùŽ`öS™<Æp,D»•÷^ù+eC)O2G•½ìyáEZªíT²>üËëŒÏűzüø½2‹…ý/¾DKÝ· ˜¨oåèèèèè U2Ì¡:yü—¿âéí"{åC>>w‘ÄÜU"‰­;ç™çžÄ˜¾Á™Ó×Q5±“Gøx$Ŧ§ŸãÉ'·# $“w†%/Fg9ýÑIÌ­Ûxîï~Ãþ==ä—YXÊ0yòGNNÓ¾çi^øÍßÐUgàèëo3±˜C+F¾4£yûŸÿ ¶öRÛØÌæÁý´7WsãÈ|øÉûòüß>Oƒ-Á‡o¾ÏR<Å¥#ï0²bcÏK¿dߎv´Ø2…âªI+F™¿£¡‡Ÿüú×ìÞÙÃÌÇpm.CÇ–-´47Ð:°ƒÁƒéÞØKm¨•m{÷Ñ\çâêoòñHŽ O¿Äó¿x–*qž#o&žWÈÇf¸1§aÓ6oéþì´Š`Àá³™˜"žÈSÉ,1½fq.Lx9‡’3=9I"æä‘S¸Û6rð7¿bsogÞ}›áÉ(éè"©t‰ôüÇŽœ¤~Ãn^øÅAÜ&…+Ÿ\![P(¥–(Z\ìxþÜÈÄ'Ç™J˜õtRÛØÌ–½ûik®¡œŽ°´œCEâ³WYIæhßùÏ|Cb”³gFQ5ÑðÉ[~ü"öö“_˜äÚÈÜ}Iª ÙNçÎýlìm$žaq)EYÓ¯dP5 D+Õ-Ý´µÖá«m &h%º§¢–X¸qWS;==­Ô´öÒ×׆Ëz—éE1he‹ó,­¤ tíâÙ¿ù ¡™¹ëÃØBlè!PÓ@ßîØK—£‚€¯ÊǦ]TœXDAÂáõbÔrŒ]¹‚)à§Ê!R(‹xkê(,M±07ÇüÄ }ýt·ÔRß»ÖÖfÌw‰3'˜üô?öcÚC>"ÓSÄc)”b†H<‡ÉéÆ$ ˜œ\.3v§ A0àôz In _ÇQSEÀªRPd<Á3Ä’YÁ@0ÔȦí}øÜ·pU·à’„Wâ¤æfP, ôÖYœ “‰FH¤5ärŒh΀¯*H1—ÇjÀ-&™›˜¡ò©ˆ¦›™%!øéݳ _Mˆ­;6Rëù,μÑꥥ+-¡:·ôáv*dY¬Nûê»x<˜%ásÕe%ØÒCkK-¾ÚFª«-¬,Æ©¨EF¯ãm餷·†žÍô÷µã4ߟØEúVŽŽŽŽŽZ9ÍÙC¯röZœê¦FÜv™r¹„T^]q#ÉX,¶µ‰U@”E´l´™t s•ýf8{‹ÃŠd¼3®ÉbÿÏ~Ê©ã§8òòoÁ`¥mÓžøÑNR©æFçZæ`0˜í˜Ä ©| ™½˜î’µVSK$“y–*S¼÷çðêß4³ÛA¥“ 2Z IDAT”%›Sqyìk¡Ñl.3‚¤ÜqµáÄ›dbª° 
…µà›ê—–›ªIÆ3Ìç¯óöÇÖž©‚ÍïAQ$Ì7Ò]æl³ËG°ÆÆüÄiS°‡îà Ï̱Dš¢£šjs‘Äâ(gßyõ¦¨)hFLFõ‡W•\¾ˆh0b–W3@Ë6×g飇ÕÄjVfQVç—!ÉX­¶›Yž%Y‚BeµÎ3,õ޵÷px]L÷GRèÂDGGGG‡ÔÒ—/LÐõü?òÄöVÔÜ ±áI~Õä%ÈX-Š™ìj6] r©,J±rõSÁ^×ÍÁØ@!gòâï½þþP3‡‘™dzÍ P)d(ª"¶Oƒ` wà'ˆF\N3Í5;øû_ì[6j…bIÅ ¥™2 äS«Ç_A#È£)Ÿ›ú4å±QÎ]Xæ™ÿò¿±©ÉMváÓ—/}ɱÙÕ ¢dÂåuÐÙ³Ÿ_>»uM'”)–Á"—™áÑNC} 3×®0!ĩݾ›j·åü%FÓEüõ½ÔšÂø[{ùÉ?ÿ#Õh…\³ÝXŠ®]IÄl’Q•2¥ÊêW2)ré·Fùb{Æ—¥äîZç³ÈR6·vìZ#›H£”*÷¥-ê[9:::::ˆ‚ˆ$‰”ri’+‹\>y’•xœRl‰LQùab¤º­•Ää ®_Ÿbqb„áá1’ù;'»dx’#¯¼ÆÅË”T ·ß‡lP4™ºÎnò3c\# sõãÓlµÔ×î¼¥$#¨y–æÃdËF»»‰Ž sõú,éD”«'óÑáOÈ”dªC5Ì^¹ÌéæFÎ2>1M¡|ç³$ •l"I|ašá󉥳dW"äÊ·[0Ñ”{AÄSWµáÚé Ä—¸pî ñüWW¼dDP ,…Ãdòʽ5ÁDu[ ñÉŒO̲86ÌðÕ1R…ûsÄX·˜èèèèè`¯j`ó®^Î9ÌëWíT…ZÙþô“ ]åò6·Ë-¦z£Í‹ "m»ö³iáuNýõ˜ì¼/­í |ÎÜàð zD.y‹ó*ˆ’ú]ÓÛUMs°7]âÚÑ·Ö@29ØûÜi Z5yðûl7×î6O-Í!7?z“É–ÇBªxˆËï¿ÎyEE2ÙèØq—ׯǞ"òÚ!ÿþ·X=ìÁêlîÛíx[ºÙºí2×Þÿ3Sµ­íìØ»™‘± Üèðb÷@^ý•Óß@¨ÚÄÙ>Àh|Žþ'Ÿ%[y—3oý‰ƒÅEÏàl³Ã‡S6}¡5Âîo¤Þã bª!°!ÉÕÁZâˆ44øðû‚<ödŠ3ç?æÕóÇA4hÝ@U•A‹csûQ¬2®†vìçÜ©÷˜v 8¨ªu"Š"‚dÆðÞvŒ×í¯ÂlqxëhjppáÈ¥§1X½øÖ|Sœ¾–[¶äŒ6/nÉ ‚@Ç®ýLͽαWþ€ÓÄiwârøö~&LvaG‘¯“]X-I§Ò¨ˆX.Œ’B2–DvzP³Id›ë¦8)eãä*fÜ®ÕI¬œÏ’ÍæA”±9Ìd’9œ>Ͼj¥D6¡\Q%V§Ù°ê¡)%2© eEC6[°Û¬Âj´ÖDNÆã±­ùŠh3)²ù2&§›Iú췃ɊݱëM£˜M¯ÆÛÍ8l"É”‚Ûë¼=Þ†¦¡”r¤Óy4Ñ€ÝéDP ¤R9,n/j*Šæða7Š€F>• _T°¸¼XŒ"j¥@&•¥¢¨Èf6»Q(¤c”°át|‘8ÑÈÅ£”0áö8($cäÊnŸ QXõ™É§Ó«ñe$»Ë‰,­>G6C“Xä2sãÓ¨f.»1æåÿö:þGö IÄs¸}ž›ïœˆ®`qz0É"…t’\¡‚ÙéAVR¤‹$Q@-ÄYŠkøÝFbѪ ᪠b‘EJ+SŒœ=MÚÑA°!HmÀ½ö”w›÷âo ·¿§ÙáÆí¶#|‡mM&:::::_¹ºžºr¨àe’ éùa>úë óËdReúôSöö’™»Ê±wŽÍ”QÊ*ªìfÇÁçÙÐægéú|í^·Å\Å/üˆÂø)NœºLEÓÐGu{Ÿ}†Ú[N¹£a¦®žf|2ŒåüÔVj9Ù÷¡f#¤ÓÌ5Ý<û‹ºŒÄ&¯pô½‰e+¨eUö°ëùèkñÝ!NT¥À'‡ÞB*®/–0ù[xâÅç¨sÕ#ïqæòšªPÕ1ÀcÏìG]ºÎûo&‘« ¨ š`¤÷±ç©*Œ2°‘vƒJ*2Kmÿìߌ]Hr"» ¦ÍPíì|v?͵.¦Oæ­£ÇéÜ6€Q+,Óà)¶´þˆß½ü ³;÷ÐÞÛG¨é†–=ìܱaM”|F.¦¢” Ÿc>-sð7¿¡Ê¢0~é,…JUã®ùîº‰ŽŽŽŽÎ—ÛK )–“Y¼þjŒŸÎF‚DU}½=M8Ü~jCŠ™ ™ø2Sc³X™¼ÁøTÁÈÊä‰\ ³ÍCSW§Øì “‹9¡Ìôµ–SZ™ðÄ4åV“Å‚ˆÙjE@¶ziîéÇïuRÝ҄îQÈäÈ'£LŽÍ#HË“£LŒÏ¡‰F–ÇÇIäï /Ênšz6ðº©jë¦!è$žfftŒHº‚’O1>|•X¶‚TIž]@vù0¢\<~œ‰éµ=ÛØ:ЊÑbEEd“³éöÀa‚d£­¿›èø(±t‘td–¹H‰öÞv¬6-}=hÉyÎ?ÁèréÉTqÕŠ`tÑÐÙƒ×íZ ·†ÍSMKgéÙQÎbv~™|j™L¦ X~šÚZ°ÛløêkÄ…BÑhÆ ŒfLFãJ«7€˜_áâñãLήPß·ƒÍ{Ýb¢££££ó½¢”V“ÃÍÖÏ¢¥ "²ù³ì²’,!h*¥L.“§˜Za|$·öe~ 
k“™(Û°e@£T*QJ'IÌS‰¬Z2Ìþª[kо<±/‚Á‚Õ$³š1W@WO”(J™|6‡’\f|$»öe7]›w_åËl6óšaCÂ(K”+Erù¥\Šå©Qk/îoj§¦ÚEÃÆM~{˜²h¥{ïAöovÉ 4tö`?|™‰é¾åÊö:šCA2 £|ðê«h¾6Z[±J%Œñ³2-ØÌw­[>Í{o|„«©‡Pc5¦b YXÝ’ù´ŒLi­ÊV#ìª÷ìå+ÚþÏìŒ\¹Æ©·®P‘ìôî?ÈÞíÍ÷%ü¼.Lttttt¾6¢AB’@)—V³ôÞbŸcŽ—-xün,wóÒ“ ¥”%_6`·™ˆ¬¦Û]óó±ÙlØA6ýèçlht*ùtÁdÇl¼Ëħi_š @6Zpûݸöòüc}kÏ!_1b·ÞÅ:PÊHdšR$“/b 8ñz5\Á»_ú^š¦’Ž'±8”2qªº¶Óºe¹ø'ß~3 ±©ïG€†ªÝý)ÍUÍ´5:˜º|𥸠Uíð;$¦/O07ð›ÿô šÝVF†8¡ Üš5æŽÒÐJ„Ç®‘6VóâO_$`éÊŠvo'Œ4¾¢,5réÕ=;iÛº‡ll‘¡7_å̱ٴ¹ —¬ ÉdÃm1“L¬PÑ:ø²Œ;f·Ÿ¶î&N ã”Ë@ƒOäÒ©ãècß¾mwX<Ímt7™¸òÁ›˜w ħ9sf”¶Á§Ù²¡ù6 ‡([Q+&®ß dûâfOí]!>>q”"unK§Ž#Ö°op ¶[|L4 þÿöî´9®êÎãø÷®½ª»%uk³¤¶d-–-[²ñÎb;l¡2 3CªæñÌ‹àUdj¦¦RS™„I 0‚7l#É’e[û¾[kï}ïmË’7ˆ –ÿ§Jºûö¹›¤ó»çž{Ž•Yd¤ó —6™¡ú&34í©¥²$F¸¥‹ Ÿ|„ÝÔÈR_-­ì|ú0öðyÚ,¶x˜|3M"Å—ïG×4\Lô18REuyxímÕCõæ´¼q ÅðóäÓU节ÇëFM&èïìAÍ›§§«‹åTŠÙ¡A–ƒw І/‡aàJ/ó™úGIX£}ƒòîÑä¤h¸t˜ígpdÕáÛoÏXqz?;EÇlÛ¿“ž"™±ð‡|+­_ßÈïÛ«¯¾úªüÙ !Ä÷SOO‹ñ8¥ë£¨šv_e(šIrº—+ƒKÔmÙŒG·˜™˜o1µõh ,ÎL°”2©nØHu] j|Š«m­\½Ôâ+aû}D.b ÓÌ/C´¶–€ßD1üD£%ÌŽôÓùy+ƒÃ3„«izx cm7H·Kcirˆþ 4—C÷P±¡Žü '½Èäø5Beµ”¯‹PBl’«m­twõ ÊhÚ¿B¿¹æi’Ôâ43q7.áËmô N³~û>öìßJ^~1eE~Æz¯Ðq¡Ñ©•[w²½©‚ ‡k£½t]ø‚žË½¨yeì{ú)J Œ1Ôs•¥´IY´÷-ûáq«Ìôc¬ÛÌÎÝ[ñ˜*Þü0V|–¾ŽvÆgcmÚCM±ÎäØ$¦× ŽÁº õ†ÜØ©E¦&æ–Õ±is±éQº;/q-ëw"b,3=·Œéó (ªêjÉóXñkLN-SRµ‘âH>¤c ÷\e!eP­Äcæ¶ÓNÎ399OA´‘º afGzéºÐJï•>Ô`9{Ÿ~‚â<㾟Êq‡©átÐnù½”Ù……â{ìo™]ø^úZxýµ¿²ù¹Ÿ³ç¡õ8Ù4Y'×À±³d2†iäÆ+±,,+‹ã€ªéhšŠ¢(8¶E&c¯,·rqžÍ`Y6¹þ*:šz§g3²™ ¶í é:VÖÂ0Í•r2©ªa¢]ïr·mX[IÚdÒY S'›Îàܲ~Çq°²¹u¢(膱2k±meÉZöõð¦¡ëzn–dÛ"“É¢¨º¡ßᆗC&ÆQ4ŒŸ;¶m‘ÍZ¹þ;†ŽcYd- Íб2«÷5÷}U3ÑT°¬,–eçÖ§kØÙ – º®’ÍÞ<'¹}Í ›&êʹ¸};'W¾n˜¹qR®—ŸÛO]רw‰Ì.,„â¢[iÚÞCçÙÓ”—Qñ®™]XQuÌU}3UM»c ¢j˜®Ûß×tíKk$ݸ¹Ö[¯´ ×ÚΡwÛ†5%**¦Ë¼ã÷sŸ¯]çªPuóÛ|·}\½†éº½ÈC±N’ñ¡[†íÇJ1:0@ƾǹ¹m?5û<××ʧoÌb2³öx9Ë£í|ôæÌHOüôÇ”zi?×ʒ奪®†Ôàœ8ÑŃ<ð+-&B!¾Tr¦ŸÏŽŸaxlT“ÒÚÍì>¸¿®`-qúã Ïâ ^_OóÞ팶|Ì™ÏÛ(U¼¨™ÇÙ±£qå ÜÎ$H«&ë6ÔP }Å­°j;Ë©“§˜²"x‚ jÂÆ±œ:ò'Ɔp|a¶=z˜M5Å(8,^âÜé LÎ, ¦y;¶¯¸Í±ï¸À¹Ž!öüäŠ} WO~@ûÕšŸyžê?ã­'8ß“d×3™ºpŒ®KÝ$Òþ‚R6ï9@}4@ç©£ôÏš˜©)ì¼ ìÝ[Çå3'é˜ kAh]5͇g¹ó$'N· •-árÙ<¼§¿ù͵¤Æ8wìCSØŠNqu»?FЄÁÖÓ´µ^ba)…á R»muIÎ}ü>3øÞ~Ÿ=§ª8p=fèimc.¿‰—~ò$~¢ë£Ì.AЫ¡xªÙÒPÉÑóçÙ¶§"Ïý5I‹‰BצdtðÆÏ0±ë%Nfž“oÿ—‡â4ì~„­QÏ}À±Z°‹‹¥µgM{¡¹¹Ž…á«t÷NQ-¦¨¬Œhý6jê7p³ußÁJ%±œ,ÝŸŸåÃ7ÞࣣŸÐ=0}Ï 
m…âòR"e¥”×4°ys-A—ÃpWËiÛöï»ÜDZN0Ë’˜àèf6ã¦ùÑǨ©œzçM:z§n¶ß(*¡ƒÁ‹íŒ/ádb \n§£ã2CcsØVšáÞKŒÍeXlý÷ßm!¯r »Ù;6ÈÑ7Þet.Mr¶›OÞ?GJPÓPÃDë ZZˆnÛË®}Ídfú¹Ôz™’ò%åeTÔn¡¾¡öæìÃ÷Åffdèæ9d|dâæ¾e—8÷Þ[\èšaÃŽý47×3Õþ¼}Œ¥‰^N¾û!©@»=NeX¥ëB+×ôR‚Å¥WVÑøP#%þ›k˦¸6<ˆ7hpñ/oó§_ÿš>üŒ*ª¢ ¨:ë·mŘ½J_ß÷Ûh"-&BñƒgÑßv–?\Z5È–ÅTÏ(u»`yb„ËWfØü³ ©9ŠbÕ›èál{Ë·‘M&ÈàÁíÏ£¼¬šÊ†m8º3£aêáò(á ÷–Õüj‚ÅÙIü¥Ìö_¤õ|Oüâe¶ÔßåvŽ‚+&ç!ë-¢´,„¢®¬`ßsO‘ïRp-õpõø,ÉdŠ…«é›°9ü£Ý—x) 6Ñåwô]îgSuCUreV².FGFÙt3·¬PS]µÁa’õ^Ʀ)ß±‡ÑŽ#¸£›Ùÿø^òÜ*a_šîÿ|ñÉk¨ŠJ¨¸=Ï>AÄ£ÐÕ"ÏKqtµu¤,7yî^·I ¨ŒâHõrI|f”³ï¾EǪpcgcÌgrU{bv‚+]#Tü%»voBq2X×ú9r¦“™‡ŠH%âx y…ÅTT½@Cò‚Á›ÁÄÌb¨4Í@SlÒ©Lî6c“N&AÉ mîŽTóä?U°8;ÃhïeÎ}zœXFç™ÇÖqçùk4ÓÀëÉÍ]ã „ñû½L¦_r;çNÙäæu¹²ª5MƒPE-{ú2¥þ\u—Ídpòñ®>ŠÎºê:>;r…^Ÿ‰§leU%´Ÿ?Fo›âPZbÈ­1O®lŸ•I’± ]½^Œ±²~3XÊ—þ‘¦ùkLöðùñO9¶á/ïÉeˆ[ŽŠcÛ¤SIâ±Ø‚I†L6sÇ$O @^0¸Lby7o½hªŽ¡A:‘¾¾6‡T2£˜èºNùǨز‹Ù‰1º[Ïrì­wðFð\/ëVªf(.@‰i×ƒŽ‚ÇŸ‡ê¤I[Ö×÷»(ŽB!î%¯ LIH§ÿbËü(ñIúúF(Þð0=Açñã$ƒÕ4n®¤>ègâj'c×I;åhX,-.Jgq»nÌ c1ÑÕ‡mç¡§^¤a}> m OĨl,ÃpRŒ\éC+¬¤$ì¿å¢[AUT’‰eâñÔ=ÒŠN¤²¿ÕÁÔØUÛד]ž¢«ý e; øÜ+·– +«ð¨Çù¼ÝMÓÓ“_¢Ð»À…‹S”m:LÐ㥼¾†Î¶«ŒŒoc]ÉÐ¥NÒî0e!–‡V… +Io˦2AjÚHÕ–‹ƒ]œé›'i¦8ÄcK$’iüÐL?[=ÌÖ¯ñ¼yƒù”ùî¼Ä\m—µH÷å «ë1‡8y¼š‡¦x}--AgpêÈÕIDATÛ{¤ ¼ªm%XZŽòWW 7µõ|ön'»·³±ÔCOG'Ž¿˜HÀ—Û÷l‚¹e›üç¾…–`"„?ôÖÓ…®©wyŒP;äÌéó¼ù__à86Š;Ê®ý»0 šjÓ~ìm.ž0Ðp°UÛk à˧ª¦˜/>~‹Gö=”» ¢hmØHÅå^Nüßo8åX(N–¢Æ]ìl®Ç^žáì‘·0›^äÙÇ7­Íš›²ÊrZ?<Ã{ï(ä[`¬ÐKÕ1]¹–‹üª­ìÞ3ÆùãGè=£â86îp%•M·Ʀ ‡Ê(Ï÷04gQ {ÝDŠÂ´\ê¡¢¶]UiØÿ(ã3G8öúoPU Ë‚í‡R½.ÈÅvóúÄ9ŠjbÐwú#.·CSÀ¶lšöì#äÍ£²ºœã-ó±©sðÐnÞû©ŠU —y[•óÜ( hy%4<ÈÉcgøó¯¯àØY¥˜=Ï API Conductor API API Conductor Conductor Scheduler Scheduler Scheduler DB Compute Compute Compute Keystone Network Glance & Cinder oslo.messaging DB HTTP Nova service External service Hypervisor Nova-Networking API Conductor API API Conductor Conductor Scheduler Scheduler Scheduler DB Compute Compute Compute Keystone Glance & Cinder Hypervisor Neutron Neutron nova-13.0.0/doc/source/images/rpc/0000775000567000056710000000000012701410205017765 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/source/images/rpc/flow1.svg0000664000567000056710000010610212701407773021556 0ustar 
jenkinsjenkins00000000000000 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.26 key: topic key: topic Sheet.27 key: topic.host key: topic.host Sheet.28 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Rectangle.35 Direct Publisher DirectPublisher Sheet.36 Worker (e.g. compute) Worker(e.g. compute) ATM switch.37 name: msg_id (type: direct) Sheet.38 Sheet.39 Sheet.40 Sheet.41 Sheet.42 Sheet.43 name: msg_id(type: direct) Sheet.44 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.52 key: msg_id key: msg_id Sheet.53 Sheet.54 Rectangle.57 Rectangle.56 Direct Consumer DirectConsumer Sheet.57 Invoker (e.g. api) Invoker(e.g. 
api) Rectangle.55 Topic Publisher Topic Publisher Sheet.59 Sheet.60 Sheet.61 RabbitMQ Node RabbitMQ Node Sheet.62 Sheet.64 rpc.call (topic.host) rpc.call(topic.host) Sheet.63 Sheet.66 Sheet.67 Sheet.68 nova-13.0.0/doc/source/images/rpc/state.png0000664000567000056710000011321712701407773021640 0ustar jenkinsjenkins00000000000000‰PNG  IHDRÛˆ `JmsRGB®ÎégAMA± üa cHRMz&€„ú€èu0ê`:˜pœºQ< pHYsÃÃÇo¨d•øIDATx^í½˜Eö÷Ÿ]`v‘ØDfâîBˆàƒ»[°Áƒ… IAƒ†¸NŒ$$Áƒ\qA–eq]Yvÿïó¼¿ß{þç[ÝÕ·ºnÛÌô¹sç„={çzßêoõ§Î©ªsþРAbóýۦѶ þÀÿ9ÿsnÝÿãWãåäÜÜÚŸ%÷¥¤¤¤¤êS ü¿ÿýŸÀŸëГ!K»6nLcÆÞB_|ù} û öµßÜçðš/¾üÒ±/üö9ß·/ø5)ØçüÕ°Ïø½Ùö9?–À>ã×TÙ>ã÷Û?øñDö~Û§YöúôSÇþþ÷Oé“ ûäïô·@û„ÿ„>þ›iãûŽ}ô±mÓ‡ÙGôÁ‡Aö!mý Û¶lý€²m+mÞn›¶l¡M›ClÓfÚĶ1Ð6цá¶~ÃFŠ· üš ´n}¼­]·ž’Úšµë¨6,éñŽnÝúõÜ&°èvAÛE¶ñF>lçiÓ&>¯°àó¤‰Í¬[K[¶neÍÁüÚÛúÁ Òó‡}”¥ÿ¸?|ô±¶LÑ}(sk÷3§ï9–Ý?ûíßtÊ)§Ðÿý¿ÿ—þçþ§F쨱ï›xÙyô?5øÛÒlÃÓ9€þýûï4zÌXÚeׯ´MÃF[µS«h´í¶tÛm·Óo½M÷?ø]3ê&º–íºQ£éºFÓÈÇ(Ãß׺Y=wÍõ7Ò5#o «¯EWÁ®Ågî¸özçq×®ÆkÙôûðÞkù3<Ã÷ÝbêX²Í÷~ã³ÔwĘ>óV+Žvå5ávÅÕ#)c×ñߎ]~ÕµvÙˆkvé•W{vÉWìâË+»l„g]z%]xé»ärºvñeÊ.(»”οȱó.¼DÙ¹”);û¼‹èìá*;ãœóéôsΣÓÎN§žy|úÙtâ©g*;þ”Ó験N£cN(º÷þ4d¯ýhðžûÒî{ìCƒ†íC» Ý[Ù€!{*ë?xê·û0ê7h(õÝͱ>‡Pg¿AÊzôÛº÷HÝú PÖµ÷êÒ«?uîÑWY§î}”uìÖ›­—gºö$Xû.=©]çžµéØ`%º*kÝ®³ÏZµíD­ÚtTÖ²MjQÒžŠŠÛSóÖíÕyÅyî­Î»ÖAWÖ¬[ïþÔuÒƒ5Ó“µÓ«ÿ ÖÓîÊúî6Di­?knkoÀà=ià½X“{±>÷V½B·Ð/t¼Ç¾²¶R­Ã ýývìîrßÐv0÷—C¸ßÀå>t÷¥Ò£Ž£Ã>^Ý¢¿yvÿÍvô '+;æÄS”{Ò©êúܾc'5p{â©§ÙžñìIþûɧŸõì)þ[Ù3Ùöô³Ë)Êžyö9‚½¿f-¸ê*5XfùsôìòçÃí9~.Æ–?ÿ…Ùšuëèá Π­«VÐê)+[3õaZ;ͱ l]ÛÊ·°Ù>ž6ží!ú„íïlŸ&° ×á1Øß”á3ÇÓGl°meÛäÚ¾]çÚÚ©ãé½)ãiËsOÓÉûïEソ†n¿ã.ZõÊkt+ó\ÕάŠƒÀ·Ý^N /qÁè@R€4 åɨT-`{• X´€*À0^wÃÍÊF²]Óu3l,Ý0z,Ý8ú–`»™°nâ÷Y6 Ÿ™À®¿q4™6òF>&Ã0˜ˆ²0GmšøH¹k í+î«Öl—¸VA9ÎW1¤çÊ.¾`¾R™ó%WÐ_îÙye—Ðð‹à‹éð™ç^@§Ÿ èžK'ŸÁÀ=íL:á”3踓àjвê"À=Œ/¡€=8°¸èàäö@ê© `¸â‚ÃÅMÁ•­¯ìW Ø^ýw÷ 뀕/ l€«lÏ~>Èâ‚‹ oæ" ¸òE™ €mÛ©»g²Åí»LCVÁÕ,àªÍ„¬¬†¬lã¢â@ÀšpÅßIL Ó°Û?ýµ1Áâ^d bÀæý`·àA[3ÇviÎ fÛµ9×MÃWà5áëد†/À«á« ð–tx»1x»+ð*øò@JƒZx»°F`]{õóÐÕàí3p°Þ=¾¼{zàÅ1 ¼\Bÿlî{Сt^ ߃x€ªÁ«¡«Á è¢ïyvì ºG’2€÷õ7ߢvØ>þäSlO+sÀ«¡ ððõÀ[Iø¶—_q%Ãöï ¾ÊºQ–ä¤Íyhøé´å¥èÝGî£Õ®½Ï·k'Àî¥ ï¥l›Ù¶Nrì#¶]û„o“Ù8~]¶}ÌiûˆÿÞʶ…móÄqü½ãh=ÛZ¶5î§÷ÙV»¶ñé'èø}ö¤Õッœ 8CsçÑíåwPCŽ{ž-\Þ7ø$–úVÃ3ã¡2ˆ]¯Ïç¹´–«ËÐ °¹`U@s 
Ý<ö6}k9¹­œÆÞ~ÝR~'ÝZ~ÝÊ£Ÿá± »·ì–Ûï¤$6ö¶;È´18ÃFßz;[¶Ý|ËítwÆnå¿]÷f nÀ ‚MÐ&ÚÐF0dzǀÄñððM6ÓÏ@û:†¶ãY_v`}5]Ì^3¼e€ðpïÙç±·{.C÷ìsé”3Îq€Ë°=–=Û£àÑr'/eØšž¬öb•'ëBV{²/6²Ž'뇬öbƒ kz±Ú“5AkBV{³ ´ìÉjSÀ €,€éɺ^¬òdK2ž,@«M{µa-€ ƒ7«Íôjñ·íÙ.‰gUÔQàµçùêãÔÐ5‡®aÊóu¡k·ªÐl5p‹;Àãí¬<^Xtµ·«¡«=]íí†Cw˜‚î@ޏÀÓÕÞ.€]7 º®†.`k×]î‹èº¯¿ñ¦[×® [üm^oe½Ý÷Ö¬¡‹/¹TÁž°ÜèV¸€ígD›^|ŽÞ¼ÿNeï²­f{ÿÁ;i ÛZ¶ ÝI¾ƒ6³muí£ñwÐÇÊÊ铆×ýM½îÏœ÷ßI±}øðüÙwÒ¶®mxè.Z˶†íýî¢ÕlﲽŶññ%tÜ^C<ظ°W^}§f›d`;†cÌ÷Üÿ `Ok‡M•@ û[‡Ž•Gk„ŠMÈÂk½\V+àyû÷Ðw£;ï¹îº÷~ºû¾Ô1Œ{ࡌÝÏ[vï}RÝÃÇÙÝã Ûîw?ùì^O°áxƒ¬ü®{)Èn¿ëRvçÝžÝÆƒ ˜@ÜÂØX(dŒÜNcø¦¡ýnëß»†¹‚8›†7Âý5Bü8¯o_zÅÕTÆð<ê~C÷ü‹”§{ ‡•Oâò±'qùø“¹sŸ¨<[À!c€ÈjÐ"d¦ÃgÑÓõ¼Y+T Èšž¬&ðdáÅšž¬.¶=Ù(ȶC¨˜-È“Õáâ(u@ÛÁ¬ Û0¯Öönël“†œÃ¼ÝXè&ðtn®Œ§kC×ôtáí†yº¯éé¸vˆY{¹ˆ² ¼ àšÐ5ÃË€®^Ö!fô x¹ÚÓÝŸ§Y‚¼Ü0è¢*/×îk ¶ÕE}áâÇhácK|¶è±¥[lÚ’e´˜í1mK§Ç,[²ìqrì Ÿ½ýλtÁ…ñÚ‹iéãOdÙ²'žd¢œŽ?éÂDvÃMåôø<8Ц ޽»ú=ºÿÔ£iÃò§éÕ»ÆÐëlo²½u÷z—m5Ûû÷Œ¡µ®m¼w Á¶°m½w4}ÈöÛÇ®»ì >¦ <ÃýîMã8èüJ~ÏeªÏ‚¥ÍlÙ6°­uí}¾]}ÏXz‡í-ØÝcé ¶µ-¤c† Ê‚ímÌ:Ìá*ÏöÛ4T?°ô<$º¾Ö­ÛábxaðÐི7ßr›ò^á¹*ÀÞ3Žw¿ë}>L÷?ü=8~=ôÈ$?q2=2qJ¨Ÿ0™l{xÂ$Jb=2‘¿#ÚpaöçýÙx~<ÛðÛ`zðp/lÓƒsàßþÜf0èhO˜Š¸Þÿ-| ƒçŽèÎÎæ»õ9ÆœñE,¸óË.£sp¹Ü¸˜·=’çlÑÁ5láÍpˆ37«çc½Y†¬.òd“†‹MÈêùX´nØXÏÉú¼Xöd=È"dÌÖ†½ØÌDz»s²²6l[sè¦çf1?ëÌÑf,ȳ5çiµGk†’õ¼­ Þ¸ùÚ°yÛ¤s¶IÂÈA¯©®§ øfæw3¡å$áe ÝÀùݘ¹]g^—çÝ+^ótý¡å t1õ¡½\3´¬×&àÓ)®†®örõœ®ZtƒBË º.pÛ¶í;Л“|Æß?%ÄplÚ&MA“¦NÏû$Ëð{´M˜<•”MšâÙ#ü7l<ÿvsà  ö€@Ãÿ‡'p»MÈÿÁñ40ϺãáßÅxáh{xÉôÜÀ!|„¨]xº˜ó=ŸÅ§€Ë ¨°p !eÌ Î#jÌ×\z´Ze.~r>9ž¬š“ \ø”™—Å…)i¸Ø^ôä-|` Èš Õs³6dMв°ÖîÜldh1?èBÖ„­éÕâoÛ³pMÐÆÁ6Éâ¨4a›t7jqUeÃËt¹Ü¤sº•nfN·3‡˜ÍUx“„—íùܨ…TAó¹fhYC×ôrET™Ð²ééb.7h>!e½ ÐÕ°}iå*z˜e&òµΊßQÎËdßõ×¢‰|M²mÒ”©|-ƒe®i“§N£V¼D§žvºšOÅýÉ|ýÓ¦¯™€@›ä^;•¯¿Sgø ×ñ/¯¤;ßÞ_²ˆž»æzm…k+¯»„V]w1½ÆöÆueô6Û;lï,£÷¯/£ulë•]D]»óÖN¾ˆð:ç=xÿÅüYóg:¶ší‘Ó[lo°½6òz…mÕµ—ÒK®½È·ïTL§Ò¾=³`‹P2v«T ¶mпW^y…ÆŽuÜfx³ðÆ&†W/€&3¸¿³*x„†QFbu-´,{$†‘Ùœ¤fîð=¦Íæû¡æŽ1rÄ1j›Y1—möšÉ6Cÿ6ü>˜9ˆ° h‡),>{à‡<ƒ~Êô,kx;gÏŸ à~püD^U>žÛÁËíó€ÂùX†ðò<·«‹2V-c…2V>q̉lÝ0²Zeì®06ÃÅzu1F÷•ñd£BÅldõÜl`moVÃV×\iìz²²­ [Ó«5CÊæœ­½Ù %yºUY•µBÙöD“ÌÏVÆëMÓÓÅ C·²àÍZÉlyºA ©lo7ÉB*ÓÓÕÐżn§kÏçjOW‡–õêå /ÀÕÐÅÊe¸X¹lÎç*ؾþ†òlŸqO§=ÀýÛ±q˜‚C´§áüÆÑCD­èm"ºèÚCãù­Œ£lËyáÒI'Ÿ¢ òð# 
sAϰ7x…”5Œñœ<2‰†=ÿ‹tç!{Ò{‹çÓ³—Ÿ¯ì9¶ØV\y>½|åpe¯±½1b8½ÅöÎUÃi5Û¶µW;¶Þµu|{çðó²€ë-¯]Y{Õ¹ê=úýk®>V»öß¾uÕyôÛk#Σ•l/³½tÅô"ÛólËÙÞš1™íÕ•Ûé=5WkZj°u²\Àرd¶ün´§ÂCƒ' ÏЄ,B],X¸X…°"Ú³Eüw€Ùaˆ û ø³²Œ¿ߣm>‡BBÍ™Ì[°ˆÑ±¹|¬æ†gæÌÏÝTÌ]À€_à@}Žß RÆ×æ{ƒ›mº‚õle¨(€‚ôtƒÚôÀÊмð”ïcè"T OáfÌý"´ ‹©°’yø…«ÕÊðn;ù45w‹ER•òö7|¼—»§:žldõåÁZž¬³…Ç1„µ7 ïU Y¬6Ö+Žu¸Ø…«†¬ Û0Èšaä°ÅQa° ›¿­Œ— èÆmª H“¾6(tÔÓ5·©ßÊž.¬ºÐ [ÁœY¹ìé†Íéš ©¢<Ý •Ë&t5põ|®oXhÙžÏ5P™[…^eضi×A-TºýN¬}Ý̓imX s/Ýé3ž†â¾o¯E¹›§ª0¥§¦õøú`š9ÛSO?CÇaîVÝ»}klT0ûùçŸé—_~Qã1Óìçü]{€Úð›ÊL«CO]t–²gØ–³=ñYôâÅgÐ ¶Ul¯]r½ÁöÖ¥gÐ;lï±½»ì ZkØþûN^jŽƒ‚ò³Ïá÷œî½ï…½ÇöÎeg*{vé™ôÛ+—œI/±½ÈöBÙÙôÛ³lO³½1õ:¸{§ôa‹p³öl5h·Y°h1g‰.ü¯ Ú›…wŠy€G¨‹ó¤>Oà/]ö¤2{¢÷ã üD¦&ÿ sþðúƒiÃn¢©KÅQæ,2¸Îâ÷žu¯³à¶ÑoKèQžkðLžanš ws¾Ïcs Íž»aÊc7¼qí…ÒïÚð¬µ7­ÀìxÐÓÊhg´7<^\ßã| ¬<†çsuHádìóÅþ]¬R>•÷â»=ša{ø1Ç«…QûTÊsµ‡©½…¥û÷É&kÐêùX2Îòb­9Ù ÈjV-€²æfM/V‡Žd]ÐbKö€íhÛä¾íhÛL¶Ù}ÛÒÜþmiAÿ6´¸ -PBO ,¡gØžg{‘í%Øn~sèÁ¾cÔÇ9š_a½÷Ÿg[ÎöÌnmè ¶%ùû´¡G´¥yÚQE¿v4£_{šÆ6…mrßöôÌÈ+iðÎÛÑjžŸMͳÅÅYÃÖ e¯Ü-Àkz¶˜;„W…‹=.ú‹ÁV¸-¼R€«ãÙe?Å+Þ¸ mÆÍ{ÐöM3Ü×+Hß?\AvÈM³øsyžkÓnʧ{Vã9«ó2¯]¼ä~:‹ÐYã–Ñ"†s ¹v@lxäü7Ž1€¦ÑuCø˜®Ÿ–ñ¦Mï™%Ø3Âê¼h/e:ì­ÃÚ:¤m„¯á ÃëE{cŽó:#Á»õÂɼp ó·˜Ç*eìÏ=çü2µ:ûo±çsEH^±/‡ºö>à`­“Âc¯¬o…±µO66…áÅš«‹mÈš€ÕòfMÀzûgyÛˆ Zu߬ Z:6XàosÎÖLf[{ßmÜV š‚­½ 9)lÃæuƒ¶)Å­`Vóº!ž®½ *lë/›Ðm®B̼’‹Ûp~]è&™ÓÕÚ3W.csXr ]½rÙIŒ‘YD6Ÿ«CËI¡ûêk¯S›¶í•—¹×~*Û‰5`ûDûx°2{PÔâ%ò Ãjì>’A× ¡gï¾|[L¥GÃvt–á»AðKÎ$ˆU¾A€7Ÿ?‚÷ïÃl¸ßÅ‹@¯i¾­¸ëVz¨[+z„mB÷V4™mÛŒî-©¢gKš×³-ìÙœ–ôjNK{7§§Ø–³=Ïö¢¶>ü7Û˜ƒTÇóùçÎñiÃñàXGóó/¸¯Åëñ÷r¶gØžb[ƶ¸w-ìUDó{µ¤9½ZÑÌ­hjÖ4™m"Û„î­é©«/¥Ai˜luv$¶€, Ð…´>ز àÕbÑôjNÅÜ,¹ÃË$Ú'°_Ì2½q;èvÖè=¨Á°±4“—Ž/Söû‡t΃ ìYch˜áÙžý ƒøAñði)/Y÷™÷%ʳ~ÎfØž}¿)¼ö¸[Êž¶2Ò¯Y{ÄÎí º~Hׄ®ZF_23QièêT¸µçr1Ÿû ö¤m;âÝ“#M{óÔll3‚ñ ÀüôÓOô¯ý+ÖØ: ´4dE¦+6„¨Û…€íQØ àrö+Ãð]H ûé§ŸªÛ Ãs0ÎYì!vMÁ¶h{zéîÛh|÷b†X1옦ô,¦él3z¶¦Š^­ôönAKû´ e}[ÐSlËÙžg{Ñ0€ÇöÉ'ÎwÃN8Ù¬¸/öã÷²½À¶œíY¶§Ø–±-æÏ\Ø»%ÍïÝŠæô.¦™½ŠiZ¯šÂ6©g c =} Ãö¯ª[Ö^y¬AkÞâ5¼z‘”Bvæjç0$î¤Ó‚gžuŽò@‡Ý8‹–1÷0C¾{Ž¥Y:YÅØÀçfÙ“¸F‚:6|Îð‡ž¦Ycß1 ߇i8Cwèè ZöÄÃt¿æœ±Ÿë!þ{8=€½]³ñ{Ò¹ÃùsôwïÁïå=`ËžxÈ-¯ÒõžcH/a(/yüA+D=œÆ©ýiÎãgŸãxÖÊιŸ3iÔ°?¸mˆÇÏ¡{°?î^ç÷{vÖ½î|ò½t¦jŸ³½çœBó&^OƒÝןv'‰M¢«ÿv¿f¢o>s¿)Oåù\,ªz˜·La%ó=ÜA±…°Å¶ 
‘¼ž-ÂÈz²K!Œœ™³=Jyµ{ì{{´û:iÕÅ*ëS5]0ò}wЄþhÂÀ4™m ÛŒÝ:ÐìAh.Û|¶ElKÙg{rpzvp{znH{zqH;Ïp\~˜9&€vté¡ øúxq‹û/å÷±½0´=-g{fHzŠmÙîh Û"¶»w¤9l³ÙfêHSwëHSØ&²={Õ4hçí«[ Z¬à@“@Vô-Œâ‹?ÿ(Ø*HžM÷²×÷ØÒ•7:lô7Ïç\½ôüñœçs<‡çÆà9dI™c<Çpu=ÛYH[Æp~p¸ûZô°ç1T‘ÎlÝ< °Åßïtólþ›a;Ç8ÃÓ5Zݦ (ã‡ÓýðzÙ#nÐ`Ý8Ë qg6;ðЛg)Ø"ôÜ Á9t/BÏ÷Ã¥QÓùoÛ³z# áßuæ½1cÑÖ½Ü&3OuÿÆçœEw²G;aä`þPL×N|”Æ_Ë÷w¿ŽÆÏŸH×ìÞ€v»f‚·¢Ú[1Íí¬çm1_ØbE8­!y²]!CU¶—ÒYœGùdÎ(užó¥Ÿp:ð!êÇù£ÅÃ}Øq'!ãœcÇŸÌùÕOƤLòˆÈ¿OÆšäf϶û8'ÿÕmv¦—¾‡&ïÙ›¦ìÕ‡¦±Í`›ÍV±WošÏ¶pß^´h¿´”í ¶gèAϲ=ÇöÛ‹®>êßqá>ž}töã/Èïc{m¹kOíϟ϶”m1¾wþÞ}hÛì½ûÒL¶élSÙ–aGh׫[:¶CéyZg®Ö1Ó£Õ^-Gy°e ž– Û3îyŒ3pù¾sÔôryr^χÏãÇÎã…î-¯Š{R›~Œ_ïx²Ú ~:Û‡ø}x\ÁÖ¬÷·ãÙe€Þ4 ž¬c÷ŸË¿ç܇<Ø"Ç—²7«<Ýœ,*÷ Ïx¤øýj~xÙÎ<ï}Jæðòâ¥÷Ó™|ÿLÌû2lG!Œ|à µÈjò¨!Ô`È 4ÉÝà àNÉ ExÐî#';Í٣ݽÁ`ºfÂ'„|çéÜ6§ÓmJVÛ’Ô*gwkæpgó½NÔ^ó€KéNÈqåQ|ÌGŒPûŸ±@ ¦ô~Ûá\Ä@­FfØb52’¢rÄÑœÈâölU ÝS-¢P@Xîâ´!kÏÇjÀêäj!”Îiì.~±A‹û€­ Z Xó_ ÚªÂÖm]‡mu@[™B¡ƒ†o×^Á5¯¼W×-AU ‹ÜËAsºæ|®™þўϵ3QÍçš ¨Ì\Ë€-æl'MžÁ •ÎàLoئwª²#E¿åçü¸iÇœˆí|Èw–²ãNÂŽ†ñ)X#]ë¹ÊN<:vÒiçñš›‰4€³g=þijœiî|Ÿzæœ çε~aˆ]Ä;vúÙ;ãœ2Þf˜m=2•FvhF«&?HÓJ3Øf6Œ*ØæÎvÄ0ZÄöØ‘Ci)ÛlO3”ž=f½pÌîô±»Ó ¶—ÙVº·øÛ´•Çñ}¶—Ü[ü Ó¿ÈÖ;DÙSüùO5”–±-f[tä0ZÀ6ÿˆ=¨¢tš ;jzþž[iX“]*[s~Öôlí•Ǧk‡™õÞÚø"ä ˜³luù4þ°Óïv¶ú<¦<@„v*€%`«oêOxé°±†ªë騩.ž5z˜¨02«»sJ:€ÕQìI k/|2·ïØs²aõ­2NYg^Î4@׆¬¾àúnz·AÛ€‚’]íà ›Ë òr£ `7É*åÊìÅÕ°¬l"Œ$û{õqÄ.¦ra‹ðruC̺òPVV*-«ÁU äé†:À|®žÓ5QÅUÒÅ tƒ•¯¼Jºtåõ<^Ðx™ Êá| @rbšÓÎ÷ìÔ3xwi ½S¿³±6õs.æ<éh_ªì,mÃ/ãH×e¼‹d: ¶/Wz¿¯– ¿Û¯äŠc#‚í"~ܵóË0]åØcQ¦ß&MM#»•Ðë3'ÓÜ“£ylóO•Ò¢S£ÇØ–²=~ú¡ôô™‡Ò³lÏŸy0½pöÁôÒ9Ó˰s¦W\{•om8¿†íUËðØJ¿ïEþ,ØógJËÙža{âŒCiÙi‡Ñbלz8Á ?™–k×JÁ9èí³µAë‡møÊcäåÅ< Î…=]XŒ‡X 3q2oû™ænû©¸CÁäô»°í‡W#c› æ]yNÔYå„}šÊåçÎÃßOó¨ ºI=ÇÞ'ƒt&@ªá ¡àa¼:™ÃÂ÷±—ê@f8 Uyz žé†‚gަ!ðÌ®já“{ßY億ϺO/‚rW'³×zïÙüž³1ËÛ•¦ßHƒùuðXñ;”'«à Ggàw²¿`¯FÌÞ*‡‰ç3`çOÅÞ*¿ï:ž‹U{xïVm2è:Úü»èTþûÔ;œ=»\G»5ØFŒw@ÝvÃö4ºeöxºrÐhЈ‡½-?zîäiÓU–*$·ÐÛ~B¾…³I¡BÈXU®ækyqÔYÃ1BÎ!¡Ó9”Ä!ä#ŽUé÷äEC8 #¼Z\$PîÎ\üXæ.áÙš‚¬>6ÃÆ  É…BZ´BYï¿­lH¹6a«çq«;Ÿ[éÌTls]œ'5ïÝ 4f…!{»Pü|nøV!³œŸéå"‚µÏþ¼ðç„y-Ætî¿×ÓYç]ÎÀ¼Ô3@Áιé+3Æ`<—á7ܵó|Ê|ç+øa±$Û%×*›:cí¹ÏÁôÜ +y?þuTvùõYv1ƒ¶K®E¶]:âÒvÿíÙU7rÁ”ŒÍ¬X@7öåT”ófÒ¢óN¦EçŸL"-½àDzœíÉ‹Nâ}·'ñÞÖ“è…‹Oâ=·'ÒË—žH«.;‘^½üeo\~½iÙ[|?Ë®àÇì5~ìµ+N Wø³V^Οɉ¼¯v-çïÄw?uáI´ vñiôäˆ 
èe.”ðÐͼ³ãýUò]öT70©Eh5lõ*csÕ±¹òž.âȃŒ\½wÜ5Ž7I»[ø‚„*k” %ß®`Ï Ûb¦EÓnbð¡Ù¡£i:öÜ«dõ=w3Mã9gÞôåqªçÜç$Øžz#oñ9÷•x;³Ÿûxµ¹O¿‰A9”ó–g‘’æU^©'oëQɾÇe@ÊàÉ “ô{à5ïÎ[zœyWxì Sµ¢ØõPïrVÏ»óLwîõLº½ªñ›O½ƒf*oµœNáÏ9ù6cKÏC×ÐÀƒèŠœ=´ÓÆžÂï;…FO€.Ø€\ñ€j[JÒÙ_˫٫Ŗd‘R«9£Z\Ïu‰Q «‘» -”W{ª³ ™£àÅ_aÅ$VQb{@«kÊš{eò=%¢¨Ê¢§¨ùØÌ¼l°[%к^N˜§k¯B¶H%õlmàjO×®kg› ªåÝV&åc\è8-à†>ˆôvC<ݨUÌæ¼ne¶ UÖÓ +ë´ˆJ×ÐM²?×^µŒ…ŒÈ]Œb¥G©¦•–<þ4à—ò€~);/Ë[ {œ¯­Ž-Zò„ß–>ÉÓ\ڞ⿟âi0Ç–,{Ú1þ\ØA¼ðjëóµøÇžx†§ÖžõÛ“|ߵǟ\ÎŽQ€=ŹöÄSÏñúמæ[×nß½mzå%zsÆz›íл3{íý™hÍlÇ6̯lÛf×¶ðmmådzl.?`[ø±Íl]Ãw¬­˜@kgñ÷²­Ö6w-Ÿ6Ƽ†ëÖQÕ ÆöJ [íáfÁ^Nl‘a(hN6hŽ5i¢¼å6'²JjÁ‹rJ†w;€LGH¼ ænuö(^V®>M%}ûSo¾5 …Yß~¨RÖ 'ÏHg´oN‡·-¢ÃÛð-ÛmÙÚÑQíZÐQí[ÒÑZÒ1ZÑ1]ŠéØ.%t\W¶nméøîméÄîíèÄí]ë@'õèÀÖ3ó÷ñüø xÎz ÇŸq\÷ötl·vtL×¶t4ÏQlGv*¦#:¶¦Ò­è°ö-”Õ³=bZþü \4þnh5tÁVƒ6(rlá1¡ä.ê'›y‘½¹[7œ¬’[0`œ=·\q‚a¥S6šéãR.: H»È£¿y0·’…Þ§êK»È 0zÍD7¡„——Ûj@šæesòK8©ͼɘCÅ^W/?2IC£v\HâÂÅQÛwªëÉV²qÞ®^¡,°m¢Ò=Á6ÍÐrä¼® [7 ºÕñt3íàj òtÃc˜†ô"ª°âõðtÑ·Z¶ó-‡%Ä0kçÚ¨€F—ð³ ØåûT­\£^îQœz†ª_0$9ö¤Syõòiʰž†Ê`0ìLJ! ®#¦rÆÙªl' {÷mCòm˜Â2íÌsÏçh[°am‰iH1k*˜%1$ð± ÓiAˆVÖLè†z¶&hMàF•ÓC¢«¹€ü5#¹–-Wy¸7ߢL¡®êmåwó®®þÃó¸ xdðÎÀ`¾PdÙeóbËä™eóŒryÙ%ò0·i™b¦4žY&ÏG³<ƒÒñ0Ù(¹Ò§ETæÖÅõÕÀu=N]O“çRaH®êØ24501ÇZÎ…çafÝZU¯VšGqymå*|ïÔ¯½EE°âZ^u‚PÝÅhXoñ9å ^îªSNi‘ø…á÷Ø™¢xAƒV­×^µTà@/ 2KøÙusÍ Tfa»’ ݪе‹ûºAÀÅcaЭpߪN¸° mØcj~÷*Ç®¼z¤Jû§{íõ7©‹ý 7UÉVËó¸¨8s;Ã@¹[•‡ârP¨ú  È£.cp!øì¯Z˜e Àgc̹b`ƒh¶ï¨úÂnýáËù|àœ]Ê‹ŸP>  .Æ<³’ÛQ^.<\ÓËÕn—[ÈÀ„«âáfÁð¬ l/q­[¸€îU®—{ ]„–‘DåG3\à•</¶£¾N(垜Ш‚™öø<ϯ 7 ;ÜÂ3ÔVÎ+£m»=m|¿2ÎÿiÚm|lÀn¹Ý1GYtA ϰTÀäß ÅÀR“s–0 H`&V;Àt¡éž ¬†]zåÕj‹¬ì²lWò2ü+”§ŠÕ‰( Âk=—‚µ®§òÂ'U^Õ©åïÇä…gOÖ-§ ë&­PÛ{´½8¬Õ«!‹ CÈjoÖž“õÂÅ)A¶²žkÜëí9Û$àÍÅ®#h…²éÝÆy¹q ’T 2Aä‘F…–ÓžÓÍš×Ý¥)íÀ¦CËq!æªC·˜uHÝ ð²™ÊLŠ6Ÿ›$´UÜ@ïËÕÙ§]„•aa^n®ÃÊQ^nU<\x¿u!¬lÁö µh&1l´€­ Ü,O—½.x^ðÄ­häΜ.¼6„;'x½Ø.ˆ•áoÃð|˜á3´aŽR ¥W'Ä0C˺As¹I¼\3¬l{¹Gò¢I3´¬½Ü¸yÜ °2`[aå$!e¼&Wó¸ÿøìsÚ¦a#jðÇmÒç_$„­Y Û [ ] „8¯ºŽ=^/¼8 _ /x|×ks=@<æ™BÕ¡Tó ´ioÑñu¨Õ‹ï¿z$8žŒañ¶?9[ 2æ„e¯S°¼ °d/Óó4˜XÕ{±ámÂãD’E çl"ð8•]À^'àÉ+€ag²÷ ;œÆE-YPÀ“@°€çq O¬@!vä-D‘Rñp^à˜¢“ Ó 
,ŒÅvb¸îÃpÅêbTîÑ€EŽc¬4Öó²ðd{ôãm=.dmІAÖ^üäÛÊÃéK\«N¨8Î+­Êóq«“x¶Y¯I0‡W)ÈN„V„>l×Þ“[EékÚÓõy»|ÓòvƒÃËÙž®^D¥“cØeýª2ŸknJ²€*,†®&×,l`{¹¸Ž$®^¬VNÛg–?¯öhyûµô¾-÷y:“š³ïË4ÿçêÏQßñ8–¨ï|™÷˜e[fÚË+ùoðOí¥—[áÙJþ{%½øìezAÛ þ{ÅK޽ø={a…²ç^xQÙrop~ö¹Œ¡aO/Žž~Ö±§žY®ìɧŸUžpíq®l¤ÒRº…T-.| « !C–*r  Ús‚•Ð#S¸^«Wæd±r’møëáêíU*ϲQŒ^m‘2s-»E ô¶(sï0êå¯&ŸajµyÀÖ)ßjsÎ2†LciV¹×ˆ«à‘¼¥ª†½Öq¦·”9·Ó²WÓs’Ô2¶m¯¨¯Œa{šß&ó}¿©ýßqÆ‹ÇGYÜû+ù¼}Œê¾»›@߆¶Ãdn#6_ÛMá¶ô™ÓææyÐçÌwÞ íAÏzŸ=ú€ê'³`¼/6Gm=DC?DT6—·Er?…!}«®i­ûµ“­.“c]å)à¤ó_—ó}e|½|îy¾~Úæ^Sk+®±{þE¾î&4ïš­¯Ý o_äë}–1 ÀÛV0+çkvЪXØ¢®y=5nZ$&m  ˆD¢ Œ¸êš@àÆÂ$@µ¿VžË””¶¶ ˆDuSÿçÿüúòË/éÛo¿¥Ÿ~ú‰~þùgŸýòË/ê>nMûõ×_é·ß~£~øvmÒ\y¶w [žëØÖMÑHg—ó& ˆ*§4ak/œ ‡­»J`[¹“%â–ö ˆDuSiÃÖn6l±ÅÅXq,°­›¢‘Î.çM4  TN¹€­Nû˜[lÏÁV ܼ„mi9­X±Â±Š2™OFáûÊX÷2ª(/¥ÜŽeÝ+÷ÞÊ|¼VÚV4 ¨C¨UØb{M^-(VTPYw0¥å+¨¼4lº—U(@'}}¥V‡ÕÀ…-Ú£&Ûçj…qîìö{Þ|=^[ ZƒÐaUŽ?éûëjûDõ9é¿É®s{ݪklZ°Å6#»A¨g«TÔlË*Ê©4¤aÑá|©Jzg5˜¨ãÏ‹Ž€È{¶5Ñöï-«È ”‚Ú"îù$íWÛíŸäû³t\É IußÕŽIŽ?ÉyÈÅkjC³¹øò™ù9pˆƒí¿ÿýoúÏþC¸Z Øê´‘addX ƒmY…¾ð2£l?õkœPo68CŸW^«6nMï«´\_¬K©Ü}†¯ó¹ü|™f¶<)ÕYËÌï±ŽÏ Q[Þ‡7².7?ßx‚ãG' üýú½ #çwñçzÇb·¡þíგ°Îìx–U"{þ#ÚÏùí|~JÍö÷Ã7îùHÏ.ªý+Ѿæù)/uÛÚh#_òãÞ!áùG;T–aïOâùúôÇç£Bÿ¶J(,ìÏ0µe<§ú´§¿BÛ×m·œõ_5àÉ\WÌk‰7Àú}î€)´}cT±×¼?¬%Õw‚ã¯Ï(Øbk@Û«W¯,àÚ[LØêr}~Ïöó/¼|¿:¥a g«O¸îH|_B³=Os^5îyç‚ [/|ly¶ŽÐmfî;‚ÎtîlO¹ÔçUãóLØ;ï÷žŒ:þÈßïv„Ì…ÈùžìÑ|lís£>3{@cÎ \*9ÊŒ8ÿ NfTÂn?o@äN¨¶q|qÏë A¬BÛ?AûúÏOh1ßí^8m=Åé7Éñ'¹ØÅÁ: Ʀ^جÁVu<[;2¡Ú' ¸nÿ3χ۞êx"Ú7×ý×§W÷øÌöŠû}v»µo|¨<âúÕ¿éÛ?°É:?•Œ°$Ñj]zMl5hKJJh·Ýv£ÓN;ÍÜ8ظ>Ø~[N{˜5gºÕ o¬æ]Ôãž÷F†ñ°õD[{Ò„U¸ìß0º·aëûü€¶ˆ¼Ø-B_û}ÑI:œÎ”ñp²Ã¶ú‚P؆Î7Æ´Ÿ _í噃µ¨ó—V‘íÕ¾8vÛËÇÀÂ{ŒÛ7A ¬â`wŠ{øó¦çyª|ÄDoˆgœÝò^gk3¾}sÚο¯ý¾øöƒmäõ%ª™ÇïNùúW¢ã¯äÀ»Àà[ ÚvíÚÑ¡‡J£F¢qã¸tëwxÀõÁ¶is•Ê×®ôÏ>ËTý±a‹äü+󶾎m\ƒæü’Ã6+ó¢^w`×Qì AfJ îâî{>tž<¾ýâÎOÜó5[ß8ùîÙfgß`ÂÑO’ÁB°^’µ õ¬âß ÛxýE¾?d°•XÆ_’ö­:lc~_l"O0X¬Ôµ À¡ãN8FŽI3fÌ eË–ÑsÏ=G«V­¢åË—+àÁõÄMàÂöâË商­ž“Ó!B= Ãlø.¸*d¶5Å¡Âa±Ju¶€0WìÈÚnëø³abüþØÎ¢aZõ9[s¤[å‹jTd#`ŽÎ¼ fŸ 0rÖgd{ZÑadÃc2Û?AûÚçÇf¯òuÎ…ÿ7†|¿qaŠóLã.xqï{>K†¤¿3ªÿÅWÜ*hÿ4ŒÓvvXÛ5ñ·o®û¯Ý>ö´OÜïKÒ¾U†­=°¯O ôwüqç·ÐŸòlÕ Ãb©0ØêJpNÙòlu©9†Ê9™0r°g䯄ƒBMYÛO|‹¬ mÀÅپЕó"*sЪ{q4¾'è5˜ÛÕŸ…ïHò~%ÆØã÷/PRÇo¶›»Ö^ˆ–ùÕƒ­W¶ñçßl#»ýôÖÿùñoã2ßãœC3ÔüýYaÊ öOÚ¾¶~y1œí 
ø¸ùA}þ¨·÷þ¸çíÅyÁaäHý&ðdìöÑç'iÿ kß$ïÒ_’÷Û×§ , ³ö¢‡ý>'*`/@L’Or|¡¿Ï\ôsý¨Î1ÖGØšù‘åFvÃÈðlÍ‚õl?gÀrtÃü° SÖÌó>ÁYs il)tA9°•R²ó,픬j¦ï×ô±ÄEjúxäûr«³¸­?I`Û¸2°-»ìJÒöòªWò+©EÄÈÚ¿ä^2KIǬ^Ç´· H{V¯=ëJûÉu¤~œç = l„®êJG–㬿Yνœ{Ñ@~k fa{ÙÏ«…wûòʺãÙŠó[Èr~êðù Ý6¢ç(“ÏMŠê° Üñ©Qتðñ¥Nù¢K¯ —V®ª3adéÄÒ‰E¢Ñ€h ªH ¶Ï¿ø}ÞE ¤>û\VÙ%Ž lE¸U®¼O´# Ô% ¤ ۳ϻÐnÖjdíÑ*ï–MÂÈÒYêRg‘c½ŠDUÕ@Z°E)sëOÀ>ÛϽð±·Yæl¥eÏÓTµcÊûä¢.(, ¤ Ûs/¸8"ƒ”F¾CÈ0_Ùª8TÑG‹/×™JrýùµÑ‰ôþᚬ1[¿S¾³°.Pr>å|ŠR…­›ÔB§lô…‘Q•àÂK.§ .¾LÝ‚æl«œÀ>¡‡TùÌF…#öäE ç7JG•ß!š Ôm ¤ ÛsÏ¿ˆ”e¥kô`뀶*°5³;…U†©N=ͨÏ÷<ðz³IêUÆ ì-R¯3»RN\Êó25! ä¡Ò„íð¬0òçFÕ7Œ È^tiÕ`«GvaiÎìÇ«ZO3êó£ ÄÕ«Œ™ÆÕ³tr«J½Î¸v”çë¶ çOÎ_!j -ؾ¸âel‡_˜1_=[3Œ|ÁÅ'W=ŒžS4¾Þc’0rlCëAÆÕ«Œi%­)õ:eÔ§%y^4"È; ¤ [=_ èfÁö‚²Ëèü²KÕ¼-lÅËÙI-ì9Û yÆÄ ¼«XO3-Ø¢ÂIh!ô¬Î0)½±LêuŠPˆ€ü&Ñu!j W°uæl0²òl±8Š«nÙ’,ª l“Ô{LRO³J° ¨·k׫ŒPÜ*h©×™ä"W"°ºÏ'9yMœÖåyÑH}Ó@Z°]ñÒËtþE—Ðy]ìY`ùBk‹Þú“©Ýèxp¹®§ýùIêA&©W'0©×iÖì¬JNÜêÂ4îýr‘ŒÓ°‚=ZÔ²u¬ÐçlE•¤´™´™h@4PˆH ¶/qÙ)UËb•$êŠ`Øf<[Àö #K9ï6ŸbG—ß$ Ô®RƒíÊU™Úðnø@ÏV÷R϶vO¾t>iÑ€h@4P3H¶Š£ ZÍS ¶_Ð¥W\Eó‹.áp2lå*ñlEè5#tigigÑ€h 65lᤂŸ`©æéçþ9[†í•WÑ%WŒP·°•¯¼šÇad7¹YyÇ ùÆezªÍ*ß-Ñ€h@4H ¶pR5KÁSØç_ ¤pçò«®¡ËF\M—¸FY~ÃÖÍZÛÚr’B µy|òÝù×ÉåœÈ9 Ô¾Rƒ-;©à'Xª- ¶W\u­ó¢+ñ¢ØIöW¬X‘Iâ¯+ÝTTP9?®ÊÜy¯õ§ô3“*”—†{§â³*ï¨÷°«§«¿Å2¯5Ž/ª^­á5ã·+ãïör9‡Tò#¨}AI§–s  ˆ²5:l™£Êqe'öó/¾ÌlýÁ׌$WÛ*;ŒÌp+5VçÕ‡UpQ u f*ð'þ¯$hUîe·V¬Y>À³LC¨ú}|_1®^­ª¿[^ê­Îu€mS†oUòKç—Î/ ˆjSiÁܼòêë<Žâï/²a{=]]ÍÀe[õêkþ9ÛïÍóÜÌz±š %¶õd”“†ƒÊá}¦ øÐÒyaeõbëÕ&+±'°• Fm^0ä»E¢ªi 5Ø27•ãÊ hl¿4<[÷ªëFÑ•ü¢׎T·¯¼úº[Û³ô{­ª¢Žá-&‚m,Å’kØÂsŽÜóÎ ØVMèrv ˆjSiÁöÀ–ªíªk¯·`Ëä½zä ¸Ú^}í lm0ºž`bÏ6 ž¬ Ëf.¬„÷‡hƒßï *íÙòñÅ­bÎ~Þ9Vs^6I=ÞÚ”|·\ÐD¢Ñ@îæl_}íuºÚå(na_~ùUfÎö ¾síõ7)à^3òFe¯¾þ¦/Œl.@Â\ey9æ,6eìÕêEC»ó¢æB$$;T[Îó¥‰a«ç‚zªx¿»P)¾žnp=\{SX½Z-Nûù¬P¾Ed2+Z.ì¢Ñ@]Ð@Zží«¯¿Á,u Ãß>ØâÎÈFÓu£nöìµ7ü°M¿Á’…fÓÿ^¿´©h@4  d4l_cØŽ¼!ÃQ0õ˯ ÏwFÝ4–FÞ8Ú³×s[6¶ÐÈI—Ž/ ˆDµ¥´` n^opôúÇ0l¿Î„‘qçÆÑciÔÍl7Qöú›oåq)em‰R¾W´' šÒ„í š£¸eûÊ„-îÜ4æ®cxño¾-°•ª?RõG4  ¼Ò‚íì¤ÂqÕ,ÅíW_“ñl¿úúk=ö6ºIÛ˜[éÍ·ÞØJ'+øNVh#tù=âuŠ*¯Ô`ûÖÛÌÑ[=–ÞÌLµ`û ¹¥œF+»]Ù›o lE´•­´™´™h@4P×4lÁMÍP‡§åôõ7†gû5»¹·Ü~'½íÏÞ~ç]ñlųÏV4  ¼Ò‚í[ÌM“£àê×ß|› #ƒ¼·–ßåÚ|{'½ýÎj­t²‚ïdum.Ç+^£h } ¤[8©KÁQ‡©ßøaû-Ý~ÇÝÊnsíwë'lã2I‰ÐÓº´©´©h@4P›H 
¶à¦æ¨fê7ßž-È{ÇÝã莻îUVÎöîê÷óγ•ÜÃÒ!k³CÊw‹þD…©´`ûîê÷<Ž*ž2W¿ùö»Lä½ëžû”ÝyC—íÝ÷,ØÖf=Û„õbýé3éǹ^Êÿé„þòxqõpÑÉìdvY>鈅Ùå¼Êy ¶Rƒí{ï)†Â4S¿5a‹;÷Ü÷€²»Çݯlõûküžm­Ö³uNt”g믗ëæR6r/;Àµ‹Ågç/+d¤ž­tÈÂîr~åüŠ SiÁv5;©÷Œs8ª™úíw†g‹;÷=ø°²q®åXÄÛ ÔŽÒ‚íæ-[<†VÌÏLOÿüçOØâÎüGѼ iî‚GiîüGië l¶[Ñ€h@4PðH ¶[¶~ ø© LýçO&lùΣ‹£ù 9ÆàÝú¡ÀVF™µ3Ê”v—v ˆjRiÁvë(~j[°p1ýôÓ¿2ž-î,Z²Ô±Ç–ÒÂÅKèÃ>ÏVF´?¢­É-ß% ä§Ò‚í~D2?ÁP°ö¯°Å¥Ëž %l-}œ[ö8}ôñß ¶ÎV£ìtZøqÏ›$_ëÝ&©Z$=?;ºœ9/¢ÚÕ@Z°…“ºxé2æ(ìqÅÔýëçŒgû¯Ÿ¦ÇŸ|ŠíiZÆ·KŸx’>þÛ'[9.©EÜóI:C>ÔÛ K7™äøå5µÛá¥ý¥ýEµ£´`û1;©K‚–1C—=¦>E?3_·i؈üq›†êÎSÏùäïV‰=³ì òRê4‰T®jÅr¾b/­¢¿„™\¢œKö©×'L2¡ß‹ïÍxqÆç‡ÕÛõаg[ÊÕBêÙ*ØF<é9FÕÛ­¡öÑ4 ¶aõ~÷š)+3µýù k§#ÈHÚ]4 È¥Ò‚íߨIuú4=É·O2Sþå¶|çÙçž§g–ÞSö÷O?Í»z¶^nd hly©+¨z»^^c#”¬ P`Þ-É×Àz>̲ëãª÷¹ÀUÇjP0KüùëñVn bŠ0 ¶Iêýzm鯀V.p¹¼ÀÉg‹¾òEiÁö“¿JÏ<û=ýìru ¦þò˯Øþ°}î…iùó/({ö¹èÓ|æ‡mÔ³¨B[ˆ ºž-`äÁÆõ†“×»u:Md½]c€°Â­½ë}>ŽÝöðå„^,lãêý|¿„£åB˜/B9Ñb®5lá¤ÂqCaàé/¿š°å;/¾ô²²V¼¤ìŸ}žwõlÃa OпÊeМ¬ß³¬N½ÛÀ6À VGB² °Ø$8Ž\wù|¹‹Dii -ØÂI}þÅÊ4Kýõ·Œgû+Ãö啯ÐK/¯rlå*úìó/ò®žm(l0™a[oÎÖš #›žd•ÂÈ!õvÍãƒÇj{¶ ¸Ô é,ÏVÏ«†„«ùsÂÃÈqõ~ýÏg…°² J4 (P ¤[8©+^ZI+^v \ýõ7¶|gå+¯*{yÕ+Ê>ÿâË<ªg\oÖ ûš ˜°ÍG×®ÅkôÖ=Û \âž÷/2kÚZÐ ª·k†·Y÷5æB/õ;ì0x9ÏG'†m‚z¼1avûû+xÁ˜ÌÙŠç–ç Ÿ#ZÊg ¤ÛÏ>ÿ\9«/1dÁÑ•l¿™°ÅW_{ƒ^yíuzåUØkôÅ—~ئßP5[Ï6ýãÏuç©Ýö‘9Û\Ÿ_ùüº×'åœê9K ¶Ÿñ­b~ÂK™©¿ýûß™0òo¿ý›^ã-zí7éµ×ߤW_ƒ¾üê«œí³­éz¶uM µÕ>R÷W.¦u­¯ÈñŠfÓÐ@Z°ýâ˯”ã †ÂÀÔû`ËwÞxëmÇÞ|‹^gûꫯsÛ4G>C:™h@4  ¤¡´` 'Ž+ –Âþýïß3ž-Èûö;ïÒ[oÃÞ¡7Ù¾þúm.HCœòr‘ ˆ EiÁNê›ì´*cŽ‚©ÿþÝÛßéÝÕïÑ;°wWÓÛlß|#°-!Éï‹¢h@4 ×@j°e'Ž«2æ(xú»¶|°}÷½÷=è~óí·âÙŠg+[D¢Ñ@Ák -Ø~ÍNêÛï0d]ç\ýý÷ÿdÂÈ ï{ï¯ñl5C÷›o¿ØJ'+øN&£}ñøD¢´`ûÍ7ß:N«k`éïÿ±`ûþ𵤠àýö;­tB鄢рh ð5l9"¼Úp\ßc®þÇ[¾³výZ»n½²5lß}ÿ}^z¶¾m1\ÁÇÎg,£ð;†œc9Ç¢Ñ@šH ¶ßrDxÍÚuc–ú`«’Z¸{‚^ã[lÄý2çI-ª –*æ Nó¤D}V>Ô³­©ß*ßSýÊ´„LKˆòRiÁösNs¬Z0Cu¢(_R d‹ºúºhĵ#銫®¥ËÙð†ÆM‹2 V/¶†êµÚí »nD=[ʱœS ª÷麻–ðÃê½zïwËùécɪéë}¶óâu dP" ä¿Ò‚íÊU¯Ò¥W^M—ÁF\«x ¾zÅã?øð#ÚcßiÈ^ûÑî{ì£lÁÂÅùWÏ6ʳ©gë3“ËØNGWï5«Ü^@¹?ñló¿SÉ…OΑh@4`k -ØÎ[° Ù“²í6t/fé¾´õƒ3°ÅÁ{ì¿û0ê7h(Íå7ù<Û|¨gÛ˜Dû‘°Œ+AçVÓñyªÛ¼ É…T.¤¢Ñ@e5lçÌ[@½쮬ïnCO·lýÀÛAÃö¦ƒ÷P í3pÍÿhþÕ³ …m|=[­tÀÊv@y½hF4P?4lçSÏ~»Q¯þƒ<àfÁV{µ qïƒiÎ<¶qõbkª^klãŽ/gš]\Þ_uÇvv½h»ï3ÔrxÝYéÄõ£Ëy–ó,È ¤Ûйó©Gßpû 
ì÷lA^íÕ´½úïΰ]Gõlñ’»ÈÉ-Â1GÕ³5ŸS¡`c1UÔ"'_ØØ¦æZ¸eÎb+pƒêÙÊêC 7‹D¢¼Ö@Z°=guï3@™öp·lÝš #¶ˆ-#| Øöì7ˆ*,ئ?:«Ýz­éÿžü½Éo–s$ ˆ²5lg1l»öêGÝz÷÷€»y‹[ÌÕjÐöà˜3Üaß©GfµU¯UD&Ñ€h@4 ÈÕjäYs©KÏ~Ê\„”a‹ð1@Û]àÙsçå ¶"v»h@4  ä‹ÒòlgΞCº÷¡Î=ú*à"œìƒ-îh¯ íÊDFì9Wžm¾4°‡tvÑ€h@4 H¶»õ&˜î¦Í[2s¶¶=ú:^m—ž}¶)†Í¥3Kg ˆDù«´`;cÖjߥ§‚-<\x·Y°Å¢¨î_î“» 2bÏâÙæ¯8¤ãʹ ˆDéh -ØNŸUAí:÷ð7¶Ê«Øæõué\ét.iGiGÑ€h@k MضíÜÚuÉ7 ¶ðj1WÛ™CÈpgÎÏV:£tFÑ€h@4PøH ¶3gS›NÝÈîÆM›3s¶›¶lqæjÙ«h;tíŰ“7ad»êNîÅôœ8£¢L¼\™» ˆD®t`[DÓf΢â]¨¤cW¸~Øòj)åÕò\-&v1Á;#`«3DÕlÉ:Iº‘ûAMᘥ å‹ò_©ÁvÆ,jݾ3•tèJm:ÂÃíAY°íÒË-¼ZXlk¹ž­*$PVFaõhíª?¶WQïVuëýå\²¯ÜüŒ˜Ï×é$‘¾1“ÒŸÙ—r²¢‚*Äs¡À=MþƒFÎQJ¶ÍZд³Ù³íên¶g˰…W Ðb5ÕŒÙyUÏÖç$îîˆ7»mæ9ý¼/ YïÖ®”F¶ ¨ã º~œ¯½qûxU-­ÀV`+ ÔºÒ€m“æ­l[·ïê·‹š¿5`ÛHíêÒƒçk»õQ Åä.–0çS=ÛØyžÇë)P÷ Ï2ªÞm@mZåéj¼E²ªú}Ž×‘\€‡yæÒáj½ÃÉ_¼0Ñ@ýÔ@:°-æ9Û9 Ûî.p»©pò†M›ô)¶j¾¶ko[Ðxú¬ÙyUÏ6¶ âæWcêÝÆÁ6öó]FÂÖ1ÂÚâÙ `e% ÔºÒ€m³miúÌyìÕöfØöàÛîlÝhÃF Û†l± ¹}çž.l-Ï6®^l Ô³†mBXÖ®/ëRãŽ0µjÏÚaÞÈÏ×%¶Yõr[£D Œ¨ëçˆZλœwÑ@ík Ø6oÙÔ Ø~Tܾ—\¶Û4Ü–6mÚ¢¼Z¬BnÛ©»ZE5÷ ™aäÐz±æ¢%ÀÃ]ˆd.Ró–v(¶œç3³<;jõ/,JT–—]ó6ó9Qõn•Ø­T+p|ÖöŸðÏ·CÄN(Û\=ulÒÙj¿³É9s ¨¿H¶E­:1lò¶Ÿ Û> ]·»ëÙnK láÙò|-` Ж°ë;Í‚múBŒ ýÖߟ~[K[J›ŠD¢0 ¤ÛÖ]¶‹¨¸Óî [önÝp²ãÙº°Ýèz¶XÐb5VUå*7²Ô³ÑË…O4  ä‹Ò€m‹Ö]ØvÜÚZµsBÉY°U!d Ûv]r Û|i`9éì¢Ñ€h@4&l[wäÀ¶mOjÙ¦­÷{¶›2Ï×¶fЦr&Œ\y¶"n·h@4  ä‹Ò€mQëÎ4mÖ£Ôªý†lo¶îÔ¢„a»Á #7R›nQ¥ MG^ªÌ!äVí: le9~­/ÇÏ—Ž(Ç!P ¶Ò€mó–™›ó²}²=¨¨¸+[— lÿ¸[äpÄ|m+öj[¶íDS§‹g+¬°;˜œ_9¿¢Ñ4l›µlÇÜœKÍ‹{PóV]¨¯NnÖº#Ãvc&©Å†›•WëÀ¶3µhÄØŠg'Þ½h@4 ¨H¶M‹ÚДi³©YK†,{¹ÍZu ¦ àuë}°Ý¤`Ûº}Z¶3eζˆLFö2² ˆê»Ò€m“æ­iòÔ™Ô¤E{jÜ¢ ß¶¥¦|»ný†L=[,MÖ°EYÂÈÒùê{ç“ß/}@4P4 l›µ¤IS¦+ÐîÚ¼XYã¢âlØ¢,BÈ ¶âÙJèH¼zÑ€h@4PO4ls‰½‰“§Ñ.\ýGY³V ÜVÙ°EYÁaä’4Å#ëT‡åœÂЩvPñ&+]b&U¢™ÄBå/6S#åè2Ÿ‘縞œ|UןQµœk9×¢ÚÕ@*°mZD&M¥š´¤›¶dØò-ÛÚõë3adlºU°e¯ -²` !8Àõç6ÿ#÷¯¯€UÕÆ®çª€k&â¬7[»'B:‚´¿h@4 (\ ¤ Û¿6nAmÒ‚vjÚ‚aÛ‚Ö® ­òlÛtÈZ [O6 ‚Mi¹YÐÝ_Y'« NT½Yñf%œ% ˆD9Ò@:°mΞíúkã"†mQlu™A›Ø:Þ±ª„“U^.¦ÞlŽXFª…;R•s+çV4 Hª´aû—ÆÍ=àf{¶jq”ãÕÂÌ9[F6KƘþ0²éÅâ$Uõq ôjÍr{®—ëû>®ŒjE¢Ñ€h Èl5pý°å ­ÚaË[{Î6Q=Ù„a`õY!çØz³9hणyŒE¢Ñ@áj MØþe׿Ðj«lEd…+29·rnE¢ú®´`ûÈÄÉ”¶ðjƒV#×÷“!¿_.H¢Ñ€h 056lwܵY´g«S5 l SPr¡ó* ˆ²5&lwÜ¥)s›FFæ(ÀV<[éŒÒE¢Ñ@}Ò@Ú°Ýaç¦ ¶°@ØjÐ6/nŸµ¹>5¼üV¹ÐˆD¢ú£4a Ð*ÛÅîšuëŒ R¼Ùôj¶õGdrA‘s- Ôw ä ¶n(lZ­t¾úÞùä÷K 
Ô ¤ Û?ïÔÄñnÙÖ¬ ðl5h¶õGdrA‘s- Ôw 䶸°Åœ­x¶Òñê{Ç“ß/}@4P¿4+ظ[ÉH%ißD¢Ñ€h€5 °•Ž A4  ˆr¬mŽXBEõ+T$ç[ηh@4¤Z­$µ1ÊI4  Ô' lų•ð‘h@4  äX5 [I×(#Ùú4’•ß*z ˆ´Ò‚íø “ÕÞZ¬BÖ–µÙ„­ì³Ê…H4  Ô ¤ [´[¶Ò±êKÇ’ß)Z ˆL Ô8l%©…P.B¢Ñ€h ¾i mØþé¯ êÙê•ÈRÏV:[}ëlò{Eó¢ú«…­YõG`[E'9÷¢Ñ@}Ó@®` ï64]£ì³•ŽVß:šü^Ѽh ~k@`›ã½UÒÁêw“ó/ç_4 €j¶²ÏVD'Ñ€h@4P5 °ÏV2LjD¢Ñ@Ž5 °Íq'Áu/« +VPy©Œz“¶™¼N´" Ô lS†mYE9•Vñ3\­\<êÊÅCŽS´*H®Z-¶Á¦NŸE›eÂ¥åÊ»ÓVQÖÝy®{Uàñ †‘zžæ½Ö·²ŠÌûËKK×W”E‡H’~~Øñ™Çh§ZZž9>Wÿ¦²îÎ S°-s«þU·t‚ä@ÚJÚJ4 ȵò ¶¥>¯àô<=ˆê¾‚žYÓÄë=@7HZ ³Ÿß€ámz­¾ãs?'ʳU -/õÀý°µïg~t†\wù|јh@4+ älµ‡ix‡>Øj°u¡åÁïµ=X¼.Ϋ5aõù!Þ«ö ‡-Ã?æX²ÂÈ|ü[éü¹êüò¹¢-Ñ@Íi -Ø>21SõG§l ­úF†'šñò¼°ª^0dÂ4)lƒ–û|å)G_¬g+°•Ž]s[ÚZÚZ4_ÈØÚ`4úګŒñ<ÍùOM…m³¼I7¼ì†¡=AÆÁ6îø<Ø@6ÂÝúxüžªs,Ú;Ï6¿:‡\¬ä|ˆDii `ëοfHñb¡rw;Œ¹hácw¡ÀåÛ2c‡¡ËËB·°5ßñùú»œc4ŽÏÜ®ã[D•½2Ù\Àe. 2?;3/í,¦’P²tø´:¼|ŽhI4P;È+ئ/‚øÐmúßY;'R~‡´»h@4 È_ 䶺ˆ|%çlÓm${‹ˆ0Ýö•ö”ö ˆDÉ5P°°$´•´•h@4 È­҄펻4£vnª LJ×+‘“ZHÉO* ˆD¨mžT¡æv„*í+í+ TV[­Œ¢E¢Ñ€h ÇØæ¸+;ú‘×ˈY4  žÒ‚í„ISè/»6'=o‹¹ÛZ],b-<±Ê9•s* ÔU Ô(l±(ª¨¤ƒªøSû ¤*Y¨@<` 3‰D¢Ñ@5Pa‹¢$½¨«£D9nñpD¢º¤ú[+£ªwkåNö§S̤[ôÒBr ÈLJI+£.2iÅ¡*Ž”ê’ÐäXåÂ( Ôg ¤ Û¿6.òæm1w›5g[»ad»jOvÙ_—;†U¢Ï®ÀfÞb»‚z½WÂN2˜ ˆê½j¶˜¯m^Ü^ÍÛ¦LŸI›ÕÌIª kVòÁßFaw=+åbeÝidUž€Z¼Žœ]Œ >îä·‹w# ÔG ä¶zUr–g[а•ùßš4‰‡ í, ÔA Ôl×mب¼ZÓ‚=Ûz³^ãVõy¼ÏïeÚõní0°½€*®Þ,>/¾^Üñ˨·>Žzå7‹îE…­Ô`;y*ýµI 5g«míºõ´MÃFÔàÛ4¤Ú‡­;«B»®¡Þ-þÖóª¡`³°»~_T½Y»^mvY`+•¾¨Èù•ó+ÈÖ@öYëv¤­Fçlë`ØA+-Ñ€h@4PH¶zE2¼[¿g»~ƒYmaˆG.rE¢Ñ@2 Ô(l›¶jK0¶ÓfÔÜjdñleQ…h@4  Ô’Ò„íNM[ÒNÏwð˜†î¤|„-ò!ë}¸’׸òá-ö2(=¦ ,*ßžÒfÒf¢‚Ð@Âqf˜†î¤)yæÙª¤™\ÈÈ¥“ZÄü¼ª@¥¹!ÅK­>ïÂ6+ÛVŽ/*˜qîì6ˆ{Þ|}²L`ùwžµ1XŒÏd–}üIß_WÛ§VûEŽõ/¿-ÿú£}NÒ‚-¸ ‡u°Leï6˳ÍØ–U„ÀÅÆw‘ª¤wV€‰:þ¼èpˆ pA‡šh û÷f§ÛôwÀ¸ç“´_m·’ïÏÒq%/ôÕ}T;&9þ$çA^“ÿp‘sä?G5 [µ7qf×Ãȱg³êNuàeFÙ~8†Õ›Õ'6ôùª<¦çš©ð£S*f<çsÙë-3ëÙf¼`|¿L¼cÒ»¢¶¼Øz¹ ŽÇøûõ{+øøt%"ïXìÁGÕÓI:Þ£e•ÅÇžÿˆös~;ŸR³ýýç'îùHÏ.ªý+ѾæùQõ”Ít¡ü|mÈmç ž­Ãªx¶º…Á6‰çëÓŸŠˆT¨ÐJÒÈQPÿ6ÛÏŽ,évôµCD½éê¾?ðúcþþ˜OlÿÇûÃôŸTRo»VÃÑiÃvWL˺ÞíÚu2¹‘áæ"y2LC׆­¬”î¤|_wȸz³qÏ;äpÏVÃÖ [ž­s!1Þ¯Äk˜÷—äó{Ê¥Tjt:|žy±‰«—wü‘¿ßíh™¼ÎÎqg{ °µÏõûõùÃo6KVj„qþ0œ¢ÚϹ%XÇ÷|l"Û?AûúÏOh2ŽìC|ÉÆêz¦q©w»ÐGœ~ãt×~q…BâêM§ñþ¸ßõcû”þé/Û9zÛ5!¨1Ø®aØþÕå‘ñÂ…îD®^UÏ6,tWo6îyrI`ëuÊØÚ#q³ƒÆuVçâï÷ülØú>? 
-B?î÷›µ{ÝP¯ç'šgœÎšñpü×ôªªÛP¯,¦ýìÁ‹ýû➯6l"Bé¾ï7Û_¸p<ÏŸÛ7A I6–qP‹{øó™ˆPX-ç$Çx|±íWýzÓ‘ý7Q½êøßÛÈþ¥ÿ¸þèøkÉû“@˜Úl7„7ÝyQs!ZæwV¶:|\yØÆŸÿ¨öÓ[{üçÇ¿ËlsçšaðàïϪGì[¤â‚:iûÚúE=å¬Á[ø4CôùOxü¡ uâÞ÷¼½8ÏZß`†ÎÃÆ­šŽk?ßó™ÅŒ&`#ëMWóýñµ¬Ã/æIú¨þÍE™1ý»:Ç7’ç£a]s°]»Îóh5p òlã:\ŽŸ÷ ÚX¬’åçø8êªpØÆ‡ìêêïK÷¸¥ª×žÒ~Õk¿Üzrrl¹Ùúƒ4Ç(æÓ©Ý¹[¿g[G`&ÿ–†²Zÿ‹ëö…ÂÞâ#ç³rçSÚ¯rí%úªýöJͳMÛviF¦å£g+¢¬}QÊ9s  šÒ†mSTÑs½Û,Ï ýóÎM=à>21`”„gÅk Ô/ „nK I#ú¨_ú(ó]£°hÿ´S\Øø‰“¥êO©ÐF¡ò{ij ˆÒÔ@š°mÖº5ãy[x·°,ÏV`+âMS¼òY¢'Ñ€h ®h Fa»ÃÎFÞ‰ÃÈ| “0²t”ºÒQä8E«¢Ñ@u4P³° š³mV$óJ ˆD¢‚Ö@ Á¶­ÁÖŸ]ŠØ£mî<Û&ÍZ8 lU´ÈJ&`1×™PrýùÕÉ{et- ˆêžÒ‚í”é3©yq{jÞº=ÏÛbî¶­Û°QçFlQˆ '¶héÙ#“¦R“æ­|£™*'°O8*¬|f£ºwR¥#Ê9 ˆDù¥taÛÁ…-—¡ëÁv›†ÛÒ®·÷—Æ%´cãbuû—&%4aÒtjZÔ:1lÍìNa9N«SO3êóõsåœb/“®1¬¬¿öme’ñ‡Õ;õ¾ß­ÐS×êuJÇϯŽ/çC·h f5l‹hÊôYTTÜ‘½ÛŽ Z@·#­Wží¶Ô`›†ÛÑÚu¹Žm{.¯×NÝÂ&NžÉ°-I [-ްDèöãU­§õùQ… |%ܡlj»ÐëuÆý~y¾f;¿´·´·h æ4*lK:S[óbÜvaØn¢mmǰm´½‚í.Í;ÓÎÍ`”Mœ2‹šµh›lU _½ØìŠ.IÂÈQ° ­7Po3®zŠOèõ ^§tìšëØÒÖÒÖ¢üÒ@:°mÁžíljѦ+[7jÙ¶;µlד6lÜL ™³ 6úoºÝÄo{Ò®E=غÓ.EݶԼe»HØf•ã¹ÙÄ«b=Í´`‹E_‰CÈA°-°zÒùó«óËùó!¨9 ¤[^P<•aÛ²m†l/jÕ¾µîПa»•nû'†í¶fØnæJ}¨qËÞ Ý^ ¼“¦ÌaضO ¶IêIV§h\½Yûûýaáø“Zèõ:ŽW¯ºÏÇ·³\`¤D¢šÖ@:°mISgT0d{+ÈwÜJ:¦›> FÛíØîÀ°ÝBM[÷§&­úrá[¸“¦¶B¶þdêz:¡Û\×ÓŒþü$õ&ízºÉ=[€¨€ëuf„]]˜Æ½_."5}‘ï͉â5l›4lç0hûQq§AÔ¦ójÛuO†í‡´ív¡Û [©YÉnp[²‡ëÁÖïÙÒIKîŽ*èm/¤ö‘ßßA¥¤D…¡´`;mæ\öh* mßm?Ú¸ù#Úvû¿¶;*Ø6/DM‹[x¶#gÏÙÖua¥U÷VêuF«ëz–ãŠÒÑ@:°mEÓfÎs½Ú¡Ô®ë>Ô¡Ç´ióÇ ÛLØÂ³í§æm±Hjâ”ÙY«‘åĦsb¥¥E¢Ñ@þh -ØNgضé¼;µí²GlFÞBÍŠ¨ER»²W»Kó®¼Ï[Út>L{þˆ]Î…œ Ñ€h ¶4láÙ–tİÊa佨C÷ý9Œ Ï–ÃÈÎjd,êË^m/öj»©=·AI-j«!ä{¥ŠD¢Ñ@®4l[ry.•t ¼Û6JÞ‹aë.röÙnæyÚÊ£UI-8ƒÔ„É3²Ò5æê‡ÊçJ' ˆD¢ÚÒ@°mÌ«‘§ÍÄjä¾j‘”³"ÙØú“É å@v§¦í8mcÎ<-«Am5„|¯tBÑ€h@4 È•Ò-'µPûl{QkNhQÌ[€J: ¤ ›¶R#Ž ¹‘Èþ…‹ì¸k+RU˜Ô¹úqò¹ÒqD¢Ñ€h 4 l›¶È å¤jlÅ©Þ ·8é3UZ;åõviA;pm[_=Û„%òò¡Ñjãìª?µq òrÑ ˆDUÓ@:°-ât³8/r׺*ð®ß¸‰aË…þ¸SÏvÇ]²nñø?ïÜŒÆOœL›Õ)Ï6I!ƒ\‰1(OtÚßU›¿/íß"ŸWµ‹‚´›´›h } ¤[”ØkQÒ‰«ýÀ:óߪ?(±çÀv{³ÍèÏ;7ul§¦°µ“9åΪ'«ÓrzD§êWûAý7{ÍN²‰ *+3ëÑòýîN£ÆÖ‹µS)ºÕ…|U€ì×T"ûSÜñéc,/+£ ¯²‘UÕÈûÍNªK³Bl=Þ$¿ÏkGJÓýþí/7ýÎ+m*m*¨;H¶Í]϶“\®i[T¢ëÙ6l:°eoÕ¦<Û¦ÏV¶¼Ôót@d€˜•Øß¬êã^ðüt˜Þ 4Pê=™ûq… ì(ÏϨã¯4p£Ïl¬t¥¥ê7ë諯ë (ü¿×®JýûüWí¬_‚ö— Cݹ0ȹ’s%HWéÂÖlóâ Ünñx¶®gû§šÌ[N0&\Ì k!”–»06ËÑîkmØúþ 
“^"N…¹-`ÛÇçóÌMÏÖ.>o†u]:lÃ_uÛ_:oºWÚSÚS4Pw4 Ø•p™­’ž­Óh¾ª9h•¨Â¤æãuº …Jsá`QÎaçÌ+ î¾ÏÏ,¦ò×·ÉZ püÙaä(؆_’zºæk0·«+äý:4µM‡¿«Ùþra¨;9Wr®Déj V`«W$Û ¤r}rƒÂ´¹þÎÊ|~¾_e~‹¼6ÝŽ*í)í)¨Û¨7°M«Îl®ŸïÇ—«ß-Ÿ[·/ rþäü‰’i ÞÀV‘LÒNÒN¢Ñ€h } l%MdÊà%ô/Ò¦Ò¦¢Ük Fa»ã®Í}Y¤jzÎV•{AIK‹D¢l lųÏV4  ˆr¬mŽXFx2Ê ˆD¢­ÀVF´¢Ñ€h@4c Ô lQý†z¶f!‚º>úó*÷¸•„Òü=RÏVFÆiêI>Kô$¨Y lk»k.SH=Ûšír1’ö ˆÒÒ@ÞÀ6¶ÞjPºF3pL=V/i„Q¹Æ«k|Ž?]¤?£~ÎLi§cÔ°õ§NÌNëhŸ@©g+:­N-Ÿ#Z äŸò¶‡¨ðz«Iêņz¶qUkܼÆYyíJ>:7²~œï›Åü°‡¬Ù)’ÔÛ•z¶ù׉äÂ&çD4 ˆÓ@ÍÁvÝzúKã"Ò{mƒæl#«Ò$¬]üÜ_¦ºŽU5(ª^«ž<·kÈZ“êžg\…r{RÏV:l\‡•çE#¢º©ºÛ„õb£`ëšw«þ.5 Ò'ð|•ÈÀ¶¼œ‹×zÒÎ!õlëf'Jz~åur~EõWu¶ÉêÅú€eÕ›(B*4QrËÏéY¯5±gëÖƒµ¿Û󀥞­\pêïGνœûúª¼mÒz«v½Û,2ªÞ¬ Eëâ耞Q(=ª^®¥†$xç:»Úü°ÆãÎgz‹¯øµþôR϶¾v6ùÝÑ@ýÕ@­Àó¶°G&Ö>Ûêv¤\nªî±ÉûëïEBνœ{Ñ@õ5 °ÍqÖ¤"•z¶ÕsÒ¶–×I[‹D5­mžÀ¶¦O¼|Ÿ\lD¢Ñ@Íi@`+°•œ¨¢Ñ€h@4c lsÜÀ2r¬¹‘£´µ´µh@4¯Ø leD+ ˆD9Ö€À6Ç œ¯£,9.ñD¢Ñ@Íi@`+°•­h@4  äX5Ûµœù¯MZxù‘eŸmͨdô*m- ˆjWÛJŽfj»^®t˜Úí0ÒþÒþ¢Ñ@U4w°5Ó®à‚vV¥Àz³º"r«TŠ\ÝÇKÛhTúA*Å2 Ðé3é½zºn®d}^ªÅ˜z¹ªñí×T¡òOUN¢¼G:¿h@4 Èo älàŒ2w3@hCëͺ S¹Š"fÙ¾$õbÍ\ÇA~¢Køùs-«ãàÊ\P%£'rÑÌœ9?UÑ@­Àuma|¹‘Ú²A§¸xxÞ(计mÃÖS·`½~,²ž®{L‘Åéï¹rä«rå=ÒùE¢Ñ@~k žÁÖªò“&lÖÛ•‘ßBΜÑ€h ¨QØîÔ´¥·"9Û³ ªW딣Ӟgd½Ù„ž­Ö5=Ux¶ö­¿<^ÿ²U³apûõ¹8iò™r1 ˆDuKy[ˆÇ®Wë û,RRÏ›cÎ×]ð™ ŸëòrÌgêÑúàèûüÌbª¬ybïýÙ!âØz»2's¸¢Ñ€h Þi ï`›ËÑšÔ‹­[#Á\jA>[´  Ô¤RƒíŒYÔ²m'jѦ#•tP¶~ÃFÚ¦a#jðÇm’ZÄ…‘sùÃ¥^¬t¬\êK>[ô% Di V`‹LR° “§Rã¦Eõ.œ R:¥h@4 ¨_ØÊ܉ vD¢Ñ€h ǨQØîܬ•J϶~êd/ç[4 ¨ÏÈl1o Ëš³ØJg«ÏM~»è_4P5P+°ÅB)ØD™³•ÐMŽC7rq«¿79÷rîóI[¹Ø ðE¢Ñ€h ǨQØîÒ¼5éPrÚž­]µ§ú#'{Ue èc@ÒŒ|Ë$•~ûȨ¹ú“6”6 Ô  lq² T{¤S!äó}i«} éuÆôÛ'ûؤÞoz竾\„äwŠfêƒò¶þ䜫Ø*QUïVÁ¤Œ«ÿ„¥SŒ«7k=_^šml½[ª¡°ù~ýÛÍ4“ª6o`;ï ¯×ë F¢ÚÇ«줳4½sï÷—›õ€cKRï7+§ûþõˆëCg”ß(Ð ®ò ¶6¤X ØÆÕ»µëßÚŸg§kô×›EØØ¬ ”FNR‚OC-(Œýý®È4ðŒrvYÀ°™¤^¯Y8kPÀƒ ìø<ó»öÍ6hP]ï·zõˆåBT¸"9·rn ]µ[ÌÛÂ&N™ferè™Æ‡t#aâyyð`ÈeÒ¬$¦zûºÀÏJRï6ä3“ц£Z,„zp=ëØ÷+Ï5ÄO¡q’6×ÈE[4 ÈG äl-‘ÀËó<ÛjÂ6®Þl®a÷ý:T\-ØV§^¯íÙgÏ lå"–19&Ñe]Ð@ÍÁvýÚµ¨˜ôŠä Ï6«*`‹’y.ˆ²ëÅúëÝÆÁ ºÞ,>Ëï•Ùaì$õn£ÂȉêÝÆÂV{ÿ!åý¬9n»^¯/$m~—åÅë²…v9ôýî9òC«Þouë×…%Ç(~Ñ€h HyÛLY§æl|½X}ñ7·Ý¨ÇŒÅ>fH7²Þ¬µ@h/òmÿ‰¬wk‡Àõoñÿ†ðï~ö|mlÃëõ&ió5˜ÛÕµq IÞ¯ækCëüU³±\Ää"& ÔU älëj#æËqK½^¹å‹å8D‹¢¿¶ ¶ÕÔÑH½^¹¸ÕÊ1ŠNë«j¶˜·…e¯F!ÖW!Êïí‹D…¬mx¶…,Rùmr ˆêºj ¶ëx5rã%ÞŠdñl¥óÔõÎ#Ç/ 
ˆ’j@`+ž­Tû ˆD¢k V`‹ý¶°IS¦[¤d””t”$¯­ˆD¢º£mŽG3ÒêNgs%çJ4 È•j¶MZ¶ñæmÓölÓ¯×ZXõls% ù\¹8‰D¢x  lq²Ó¯×Ÿ9HdùZÏV:D|‡6’6 ˆr¡¼ƒ­Ô³uÊÐeÒ#&«g› qÈgÊEG4  ¤£¼‚­Ô³5r W¡ž­tŠt:…´£´£h@4¶j¶Øo ›4Õ^,õl‘È?¨ð|Ú'^>O.&¢Ñ€h æ4g°µ~¸Ô³•½o²Z\4  €j¶M[µ%½"9ȳ•z¶NY<ÛšmÊÈ^ÚZ4 ¨ äl¥ž­SÇ7cÙõl¥cÔDÇï‰Dij ¯`›æ“Ï’Ž" ˆDù¢mÌ䋘ä8äÂ& ˆ‚5P+°Å¼-lrÖjdªU4  ˆ O[ñle¥£h@4  äX5 Ûf­Û‘^‘,žmáÜd4.çT4  äAY`+Q:¢h@4 ¨Ï6Ç¡ƒú(*ùÍr1 ˆD~ l¶2W# ˆD9Ö@­Àó¶°ÉÓfPã¦Es’ŠETÖ]Fµ2ª ˆD¢Œ ¶eµ[’.+ådŽGK"f¹ ‰D¢ü×@ÍÁvÃFj^Üžô")Û³Õõ[ËËËt…8»—Q…™ÎP—¡ÐìçÜ×ét‡^ÜòRÏ“.-wÓ"Ÿã«§»Âÿýú¹¨z³¶™z´øŽÚHGÌÿŽ(çHΑh °57°…Ð@eÀd×·µ½Fõz¸ü¡ž-`l€V »´<öL}E|U‡\!à1€<¤Þ¬¶Y¹€öDίœ_Ñ@2 äl}‰÷Í 8!ž«í5F…‘õsÊCU°äú¹š `¬DS•ÇóŒ­A€2™ ¥¤D¢BÔ@Z°6cµj×™Z¶íD-ÚtT¶ž#ÇÛ4lD þ¸MCZFÖžm(lM0F̃FÁÖñbÀª¿K o7EØ:¡pñj ±ÃÈoˆDUÑ@­Àó¶°)Öjd„…ÃaÛ€i…y ë 5«¯=¾ªP1<Ô ÿ÷e/n2<_ý]±ž­–¶¿[J̪óªt4y\ Eõ[y[sA‘®žåùQsÕ¿€)`ñ‘ñ¾,ïR…¢õ-ƒÔÞ¦ªÎÀ¯·ëí®ð Ì­?Þâ+ëø¥ÃÕï'ç_οh ~j o`+¬Ÿ”ó.ç]4 ¨H¶m9[më7n ž³ #ׇ—ß(Ñ€h@4Pÿ4*lÝR[™Ÿ•ùYÑ€h@4 04*lų­£5¡Ë9 ˆDñH¶âÙÆ7¸ˆRÚH4  Ô? ¤Û]›6'µÏV<Ûú' ¹hÈ9 ˆDñlß{ï=Z²d M™2%˦Nªíi‹/¦µk×Ò?ü@‰`‹ ÈtQTÒAåH†M™>“7+œª?"¸xÁII‰DõQ€-@ûÕW_Ñÿû_ÏþóŸÿì§Ÿ~¢ü‰¡ú#}ÿýôÍ7ßÒgŸ}Në×o¤¥K—*Ø6Öžmx¹‘J'¥ÓK¸0À¶‰ÀVÈBÑ€h@4Pàl'Mšäƒìo¿ý›~ùõWúùç_èË/¿f¸~Aÿô3úøã¿Ó†›éõ7Þ¦M›¶Ð„‰ü° ##g#öµnßÅËéðNåØsÓæ-kIdn’ Éa\Kí/£ûú8º—ß,º¯¯а՞ì¿ÿÍ ýåWú׿~¦þó'úûß?£>ú„¶~ðv+½óîûôê«o*Ï6Û"gΖ=[Ó6è}¶Û4Ü–)½‰ÚtìA%ºSqûnʦϜMÍŠZÕâÅ> c®ê«ÐåwËE^4 ¨M ˜°ýý÷ßé×_ó@ûý÷?ÒGB[¶|H6l¦5k6ÐÛo¿G¯¼ò­[·![ŽOcn¶îÀŽk{®kl6ÚŽ6nÚLºõ¥ö]ûP;e½iÆì¹Ô¼UqÍÁÖJÇX^š Û°z¶Õ®·+¯¹ó,m-m- ä™Ò€m“æ-hú¬ÙTÜ™ÖN]=Û°‰3H5Ú–4Úv{Ú¸y uî½uê5À³YsæS‹â¶5$ ;rv9®žmõvksd%ß-#{Ñ€h@4P;H¶-ZÒôŠ9Ô¦G‰»öðn7°3Ûp[†í¶Ûÿ‰6mù€z FÝû¥ný†(«˜÷(µjÓ¾f`T­žn%êÙFV%JXoW„^;B—v—v ˆjSiÀ¶i«V4cî\jׇ£ÃÚz÷QÎlÃí¶£ÛýéÏ´yë‡Ôgð>Ô{÷½©× =•Í}t1·ëT°MXo·6O¶|·\lD¢Ñ@íh Ø6/)¦Y æS§A©ã ޳uÜmmܺ…m¿=`»máVý÷8€ú ÝúÙWÙ¼…ñ‚©.lu)»°âëU}ïó¦*ƒg¬FŽ«g›F½Ý€²*Õ'Åå¥Ó×N§—v—v Ô¼Ò€mQ»¶4{ÑBê²×Œí9„6}°•ýùOÔ`û?ï¨`;`¯ƒ¸R¿aû+›·h ¯P®)ØrãújÝ2𸼟J¶jÖêz¶©ÕÛØÖL$#ÏGÈÅ­æ/nÒæÒæù¤¶¿ÿþ‚a V&ûí÷œðâú⋯èÿøBmÿ±W#·èÔž*–,¦îìEÝtÿÞüÑ´íÎÀvàÞÓ€=b;PÙüÅK`+É'ȱˆE¢Ñ@õ5`Ãö¿ÿýÿhØÐ¡4üÜséú‘#={èÁé‰Ç§¿ýíÓ,ضìÒ‘æ<þõ*Ý—m?ÇÛ—6ü!m·ã~ØögÈjØVÿJ'6 ˆDù¯ Ø^pþùT1{6=ù䓞½üòË´zõê@ضêÖ‰æ>±„úqõ9’o{±¿Û¿0l·ó…‘yÞs·n¹¤Cg +JÈS4  ˆ Zi„‘lŸdØ´G¨ný°uH `ÈögÈúHÕÔjdrA 
YFöù?²—s$ç¨>k 0²ölXe‡ï¯BÊžg»íöÎÖŸ¾Cö¡>¼õ§÷ ½¨×n¼õgÁbjݶƒ@@Ä—¡èÿ¤¤ŸˆDuXi„‘[vî@s–=F=Þ‡zh;ho^ õ!m»#/j´“Ô¢çÀ=¨‡—Ôb°JjѲ¤¨ (µ‘ªÀVúôÑ@k 0r‹íhöc ©ë>Ã2¶÷PÚô!¯FÆÖ¤kÜ´y+ué3(“²±çBºÆ¢V%"°Xb l¥H? °Ò#7oÛ†f=º€:æÔÇÚvß6må}¶â¤N!‚-Ô±G?UŒÀ±>4³‚ ´¬ÁBIN¤¹WÊïU¾ó#mey©ÚÓ\QÖ=ùû¶ÉÛ*‰Žå5Òž¢¼Ò@lo½åµÍgåÊ•ž½ýöÛ\ùgCàjäfÅÅ4sÞªž»Yåš¶Ò]š¹§#;‰ÀV`+ð °Ò€mcçz¶­9ÍqkÎQ¡ŒÿVÅãQÏöÛ4TwŠ]ضlÛ‰`S¶x³ï"¬! çKù¾b\½Ù¸ç…{¶¶^øØòlïWËÜw€™p¶§\J¥†˜ðy&ìãêåÆäïw«¾Oµ±sÜÙhlíscý~uÝ6Ë \ŽÄ¶r¡-à m]õÆä¸^¿h7Ø6eØÎœE­ÚwÎ8®ü·ÛFضn×EA¶E›ŽÊ¦NgØò›mØ.¬Ñ o¬å]Ôãžwß—¶^õŸØÚs¸&¬âª5ðNmØú>? Œzüq¿ß¬Ýë†z=o<Ñ<3 ì $2Q¿g¯?çO`›^'• ž´¥h îk Ø6WaDˆµÓŠ[[ž®õælU¹ÀÖÅÈ|~Ï0¶XéŽc¿>ÖQžyÎa'öLèÝ %'^‘,ž­x¶ ¼N\?”çóU#©Â¶­¶ëmت02^TÏVÆö¤ø_‰z´NÖø #œjzea'K…‘­í@¦§ KÓ³ÄEŠ뺙d° ?þÈöIìÙV}ÎÖô”+½K`+°ØŠ X©ÂÖòlC`ÛI¦æl½0r°gä «Æ„aã´zN1ãyYó·Ö¤=§ ˜•ó"ª Ï-I½[ó5˜ÛÕŸ…ߘäýÉŽß¿HIµŸÙnîXüs!Z¦«[>ØŠ‡‘¯†—h³64`Â5lùåWúç?¢~ø‘¾ûî{úàƒU Ûuë6Ò{ï­¥7ßz—V®zïo  'ðë~`^ºaädž-`똶ù!&ß‚¦ì9ÊÚ8iùülý‡DÇ+ž­x5ìÕ$êòû ºhØþþûïh¿ÿþU4þ믿eÐnQ`}ïýµôöÛ«éµ×ߢ—W¾ ÛDž-Wù öló¶‘!d½­E2K¥ß)¶é·©\¼¥MEy£ÀváÂ…ôÕW_ѯ¿þJÿú×ÏìÙþ“~üñŸÊ»ýûß?¥?þ„>úøoôá‡ÑV.Þ³™Ó¿ûî{4þ|åÙîšØ³íà,r`›Ÿž­Œ@kiÐ#°Í›‹‚ôZêÆ‚î€-R1¸ '5€Ó~•‚mIÇ®ÔI-ò8Œ,šZºÐl úB#ýª–ú•[GÞˆC.r ˆDéh Ø&#kØ*à²M›1;;©…ÀV`+ ˆD¦4`ë­FNêÙ¶0m:#&yJ;ŠD¢üÖ@°­œgËy¶ù- é´r~D¢Ñ@º¨Yت{]TAØ4.±—Uˆ ÀB"Øt+í)í) ÔE Ô(lÛvæÂñ \ÔµÅÊdÔ³mÒ¬fëÙÖÅ“$Ç,Ñ€h@4P·5P3°å:{7m¦]{S»Î=•¼3fÏáâñ-e!€xó¢Ñ€h@4PЂ-¶ôüç?ÿ 5¤u4·þÄÎÙ6l´mÚ¼…:÷@º÷£ŽÝûR‡n}hæì¹Ô¬E«‚n`ÖíѨœ?9¢Ñ@‚-@Û·o_êÔ©“²®]»Ò{ìA\p7NA8¶fy=üí"h´Ýö´iËVêÞ0uë;ˆºôÙ:÷@³ç̧¢V%[ÑŠD¢Ñ@Ak ʳ=á„謳΢Q£F©ÔŒ«V­R  ólCa»íö¦-[? 
¾ƒ÷¦Þƒö¤^÷ ý‡RÅüG©EqÛ‚nà4FDò2² ˆDu[as¶:”|íµ×Ò¢E‹èý÷ß÷@‹LR>϶‰Sõ'¶ÛýiÚúÁG´Û^Ñ€=ö§þC÷UàûèbjÕ¦½ÀVF´¢Ñ€h@4PÐаýî»ï¸Á¿|)áÁ“]²d‰´A°Eµ¼PØnÿçië‡ÓÐý§Ý÷9”¡{0 Üó@š¿h ·ëXÐ ,£Ñº=•ó'çO4 HC¶?þø£Ê Õ4”ÞûïÿK¸5‡ç «BìÙFÂöO;ü…>øèo´çÁGѰŽ !û•*è.X¼ŒJx¿m?D>C:„h@4  ä«4láÕêÐ0@gmrØîøWú€kôíuè± Ü£i¨ ÜG{œÚp’‹|m9.鸢Ñ€h@4†4l,€ª½X Xó6¹gëÁö(†íá4xßRR°í(°MãDÊgÈA4  ä¯4lÀŠð1àŠÛ K[#ïqБhí톑ųÏ^†ˆD¢×€†mLãO[½@jð¾‡Ñ ½¡jUòî©N"²™Œ¶ów´-çFÎh f4 a hVÕbHm‡}¶|Hý‡íOýxÛO¿!û¨­?ã'M£ÓÎ<—Z–´7¬µ,n§öß*kÝÆgE|‰0š·,ö[‹bÎFe>Ưá×µâ×·v> Ÿ›e%ü˜ûý­ÚtP[‘œûúµîq¸Ç‚ÏSßÝ¢55-j©ÒM6iÞBåxnÜ´ˆ +ðßüXÓ¢VÔ ¯Ã1èïw?Çâü|F+õzçýÍÕj31iÑ€h@4 °5pÜ 'Ñ]ãîßúÓh»?q:©ÍÔk·=¨ç€a*¡EþCèì .¡‡™Ló²=êÙìy höÜ4kî|šÅY¦‚mÍªÈØLþégÎæ[ýø~^¿Ÿ¥?Sýmšó}Áß‹Ïp ßá~ÏôYª*a“1–cOîêôNŸÉÏÏš£lŽ ïãcQV1—f¨ç*2ïç÷M™>“¦LÓ6ƒ&O ±©Óir„Mâç²l ?c§L#ŸMæû16aòTª´MšB {„ÿNÅ&N¦GblŸÕWåeÖ¦Syš]óþvÞã|Go.š€Â ޵éÔƒ+u§’]©5o[BmÞVœ›²{Å-JÆãVíø9~Mq~-¿§-§¶’Ž=øñnêý­Úu¢–m:R¿¯yq{jÞÖŽš¹Ö´U[г&-Ûi[”PœíZTL¶íÒ¼5EÙÎÍZ‘m;5mIqö×&-È´¿4.¢8Ûq׿TYÛa—fgÞ¹)UÖþ´Sª¬mÿׯgög~ݺít›¢ýõyÁùÃyÕçºÐúÑšÒ·Jo®A¶µ`]æÂ‚¾Ë|Ì<®°¿íß’ä~P?²ûNÐ}»/%é7A}%®ïÔf?IÒ'*ÛªúzóXÌþÕô5m¬¯av?ˆê ÐOP_Ò¿P¢¶ïïÈá4ö–[õtljܤ)ÍáôŒºAPYou«©Œá© šœ ;T R¥ú`m:u1ç5™×ñß^§àÏt>Ë6@’MÕà43fBV54.JÆÅ àmÙ¶£ ÝΔœk¥ ÛY½¯-âðr‘ Zû¢Z¿æy¯nħ6=Û«®Å\½CñÕƒ-þh´-€{ÏΧÎ:Î+"Ÿ U@´ kÜÚ@´ïë÷"Ì«¬}¸!”ë3„ TMó]tJN¾`0h½0„]\,\ùb¡Íó*TØØ±$¼&É"ÎÛ º@ąǶÉÃɹ‚mXXÙÙW6”NÖ0Ø:Ó3iõm²þTYØ¢/˜ÀÕ¡å\M¯Ôl<ìH^³4Wñ\5V”g–VƒÀpyÇŒK6nÊK[¿aÅÛF~M:¶Ž?'U[¿ÖåÀÖògfÙ:~Ìgëù~ú¶†?3ã…XLPf´Iv›»ç%JºKKÓµñ9©ô¡”úM`_±ûOŽúN•úI]î‘ý@_‹œkWàu1äú›XÃÌ1Ô“­I=f¬â¨áÑú=[s?×·i¨VO‰IˆD¢Ñ€h ¹ÀÏ ýÑÿ?œÒéú¹Â°þIEND®B`‚nova-13.0.0/doc/source/images/rpc/flow2.svg0000664000567000056710000005775012701407773021575 0ustar jenkinsjenkins00000000000000 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.26 key: topic key: topic Sheet.27 key: topic.host key: topic.host Sheet.28 Rectangle 
Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Sheet.36 Worker (e.g. compute) Worker(e.g. compute) Rectangle.57 Sheet.57 Invoker (e.g. api) Invoker(e.g. api) Rectangle.55 Topic Publisher Topic Publisher Sheet.59 Sheet.61 RabbitMQ Node RabbitMQ Node Sheet.62 Sheet.63 rpc.cast(topic) rpc.cast(topic) Sheet.64 Sheet.65 nova-13.0.0/doc/source/images/rpc/rabt.svg0000664000567000056710000010200712701407773021456 0ustar jenkinsjenkins00000000000000 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.27 key: topic key: topic Sheet.28 key: topic.host key: topic.host Sheet.26 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Rectangle.35 Direct Publisher DirectPublisher Sheet.36 Worker (e.g. compute) Worker(e.g. compute) ATM switch.37 name: msg_id (type: direct) Sheet.38 Sheet.39 Sheet.40 Sheet.41 Sheet.42 Sheet.43 name: msg_id(type: direct) Sheet.44 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.52 key: msg_id key: msg_id Sheet.53 Sheet.54 Rectangle.57 Rectangle.58 Direct Consumer DirectConsumer Sheet.59 Invoker (e.g. api) Invoker(e.g. 
api) Rectangle.55 Topic Publisher Topic Publisher Sheet.56 Sheet.60 Sheet.62 RabbitMQ Node (single virtual host context) RabbitMQ Node(single virtual host context) nova-13.0.0/doc/source/images/rpc/flow2.png0000664000567000056710000007367212701407773021563 0ustar jenkinsjenkins00000000000000‰PNG  IHDR°W›c¢ÑsRGB®ÎégAMA± üa cHRMz&€„ú€èu0ê`:˜pœºQ< pHYsÊ&ó?w#IDATx^í½ ¸$E™¶Íõ!CÓô¡±›foA•Æ@<Ã("È¢¢ö0(=à‚,Úþ.lƒàö3ŠŠð1Š Ò #8 à†¢ ² (´è àÖàŠÒ"*îùÅSœ÷dVFUeUETÝu]yÕ9™‘‘‘O¼™ñÞõƲÚj|PPPPPPPPPPPPPPPPPPPPPPPP`ì˜p ,vÛ©ÚfÏž}ÇZk­u¾ýÏ÷cº°¡6€ ä`³fͺqbbâÒ©²ï¾Ž}+‡(€(€(€Ù+ h=rbýõož1c­ßpЋyÓq'Ú.ºô3­o64À°l ?8ç¼ Š3Ï>·õ?êØ%Úló-~å~”|xÝu×ý˜{ïOfßzq(€(€(€c¥À ÷y“‹²þ|Ñ!/ÿ­`õ‡eClÀFØî¼ûÞâÝï;«X°ð+Õˆ¨ìXµûÜ,     d«À®³fMÜ¿øˆWÿFÎ à ¸cØ60~6pÙg¯*•}âŸx™kÍfdÛ¢Qp@@@ÑUÀ9*oxÚv ¼mùÝ€ëGY€‘ñƒêœ:ïÖÞwæüqΜ¹ßq-ßüÑmý¸3@@@ì˜3gÞÅ/yÙ¢•÷Ý¿x^±lÀ¦màšën)6Þx“ŸÓ¥8»¦£    Àh*à~]ÿÈ›?ùÑn¡ç<¢;Ø6€ Œ¶ ܳâbó-¶|Àµ‚ÛŽfKÈ]¡     d¡ÀÌ™k¿jÿ_ø+œÏÑv>©_êÀzµ /™3wî]ã6‘E—V!µTÑ2o[êþ®[žãŸ_÷7ù¯ªw¨WÎú\Î3˜ÖÃMiP«À®óJ·aÛ^[Îdž°ñ°Mî45Cñ`[«ü¯¶ÂÝÂ"·MNm»FÜ’¢Ý–¾Óoòo¯]Îúh†ð˜òG˜IPP 3Ü"ö?ÐØ&Ïñp<©gêÀš°ÍR¯¥Ö2kò†]\ìüa‚ë„óFâ.¸ @èTºãÈ6áÈ’v„ ŒŸ h‰µµ×žõK×îÔuí´iåôì(×.÷†(€(ÐwfÈù`×ñs< êÀš°¢°·ÓlÇ’q     üME{?¿‡›pbÈgÀ°ñ³ýºÖZk=Lí- QPP P`ÖìÙWœsÞŒ}eGlÀ°®m`ëm¶]éš&“‰ó2Ø8H…(€(€S`ÆZ3gþZkú5¿¨ uNcØ@S6°äÇýi5Ö|'íl”l”L$B@@Ç+°«~5oÊ!œalÀÆÓ4‹½f³§¡R€’‰D(€(€(ðxpЋÁáO‡“z§Þ±l )`lG.Û‘\$F@@¿)püQÇ.ùSS ùà cØ60¾6àš–‚6J6J&E(°Ø¥™ˆHG@ fΜùÁ·¿ëtÆ¿2q 6€ `Ø@Ï6°ÞìÙ¸Öqþh´}½ ¶¯òŽUæËÜÝNŽÕs³(€ã­À¬‰‰»®¸úºž".ãq¡î©{l0`£} ?Z*Ö(Àb"(€ã¥ÀÌ™ëƒU2üáé²Ï^UÌž=ûކ¬€mHH²AÈDǵ)Ç•|°%l`Úø°«ޓì@Ÿ.†(0j °8œ@6€ `MÙ;P/€`epD`úØq1@¡+Àâ¸6帒¶„ `ì@›u€`úÈq1@$`q8l`6pßý+[Ýæb·K.ÿB_Òþ÷§>ŸïeWD§½´ƒòþwùæ6K<;Ц€`Ø>r\ P XàeðÂ5°³“ÞúŽbóÍç{>ûÙQÛ¬Y³ŠwÞ¹6í;ìX¬±ÆµétÝ6Ú¨Xýõ£Ò*½Òn·`ATz7lTº7Þ¸£2l°Á¼bé…—ô<;ð žAv M; À°}ä¸  @ °€Å [®3Þ¶ö¦ãN*N9å”"ö³îº³Š+VÔ&¿þúë‹9sæÔ¦S‚#Ž8¢X´hQTZ%Úd“MŠeË–E¥w/ô¨t–á°Ãgž}.›D‹Ùh!¹Üfô˜# À°=>DœŽ(¡ìxCPIýÊØÇø€Í°¡ìO‘W¸lç÷˜5 À°=>DœŽ(¡,3(€á:ãmk,›aÙÏ"°™ÂgmËèôóÑ"o@‘W€o¨h¢!&l(ÆXväÔÎn€`ïèÌd*S³ŒNCB’  @& °ÀG |;éÕX6“fqPÅ`XvPO×A-XÀ¤W0á|l(ÆXv´ZÏžï€`›Ø+5îÚ³E’   ä¢ |ÄÀi°“^m€`siTΑØ£ŽYRì²ën¥Û!‡.ndVí}_p@¡­×wÓ0Îox ìÄ€l–Ë   @ 
°€É0o®9~vÀ°i´zÉ”b¤ö›w­(®þÊM­Í`Öþ¿ñ¶;NAàÇ>~i#y ºMj`“1j ‚(€Q€?tCÍõ°1Ù À¤QËç"# °þ{ÿ„{[+[Öø ÷AWð‚ï÷ô`¡óýó,¿¦ ¹_í›ÏƒÚï’*|~jÛš’{r×§ßö™lþ,pѯš|±-ßX6Ù†p8{€=ûœó‹-¶|òt7cýíGT½ŠÞZ}ûÝC0ÓêÿTÛ!v8ݰ®*HªúäP”¸€<,›úuX #Õžr–m°ìм´ 0Ö«Hi¬ öîÀ*ES}® §ÿ÷VitLiìü”»°i=Œý,Í<—ù]ý¼y£À8*ÀŽ$}Ôgª6À°ãØÆ¶¹ç±XÁ§à4ìþ«}ê.¬ýX¥óÓèøiï9£µÏØvÝ”S|'°ãó6˜ïnU;@`žwÊ4zv À° 6]£ÕØlÙ¸Ø:€õ¡€…Ç`ôï€ý:懠;z üQ§)Ú À¡‰Kù’l0±“u+¶.ÃUX%›²iS6_{hR3šÌ,ç¼X`'EØ¡L£g—,›s[Ù‡²5ÀÚxVÌjØ ØÖ‘ÕlÃj4¦UZ›yØO¯|üè­¥Oµ-i¸ ññÎ>5Ô’O‚ ° VJÆEÒ`>Nvô@!Õ›r·­°ì°]×à/þ‹›G寫­¶ÄýŠ£¯y]äÛöò™\°ð+S·VOÕŒÂá,Ä6þÕÆÀÚDNöíÏ,æëçÎXœšF ì2gD“½çöO¶ÚŽcÎìT­°ã ©5ê”gtí€`‡ål`ÝVØæ`öÊ©}3†U¦.#f*ï‘^™³XEP«ÖeÕ~MÊäGbýYˆ©Š¸ê»lØpŸþ/K›ZÛÀñÉð¥Ø >â—`ØdׇK­¡¥<£ •ƒ¬[€–_¬²:˜]ê"³û «l]\w©;gñÔyYl·ï™²1°Ýæ•Úyl–Ÿé)l¦—h±X€ucŠRkÔ)ÏèÖ À謯 *{¿ƒÙ3ܾm‡UÎÈë* «.£»ºm¤vßð¸etF¥`#­}’°#P‰ Ý Ào,60@`Øaù1ÀljãeCé4Ž÷f·-Êa ì¨@g“÷Àëm0øë°ƒ×|”¯À°ÀËá¥É†Ÿ¼òŒÒ°yìÏî_Yüî__Uüa÷=£¶×XãÏ8¸rÑÌe©lv8ù€ÚÉ߉Œ—-óǽãi ¶ÿïÄüÞ‰ì(#ƪ÷ÀŽO]âNX€`‡bÏßo½Ær;ì°Ã£¸ˆÏºëÎ*V¬XQ›òúë¯/æÌ™S›N Ž8âˆbÑ¢EQi•h“M6)–-[•^uóé´ ÏzÖîCµ•ÅÞäG€ß¨¥}ÔéàÖ*)\ÿÝ¡ÖGøîXkæÌ?°¬º“Âyæ+Àv®gT+À°Cü¦ëlÓÍ6/n[~÷ØÙØ<#°?¿ûÞâO[m3=ƒï¨iÝý¸ìSËï¤66v¡"°O}ÚvD`3ü1–ìø ;>u=ˆ;`Ø„ ¥l©€¦‰üÒ†d6&NYD`Óé°Ãgž}îÀßWݾGÖ›=Ûõ îy}ÓFý€.ÆÀ^é õHw^*kƆz^5‘Ó ›öû¾ê9`}ēΠ€Mºz²+ ÀÌ!¼Ï!{Ý’73fÌhu=ÛjëmÙ 1ïÖ¡Ï`ØNº1°½ûu뢬ZNçb­‹\Ú‰Þ¯•ƒë‘¼Z7ëмªœ…øê¯ÜThkê]8Îù°QÏÈH$j `ÝOVû¸íT¶l5ÐĽ~Xv ðÒ /)æn°A \×\sÍbÓM7?õüýË®¤ãì´èÞX€íµ ïìü2€uÀªes–ºcu–[c©W¸œäÛvò´*òj=€=ûœó‹-¶|ò*Û ÿö¶´¡£Ú^°˜}Þiõ+Õ‘MÜ‚{ò­s&Ù²Õà®ì€`ûÚøÞpËòb¯çì= «ÿøÿXÜtÓMÅøÃâ”SN)fÍšÕ:¦¨¬¢³÷¬x ¯åU' Çû`X¶V¼ƒ, `°®˜Zëu²ƒÓû•´€ ËÒw€ýØÇ/mµU§½çŒé6JðuÈ¡‹i³zèIÀöë±á|®_]wÝâëÇÖ™S?:è¥ßë€`ûÒøª»°&«±îÂsçÎ-þó?ÿ³®þöío»xå+_9 ¸ŠÒæ4Î-GpL¥Ì, ÀöÚ„wv¾{Ñn«­³³úž: €Ýe×Ýjaõ›w­(‘Õæƒ®Þ¹ßÿуÓûtLin¼íÎUÚ_ýoçûóD„Q^óó×ßÊ_­´ŠÛ5-?•-|÷[9²*ÛÊÊÙdÀöýù½ ‚®ßxãâî»ïfËL¶ùçqæÌuÇQ›lˆ,¯‹.ýL±™›aV¿Vk;ú裋ï}ï{Å~ô£ÊMKt(:kçìô;W\}]_àº÷LžO À°lómY†9&°‚?un7ù àSi‘ô xõ¿ÀRíƒÆÌZ÷㣎YÒJ§ÿ ,í|ÓùÙÕyjý1·–¿µ;º–m:_éu¾öé;æ·SäÊkßÐJcÇmŸÊ§ý!à6ÙÞ°>±Ã.2›/¸7°MDq‡mÊ\€í@ÂFìN·Ì„ß]xÇw,®½öÚâÇ?þqôvÞyç9øÝldzÑK åÛdƒI^½×u°,ÛHó•{&ÉlD†ï@A`ØX(€4€Xú¬ã] ¡ÔòX]ßÎÑ5•·A¯ Úÿ_@ê«ÎÓq‹Üª,~ú&Þ÷Uy4 °—;æþ@PþX–‡äo °ÝCº ŸôÖw¬Ò]øŒ3Î(~ò“Ÿtµýà?(N8á„UÆÇª;²®Óφ”¼»·n´`X–VØ)0[¡µè§ £ 
°9‹œúÝ}cÖïflê_ËRÁ«€×º[ú:˜îæ=_wNÃË5 °ì8Øyì=°ÝÁ‹º k9ëú{Øa‡µ†$üô§?íy[¾|¹[ûñ°é¼Õ-Y³×5ˆï®.­ À°±-ÔH§K`Õ½7Œž†ï˲ˆ¥Uµ.ÄU«ý‚9eYä¶® q¯«ó-:\ îGÀŽô³ÝŸ›`ØþXVž¹°Aºõjù×í¶Û®øüç?_ÜÿýoÊw§vš¾Ö³vß³¸æº[Ùf~ì‡#Òiž, ÀæÙ^6\êäVﶺIœü.¸ö.ô»ǬßmXùÙ$O!û‘]+['[ÖÝÙ° [x³;Úå½vóï$ë;IÜKZ€íÅ~Fí\6`Õ]xÖ¬‰Pjœ·¿ýíŸåoú¼sÎ9…f3žŽöþë«›1İ,;j-iW÷“ÀÚ2:‚;ëâ«1£6–Ô&M²cŠ,úÝŠëVùÛ¹–Öþ÷#²Jg“+tª ¬­gëÇÕß6áÛ•ü¤IwÅïôùª[»ü¯w›¾ë>—¹êvŸùu™Ä`Ø;—4l=ÀjÖ-¶Ør _øÂßúÖ·ŠŸýìgÛ´Ï›ßüæéñ¶é·¿ët¢±‚, À°ãÒ¶½Ï,V°hàg³ ”~ÄÕŽ…Ù2€µq¨:߯¡ú3[¾þuuMýïOÚäç£s4I“ò©«ý6Ñ“/Ì6 q§½jºIOâúw€àQp~Îs;N÷vV¥«¿Bu Ewuí #29t v«’ ^™9€`#ìql’°õ{Îy´àuÍ5×,N?ýôâç?ÿùÐ6­»ï¾ûNôÆájô­ÒÊq²nÂ*‡_~¥ñË%}tܺ+?âªc!ø›6óÝDÞ®ue¾~ã[K^°å¥êÎmMØÁR3¬qÿ`;Ø›o¾¹¸æškнèEÅ.»ìÒúû—¿üåÐ6­+¨žz; W¼û}g±‰:G, ÀŽ{ËÛº6Ñwt¿ ÕÏ·a€=ÕÙ“8©í§ ²bÚï|àˆ¶ËÜ"£1cFý.¿vÞdIæe]ƒËÊ[–Î@ÝÏV÷b‘Óª¼}8ö6Œ‡Å•F¡Ž–FÓ¸¬&J`¯¿þúbÇw¬Ü¾øÅ/ö ÜŸúÔ§ŠÍ7ß¼ÐwŽð®ºs[v gŸS€í`o¹å–âÖ[o-–.]ÚêÊûŠW¼¢¸çž{Їzhh›ÊôœçÀ¶㪼Û>;5fWÝŒ·öýÜç>w:¢*Õþƒ>¸•Æ6t}0¶nË~~Ú×T´·é|Øæ›h¶9€ýæ7¿YÜyçÅç>÷¹bçw.<ðÀâ»ßýnñðÃmûÎw¾Ó‚ê©6‹¹n|¬–ê¶Ñå¼z{©ÓhœV½N9唨muÖií®Køá‡ÿ÷_›Nùì°ÃÅvÛm•VéçÎ[,^¼8*½ž±º²vS†§»ÙÏÕ ¤Î®R9¾ÞìÙLšo°È1T€`e=u!¨€ à| 50Ü)BªãÖ¥Ø&J2£´®ÀÚo똀UNˆö[”Ó®¡ ˜,O;ÇÀ×®­ýþ„P:ן@©,]Ó/·ÑW›¹Øò÷£¿¼¸–ŸÒZwa~U¦ÉŠ÷ëV`ê«`SðiÝ‚°ª5èJïŸãlxì‚ .(n¿ývvŒI¶Hêº_}õÕÅ¿ÿû¿°Z«õÿ÷‹³Ï>»ØØM§nÅZ;ö׿þõÐ6E‡÷ØciÝév)®¸úºlœâTœó&Ê1®«nìê»ýó¿ÖxÚc—¼±xÅâWFç{è+/–¼ñ¸¨ô‡º8*]§e^9Í,ÀÔ`Øh€<†e0¦ýIZ8“þ×~[†Ç,\ç„KdžùØXÛï«Uví°|:Oê_§l‚¦²tÊËÊ–Ó"°a寭L?Ú¬ãeiÃ¥}Â7ëÖÆÅú‘MA­¢®6†ViüãoxÃZçÙ>`õ· µéHi¿ò»–IœoØîVÝ„}á ‹¹®{î&ë­÷8€½ë®»ZÙ×¼æ5­nÅŸùÌgŠGyd¨ÛÅ_Üzè‡.m‹yyqçÝ÷²t‚Æ`›€ò¨_ [#¶ñfº]†ìßÝM>[ÃèB\fHí&¨%wp1‹wpÊã’ÖM:ÕIÞuå`§6«*@m°6ópÀæ0öÕÊÀvòHÅ¥`ëÂ0ûùϾ8ä ƒŠM]·Å÷¯¾zq—ÁM&&JV3«ñµ×^[<ûÙÏ.ž÷¼çµ¢³¿ùÍo†¶iíÚO<±5.V«ewM9/ìà´nªÎÈ'¾Îظ¶·¡T,+SŠêB<*k]†c–ì©zΚØv݇u}v `ȩ߭8„UØRóe)YØzÇÌØ£Ýr9®:øs†T¸Ùåjö{ßû^¡õZ?úÑ[n¹eñ–·¼¥µnìoûÛ¡l¿ûÝïŠÿøÇÅ„oA,³×ÛASÀNë¦êŒ|âë €mMã²`ØžV]dýñ¤qf7üT«”Ùy1]©Ý¬ÂdW—ë`4³ªñª~·b«åw,ji“:•E`•ŸØŽi²'ÆÀvb¶ù§m`¶â‘ìŠêìQn|ù[ÖZ«®¶u°+V¬(4±ÒñÇ_lºé¦Å%—\2p€ýãÿXüõ¯mõFmÀØ`ÛÃÐ7ïZ1’ï‘q`v ~;à÷wSÏq*]ˆj­ýº˜óbNýËj«Ýå¾w›€1¥ëV€i3 Û,Ä>€ZVÇì¸Òû3 ûc`-??}ÙÒ<ýÓÚi¾×2¶ñg²I€ýóf›¿ßoÿâWn–ÛŸÐr-aâ§»gêf`ïw¯ëºã~ùË_nyµIœ4Öº[V{ß}÷?úÑ 
-½³Ï>û´º/_¾¼PT´ŸÛþð‡¸>ôÐCÅ5×\Óš-€,5帰՚_ý•›Š-¶|òHì÷ô`q¿½­ÐwS¶“C>lt3}ªKÙMàÈ¿ÀäŒký~ÁÂg¬dËKƒ'm±å#n™»£­¥}®»7týáf#€õ£ f¯tÿ/v[‹-÷zsc°íƦª±faÓïB¬(¬‰5X,Ë·*m§€Ùïôl¯ÑãÏo`íòW·þèïþõUÅ/™éV3î^tégºr$C€ýÄ'>Ql·îº…u!Ö=_î¶M×^»xÑGßøÆ7Zã\ëV]xúÓŸ—]vY±ÕV[¯{ÝëŠx xôÑGÝ®ùË_Šßÿþ÷Åm·ÝV|éK_j½;ØÁë€M` 6›Äo»³ãún2ßÔó`£Ûi×y§‘`Ѥˇ-O æE[ [­@°æˆ:}ÔmKÿºÚjû4$t7Ù¢usbxŽ èz·´E¿akù—F9úuM¶‰'`Õ<ú°þaŠÌþÆ-ñ [FcXΗ&)Úm÷=»Z:¦lc^ùÊé®ÄÙ׬±F¡H¬º?mà ‹sÏ=7`­?ûÙÏŠw¾ó-ýÈG>Ò‚Í^7ëŸÿüç¼Þ{ï½Å7ÞØšL €¸ší°éì(E~‡õ޵ë°ÑítS}AެD`}§³ìo²+Üvš;¶m®fÀÞ-¼°Í?uƒXÿ]ò‘Ÿ}n1Œñ²š¨hÇvîxÙ˜ªu`ΟßêJ¬1°êB¼•‹:»V¤5+ñs×Y§8dß}Ûv!¶¬ìƒ>ØŠÜrÈ!Å®»îÚêú/íf¸ª»ðÊ•+[á¯}íkl"c¥Øx€=ûœó‹}_pÀ*?|i½Õ]vÝ­µ}ìã—¶Ž)Ê©ÿÃñ³:×" §½çŒÇ×¹Ú¯sÝÛµõí_OùÛµ´_ãÖ Ô”¯Ê¢oS:uöRûý2é~,¿0í°Á³©ë°Ñí4- k`ë6èb|‡‹ÊéöMädZ£ °ýŠ|¦’/Ûü“6h€îb¼æŒâѽ¤x¨Ën½Ý:[r@÷ú§½;Z6¦ `5“ºÛ2:»Ë6ÛLGf·wûéOºµŒNÙØ2€Õ7šøª«®*vÚi§Ö²?ÿùÏ M¼³ýéOj«º!ëºßüæ7ØDÀ•l}äÛ„ öÔWûL;ÁŸ Qûì¸A¥Ò F-­öûãiõw4 @:fI+­Ž[‚WËS׳4ÄVV¥QY¬<þ5üò+_K«² ný{ëö–Úylt; ÀFKE¾¬&=r‘ɋݶ,ãmE'ÀìÅféâÛo3`‰À:»£ßvÖiþîy:hïŽV_ý¿vQIEF{Ýþê¢Ý¼Cþ2wƒâ·¯=¦øÅ-ËÒÅøÝï;«8ÌÏuüªöÖ[o-^÷ªW/]sÍU–Ñùÿ^ûÚVVãb÷Þe—®V«í½ï}o±Ùf›ïÿû ÁiÕfWu¾ÿþû[3+š›2ÀÞãf­¾óî{£ë!¶¾ROG¶>kp((¬RíTZÄT p-½@W›ý¯¼ªf8.ëB¬¼”§oOÐÚgçøy†eð¶  S·×NËÀF·þl´T$¬Qàbw¼»ž±UãG»qær>Ç9à+•MÙÔØ|öxêæ5Š›º¡æüì7Uv÷ËBáÞ©ã“O}W<µدýë…ºoâÖS,Ú,Ä;l±E«{ñf3g¶ÆÆF`Õ8Üt®"± .lu¬ú›"®ú<òÈ#­ÙµŠMu'žß7‘Ê€Í`ìBÜ55—Í8ÿÖš±ø—+/¼d Ëïš^æ® Bu{ÑEOÊSVØSN9¥xµ‹H¿ÿ O(ÙË:Ø÷žvZ«û±³Ë–-ku+^´hQñ“Ÿü¤ÕUX‘U:E^SX?~“›ÈKðæÿÀÖw«µÓQHgPhÝmý|°‚G¥ñ7?Žëܰûp6lsvÀFûl´T$D6 tâ@OÍL|¹;GËìLä$, Àæd¯ý.ë°ÆÀjfb-³ó7)J“ÙÄñNáU׬XM’¤5]ýì-·ÜRÌuÑוŠÂºåun¾ùæVdÔ_6«™ˆmûçý÷/6[o½â‡?üáô>;öḘ;wn¡ñ²ZÓUß)¬f~>Ç­ ¬1ÇU½&ØæÀ¡‰çcØyø i“5ù]€ëÖˆµ ™tnØý·›l8fÖïVleõ×y­ëBìѶÖýº>Ýj°ÑR‘zXuÖr:«ç¶Ö†íª>sX­Õ¸ãŽ;VÎ×ÿáÚ°e0)Ϙt©LÞdåålWÆÜÀIƒØ?mµMk9_\wKtô³I'«xí`5þô{îY¸)ÓQØ*€ýðÿý¿Å¾ÏzV«û±f$Ö¶è/(¶]}õâÍn<­íó¿7ÝtÓbùòå«DhS‰ÀÞàÆ2kŒ±[¤½¶+8 ÀúÏx õÇœ*u-¶¬Mždy¨û°Ò”­¿Nòä_WQ\ýÈâçg]–µO׳ÿ­‹²öé•Q_ë>ìÛõ»ÛùšJçÚdTM¾ãRÈ €n˜Øh©HˆìÔ²9roéÉÄ[ØD °'Ÿ|r±ù曯²½á oˆ^Ʀn]×ðøsŸûܶÀk ¨2éÜÔµ®<lOÀªyô`ÿè&ˆúõ»Nê°rêä`vÒmØw»‰À `µìÞn–b­»©ƒ¹»îºëqد~õ«Åönü¬@ws7ñÓIo|cqß}÷µöCnŸŽÝpà ƒXìí·ß¾Jt6€¸VE[ÙÿØØn½&°J6R+ƒ-IcåèùËãØqƒT™;GûÊ"µÚoËî”Ý·Í2ì/£cÝ–•_x-¢)[¶ŒNØ ÚÊÎ2:Í·g™åÀfVa7Q¬ ±ƒÖ»¦ÖzmºÝù.=ì= 
°‚UÁ¢™€VÌ?øÁ(xì`u­Y‚lÏf42ô`¿ßþÅÃn¦ßŸÈ ³Ý¬€uS× Xt½~­µŠ“O8a€ä.˜7¯u\ó<ê¶ã\·ão\l·õÖÅR÷ÿ•nÛËõ˜øéOºÊ&€U×eA«¿¥0Vp&Í4QS]v#°©A㨕§löà~ÜcÙ¸Ù~\'·<‰ÀF»l´T$D6 8j¡ÛŠ)~F`–ê¾kQXl³:¦ÈŠÒÀêÁ¯Ž]pÁÓ€®Žùù©›°Î Ï3€}Ï{ÞÓ:VÔÊ«ì\•Ëʯ2i«榎ëÇ·5òCFŠF>Œ25 °ZÓõgnÜcnW]y{Øc]àS\W`Ea7sÑT= а 4÷r³ a57û´Ö•=ðïÿ¾5‹±Àö™=ÿüó°š9Û6ÔS³+2Nà¤óXº×=·®À~Ô³“ó;I À–Û.Ý’°ÑR‘òU`$V0Žqõ££T9yÚwðÁ·6?‚¬ÀÒò¼ê¼Ã?¼œ:×ïB¬tÚ§ãÊS0kÇõ·ÎÕy:®¿}ÈÕÿÖ]YßMj]>lóp“Û‰ó—SÚ^ö+_ùJk)è+”ž~úé-€=ö¯(Þåþ×~m¯wº½ën|yÉrJ¿C«Ù‡µ)«uh5”¿¥°~]_ãÆ?k6âílßzï°l“ï ÔDIš¼iT»÷R'lt; ÀFKEBÈW‘XëBlQÔX€õáQÐiÚ`}˜ ÁÐàÔöûyj_¬Z´ ¨ë€³éã ¬ã>R€­ˆ^öž{î)žé–ØYæ`í·mçÀó?]Ï‹\4Öàõƒn¹Wp@kbµçï¶[±½ƒÙdµÏ«9¤¹Õ¶É&›šéX³‡[êëÀšs¬¥uî‘næ½8üœ[ÿ ¢Q>°Ñ¾- k8Ã'ñIPlÖ"¨6™“éŒX ­€Ð&†ò»(úÐ*HÖµ§á¸Øp ¬žA±uö£°ÃžªA€•6|بîνì»ßýîVôÕºoíUã]õ¿Àv¯í·o­kp*ÝnþüU VéwvãiuÌök_ûZ+šn¹,P’”PWÔU¬ °Ñ®…%²Ú¼èÔ$Djœ+‘Äd»ÔQ‰Ù¬ÁbÙäJݬÀÔ ¶]V°©´êâk]‰«´` fØÑ{6‰ÀÖ;¦1«Ù„ýu`5A“&qRöÛßþv1×ìJ¡Ç»ñ°tÑTÁ«ûù½5‰“އQÔ—í·_k'‹ÒìîöÔ§¶Ò*«µeµ®l¸°õu댓-±Îl€=?;J^6á*Ê`«ºÒ–uó ÇÀ†Ø2Ø,‹Àú×4еµ_ÛE`mìl»ÙŒ‡5‹1ØæŸP¶Þ9k°þð‡‹]¶Ù¦Øbà +ö»ßýnñ//|akYRÁì?<ñ‰…ÆÈ–EQ_ºï¾«¬ÆÁj&ã­6Ø øÖ·¾ÕØ›nº©¬–mßùÎw A´ Z³+Z{ã7×^{mñ¥/}©ÉýÜç>Wl´ÑF­žêÎ;HG]×{·›©Zcy]®Uoïh„F½ØÛ|;MŽ(P£›°‰Œ$ÀZw`Á¢f¶IšlV›ÄÉ&`R:üj»¬ÆÍ„Z:û¿ÀÚLÉþ¤OÚNâ4ŒudØæŸP¶ÞY-Ø}èCÅÎn©›=\w`EJ7qcZ«"°Ø×ºI›þÿ5לؽ]úw^qï½÷–n°ŠÒªû±f0~ë‰'¶"º:G«õaÕõ¸lK`à œÎLâTo½Àç¢ï m€m¾&G`óµ‘XƒE«èªºüú«ÿm&`›ý×¢«ek³Û´–·òh×…ØŸMXùª{³[×¹éÉšªò`›€ØzרsÏ=·ø‡­¶*vwàªñ«Ö¸À^zé¥Ånüª?îU]˺ÿÚ¾—<ÿùÅÓÝ;ZGV3ë™ðÓÀ ŽË¶Tvé…—T.¡ÀÖÛÞ áƒëQ'½ÚÛ|;MŽ(ÀækYl,ØY×Þvé=m×­·êÜ^"¥º^LÙbï³×tló0[ï°úû;íTœŒMU”´ `ï¼óÎb+×UXi¬û°ºoàºkáª.Àg½ï}­¥­ªŽ `5îVÙªm]ˆïYñ@+ªzЋ^R¬¹æŒV÷ävÛ1¯c«ñ8mW\}ݦªîz)ÎŽÆlóí49¢›¯ Œ4Àö vãr>Ûü ÀÖ;y>À~ò“Ÿ,žìÍ"\=Øõ 8jÒ&¥}µ[ëuûµ×.Nq“9½þ裋ïÿû]mXgÕDQUÛ ö¾ûWo×éÅ^ÿ´w-¸Ô>y«mŠÝvßs¬6ý ÇsõÏ95£Û|;MŽ(Àæk¬ëB8. 
JâÁ=¨l½ÓŽ}µ[õdAµˆªºïïÀtÑ>û·Ýv[kVaÁãÙgœQì?kÖtº÷»õ^wËZ=ÍÁçåîu®ÃZ·_»lÙ²âþçÚnÚÄI‘Øsλ ‰å4¨ŠÂŽãØM7Û€%;²Qxvpí7WB)˜Ä)aS`Ø‚lóO(Û9À^wÝuŖ믿J·`Áì8@Ýa³ÍZ³ûª{ïv®ë°º ëØen<ëóvÞ¹5^õ¤ãŽkÍ(¼·ƒÞ|ä#¥cX«Æ¶Úþ7Þ¸5ƒ±`¹Ý6,€ £Y]ú™Ò±°l½ýD£œl€m¾&G ›¯ °,ۇ瀭wŽËf!ÖøÔý]W`ëBüz×%XKãÜá¶Ü>mÓM§'yÒ¾7ß¼µü&Wúú׿^Ì9³5{ñþÏ~vk_§›öË_þr¡1¶í¶TÖwÀ5ñIo}G±Ý‚íÇrb"°õÏ\NÀFYW­O6º¡>Ã¥œˆNMB¨V€lÂÖÀ°lP¶Þ™®ZöŸÜLÝê ¬ šÖtã\7uccßï¾²Wz3ïäÖˆÕ¬þdKW¬@VÝŽÛMÆTvL{Í5×´–î©ÛR\vœ~¶þ™gûÈýÞØè†Ú5«É·åƒ½*Àöª`ŸÏŸl"uC½Þ9ã>ž4ÇûWݹM/ý^?“ÇÇ)ÀÖ;ÓUû™Ï|¦ØÊAë]S³k]ÖW¼øÅÅÖnÝV¬@VÑØ/|á {ß\pÁÅ® ñë\WâßüæŽßGØ«¯¾º¸ãŽ;j7¶¾Ž lZõ1Ⱥ‡k°Ñ®- k`ÇÁDØ|'ƒ`›BØzgº `o½õÖâ𗽬xßê/££1°Ï~úÓ‹Í\ãÿüÿ¨œ%X“8)‚«IÚÍ&\vL{ÕUW·ß~{íÀÖ×ñ Á€M«>Y÷ãp-6º`£¥"!‹ ¬À°î1 ;õ.`ëiØÍÜMZFçæ›on­á*€½é¦›Š9.⮫ÈèÅ_Üš¸j;ú5¯i-§³½‹â^vÙemÓ†y`µF³ºÇl_ûÚ×ZݘµôΗ¾ô¥Ö¹í6Ú¨5C0˺ÔÛASðÀNë¦êŒ|âë €v´Øh©HÀblÆciŒÀª»ºG-e¡%aæÎÝ z«;à\¼xqqýõ×·V2½ï}ï+6ž3§5U*iVàºåmt\³Ïu3ÈåûϸÊ9ýèG AgU>Ø+¯¼²U†˜ €wÀû +l:uÑïºÇüØh×¢ €pW;•-[ &£­¥}BçF¬¶mCy‘Mª %›ªm£\D`ãœéûî_Yuì’bu7I“@v]7~õôÓOo¬ºè~âŸX`ë–·±ã{º®Æk2'ÅU>¶ÿE{ï]ìºí¶•Kä`¯¸âŠäÆnD`ãêºßÐÀ¦Qý®çqÍ€nÉ›ØÉ9sæ>ú¦ãN*ØòÒàe‡¼¼˜={¶[¤€ D*À°‘¦2ÉØÎœénY^<óY{´ VÛ;ìP|êSŸjM¤dX-—»yæ™Åþ® ñKÀ¾óïœ>O»Áÿù?Å[O8¡4/uýýüç?ßêλ°Õu¿€M£úU¿ãž/í:4° >cå¸Û\Ž÷Ùg¯`£¶À~õ‰O,ît³€²å¥êÎmMÌBÌÓ0¥Û3½ôÂK ÷Ë÷4ÈvØa…f!®[—5<®ñ«›ÎšÕšÉx²G»|”æ…Ï}n«kñwL³‡ç `?ûÙ϶Ƶv²1¶»únÒA`‡_MÖ'y±l—ûÐø¾ Ø.Ÿšq>m ‚lMQ¾ƒÂœ6¶Á€í¾U·â%o<®xÂT·âu€¾ûÝï®]—5\·UKï,qck '»uaŸñ¤'{ì´Sk؛ݦÿÃs `Íll÷õݨ°Ã¯ƒ¦ê’|_—D`£h€¥ qôã’wÂÉ&Šïfï9ÕÛ2¶<5põwdv@)ÀöîLkß½ž³÷t4vë­·.þû¿ÿ»X¾|yÔ¦_î v[vGÀªmîšk¶ÆÆ jßâÖŠ]âf,öóÀ*2ûÕ¯~µ£ €í½¾{…vøuÐkr~u°ÑÞ À°ÑK¾ 绢yË·þ(y¢ °Í9Ó]ú™b£7™Ù—¼ä%-¸ÔøØ˜MA)Úúr²êR,xÕöèTWâË/¿|:¬þvº±ŒNsuÞ ÈŒ+ÀªËÜî{ì½=m»Ånn¼yÝ9»=k÷â©OÝ®6òÙqÇŠíl•Vé•v—gî•þ)O}ZTºNËð,W½ïÆÖ†qÝØ°,ý¸ä›€Í·î(y °ÍÂŒºŸôÖwLw+žá–É9ñÄ‹Ûo¿=z{ûÉ'Ož˜Xb•ÝeË-[3+/¬ÖŽ]¶lYÇÛlw ã °š%UKPÅÚ¬fúÖZÊuéÏ:ë¬bÂ=/uét|¿ýö+öÚk¯¨´J¿á†gœqFTz÷šJ×iž÷¼} ­EÝ© +=Ýà°,ý¸ä›€Í·î(y °ý™;ï¾·Øg¿ý§£±O~ò“ ­íª¥rb¶ÿéŸZc`- Û뺿öå/o/€ýä'?Y|ùË_îx`ûSç±À0Î{Ê)§±Ÿu×U¬X±¢6¹ÖdžãÖaŽùqÄÅ¢E‹b’¶Òl²É&-(ù`c>–á°Ã° ·¡= €`Ø \N`s©©<ÊÉKcªžØþÂŒºMn²éfÓ »Ï>ûW]uUkýØv[À^©õgÿîïZç `/½ôÒB0ÚÍöÅ/~±øÜç>×ÊGŽ·ÆñÆéz³6ó ·Ö2+¥Ø<œŠ.J À°Mù¢nʌնíÂ9e °yŒ.!§S€í Fbaîíï:½˜1c­,ª[ñÑG]h 
ªÍX7qYk‰ÝäPçœsNëumÀ^}õÕ]mŠÞxàÓ`­ˆqì½®7›`ØN¢ÀìÈ6Õî7ÉÕæõxw“¬ÛÛûxXíYÃËè87aµÉm‰Óû¤Û'aÇ4[–ìÀMxà ž†FAè‡>ô¡âÖ[o}Ü&€uÓmOƒk˜Nçj¦cER;ÝŽ=öØBKþLklâ°ñq¼. À°cêy4Ûl¦Q\¶ù‡!ÕØTk&Ïr°ìР튫¯+¶ØòÉÓ ûÌg>³øÂ¾Ph9ÛqËê(â*põ÷ÛßXÍZ|å•WFoZ£v«­¶š¾®–þÉivÓQ]€`ót,5 ÀÊ,‰À&øpZ‘Ø„+'â°ìÐÖ@ìÝï;«XËM̤Hèžð„⨣Žj-»£™†ë6ìE]T\qŵÛùçŸ_ì¶ÛnÓàºÕÖÛZògT€0·û`X6C¯!Í"' °ß¼kEqõWn*Ýn¼íÎFÚŸÓÞsF¡-·6@å%›æÕR°ýPu|ó`Ø$½{Vk÷= Á1ŽC:3U°,;¾ÎGÃwž=À*Šj‘Y®ßV阺+Úêw?V÷aEzíu[¶ü´©µlÃOBÂÙ° WN†E`Ød¡N‘ÒÃõ4„jÆàÓN;íq{Á—_~yk[ºti±çž{NŸ³Ùf›K/¼$Ù{LÍ™dyX€ÍÐkH³ÈY¬ÀU ©±²ûø¥­nÆ~÷bë~¬}J£o¥±÷uÙUzK«c)w/`Ó| úQ*¶ªŽož,›<Üi†àgì°Ó4”n·Ýv­nÃ×_}1oÞ¼â£ýhk&âC=t•eqN:åÅ}÷¯Lþþ )] €`Øñu>¾ólöìsÎ_Fmb#ª"©ú߀Ôë¸àOû|€-Ë/¥÷~X¶á'!áìØ„+'â°l6€wÎy´&`R·bm/vKìÌ;·Ðz®YÛÿ¿´ÐZ³)7Ú”íÑ€`Ø ½†æ‹|¹Ër¢Çl³تq±TE[ `ÃnÅ:nûüêå`X6»¦²î`UŽm§ V0 ÀfÚ“¨a€í‡}’'   ¤« ÀŽ:0¦v, À¦Û&°dݬŠxÛ´ ÏH¬º§Ü ¸—¶€à“Æ¥PFO€í¥æÜÎí€`ØÑkK»¸£^V—;Ímç.XøŒ•¼‡;[3¶‹'†SPPÀ`ókø†ÝðrýÞlFûê×S¼é¸“Fb;ü•GFÝÇnnù§ãŽ;.Ž^]ªu×U¬X±¢6ýõ×__Ì™3§6qÄE'ð¸É&›Ë–-‹ÊÛ½S£ÒuZ†ý÷? Øgßý£4NÁ¦f¬µÖœpÛ©lm5XÙ€N÷Í7ïw¼“{{'C?v|üpõõ_4>·Ë¢À``ókø¾y׊ÒÙkm¾QíÚÕo—^xI60RDKÞx\qðK‰ºìë_ÿú(ÈS"ö1©ž÷¼}ØÑ„á^öÎK¸oÎ°Žƒ`ãã¦p•ù®ênÁP AØüVëá… À÷ºÂüGyvÈAk9.× ŸrÊ)l‡QàÃ[\œyö¹ÙÌ¿ÞìÙ¸&J>Ÿö ôÒ…XA} ]ˆókÃõÎ`ÇçõÀŽO]s§T€\ã'è|ö ,ìàê¬×ºâü¿Õû»wÚ…€`ƒ8ØKu °¯û¸âŽô$N£üþ`û° ójì0ÕçÚ#«@¿V ‹[#$ðòÿ·ýê«cepgût^x¼*?åkù)ïNA»VÙ¹–oÙ}ؾª² `Ï>çüUîÿ¿0Ϫku°U‡š~ÿG>®ŽÊêÆÀVÕƒéPV¯í´òíÅ®­ï°>ªl¦Óú&ý`~`Ø‘mH»»±nÖ‡W]u «÷õ¾/8 ÕóG›þfIoïM¶»‡&dzØk-Ý2Ó}ªnú °—T£§¿Ýe‹þímÓP鳯Ñí;äÐÅÓ §þÿØÇ/måU–Ÿ`ÄŽéÛ Ñ EÓòk´X#mçêZ–ÖöÙ·Òú°¥ýÚç§Óq5þ*‡îÝîÑ.¼¿ØkuÒ…XÐlúÛ·éëÿoõ£zïÙêÆ¿×²zuô5xViUvÿ¦›lÅ~œ°û°ò•ý ˜öæ`5­ À¦ë ¥dݬ ºÐ+mßÖÚ/µö£¡Ú¿ýkú]1ù5 °êNîÛÅP š‹–+ÀbM* ˜àãÀ 2 |ÔðùðþŠŽ­4À³ÍТzR??5²>)ÿx;€µ¼…ÔõTf+ŸòôlS¾:Ç J eÿ+ÿ¸Ò”u!6³k*]̵bÖÊéGAu©æœè>Cm”οg¥±rÚUõꃿiaeÐw;­ ¸­ÎCgI×6•g*-P­r>X–Fw–ºÿ&zÔ¤ï¶ö|ûm–ßë©ÓÞIaÿýQ×CÈ®e=Œü¿ÛõбÞAaYuOv_M ÷iâ ì2gG“=Ú§÷I¶OÂŽi¶ìTÅ`ýˆk]÷×2€õ'¿ÑóaΠÑïz;öÔ¢ÅeNÉ5дÈbÙ}ÕÝK;¨ÄùåÔ¶»VUCiPêë¡H¦µ(¸E·ý ªº‰…÷VW¯~Ôi‚´Ê`åõÛî)Þqøõ>Ç{`Ø1õ7úyÛ}ز¶/|ÿtÛ;É~(ö{MÙ¦öC§ßþË—ðÛ.k³ýFjüžPa¤ØztY™ý¼•¿ßãË÷]š~ç°ý|,ÒÊ€M«>r/ ›ÀZwRW£o¿Ñá3`ýÆÐþŽébB™ß`•A°ß˜ÖAY;X éíæZU«E¼CMüFÛ¢´¾cPU^»NÀʉ°kÊPÄÕÀ†d???ºmðêwV^áýÐ-ý(, Àæî8$Xþ¾lD†mML¡v=nÂ,ý¨ßnXo›`ý^Ÿö°µËÖþ‡?vZï û¡ÖÚËðä¦áUù° >M}*Û'aÇ4[6€ 
»ÙÖE-c¶ÛÉ%ê"°a¾~ÃÝ4Àvz­v‚i•þÒ®ÿ»‰ÀZ”ÔóN"°ö ºEýîÂu‘Þ~8äÙ °ì˜úý¼í¡l¯½“ *ChŒÀú?x‡C”ÂbõC§?–×~@·<Âo?ßýl?‹´ò`ÓªÜKÀ&°þxÉ^#°Ö@ù ?.Fp呟.lüôë¬\8È~¹ éõó+ƒñË¢­Ý\«ª¡-ƒI¥µ_¹mŒ°4²û·c¡–ÊË[Õ5Ü Óÿ%½€5'ÂïöFÃÃ:ìf¶é~:'äýxè`ØÜ‡ËŸÀ†½ˆ:íä÷2 ëwuö{ñX{À&há#P$v*1¡[`XëòcѶ^#°l6&Æ,в_hCèôÇÓXÃæ«4~7ܘ¬?3¯5øeÛ͵Ú“?æÇô°û õÖÿv_VkèUVëŠU×…ØÎ±zí`ý.ÍV^ÿÚþ¬Ê~7e ±™Hi¿t`Ø„|€Q)J_Ö"˜6„£ìÝPÖS§ÓÞI–¯M¤h?H†ícÙØN"°ízY•uQî×».Ä£òøÅݧ©â`°~Ô΃ð×Z›W–?  Ò‡i•¦l=а¡QžjØêf1,k üò”å«F¶lLmx­²{WÞ~¶ÝL‡VŽ˜kÕ5´yüùù•éîÓ9a÷®˜zõÏ+[ãÖ/³ŸŸ@?t˜B`– (ÕE; ëtáøà €`ãšgRu @ßÖ~ôõÛ-½í‡Înz …sØ{Øz6ùËÌYÛc˺…c`;Øp¥ðýO¶Ë#i´l´T$ŒP€Àƒ„QÑ:ü•Ü"²ƒ˜XcT4Lñ>X6¢m&Ig ô`õ.ñgæµ^1ö#c7=†|€ gö'ä ׉gÐج«{ »û½{ØÎŒÔq °q:‘*N€]e™šþ˜2©ñ­Úr™×föï«]¶H3üRX6®y&U `õþ´µVÕã¥lÎNz …=x”gY)ëj«È©]ÓïÍSÖ¨®·õìÒõTfÿ^ŽY± ‰ö„Iœ:°òÌ“°™W`bÅ`Ø‘Ø&Rò>ÜC°lb~À°‹s¥+À¼ 10€‡wÔ ï€íÑò3:€Í¨²2(* À°îWõA6Ø\k¼õ`Ø |ƒAq…»˜|Û^>l¦íÛ‹Ùçwn¯z~wL‰û¥ Ào™6ü€pž À°ýjÐ3Í€ã6€Íô©¥Ø(0dX€cç<°ìÛýÔ.ÀŽqÀ¦ö8RÈC€]`5qCnà„.ƒ½Ül$¶¼,›‡{0°R°ì YÛ2—ÏdCy‘  @ œ‘pÙZ´™3×yð¶åwu4RÓøÛ,·šÕQ[|H[ï¯.-ÇÝv6À°mÜÒ¿ À6°Ç;sïuB°ôŸJˆ(€¦ûhk*}M¯¯éôS²~”GP³ÌŽ­ñ—Š”#_HÀ.^¼¸X¶lYÔ¶îºë_|qmÚ³Î:«˜˜˜¨M§ëî·ß~Å^{í•Vé7ÜpÃâŒ3ΈJïÞ©Qé:-Ãóž·OqæÙç&ó>ª{×›=û§Å|ZÙZX¶)€­56   ÀH)0î{ö9ç·ÖwÊô¿Ö¥«sÔu|ØåÑõ¥Ó î—ëä ©íêné…—»ï±gô¶å–[ÏÜm÷Úô»>s7÷ oY›N×~úÂ…ÅSžú´¨´J¿å“·*vÞå™Qé7Û|ó¨t–áY®W\}]6Ïí"°,ý¸P<Æ`‰´.²Š6î²ënÝú֦Ȭ"•ûø¥«8:Ç Në\EJuŽÒ‡cG혎ëoßÑWÞšÐ!tþ«Êcét]+g˜g]™Ô}Øï6­û´üÂòëº&TòcõÚ´ °Ñ. 
À°Ñ QP€†Å(‚Š8 î paw[¥1HUú_§È­¾ý¨®òR¨†×4-s$ËÊ£tÊÊSl×·<êÊd@í§·ò똿aÚ¦^ò¢°Ñ±6ÚÅ`X6úqÉ7á Wô#ó->%G4÷lY÷ذˮ ÓÒ°Û±`1ŒP*½EU}Ø•£æ§ÿË"°æÔ‡å)ëölyÚXÞº2ùPZ–ŸŠÖ†Ý¬ŽÑê’ºlÒØè¶€`ØèÇ%ß„ó]Ñõ°óAhPöñã]ËÆœú «è¤ßý¶¬[°Ò !õ»$[´µ ËSõÓµ+“œU?º«î¡“ò6é “p… äelt À°lôã’oB6ߺ£ä +À>~‚¢2€õgí #ªukù ý-Ö16ÀÍ bíŠtÔk?l€nðØ'm±å#Ö‹‡ïÇz3å°½ý]§³gÏ`£—|°ùÖ]Š%W—t>NqØ*ø g!¶ådÊ¢•ÊÃÈj ƒNué ·S§1Ø2  »%·+S­Ôºm§÷CzÀ ]`£]‹&vÞ¬Y³n±å§Áœ9sŽŠ¶–ö ñi²Ù°ýPu|ó¼|o}Õ;w€-›a×&qúëÁÚÄH>ß·ýÖeXyjŸ¥±ÿ•ŸMääoÙ$Q~þeåñ'²‰¡ÂnÍ~7æ°L!”úùiL¬?‰“?S3à1ºàÑTÝÊöÃY¸›Ê{ÐùèyífMh=?á{bÐeÖõØhï¢ €¾ GZ+ÝÝ-é;ÌøæØŒ+/Á¢k\""°-'3ß)€ó—Ñ1GÐf¶‰’l¿E;möá²et옮¥ôþ²<:æÃgèx–•G€ ëX~á2:ueÒ}ûOùù)_R©²‰®†åsÝôº¬ þ ê­k7Û@–¿¬gG§yT¥ïÇ}6U6åÀF»g¸”Ñ©IˆÕ ,s‡&(MØ4ë%×R°S57îXëâBi™CgËá„Çúé¬vëX6U&Ó§Ûrp^úÀÙt `û5ÑXÌ{¡L¿¦ž¿²¼‡¡o'6ÀæêQîŒ`®<6áÊɰh,»Jß0‚Yå8–u ì§³Ú‰ãvk޹§ºüíáAuçp|ü Õ¯ó²IÇün¸êQ ÛÔf€¨ãeöª}vnLª·€ìÔ¢¥á3ªÿÃëù½*ô·•Ißþ½èܰ;´å¦-{þT6]»ì½¡û±r•W/ ¿Ü¦‘ )hâùîdz ÀfèQäÜ`®A6áÊɰh,;í¨ÊA­¯Vå`ËÔ¹íÖq퇓X—gSe*sàë®ÍqÖº¹†ëëÇûÁǺÕ˜†]Õmâ4³'çͶý=s¯´Ö·ŸÆ†öìza÷xƒn\µD–MΦô6†¼ê½aãâí^mŒ¹•ÙîMe²¼”Æ`݆6è˜Êc°ïß§þî6BÜÏg€ÍÐ#¢È¹+À&\ƒl•“aÑXv•HK?:òo ·ú·l8q˜A›õ'ó—ª’f7F-Y¦gYb³î§7€¶}Í´ÿUõÇÄû½+ʆTMð‚t˜o»ÉãT–pñvîÔì €ÍÐ#¢È¹+À&\ƒl•“aÑX€ãEäSsúG©<6±X89𥍾}È —‚êdò°2€-ƒÀ²kø“" ²Éõ#ÈÖU·“¡aÚ°|º·°×†ȾV¡m06Cƒ"£@`û«oO¹°=ÉÇÉ, À°Ø@lÀ¢šecamœª±~7\ƒÖ°ûpàWl8N´ $ÃY}ýrû Ú4À†× —¶²ÙÇC`ñgPØ„M€M¸r2, À/}€—:Ðàøèw§6À »ë–-YÚƒuîtò°*€ ' ËÂ`˜O°±šÅD`ÃI ÊºÛ{?š ÀfèqPdè¯lõí)w¶'ù8™l¹ Œû2:Õèu<Ø:#˜}¶$“qÕ>¿+­E^«ºØV­—là©ülb#wjùk–ÖÝÙα ü‰£,ŠkùYäTù„ª\U“?)¯Œ îíºÖµÙÊêíôR¹ýq»íf@¶½3 ®;pÉã/ÀÆkEÊzˆÀ%Kèƒ øk@jÀe¨4¶iaºlßÄB¸ Ûy~´Òf¶k…ìww6˜õ—ͱ‡íZ~~ºžgX¶ð¼²±Òø:øPó5òË.ñ3lxÕõØzd*ÅÍî{^tj¢@µlÂÖ¡‡ü®„ËGÑòR€`—>ÀK 4elĵ½^UKÀTu¥¹NUžáxS?šiÇ:ÁNÒÖ•»]^M^§®M`£¡.¥‚3|P WØ^ìóù}ΟìÇG€`Xl 1°nƃZW9õñ¤MAå ó`£)6Z*Ö(Àb"(0& °,ð’¼ ÒÉæZiFiÕUÖ_ûµßõ¤kååì·.½äÀF{Ql´T$`±@)À°,‹ `Ø@Ã6ÀF;Yl´T$`±@)p*2<¦³§‰ê%¹Ô)6€ ËØhï€–Š„5 éŽ3!f‚(0> °8ºÃrt¹.¶‡ Œž °Ñþ- QPPÀS€=( N±l`X6ÀF»l´T$D@@–qo {–³Ìu5l -`£] 6Z*¢     À°Ø6€ ôÁØh€–Š„(¯3\ÑÏ·ø”ÒT€.ÄiE/ˆ&QØ6³ °Ñm=- Q _滢ëaçƒ(Р,ÎrÎÎ2eÇ~±´l€n Øh©Hˆù*Àæ[w”Õ¥œˆNMB¨V@;‰@i*À¦Y/¹– €ª96-çgœúÀ°œm€ÍÕ-¢Ü+À&\yl•“aÑX–5 û°dÎŽ7e±Þm€ÍÐ#¢È¹+À&\ƒl•“aÑX€`±lhØØ ="Šœ»lÂ5À&\9 €`q\v\‰^õ½BC4ÌÝØ ="Šœ»lÂ5À&\9 
€`XÀ°†m€ÍÐ#¢È¹+À&\ƒl•“aÑXǵaÇ5÷Èå'ú‰ ônl†EÎ]6á`®œ ‹À°,‹ `Ø@Ã6ÀfèQäÜ`®A6áÊɰh,‹ãÚ°ãJôª÷è¢aî6ÀF{DÑ)Iˆí`¶6áÊɰh, À°Ø6€ 4ll´Gt—K9/:5 Q Z6aëÐC~GÂå£hy)À°8® ;®¹GŽ(?ÑOl w`£¡.¥‚3|P WØ^ìóù3úœ?Ù, À°Ø6€ 4ll´#ÀFKEÂXLÆD€ÅqmØq%zÕ{ô Ñ0w`£½(6Z*°Ø  €`X€Å°l a`£,6Z*°Ø  €XŒ )0sæ:Þ¶ünœ¸†¸Ü£(”ŸH 6€ tcl´wÀFKEÂrÇ'P PÆF'µ'•s°l(³6Ú}`£¥"!     x °8¡€6€ `MÙíb°ÑR‘PPXºLÓeÀ°>Øíb°ÑR‘PPX×>8®MEpȇh 6¯ °Ñ.- Q _&\ÑÏÈ·ø”ÒT€.Äù:Š8ùÔ6€ ¤flt[ÀFKEBÈWù®èzØù  4¨‹œšLy°Il _`£h6Z*¢@¾ °ùÖ%OX6_G'ŸºÃ°Ôl€nðØh©Hˆù*Àæ[w)–\öÄÇ)Àâ§æSlÈרh×€–Š„5 àÓ&l"l•“aÑŠ ËÜ—"°ù:Š8ùÔ6€ ¤fltS ÀFKE–¹ã“¨”¦lšõ’k©Ø©š`q€Ss€)6‰ äkl´[t¤K9#:5 Q Z6aë`®œ ‹À°,¥ÃR:Ø6€ 4ll†EÎ]6á`®œ ‹À°8® ;®DÍòšQwÔ]S6ÀfèQäÜ`®A6áÊɰh, À°Ø6€ 4ll†EÎ]6á`®œ ‹À°8® ;®MEpȇh 6¯ °zD9wØ„k€M¸r2, À°,6€ ` Û›¡GD‘sW€M¸Ø„+'â°,ŽkÃŽ+Q³|£fÔu×” °zD9wØ„k€M¸r2, À°,6€ ` Û›¡GD‘sW€M¸Ø„+'â°,ŽkÃŽkSò!ˆ äkl´G4/:% Q ½lÂÀ&\9 €`XÀ°†m€öˆîr)åÛòA^`{U°çO¸¼UA|P  XǵaÇ•¨Y¾Q3ꎺkÊØhe­ ‰Àb(€N€`XlÀ¶6ÚÇ`£¥"aD`1Xǵaǵ©ù Äòµ6Ú‹`£¥"!‹   H€`XlÀ¶6ÚÉ`£¥"!‹   Hƒá1fÎ\çÁÛ–ß×°G)ßuGÝaÝÛí]°ÑR‘°F}Üñ¨„(€c£Û½£†“‹vØ6€ ¬jl´ûÀFKEB@@ð`qÀqÀ±lhÊØh€–Š„(€(€(ÀÒeš.ÓØ6€ ôÁØh€–Š„(€(€(Àâ¸öÁqm*‚C>D±|m€v1Øh©Hˆù*0ኾ4ßâSrHSºçë(âäSwØ6š °Ñm=- Q _滢ëaçƒ(Р,pj0åÁ&±|m€n Øh©Hˆù*Àæ[w”D±|m€v²Øh©HÀb(€¬g3g®óàmËïæ€9lÀ°žm€v²Øh©HÀb(€R`S€Í7ÒA”ŠºÃ°Ôl€ö.Øh©HX£À®îø TB@±Q€ÅNͦ<Ø$6¯ °Ñî- QPPÀS€Í×QÄɧî°l 5`£] 6Z*¢    Ûó8¯ÔœFÊÈ`Ø@ 6ÀF»l´T$D@@€e²lÀú`l´‹ÀFKEBÈW WôËó->%G4  1Q›¢6”;ÄFÃØè¶€–Š„(¯ó]Ñõ°óAhPv4œFœêÀR°6º`£¥"! 
ä«›oÝQò„`qzSpz)vˆ Œ† °Ñ >- Q _Ø|ë.Å’”b¡†Q&v4œFœêÀR°6º%`£¥"aû¸ã¬›¨™°‰VL¦Å*2-wãÅ`qzSpz)vˆ Œ† °ÑÍ4- kXæŽO¢Rš °iÖK®¥`§j€ §çŸzİl€v‹ˆšEKEB6_`ó­»KÀ°,¡Ñ‡%4Rp ) ‡ ÏØ]Ê4â M¸‚Ø„+'â°, ÀbØ6а °zD9wØ„k€M¸r2, Àâ¸6ì¸õ^Ô íÑ>`3ôˆ(rî ° × ›pådX4€`XlÀ¶6Cˆ"ç®›p ° WN†E`X׆×T"@”ƒh$60<`3ôˆ(rî ° × ›pådX4€`XlÀ¶6Cˆ"ç®›p ° WN†E`X׆W¢^Ëz¡=Ú§bl†EÎ]6á`®œ ‹À°,‹ `Ø@Ã6ÀF{D“Ñ)Iˆí`¶6áÊɰh,‹ãÚ°ãšJˆrĆgl´G´Â¥”oËzU€íUÁ>ž?Ïå½Òmª¤n¶ãkÊ6Ã_ÚeÞ*ù·¯—Ôô`X€Å°l a`£=aìÍ]úòWå·¶ûÈïêÆ_Ö9äß^»ÔôMF[ ®À®S¤JêtÛ6¢´äß^¤QÒga„=ŒE’™3×yð¶åwãÄ5ìÄ^íÑž °Ñ®ƒüÒN}YK/¬îCþí5}êìã(€(0: °Ãsôp²ÑÀFÍØÑñ¸@@HRzÔhî›Æ†gl’M=…B@ìØzª›Jv§ÀýW€ž£‡“öØ60j6Àö¿Ýæ (€(0,NwÖ6ˆÏ‰î"×âB\#?XèQs ¹lž °ùù”PbP *ØØZÃtìð=œl´Ç°Q³v ‰ÁÞòyîrkö’\ PÀ`±…$`q GÍæ~°il`x6À&Ñ´ª¢ö·m\P= Ë‚4—¹ý½ö>ÔõvÔ r@UVä¡S¼þÖq{!è׿ôð÷éogçà%#°ÊÛèoÿ\ÿÅ |ýréeÅg„`‡çèád£=6€ Œš °#ä ÄÝŠù¯~jí+ÜúŒeiã®ò·Tl§Š‘T  `µO»ºFèou“ÐG©6|9h¿>¸Ö¥Â€ÔØX?_kÇìÜ£§®m×Rz][ûùÅ«AH%+zÔhî›Æ†gl*­ûÀÊ!ÔüU]T¾§Sä;ÚÇ¢µaÁä[j+ Ø>7Ÿ6Ø2ßÔò,»–ö)_23.4J ”ì¤wƒaÔÔÿÕÊ èïª_¹ìÅayévé¨úåÌ^a´v”ê€{q °Ãsôp²ÑÀFÍرs-äkúAëµö,›Å‚$öí÷,”j¾²ò·c>À†~­Î1x¶<}PÕ1c½Ç®²¸aèU2€õE t=pöðú/{XÃò(塾­;‡Em-½=Äá·°–O¯÷Ìù *Àâ@šÍý`ÓØÀðl€M°¡ïo‘,âj (U~êd¶a@Ä÷kUBËÇ‚/òC ñ£»Jgk½ ý t ?ê+ÿÕ‡bƒ[&ê¯Mû+Ð)ÀÚ‹@5XZë¶×ÿÊ»êãƒðWÇøÞ;eׂؕ2©µ\ýnÅíVÇtp̬Ïe%%;–æÉM7©€úè‡KÝø¿$é¡öûñëÚê¡—@ÙÌiŠÀêÁ׎s ó²|ì~tÜÖìRþþ Ee_0Mê@^CV€Å5šûÁ¦±áÙ;äF}x—†ë¼Z·^Áf蛆KDÆ‘3µÞ†þ<-~WfS"6Gvx6•QP Øá9z8Ùh `£fl3ms†¹RÕ£/ z*ÃùWt{·6±’¾ý†uXåa½íš6æÕïFLâ ‰"£  @[XèQs ¹lž °cët(ÒYݨ¶‹z >ÕÛ¯¬waÙyá>ç÷`T9²Ê3LKôulÍ“G)fMLÜuÅÕ×8|ÃsøÐí±l`Tl€)›A@@ô˜9sæßþ®Ó؇p GÅæ>°el`x6À¦×ÎS"@@5Ž?êØ%ÂážÃ‡öh `£b®Ô8H>(€(€(€(Ð7pЋç‰û°lŽ Üy÷½ÅZk­õpßZ+2F@@@§À®[o³íJ¾á8|èŽîØ60*6pÍu·? 
eE@&Ý­ø <çrgšµÍfxÓÌnþ”ä¹Üål£Àß­¹æïîYñã`‹ `Ø6е ,yãqZc5ßIƒ‹(€(0: üü—s¹3­Í.í/ Ë}PÎ Ü/æ—žyö¹];-£=à>ˆ„aØ6н l¶ù¿rÍÌB[@@ÑQ@ X¶ÎUêwª@üôÔ Mù:R`ÑÞÏßïa·î7´C;lg¸mùÝ…Öï¨å!ñ P0â¼A_t ®'?Y>>9&+Œ[]rõBÑVÕT–Ö¾cØ¿Žòñ?]µ4úö?áÓJŸcWè‘3ªohƬY÷ËgŒ{@°lèÎñêß̘1ãM ¶KdÕ¼‚¬Ðlþ*ùç(ßÚ†ÎÅÞüâÐŽ=—t(¬eЧ_Ádðö‹˜^,U«ަ¦WäS›þÖù1«ˆ©u_Ö9:×麖Fyë?Âje4q5&–_š’5µ® vÐ{þã¯pÞºsÞРݰl`\m@³¯½ö¬_ºÖgF×-'ö[ù}ønq*‡~oÌY ö܉QŠ4Y) £ö'?xêEâÿÂ#¸¬êÚvÛUºØñ´á¯Hძ~Aò_re+Žç¬*iÜ «Ù#5‹ä¸:aÜ7‚ `Ø@ç6ðœçîóÐ:ë¬óòqoC¿&Ê&áTO@h¬÷ `Úÿ´ë%(0”{ÀÔùåÔ¾²ŒÚ¯sôÑ·Ò„× ýg·stÌ;úÛ÷u•®êî$þP¼î!пK°^,U¿ÞXý¨h'k%ÖÃ¥­ `µß>zX¨¶¯ì—(=Üþ9Ý©ÂY©)°p‹-·úÅ}÷¯b™‰À°l Ö>rÁEœ3gîu©5f”çq ”ùrƒê (ð“ßh¾¬þ60Ô>ý/Øzú€©rÛæ÷@Ô>ýoÇ|_Wùi¿†äÓ €õ‘ïêû¸Úç÷’”/®ó-;Ï‚;~9í˜]ߘoä°Ín¬ `µÏ~ °G«=ì±P;׺Û‹¢Ýç?„áKÏ«°#g¦«­6kÖ¬—ì¾çä/‰Bt…@34ðq²õØqð*®Ãéûª'ßÇdO@ƒÉP¥É)Pô‡Ï•õô#ǹæƒ*âêû¬¨>\¨Ç¬ÒTnüùùé:þCW »A”u!î¤ìu÷ÆñÄØ`ƒ Þõ²EÿòëqrĸWÀÀ°x¸á–åÅ“¶Ør…k¾æ'Ö„QœrÂèà {VE&ýÈgè·Võ´rW¥/ëÊëûÜuØ2€µ<ý^“Ò~9ÊÀ{D¬R‹bú¿†ißäÔ]êoA«ªu/¶‡Gçùk“;•‰äÿ"ä?„–ÖºIø×òí H•€ÍÚë ïÆÃž°ãN;ÿòžÔv!Ãé‹wúÐ ­°l w¸ð—ÿaÞ¼ ¼Ö·¥ ¥ßf>¦Š5Èž€ÃXßom `ÍAZûù ÀÈ(Pö‹u“°¾ö~¤ÔÒ[7a›ÝÌë£_¾pv'ŽŸÕuí—$ƒY¿{²u=¶<«&™ÊâFS`bbý6ßbË.ûìU@,cá°lsÐüo>þäGŸ8gÎ-j"h+³R >6螀6ù’/ZY4Õº6›Qº‰À†A¤¨m,­•-¼fX¦ªŠ$ÈÊ@(, tjØz0꺗ýzUõ‹VÕõ«ÆÌúùÄŒ«Õƒ]ö"¢æGSmgÏž}‡–ØQ—±Ü#”Ÿè6€ `ÛÀ™gŸ[Ìš˜xx½õÖ?k4›º‘¿«p.–¦{*ØNjd¢Ú0:[Cðl¾¯ßKPé•¶]/À€µ¥&•ŸÝ§µÀŽ«Ì¼ñVåÐÇÊ\6Ž·l§?zä-l¦nîÊ6Efµ•=ÀÝä«s:aûªÛkq^¾ ´îºëþD +GFëþávaØ@.6pÅÕ×G»äOëÍžýˆVr©k¾æåÛ„}ÉÃ(¤i²'`¢¡àþì¾úÛÀ0,C¸ÔOØ °,rìû±ÖƒÑ&<Õ±0Oÿš”ñÓÔú÷¤|•Öï5é¯29ulì FO×ÚÍéÁ°q°ö+PS¿ò(ߨ¼ôÐûl7÷Â9y+p™µÖZëá­·Ùv¥€öMÇTh{Æ;=²`á3V²¡6€ `yÙÀâ#^ó{½Çõ­º[{íu~ë"®w¹æêx·Íϻ٢ôS~žà«ì3¨ž€òƒ«zÆú¡u•éÙkw=åSwͲ²j_Ùy½©êî‰ã(€(€ *°«Ëë ·:µ-vß“lh€ `Ø@v6`ïñ%SuG´µÁÆ2‘¬zÖÏž€MÊS6çL“ùWåÅø×A¨Ì5PPPPPÆFuµ±Þ´~œîWOÀNËÒ.½¢£a—á&ó/Ëk×ì÷=‘?                                                                               
ôG­3=ÙAÖJ«sø¤£€fãÔ2|PPPPPP`è N­©Bi=m±­iXtLZBBË+LÆ^`*AYÝ¢÷~¶±ç(ÝÑn;qjoØaù”Ü®×îÜAÀ¥ê¤“:ìâV9PPPPPêd ÃMÀÒĺ{ýX^ÝGøi5T¹ìžËÎ ó([ú*˜³u«ô<½¾:VIaeÔwÄv ü¡•€íF5ÎA@@@@h\XÁ•Ea=40Òw/Ÿ~¬ÀR÷FRí¾ª Öؘè¢Aœà´,½®oyZD¸W=ý2Vý˜Àöbœ‹(€(€(€(€(•zeQH¨Éî¨ß[U´€UÙÎs› ´Ý= N•Fi«îÇ@Riª>í¢ÅeçèZ:ÇÎ++#Ûƒqr*          @^ ´ØvÀ¥q:W‘A}WM¼äŸÌÎQ”·ì㙺Íú×(ëF+¸ô#°k{ç:YÖqm:f+—ʤtíºKä*¢Z°–G]´Úº!ÇD|UNXi«2–å_°ÒYX=©¬í>º–îW›Õi».Ä~é:LÀU#0‡QPPPPºW `}èÑß™5À²ÿË¢|ö¿uÅ »þ †,­½Âòûi×°|üîÄ>ˆ U3UZÜ2€µ{SYê>v~̤N¦¯òÔõu?áýW¬éj`‘ä²rZ^~éÞªV÷kiý.Öu _§ÇQPPPPP T*€Uä®,ê§ý‚<c?!úðéG] úÂȧåcÑS+´•'Œ^V•ßö·kyYÞe“,Y>Že[õ…ï$­°“õQ–ŸêÃÊé¯_O!l[}ø?Bè\¿þüû°h²®ïG¶MË&&ã‘E@@@@@U0@3Xò#i‚Øefª@²,úi08ò¯a@VÖÅØ¢þ±&Vå)Ó²ýÃX•¥,Ò[°qýPöáÖ7„ªû¯ê6Ý.jÝ®Ë1           t­@ÀZ·ÓªèeÙÅê¶ìœ²n±í"”Ýó£„M¬Á¡±´±§>`}ø´.ÈezÕu‰ÏQ=·\¤~t8¬ÛN¢Ì].'¢         ŒŸehàv×5u1õÇFÔ•Eýª"{Ê«ìÚíàÇ@ËcÙÀN–@œ[å÷Ç«`}Í âËôj§¹òϱÊ"¶–Þ·«‡p|­o:ÖÉ ã÷äqÇ(€(€(€(€(€+P€Ui죊Í:+P©ê¶š ÀJ8+«€Õ–Î Çr–ÝE’«Ð¯”:¸ Ó–Mˆä—s˜«k«î«¶Ž‘PPPPPP U[5VÒº“†ÑµnºWu­ŠÞ•]£©¬4²¼ãð“xeÚn™ÿtK;ÁQU7]¢¤vV_öXå+‹lûå®êB\¡ç‰C@@@@@¾(P€ºXÙÕªb»lÙò/í¹j©ƒ5*ë¶j]Ò²ûðËTU÷¯ý*wÕú¶6’Ò„P\U±UePzÓ®ëÿ ÐnY]»lvi»/Fa]§ª;yY]ôÅ@ÉPPPPPLv[Yl7pÕXí·cÖÝÔ(\ºÆöÄZzƒ²°KmUùýe€”‡À͇³*µû«‚ѪóL++·®ge·5mÛM”Tf‘íÖ@Ô´õÖ@Ü4·1¼þ²8!DûúZ¹M[ëê—Ñ–ÑQý õµ¥–xÊPPPPPP QÚ¬.d>뎪oÛg0§ïvXA§®cçtù³ ÛMYwU¶,½Ž…Ýbە߇á°[rˆVu­:OåÔ…×´²Ûyáú¶í*³Àê<ƒN¥ó'šÒ1?âë_[«œeŸ°ì§e]ˆu~¸N¬oeuۨᒠ         Àø) ¨d8žÕWAÀXv\ð"xô£oJvAÕ>ƒ+}+:§óªÆgj¿Ì®3YQ5uå¯:_åª*C»cUzøÅÓùvŸú6ýt®ßõW:´û´+‡W—Fºé:Úª4ôË`éUn¿ÞÚÙHx¦kn“Ã(€(€(€(€(€(€©*àG£Ûaªå§\(€(€(€(€(€(€c¦@UWÞ1“ÛE@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@1Pàÿ·ßŽ×Å‘>7IEND®B`‚nova-13.0.0/doc/source/images/rpc/flow1.png0000664000567000056710000012002612701407773021544 0ustar jenkinsjenkins00000000000000‰PNG  IHDR°Y¡iásRGB®ÎégAMA± üa cHRMz&€„ú€èu0ê`:˜pœºQ< pHYsÊ&ó?ŸIDATx^í ¸ÅÕþï?ÆpE«PÃÅŠŠŠWŒJHb"nñªqGçDT¢âB *"Ê§Ä¨Ä „ˆŠQEÑDÀ%¸cÔHLŒ&1Iýëmïë=ÓÕ3Ý=½¼ó<õ̽ÓÕÕÕoWw_ŸSU üP*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T  ôÒg}®N£Û·oëFm´3Q¶¶¶Ì¶ñx–¯»n›+[®á)ú»G!{8ž4 T€ P* ú¶mÛöú¶mÛ}°é¦›}|ÚÃ?1r”;îZ5eê …¿™¨ÛÛÛ@vÛÀ„‰7ªQ^ê=ˇyÌß7Üh£¿555ýQCíe„Ù\ôã< *@¨ …P Ç†nøD·î›ÿå’1WªÅKW¨÷>úŒ‰° ° °   
<6o‘~öÈÏ;wî¼fà 7¾V÷zM…èùx’T€ P*@¨@ö€±£eòí÷ÒP-€¡Ê|1Ã6À6P® ¼±zúÙ¹|ÖÔ´ášvíÚ“½5¦T€ P*@ò¬@Ó×;v\cF Zµllllh+W½§öÛÐG;vº9Ï Ï P*@¨ÈŽ=6ÙdÓ?Þ~÷ôÒ`¥ÁÊ6À6À6À6à×.¼ḩbçé®­1;ÝkJ¨ T€ äMM¾Ñ¥Ëó-¥×•!ÃllllÛÀÍ·Mù—î4bóf ð|¨ T€ dCF6üâ¬GçÑh¥ÑÊ6À6À6À6àÔ.ýÅÕÚ»É]ÙèæRUË&]›ÑÒ€ÚÂóí-gWebùùÓ¿oªZ8+C¨ˆ[„‚ám:Ã.È6À6À6À6¦ ü¸ù„õr;çÅÝOå¬üú|VU€Olú ¯^½,¿2ÀfMŸéúšNj0ÜN¨È“Cöðí¿„1X˜—.ÛÛÛÛÚ&ûëÐÔô±î7ÉSÇó¹æÄ| _š °Å¹Øeÿê¤ÖI&f¢T€ P*ÐZáÍ'œüž4<ÙØØØ¢h}úî°Fw3€3~*+@€e ¡T€ P*V¦7^8eê NÞÄõÙØØØ"i—Œ¹RµmÛöú°ýQó`.àyó”© T€ T¯@Û¶í>X¼tE$FKoîY=@lllÙn“nºMuØh£YÕ÷L…Ú“Ëêród© T fÖùêW?Çú}4³m0òúñú± ° ¤¥ L›9[m´ÑFKjî X T€ P*@,š×ãøW† òÛÛÛ@dmQ=ˆîaK¨ T€ P¨ ÀÒhÌhM‹÷‡õ '’m ¾mQ=]»uÿ³î°°.%?T€ $£@£> CÒ“ÑšG¡T Î `±llll‘¶ÎDœlÏ®×Þ=OQ1eOƒõ×ßà•ˆZK³.grDe±*@¨@ªˆÔh¡ç£¾žêOýÙØÒаÉöû`§N˜x#ûó ¾ˆjyéEƒ!ÀF¡"Ë T  °ÃË`‡—•u (± ° ”kØdûlvïEl²÷ F¨@> À`ÙØØØ"mØd ,V·8z`“½íx4*@ê¨@¤F =2ÙíDyíxíØØ¢jX§^î uʉ›Ý{—Ø(î–A¨@Ñ ÀÒóÂ6À6À6À6i À:™u®9N9 °‘¶Ï¨^ÔDQ6Š;€eP*P4rÛ)Dѱ°Œì¾ÕæµKßµ[¹ê=Õ¡C“óL©ë®»®sÞ#Ð)‡œò¡Ìõ×_ß9/òÇQçÆÆF5ÑÒL=« °N¦–/ŽäùâÔ`25ë휅8 %Y ©W SF¡$}PÂkÂkâÚ/]¡ºwï¡\>ãÇWýúõsÉêåÙ{ï½ÕE]䔿K—.jΜ9Ny¥®Ÿ0uSßúܦ͜©g5Ö©ï'À` °N· 3Q*@Z+)£ÈÕPf>BÛ@úÚöK&À²+Ö ` °X> ¨ U(@€eÊ6À6H À`«è£ò¼ –Ï^lžïpž ±)ˆáJoXú¼a¼&¼&I·,6¶ž,›` °ØlÞ»¬5 uV€Ë”m€m ‘6@€%ÀÖ¹¿KÛá °|ö`ÓvW²>T€ dBD פ==<½‹lékXl&zÅä*I€%À`“»ßx$*@r¤–(ÛÛ@"m€K€ÍQßÅ©`ÏûùŪ\zí­j~ö<½x™Úµÿî ßY|q© 6ŠO³.„ËèD¡$Ë T õ dòŸÅNŠuNŸG×$ÙkB€%À¦¾GL¶‚…ØÍ·ØR6lx h£X”ñC«–¯Ê¤=C€Mö¦ãѨȇ™|à<’êM½£hXl>ºÍÈ΢k>;°>±€v‡õ!ÀöÐeõ¬u² šhÔ%à&G úlbä•}Â|³ü/µöÓ-ïú_·³#aø(ÛÛ@"m€K€Íc'ZÃ9`[ž½ðÊn%!äXÀÀ ëÄI·”¶ãŒÍbì/e¥=¼8B€­¡)r×8hÖ…®Öéa¨•>CõÆ9U&–_Y»"èGûM{™‰®QxoX½€lÙnXlÚ;Ä„ëG€Õ+°)žÙi3g{ð `Å3¿ò¢FÂßGÝ\²_LÏî­wNõöG9ØÿKYiìC° ßu ËAÉ ÎCJ,½oll‰´,¶P½kðÉ`õ³°yùã[=ƒàa/+yÌñ²½¤&Àb?nÓ­f°Á7JVs`³zåXï,(ˆášö„õ˶g×/×K€ÍB§˜`ûëc!º­æOSSÓÔ oL}î7Öï7*¼¬âEó9/Pë°ØÏ AN{ÿ@€­¹ù§¶lj/ +–Rß᥽óaý²O¼Nõ¿NXlų́O¡)Š‹°2–Eëaq)@€KY–Kô¸öõ7ìy x ŠÐ°Xvºñ(e€µÇ³âYhzQÅÛjÎ^Œas"'Ó‹‹m⽕çjËõÄõŒ¦6ž{" ¥`ÓpX‡¼*@€åøG¶¶DÚ–›×Ž´Þç•e€Å$K€8€'B¦&€Ê$N²†,ò!¿LÒ°4“<á”#å¥9¤˜ýݳiôEVU"¶*Ù¸Szèß# Ùɉ‰®q½¹d¹ô\² d§ ` °9é7Óp­l™¬,@ÒÏ ÅR:€N{B'ñÀb™À+’ ¯èìr±åÊK[Ÿ!À¶E»(Üç|}ÆÓŒ³~Yÿ½S T À¦à"ä¨ ˜ÑmŠŸ/ ÀÒûÆ6À6H À`ÙñF¦&€Â²‘(1+[ <Ú6USFš÷‰` ËKا°‘=\XP: À¶¾.‰®iîx^îÞ,¶Š>›»TV!Ä·mÛvnÖåóríçe–cK‘®Ô Ј¾ñ€ 
Šÿ1ËþÆ ‰íøðkîk7mÀ­”#À*eZQ&Ê|²?ÖV’ÿçQlë«J˜#̱ $Ü0Ɉ‘£ —NWý¿ÿ÷ÿ>yþ…|–'ü,È[x/ {»Y7¶€¨yXó7ÓÓ Ø4'X*W9ÌŒò©A@,òʇ¤·çAlë«È/ƒ^&Ëp÷F­Õî{P‡yL¦€$ àÀvìØÉ‰ÃÀ`ÞvÇv"Àæ„£»ãUì_ùÊW>þå•×°?Ï`ž5€µg÷µá&&¼¬2©¾Å[[ "l(µópªŒ²P~0><äÄsH…X,;ù vòQCd=ËÀÚË@Ô³>I›!Ä !N…žJÀ¾ÅLµ|é§ë1°ÓB\¿—’µh¨Ø.“8á[BŠm¸Åÿ2‰ö“0d„ËøX.~/ŒVCë&sµ†LÙÕ@_Ã^ô?ØÖ"²ó©±ó)O“nºMuêܹ­ÇsŒzî¹çÔi§VÚšÔ%c®ä5ˆé¤l °Xl½yö‹¨`›^“X+¢}b¯ø?­ÏÙ¬Ô+Kk/_ƒ¶'ÞQs6b{YbЉ2n½ö²:ø_f5¸ÊGƽ^BlN …|f9~õÌìã^WÀë“_ÿºZ°Ë.LÒ× ××0‚H€%ÀÆÚáÎztžê·ó.%pÝ}÷ÝÕÃ?¬Þ|óÍRZ°`4hP)OÏ­¶VS¦Îˆµ^YéÌ‹PO,–Aožý"jØVg·ö¼Ÿ_ìy]_{냊ýÔ ËWyp‹d?ËŸ^¼¬ô¶›ÿK^üfoC™HfyvùR–ì/y¥>öþ²½\]¥|”çWÏ(û©,¬ß-ç7‰SöoÍì¢ï}O­X±‚)Càš`c»™Myÿ–­x] ùþJPÚµkWuÓM7©·Þz«lº÷Þ{UïÞ½Kûì³ß ãe£ìYV´ãW£Ð“K€%ÀÆÖ§e©àÌ,<­®ôüƒGVB‹‘›!ÆøM@Üð¿”yÚ°á¥ýÍ}ÅókÛ2ñ-õ“ãâûò+Æ·*ïÖ;§¶h»®&c›.Ås¿\Ø,ݲu¨+6»ÐN€õ†!,Õ°ç:êÂK°àê¼óÎS¯½öšzûí·Òå—_®:tèàuè/{¦7Ëñ±éϨŒ,–k¿–•Â3°:6íg!<œ6° ¬J^Qñ† `ÊvìoB¦½_%€E߉òGê‚ãɱ£f¸³®”‰ºšÛnƒ<ÎQô YX4b3Ô7+7_fêI€%À¶4V†3„82hŸ|û½ª[·î%ê!CÔ’%KÔ;ï¼:-_¾\x≥²:uÞDwmdu¢£eÑ@5–K€ÍŒùgEs°6¬š )žMñÀšá»ETšv¸¯‹Ö,åÙÀmÖ!ÁØ.cye<¯Y†lO¢ÏË:ÀÆyƒ°l­–K€õ}ªðÀb,‡ÓÒñ¨vÚIýö·¿Uï¾ûnÍéÉ'ŸTûì³O©ìÞ}¶WW›DGÊcD¨A:` °Xš¦Z\¬ß„Nf¨oÀ,¿sÈ`.ÍpåjÖ>– °VôÝR®|ãØ¦7Øooг½šíØü> †èS]ëé` °-m¨¯þnªµ=åh‚Q€Å8W,#àÚ©S'uà 7¨Õ«WGž¦L™¢½»Ý¾ôîêñµ8~5$÷IL]u&À` °9êE«?•Ì,à®ÒŒÃv®¬ì¬9æccícÛc`mlÀÚùíg·Y¾ës½Ú|,l[ظü¤Df]„}Öô!À`kj@ùÝ™@ä°XîËÞ ³Á8Õ#F¨7ÞxC½÷ž^Ë5¦„òGÕj|숑£ÆÝVÛYr¿úÃ,–K€Ío§âÌ2°·Eߘ4û€+<§²ß寕¬:ŒãˆGÔOÆÄP¨‹9‰S€•c3k‚sÆ=°!šÞËߨKØ„*À!ó:Ö† û‘µ^ÃîWm~{Yžj˱÷#ÀfhÆà8fxŽx§¨Úe^Ê!,:d,sƒé;ßùŽ·žëŸþô§ÄÒ‹/¾¨Ž=öXc|lg…uf £õ‡Ñj®–K€ÍKZÓyd`ñœ“Y†e‚#|›a·2k¯„äÚ^Ì €µg– dR&Ù.ÞàZÖž1uËAq¨ÆˆµVÑåhpFù‘µaÖ èEýâúøA5Ž ¢ü` °Q.£eÛÌCY„ €ýêW¿êã6Ûl£¦OŸ®Þÿýº¥'žxBõëׯ²‡‹ñ¸Õ@÷©ü` °Øø ®éÆoT;w.,Æår|lý€4ìË,–_§–¡’ûëºN¢¾MMMS'L¼‘ý¹ã °Ïì8ó×`j6ùˆ§¿áo®|ð7~CB¨­ùÁ6¤M<ø[>ðèÚKì˜å™yÍr2e»N’õ‘º™uÆvìk×ypî€wÙ&e¡ž€ø(?ͺ0Ž-0Ä2„8ÊÛi­²Øá9,Öh}ä‘GÔ1Ç£zõê¥î¿ÿ~õá‡Ö5¤Ï9çcýÙ&o=Ú8;Z– $` °ØXûµ,ÞEe °Ñ<›ëÑÇé럸Öö¶\uHØ6¸¥QQ9@€ÛLÈÃo²¯@1Ê0µ½ÊÈ‹ñ·å€QöÃväC2ËEõä7”)PjzP‘y¤Ž¨Î eâœðmB|^ßf} ,Va"®(ô,£•G€?¾zúé§Õ´iÓÔn»í¦<ð@õì³Ïª?ÿùÏuMX?öûßÿ~É‹ñºS¦ÎàuMñ›x,–Ëž8J°Ø0í pg{FýBˆí|€@s쬩9¦Û$m(Ä6Ó‹[É £Æ4úÍúáw”eåJXt%-· ¡Î8f5!Ïå´Ï=À>õÔSžGÇ/ÁëÅÄH\pBŠ¢¬¤Ë 6Ìc)t^‚NH€]¸p¡Z´h‘š0a‚êÑ£‡6l˜ZµjU]!=sæLÕ§OŸÈî³ßjþ¢¥¼¾)Y,–º¯â À`ÃÜ 
6üa_`Ëž™O¼²æ±MϪYF§_Þ ã¢æyáoxXMFžJõðÓ%Œ®vÞÜìñǯvÚi'/aœ’üØa‡E(gÿý÷¤¬Œl_ÝÀšji9Û—€S%ÀÂûŠÙˆ±œÎf›m¦ÆŽ«>ú裺§qãÆµ{Ò©ÃÔÊUz©Ÿ‚\QëD€%À`sÖ“Öùt°ØÛ6nàÇô¢Jæ¨Ö„Úz,¼»fì*L€­!Ü÷þç)k›om«¶Ó çRöºÜ]vÝÍ)o7=¤Ç¥Lä Sç0õÅzÏY»_ °±öÛkN€%À†X?Ï)àði{+ò;þ6'N’rÌYŒM(µAÿ›å:åcz…ýV–ø‘ü2i“ÔÍ †V~—±°æMcÖA~G½ñ{”ŸÂ,pŠPEñŽ<ñ&€@Åÿ¦÷+chM€Å>È+p›6«]l”·6¬×M+¦°ŒŽÌB,“8Á»ù‹_üBulß^Ý}÷ÝžVöøƒZºt©:ûì³=½öÚkÕ_ÿú׺§_þò—Þ¹àœÕˆ‘£Ô«×dÙ¢lØ{ù³k”`cí· ° ?³ã|¡?ލµ8ó’žßqýBhñ›ßï&‡™½7LÞ m*•Ui›}>€W{fæ cmw¾ • Êrq¹°b@(<+~Vü&“CÙÛ± »i†XlÐ-RÓvÂKØ{î¹Gí¢=š?\}µ]Û¶ê׿þõZ‹åmV®\©,X Ž<òHµë®»ª¹s窿ýíouM¯¾úª:á„JÞØN;«YÎc;HÐ "ÀfÌâ4dóT6Ö©_î¡svʉØì>Sê°hNðXÖ l~žÜ(ÚsÒeHHrÔÇ%À–™Ø‰[USã$N­e#¸„ØÙ³g«æþPõÔàª_Š)Mj¯ví*ì+¯¼¢xq´Ã;¨Ã?\ýñTŸ|òI]ÀÉ óDXgžŒç´Ÿ 6»ÆfÚÛVZêG€u²Oê\º+©ýC€Íî3¥^‹a3„·šV®µŒjŽõ>G¶ÇGqŒþºSj-(ËXLÀd{LýBˆíÙ‹ñ¿LÜdzq±¶,n˜¨Ö˜Û{Kl­­¿âþ—{ê‰'ªÚ´ñÀU’+ÀZ1;0ÆÇvéÒÅ[—ë·þýïO<ýãÿPÿùϼI§ð,Àø½´¾E¨6»ÆfÚgçH€uê· ° F¾DÑ®ã(£^ëÔBÓœI[/›ü§¡a¼þî•æzÖZ·,,‹ñ¬æ$Næ¬Â2i“ä‘IÌ1²&àb(ü–I¡âÑjË'ÀÖÚú“ØO†PÍœ;Bç"c`1Óh=æuu ûÆoxÙ³Î:KõèÑCM:U}ú駉¤Ï>ûLýûßÿVÿüç?ÕÓO?­.½ôRl (le€=òèfõÚ[äîY‡ñ›Ö2 °Nýv¤{†îƒ§é>˜)[`3i륇x4È.ù¯övj«,.µ»e`á•‘‰—Làƒ·Ð)3 ÛË與՜Ø^FÇ.ÿKy8nµ€÷~ØXo«ÈŒÄwëîy%ñý‰ž胔Ír‹P«1öL€ôÔày¢÷*ÏÍqÚ#Û¥©I]rÉ%­&q’1°B,X,f&~ûí·½‰ž ä…òbR(f ×Ï?ÿÜóº¾öÚk^ôŃ>H€­¼¢ `+ìæ[l©}bAU÷k5÷xœû|çÁ )Îc¤±l¬S¿À¶k×î˜6Úh Sö4@ø·Sk ÎÔ¬³Ô¼ìhðaR’ÃX3,NÃìt ³CSRÍš«‘€­+­[m™iÙ[só¯T@d†•¬ùùW¿]ÔÇoTª£2̯½ûl§ªX`1 ñ6›m¦–´xaá]¥Ó5ÔvÝpCuÙe—)ÌBì°ï¾û®Z½zµš>}ºêÝ»·:ùä“Õ›o¾©œQ%€ëÿû_õᇪyóæ©Çœ['p•öL€M'ÀÆÎð†ÝzçÔÈž³Q=ã.‡ëÔoG°NGc¦¼+@€µ@v†Ù4Ìb,jf?Ø©õ°26ÖÛ.2ÃÊ`åYòß6ê³ïÿP­¹ýÞÈŽÖƒ! 
p»ŸÀ^sÍ5jÿ 6ð¼°ûë vÖ9Ù ;nÜ8oâJXØ?ýéOêƒ>PW]u•êÙ³§3fŒ·n,B}«Mâq…Gž^x °é{I€ °¶7ö…å«<­ý»Ÿ×ÖÕ“‹|؉“nY«ÜrÇóäéÅËJÏ”aþí…ÆþæsGʳó†}6¥9?Ö©ß&À:ÉÄLŽ `M€µ`v¹þÿ\xmÅLM¶¼,Ʊ¦9 8R+m'ÀÆz…ºrFS%€mõéÔYýýÔaêÃEK#;¶«!wɘ+ÕIúØ®ù‘Ï`-Z¤öÕcȧ·ÌB|æ™gª-µ÷õá¯,<²»hìÿþïÿ†XxIW­Z¥†®úöíë…ùþë_ÿ •\1ÖáÊX?áÉØtÀ+Cˆƒ¯ƒé ÅxXü/÷,ÿïÚ÷RÂ6@!~Ç‹*É+¿ (– çÅvì‹{ßH²}<\9êqùãKuÂvÔY¶Ÿ÷ó‹½zÊÿ§ Þ*/þó<ÊJ^¬S¿M€u’‰™°˜üH§ÑYMÚ»zM9`­ô»ÞoŽÞÞ¬S££°uÍ–w€­Ó¾o¶%”/‡}–ŒÖÆ«F‘þÛ¡C«Ùy]ž+Ÿ÷ÜZ}<îZõþŠ×3â?ò5A‡5»‚åöŽ;îPýô9ï¬=°XvþüùjÀöÛ«1úœ;àö½ök,3#=óÌ3jß}÷õÆÈ¾ôÒKÞÖ „1®þ裼0æeË–`ë.ì×ÎèuóÀþL˜ 5½ª€R@ümÂ#ÀÒ„Gl3·Ûׯ!–ãbM 5å›û ñÌ6¿a› Ùy /&À:™¼X'™˜ÉQw€ÕO¢.FZžóhýL§É:Äx£ÀuÉF€Ívñ¹ºÓߪ¡á7ºñhæª)-Ñû?PcµÖ¡ÕþÛ64\õφ†Ïóüœp9·5ÌÞ1ø{êܳGªªãJÃuù]õDS·ÜvÄ–Xx5þÞ÷<ƒû /x“8 Ðk½^?Ó©›3Æz«˜uØo';„k§{î¹Ç +†WÛàYµÀŒ›…×Мv€…— Ë|yáúÒ"î|Ø`€O¥ «¤>ŒoäHo©íu½ž6À @Ûá¿f>ùÌ<Ø.¿™k{c]ë•Å|X'S—ë$39* `‡¸fyÏ£v¡Øáð"9Šœx6lvö©ÁƒÕI9Ø! “òþŒp9¿?m¼±zBƒW÷“ØàP €ÝRÃò£ÇÔ °³gÏVõ²:&ÀÞwß}j;=&猿M¯ª„‹÷ÔZéºù¬éÁ•}ƒÖ„Zlâæ^–H€ÍÒÕJ]ClcKm¢aQ†ÖB<½åÜã^rGÛ÷ ãkm7ØìlÄ!ÄxÉÒTk{ŠzD0Dy_»–¥]Á‘„#¹šbÌRüW=.5©%wÞX½Ftð¡jÊÔÎÐTÉûÜsÏ©o¼Q=ùä“%,Â}: /ìr¶ìÔÉ `ßÿ}% ³7®»®:lþ0ÅLÅ»dÉ/\ø“O>ñàõ½÷ÞK%ÀBóI7ݦöÙ÷€´ ¼`ƒÇƒÖÊYÝW h.AãâÁÌb;|ØE‹¨åàø8&6êÞ2Wå`su9ë~2°mSëHŒ\xM]¼#t11B…‡êüIŽ{u£PA¬,Öp}ê©§ÊÎŒu1qSØñ«W\qEè}Â#êülä÷PÆ t¹ Ðu§h€ÄÒ:IŽ{EÝ«×J“8!„‹I’“B èüÍo~£öÒžY<[wÑßðÊ– !~ýõ×Õ]wÝ¥0±$,àîäcŽiõ»l¿ôÒKÕüãVÚ´ì|=IױǨ:èqÂ&°ÚÓKˆ-7U¼¨Ž+“-Ù3 ›^Z„¦Ë$OæØUãM+9Å~ævÏjŽYµ!Z¼ÁR9¾x~Íü²Í¬?ÇÀf¼­­úØÚôãÞEV Àj`]Õ²„N=Ç·f`1#0Öf•6wÒ3œbßr0h®û ˜E~|W‚Çûï¿_uïÞ[ä~ís`á™ýTuü‹öÂýI{ã‚@8ŽíÕÂkµ‹ñ§Ûvéâ­;Y§ÃôDLåö{ßþ¶:PCnŸnÝÔ¬Y³2–´¨§q€õ輪Î÷.¼ˆ˜eKÑ,\¸PaJXìĉÕ`  ˜Ì©ƒ »|ùro+&YÂXÕ·ß~[]rÞyê§-ãe»ðÖ=è uà 7¨ú˜Wõ«ê¤£V«W¯n•°-ÖÛzÀ"\xˆ^ó7Èó*[D€Å8`´ÉzÞi>¶ía…çÕô²â„èÂÃê·–j¥år‚Ö^(Û/äx~/d¼+<®€U¿u`íßð?òº®Q›ækU®nX§Žžë$3Q2›òI˜2 °¦Ù0ÞÏ0ë²K€å#Àï•qôO=Æca?œ·(W†yµ `/ìõÒ:ß7vêÔ©¤ÚC8nÑÐÚY¯!»·Þð /ìÒ¥Kf,–4~üx`m°­7ÀÚm cFlNÚd{f‹BÕ=ÇrüïíµZãÔÉoâ8—•² °NöE:H‡*@ò¨@.ðà a¾H€Y3ä° /ªÀ([ü†¼HæøU3„û˜åIH±ì' -+0-^b€‘Ç–}M¯1¶¡>øÍo_W›c`c½­s›qwµìù眣~ºÞzj¡¾÷wÚb‹À‹»žuyþ]¡Æò?¾á‘Å6xaO<ê(õÎ;ï”öhí™5ÃßiXóz<¦_l ìÖ/&Àr l”÷­½”N”eû•o¯½ŒNÜÇÌBùØXûmN¨@ÈÀ"Å+0‹o9H"›È#Ð)aÂ~+å!dIÊ–ñ·r\lC9´&› ŒÐdÔCê ùñþsTËå'ÀÆz—`õä.• ÂZãW»j/ëj,¥×„}üñÇÕË/¿¬ú}ó›%@¤þ¸©I~ðÁª›!¾P‡›`ë­'«½°˜( aÇHXJ 
`µSš—Ñ­—èÙ§±œ –› cõSl¬ý6 §T  d`í1°Š®k™5á²Àâ¸&À ú…›¡Ê¨ŸæŒ: 9†6h¬mTà*å`c½K °1ìÊ•+Õ¨‘#=/ì/´'õ´NPCöÞ[MÑKäˆçð:~ÌDÿûß«³N;ÍÙ[ô>’ÇóÂyd V`ß|óMe§,,a  P/ꕵ6@€µßfáT€ d@Ì,`PBÍYˆ]Öž…Ø„ÍJk†›e,òÂÛ$áÃò-¡ÍõC1ÀÔçØ#í>©*`cØçŸ^uÒcZWé{«Ñ3µ'ք׫/»l-½çž{Ô€–ex×óÂêýP€uܸqê(VŒ1µv"Àv²;¬oþÚ,6©.œÇ¡%`ÛÂÆå'% d`Ë-ƒ&à0(„ØÞûÈo•Ö ôVÐJPl{Rs°˜0mŠŸ/ Àְ矾zòÉ'[­‹Yˆ1‰<°¯¼òŠ:þðý±¬z=êµà³ÛÉX@ì¹ë¬£~¢½°È{ÕUWy XõK8&ê€f ÀŒÉ˜AyÞ¼y^3^r=øàƒ ëÉ¢ ì¹×¶ƒ€v@ÈÊdñšÆwM °41¨@â DÂK‰×:ÇŒä‚hÃqà\m¨Á›uˆ«_y&l–Ûnzeò+¡ºÈ¿Í0`^wê°öÒ9AXÔõ†ÁëWgl.ï2‚K•‹µX·èØQuÐ`úë_ÿº"ÀbÒ¦ÎFH0†Çixôó â·»ï¾»äÅøÙ“ôŒÄ½»vU=ô·ìk¯½¦üRšËÇ`©ÃõÚÀXj‡c`ヵ­W ÀæÒ^àI¥[Hx)ݧ˜­ÚErAÒ°2»°Ì> `µV¼§«æ,ÅAc`1vÕÞ/`œÅ++kØÚ³'ñÀBmÙsë’g–K‹œXV:ÚÖÉD&À:ÉÄLŽ `…ÊT6,–K€­Æ¸Õªµ !ÀÞ~ûí%/ì:„¸“ž˜éú–u^áa}hÝu¿\ïUƒêØË.S/¿ü²—f̘áÁ+ 0+¿‡ùþå/éy`¨A)­ëÀ\/s¥Z¹ê½Â½D¡6UÍó€û¸];¬“‰L€u’‰™ À: •©lX,–[ñé°‹-Rû¶Œ…Eø0€rèÁ«õß–‡§h=tÏ=×>°o_o&cdõXÙ°cÇ1òGáÍP”Ò °Õ\‹¼ìC€uƒ ¼\ï"žÖÉD&À:ÉÄLŽ `…ÊT6,–K€­Æ,°wÜq‡ç…5g!¾óÎ;Õßü¦ú‘þÝ›m¸sgõì³Ï®¨'Nô&rÒ²Õ<°j€œº$ÔãwçÍ›§üqo‰¯|Ð[Îç·ç^ ç­¦-Dµ–U[Jk9X'™ë$39*@€u*SÙ°X,¶c¯À ÷í×O5~å+ê׿þu«Yˆ¯ºê*Õuà խ·Þª–/_î›0‰ ·›†Ý§žzªl>¿ýñ‹_xØçŸÞ)`ÓLØt]jž ܧò5$À:™ÈX'™˜ÉQ¬£P™ÊF€%À` °Õ_Õc[±;ºï¾ûÆÀ"„P8eÊo› °AËÛ`ûu×]§Õ^X,«ÓüÃV\Ç.o̘1jèСjñâÅN‰›.`"À¦ëzTó\à>ØŒ`l"²ˆ’Ø<6,–K€­Æèœ0ñFµžž¤  ºÎ:먣Ž:J=öØcÀ>÷ÜsjÐþû«»ï¾»•6hyÙÞI{_±Ô&zæ™gJKâ<úè£êà}÷-»DŽ,êàšBœh"À¦çZTóLà>Á×X'K:2€m×®Ý/ÑG1eOƒõ×ßà§Öœ‰¬Qâ9šj="–K€%ÀVkxb¦Üc?©d´×Ëâœ{î¹Àb ê’%KZ,Ö‚uI#Ï>[©á Ë>“'OöŽ5aÂßr.ӳà èuMØ`£»Úöv?lz®EØkÇün׎ëdµF°MMMSñ²•íÓ­}¦I§–—N & 6 #,c¨.ë†ZË#À`[ÚÐ&ú»±Öö”£ýÙá¬kvtó-U»í±W d{ôèás56hmVs;¸k‡Þš±=›š&Âö[n¹EmÕØèmC»LX„3»&lz lz®Eš Ù<Õ…ëd%`Cô¿yº?Ìs‰`aÛÂÆå'% DòFAöém¶QÏ Æ”! 
pÍæj×0%m2OÕ ÀVÑN™:Cmö.%ÝW‡ûbvß 5Yý¶Ÿzâ‰ê"šŒPâ=õÄO#Ï}úx³c|¬kBþ¾ùMuŒÙ5^°H[³¬‹/¾ØóÀÎ;7t"À`ëuáeO‡MjÀÞ{¦­¶ÚZ­¯£\ò"φn¤ºèµ•]òo¼ñÆ ÷§KÞ6mÚ8åCYaꦾôøø¬A,Ö©ãî¥sÕ<Ç ŽÄIœêÿ\¯ö¹J€uºW2™‰›ÉËÆJgDlÄo€'ß~¯g¤·tJê»ßý®úÝï~çÍVìšÎ<í4u¨†V؇5Äî¥ nÙôèÑÀ>ñÄ¡¶þ†NQ=°‹—®PÝ»÷P.D1ôë×Ï%«—go ]t‘S~€îœ9sœòâ>vý„©s˜úŽá­­Ö@®Ç~Ød-lýŸëÕÞgØdï•$F€MRm«h dÊ(ª¶ƒHz?Œ~öHo\,:§F=£ðgœ¡žþy§tã7zc``ñ  7nœ·¿,`´šôÈ#¨|Pa2(ÔoϽ°Dü"£R›#À#a$À¦Ïx'À&kJ`Ów¸ÚØdï•$F€MRm«h \b„L:øÐ’7vÓM7U×^{­·4N¥ô«_ýªÀ.×¹³§qúéÞ~ð4ýèG?ò<»Õ$X¬eK€MÞð!À`é-ZWïù`“Ž»jP>l¼÷F=K'ÀÖSýü{>%Œ;áç °1¬t\³§zmó­È"4rêÔ©jñâžiÒ¤IÀbì9z)-:vTøMò ÀΞ=[U“î»ï>€1¾màØãOd;H H{ À` °ì‚£T€K€m±maãò“°)¹9©Æd}hSü`‡¶±ã®Uíõ,Å€F$@$B€±ôŽ™n¸áÕMÏB¼¥Î{ÆO~²Öö /¼ÐÛžÔ° ¡Ì˜ÄFêpÒ©g¨•«ÞK\‹ ·ÒyÞN€%À`ÙG©–ÛbÛÂÆå'% `Sr!rR lë IpIÐó(,žvÆðÒøØvÚÓ:räHµhÑ¢Rz衇Ô=TáÛü]þ¾à‚ <€Åv×tõÕW«-·Ü²®ûìw€Â’&yÅ´ž–K€Í‰E‘’Ó À` °)¹j`ÓwM²\#,6ІY‘â íÖ­›ºþúëÕ3Ï<˜°?üáÕ¬Y³ÓwÜ¡X:NO½<É”©3R¡AZ3îz` °Ø,›é«{šö¼Ÿ_¬Ê¥×Þú æ¾èéÅËÔ®ýwWøŽûÙGù-6@м…Š–Á ¡˜,ªK€MU'‡å1¾Ñ¥k 0û÷ïïͼpá²éç?ÿ¹°¿ýío˦ûï¿_qÄ¥q®XÚç’1W¦êÜã0²P&–K€¥5¥YØÍ·ØR6lx h£X”ñC«–¯ÊdÿF€òNHWYØt]¬×†K€Me'¸\OOØ„ÎluÖQÍÍÍjîܹjÁ‚k¥Q£Fy;sæLßtþùç·çzìq'*̈œ¸+B °XlÖM‰Hê %oˆ¢¤4¬ùLÀ>úÄöEÆÐ%lw@:Ë À¦óºdµVXlj;OŒ=öø“JÞXŒ½øâ‹ÕÓO?Ý* À>ðÀÊLX;sûí·/í¿ÇžÔcó¥ö|‹«~çH€%À`³jBDZﺴ9Q”˜u€…Wp+ !ÇòìðÂÃ:qÒ-¥íøßc3„ûKYi/&ÀFq¤³ ¼¡šÎª±VT€K€M=Ðar¥]wÛ£¢˜| kÃΟ?ßKð°Â;mÚ4/ÝrË-jРAÆxÚîjòí÷¦þ< °Ùx¤šk‡qßÝ»÷¦W/c°Ü”ë' véÒEÍ™3Ç©h—®Ÿ0uSß{ï­0Ô ÍëµOŸ¾;èUÀhü”W€«=‘›â™E[|XÑ~ñ;îC€(„‘ð÷‘G7—î Ó³{ëS½ýåžÁÿRV½î‡JÇ%ÀòA¨€‹Xlf AL¶Ô±c§˜|ðÁjÆŒÀvØa ë¹"ÔžZt‚jÄÈQêÕk2sŽi4(â®=°ÁHQZ $ÀÆÿâ„ëbŽx€_x,`óò+Æ·ê³àa/+yÌñ²½ò¬6û™p÷ó¼Öò °N÷ 3QÂ+@€%Àf .¼TÃéãcÛ´iãÍ*¼ûî»+Ì\ÜÒù©ïýàGçšð’HÕ.Xlà¦6·v V?³ýÆÅPáe,ò˜Ï[Z?€Å~frµÏé¤ö#Àæöþæ‰QH À`3°Ò‰b¦#Ž:¶¬®ývÞUÍzt^&Ï))!mÇ!À` °‘öëY-Œ[%ÀÊxXl«¦ß¬ÿƒË 9T€K€Í4ìaR¦=öÚ[õÙn{5é¦Û2}.iˤêC€%À`sh]„?%¬X{<+žÃ¦U¼­æìÅ6'r2½¸Ø&Þ[y¦G±\O\ý=°áoîAЍ–KèËH¨m\C½Ë%À` °É›ÿihX®CXÎÕ©GòG÷="V÷E˜d ðDè/ÀÔP™ÄIÖE>ä7'63“<á”#å¥9¤˜›’»‘Õ )W€K€%À`ëÚ°Xlò–‚¦%IÃìý7H¨)ùš”ŽX ÀöÕ¥”@<+Ëè$ý¼¡€Q,¥è´'t,–ɼ"Ù³rÛåbË•Wï˜öñ °u¼yh*!°غÂKÚ:OÖ'þYm °XlòVƒ °È~¦avò%_£šf!Æ“ uò<+[McOØTMiÞ‡[‡;/¡C6Ê šÐñx˜|+pƒ>=®+üå5&ÌÑÉ6p À` °É~kyeWk˜¯&ñ©Å‹ú Ñi:6ù—Qq„ Û6.?)Q ™$%W"ÕhÊÇiDv„—„á%ÊŽeeÓp!À` 
°‘õaά³ËµWv¸þmç„ÏX+À∣‘òì•¥tòÚßE°Þ»ŒðÍ{Ä¥–ÓBÇ¥.Ë-ºX,Û@Âm€K€ÍÀ~<ñFõÏ=8§ç×ßàó–`¬iZR€µ`öá–ñ²ˆŒòÀ¢>ÓÛ¶mûô}ò yy>¯ˆ6ÊöɲjT€[£€Ü TP€^Âð’çŽ˜çææÞgßTï>Û)€lÒ6ßÚVõß}ÏÀsÙ©ß.ªC‡ÁôªsŒ?^õë×Ï)/2…Â.]º¨9sæ8• ãÒõ¦Îaê»Í6ÛÔµ½œ¸}ßÒäGÕB`öû¸¡ás¯»z‡††Eº]̉ -Ñe¬‰ œyºŒÏÏþÙùìÏ3ØŸ`ók£`ó{myfõW€^Æ:<ÌÄh¯q—48b–HÌ™ôqór¼e+^÷fÑÌKºåö{œÎëo¶Ù7œx0 æ`ûï¶›ºdÌ•NÇѦºõîÂì_Þx®¡á7ç74üDwÛðœF‘†ër±µ–õËuÖYg5=°n/ÓÖ`ëoÇUl\ʲ\* ½ i{˜çµ>æ"최cfdÄ’õ†èZ4ä¾õ1ô/]¡ºwïA€ é1°÷Þk-’t~_¿tùH¿tqM'l¾ùß&|1†t`ŠR(×¡Ï [ÆÁöˆÉXÀdQ—×X¶g#ç} lÒí=Éã`k¼R¼;6ŇU˼Ø„<°æBëµtŽØúÀW-׌û~qͰ_²{˜â4lØ6ܧï…g15—0f ­Ó5´žóäMQiR² °Ùí°QÝé+‡›¾kÂåGXž:>'Ý¢ð7¾mCá¨Ø†„ÅÇe;;—Í%l¿õΩeËC”g/vŽýÍcøer,»®f¹~Û°eÛç‚óÀoX,¬n.¸.úà˜æâíf9åucP–Óç-Ͳ¤Nò›Ÿâõ;_Ù¯’^(ºHûÍ6#z–«—ß¾a´aÞä ?,¶ž]g™u`×hh½KCëP½=êIšâ<Ý!ð¼Ê’X A_†„(¿>ÏÔpÏTlœ·I}Ë&ÀÖW=ß Ä °èä¾sÈ`/Þð 6;ÀjO$Þ[l °AšÈñp.È+0[I—󵽺¦¡c;òášáز]góM¿À±Ýnè ¨ÝŠl °Øúuu ZÇkOkÿzÖ!®cÇ °ö3¿Ü³ýú(¿9ðrÖ|Amþ/¿ã7ìkn󋜲˗ü²¿”'õ)yU®®R>Êó«g”Ïʶ-l\~R¢6%"'Õ@È Ú?_(;Àšßä?f‡g¬ 5ây3T:UìÌS@˜]F¹ŽG<†öörÀhæ÷;/Xˬݛ`ì×aVòVÒD<¾6¤—Óur9_¹&¨ƒ ÃvÙö9ÙkzÒ~Ís¯{”FËŠˆ °Xv¼ñ(7ÀJÔK¥ç¢¼0O­_”½ûÃ~Á}°¿ýÌ·=½¶­`FõHxÙi–g¾Ü–>Ǭ«ýÂÙ —޳?ˆ`ÉKñÜ^U—Ê RµtÜÑGlkQê °F*aÄQ¬@ŽKç“&€µ¡ÖÆZÖOèŽNÔôv×°¦q áÕ¶Ö4\ìsáܾ𛋿íñÎ.לy¢‡ÔJš` °´JâQ n€ zIˆÑ6°š/˜å4úñ† `–‹¼‘ßí!*øÝ/ZK¢¤¤.æ±ÄÖ0Ë4£ªì—²¾Ug_A€çžHC©Ø4\…üÔ›€h1;ˆZV<‰A“4UòÀú­m*õ´Ë5=–.É0X;õ’1²aÖE©»__n­× ó•±­¢µßÅ+<ÙZø…XÇi\°ìè —K€Í)‘®3©7ÀÚ°Šç¦€¤¼ zî‹Õîk]ÖŽ´²Û¬Ÿô‹2–WÆóÚÃX’z)J€M×½em°QªÉ²°)XéÜd¢$ùßÔìÐï ¯ ]fØBY‘ü DŸ¤Ó·À“>e¿9:yclϘëµ’Ò‰ËØT©W€•·Þå4e˸]sâ*3œÙÔ#è|M½P®m€28®xåq,bíë„s”ú6£ƒÍ8´$À`i‚Ä£@ÖoB'³ zî,ÍÉÃx`Ã74J¿$ßò’ØöðÆñ,4Ë$ÀÆsO¤¡Tl®B~ê@€­3Àš„„ŠÊ¸F|WX 5={fÇ…}eü ~G2UfD.×!ɘYÙׄF³\³ž(KB]Írm85Ë–I!üeH'.õ0.,ÀVÒÄöfÚ/Êét¾RGó˜×ݾŽö9I½$LØ`»^f¨XÜÆ˯’ °Øü˜‘œI_]Ji)œZJŒ`ñLöÔrQ6fX°«Vö‘™ûåū߄‚µDk¹ô¡åúæ8žÿØZZ~º÷%À¦ûúd­vØÖåaï²6«K9f”YËÌåö£®•έ–s°Ë­E“jëaåu¹Ž~ÆE9ƒ£šò]êÀ<ÕCj%í°ج 1×w .NLj`ñ2 eG0á%#úÙn†Ý†:b†ËÒyò‚Ø|QE´–9Èg?ðŽ»? 
ÀFq¤³ŒºZ¸Éù¡Q(@€MÀÆÝ9ä¹|Y2 ç—²xî2> FŠœ“=>7‹çÅ:¦°Ø(:ñ•‘€ÅóK†o˜3û–‹¨’ˆ%ó%cP±”+ûÊðê"ÛÅ\Ëp#{Æd‰#Àæèîâ©Pœ)@€%ÀÆ:ór’ "`gå±;ã$ëű`\˜çTnÜrÇbñx[ýt%À`sfOÔz:™X¹§åÅb¹u\1‚ßHÈoÏêk.@Ï~${"'Ù&åÚǶ£qìcá¸ö>R&êkïŸdt=°µÞFÜŸ C,67KøJ¾¨uíZ` °Å03œÏ2“ËgaíÏBSC¬óýÂŒT Ð ` °XF#„$ݰØB[kŸ<–}‘7¶8¢û¢Y—ɤ`Õ‡ÅP*¡X,áFÛ@Ú–a_ž‡¢°ux%ýâ.èxØ<ÜÊ<*¿X,á…FÛ@Ú–Ÿ©#`ëð ʤ·`3uϲ²T n ` °­à9ØK$Ý¥õxÐÅžL#­ue½ÒM€%ÀÖ­çOç °X†§óÞd­¨@ê À`[,f»õ›11Ï@„Y‘‚κTZ¼>hnO?T&y°ØÔYõ­–K€­ï=ëÑué›Äz^$Æë“Z¤8×@ˆIÒÀMúX4st‹¯§É#G}°ü޹~_%Ý¡OÑ?évX”ã`;wÞDÍ™3'0~úéjë­·Ì'eõíÛW577;åßtÓMÕøñãò"¼Ï¥¾È¦Îaê»ýö}3wöé»Ã­Ÿò ` °Q,l[ظü¤Df]Ϊ•’‹‘ƒjà…?_*Ph€È!™`Kr ¸ x©w}ü4 ª3·Óóê×ÞX½Æ{q²ç^S¿wÑÛ+0Ÿ”µÍ·¶U;ì°“Sþ­{m£vÝmw§¼›o±…S>Ô#LÃÔw=(À–î+¬“™A€%ÀF °ht´qn½d2`“Ñ™G)¦™2Š¢6à‡X4åb‘s„Ëêfà}#á7Ü·Þ9µ•N€: ÁÅvxlñö±X¼ºR&<ªæy l»üJõ‘}åx(×.ÛPwüŽí¨£¹ˆ;ê~Ú°á­ê!y‘߬ô±½ÔQ_–GàeÈW À:XlÔëÔð˜)°ÉèÌ£S¬émÄÖw…~á¶fX-þFB~3j†ç¢,@!Žƒ„¿Mx°õ3àýêƒ|²Êó;¦lGÝ‘ß&„ ¬Ú0Œ²¬æ¸WìO€Í\y=ãnX'ƒ‚K€%À:Ý*ÙÌD€Íæuc­³¡@aVÀÌže×ÙµÇÉÚIä7'D’üR.¶›ÞO»<ü_iŒ©]9¾9N×>¦Ÿ'Øô6›‹zVç*ÛÓV·ñÍò xlµµ¬“@€%À`n•lf"Àfóº±ÖÙP ðkª~cNMøƒwÕô –Ë/W3$Y<£a<švù¶÷Têoæó +63Ëpñ°Ö{.a¢6˜ ~Ô/é6@€u2zè\£rdjjjš:aâ…íÏ“nßQ6Bm€e¤Olú® k” Ûá¹z`ÑQX%,Øö¨¬l •o×°ÞK,áǵ­2ÛŠ´l²FöŒá#¼h¦li@€Mö^IòhØ$Õæ±Š¦@a†V%ø4q„Ë8W{]TünNzd† ü™!Äa|»ŽW¶=¸~!Äö$Mf˜°éEÝЖ«£‹‡6ì91?A‡m ßm€›¬)Ñ®]»c6Úh£%LÙÓ cÇN7'ÛZx´¤ À&¥4SD °2 ±Š‹I…æ8S€«=ÞU XBŒeB%s&bxn±/¶É¤K&\b{¥5YQ¶]ùÍœÄÉžJ€[&q2ÁÛC6ë(ç zpâ|ƒF )3xÇQvÒeâÅN5/ dVó¤ë›–ã`‹hNðœ©0 À²=D©zîe/«Ð p³—½‘Yxa„›<ÉŒÀ~“>a”%`i‘2 0¶ÛËÔ­³êWYÞGʳ—Ñ‘™Žå¸~ËèØç-y±¯9)•ß,Ìi1’YtÂu½ÆLÇ1ј¼Ü ÛÖâŒ\À‹µj :ì9Ô’Ÿ›qË€ÕÏ¢°maãò“°)¹9©Æd}hSü|¡@¡6ŒwQ–Ãq™ô©Ã/Š}ý&qª¶\s«jËà~é͸®K½6®ãÚ/­\t‹`ËMäæR¯¤ò`ibPÄ /%.yåò‚¤ì‚d¼:ØÖ°Ð+!À•–±1C‹MϤù{žŸZ ͨÖ^ò§–:qßâ@¬ßÄcö²O€0Y;YÚþ·=‹¸çäþHúÁ$ʶ×L6‡U~ÇÃqQ>òâÈcßãøÝãŽ}¤<¿çœ‹,ÊF>û\Íýru“óÂvꀲ$Úï¾i¹Ï°·”Xý,*@^JÙUãIÙÉxu°ØVÐã4`Å4bÓèöû½ž†¤iô×R?ã½–ò¸o1 Ö^ÒÉ-QmöUvh»¹]–¡²Û ÛQ"yt%4ÇCʳNj#„ÝÛÇ(7^¿W¿.+k2K=Lª› Ica_9>`OÖ––{Œ›qK‰ÕÏ¢䥔]5^”]ŒW‡K€-¼×9-F.ë‘O €04¯³=k·Æo‡ôÛ½åÚíù•ýÍ û6°ÊÌáòRËX{¶oÔ£\x1Ž  6_Ž™‘Au«‚ÌâŒ[!­«ß˜«³áÉÔSòR=Õ÷96/HÊ.HÆ«C€%À`µáMx¤qµñÚË= ” ˆš^Q3$_ /h‚3»þQ­™,3x£|À£€š[®,óð7¼¼v„H˜zÄuƒÊ¥ÖÉR¤;Ór2V€¼¬Q¢9xA•;÷#À` oX¶Û€„éÚ@é°¶WÕ„Vsíâ `’ñ즷ÕýÆ‹—Í0àÀ»kÎ$n[°×£vÑ'É<X'[j 
Î5Ç)'3Q`ÈKÁ%š£‡>ZßDȃåY,–ð#¼$i$óXéô"— !ÆMØViV_ ­ÅØtÛƒt½ý<°v",“Eù¬9îÖöÀºÖÉ`ƒê&ç‹1öÈ+“F…é ÍâÚN€u2£°N21“£XG¡˜ dQ,–K€eˆ± Ø“8™3ɤF&8Ù³ü–› Uš´È„<Ÿjÿf¯kŒú˜ž^@¢ –&, ü HʤJr,„û œ¬]Ê0ë&3#ãwX™‰Xf\ÆïÕ,ñ´šå`Ì#¬“LÌä¨ÖQ(f£YT€K€%¼Ä/IÇlì}7@lê†S–ô(@€MϵÈCM&ë“@›âç hÄÅhÄ*xî„ê¸ÛÆ·tðàœ\/ºè"õꫯª·Þz«Uš3gŽúö·¿]ÊÛ­[w5é¦Ûøü‹éùG€¥‰AW€¼”¸ä•È ’² ’ñê`[_@p1pqï,Ÿ€XÔ6€ñ¬C4½ªÔYg¥^{í5õöÛoWLS§NU½{÷.lï>Û+„U˸Λ›qK‰ÕÏ¢䥔]5^”]ŒW‡K€¥±JhgÈ`X¶âu=ŽõŒ|b\ë°aÃÔòåËÕ;ï¼*]ýõª[·n¥²öÙïÎXa› ÀfÜRbõ³¨y)eW$e$ãÕ!À` /ªqypX.=ÌÒ0ñ&`°b؃7!ÓI'©¥K—ªwß}·ê´jÕ*uþùç«:”@öØãNTe¶¿ÚÚÖÙRjrÎÉŒT€«™jØL]®ÔW–K€¥qJ€eÈ@¸Ž9JfS 0‡ªž}öYµzõêÈÒÊ•+= 6ÇåŒÅÕC,ÖÉ„’;åd&*¬y)X£Dsð‚$*wîF€%À^2/ô€UY×3_2æJÕ¹ó&%p=è ƒÔâÅ‹Õ{ïéepbJ(Ç—©“>þØq×òyQÅó‚ëdK Ô¹æ8åd&*¬y)X£Ds4é£õMôˆ¨kºì²Ët8sçÈùþ–ò‰³^6ÖÉ<"À:ÉÄLŽ `…ÊT6=È qnCÃ]:ÍaʤwékØA£#À`ix¦^²n´³þµ{«`¯»î:uß}÷©¨<Л¸éÃ?¬[³çœsNë™õÌÈœèÉÿš`¬¬“LÌä¨ÖQ¨LeÓð3Pƒ«bÊ®¸†4:,–K€eHi0öé§ŸV .T×\sêÑ£‡·æ+–¾ùóŸÿ\·„5g;73$cÂ)L<Å_Â,ÖÉZ!À:ÉÄLŽ `…ÊT6Ø…{í¥–ÝvS†4À5Ël,·®”ò4†k÷æQÃìiè°Ï<óŒç=묳Ôf›m¦ÆŽ«>ú裺¦yóæ©ýöÛ¯²ÝºuW“nºÏÓ–ç)Ö©¿Ž `µ4žÑ…™Œ.DTètmßörj1•3`#1uEÀ.úÞ÷ÔŠ+˜2¤®6¶[Š–m€m 5m À>÷ÜsjîܹꨣŽòÆÇb¶à5kÖÔ5ýö·¿UÛm÷Ÿ]¤Þ}¶WÓfÎNžõz‰C€uê³#Xmßö`tav£ [ìÛÑN-†L+‚›]h'ÀÆz³ÞЪ—ÇãfÏ;Èkÿ5 ØçŸ^-Y²D=ðÀjçwVƒV/½ô’úË_þR×tóÍ7·šèé ƒ«ù‹–öùJ€uê·#Ø'¿þuFf(ºÑ †}K€uºe²•©‡®nßZ«L€%À¶´!<$†ÔÚžr´a ,ÂHü0B©qØ6à °/¼ð‚Z¶l™š0a‚Ú|óÍÕÏ~ö3õÖ[o©?þ¸n kÕŽ=ZuèСä‘=ö¸Õ²¯î9K€u²"ا¾ñ Ff(ºÑ Ïéqýz`aÛFÂN—™‚hÖY0ñNM,¶¦”ß gXùÔ˜€eÔ…—ª#G¦agž­†Ÿ=20Ÿ”uÆOÏvÎ;Ì1/Žú™g¹—¢ÎaêËPIjX@ ʰ!~ì±Ç”,¼¯ðÈb|,@k·Öbqì7ß|SvÚi%ˆmllôîÕ"MôD€u2°Ψ‡ F °NŽ™’S€[ð<ââäZn6ŽD€Õã'L¼Qmß·¯ºè¢‹Ó7ºtQC† Ì'eµk×N ><0ÿñǯÖ_ýÀ|(wÇwT½{÷vÊ‹üaêìZ_œS·îÝÙ~8~6Ò6P `Ï×^Öö_ûš¢—Õ1öøƒÂìÀ˜µøàƒV»îº«š3gŽúë_ÿZ׸>äCJ Û©ó&jì¸k#Õ+è…@½¶` lÁí[¬Ó}’ÙLØ‚ßàØXïÝBSAFöØc›•ËgÛm{«É“'»dõò´oßÁ[ú#èóÔSO©Ž;eó¶Ÿp jèСNy‘)L]ë‹s"ÀÒto…Ýî°7Ýt“ÚFÏ>|’~Á3^O”4DÏþë°+W®T/¿ü²·~ì;ì ?üp…õ[ÿö·¿Õ5=ñÄj/=›¾~’{ißýÈýs—ëÔo` nß`î“Ìf"Àü'ÀÆzïæÞr1  °_²0–PêrÏÄ•ÇXxQï½·ê·Áj‰?MJ' ØW_}Õ×k¯½VuÑ—]v™úðÃÕ'Ÿ|R·ô÷¿ÿÝkظôKK¹X§~›[pû–ëtŸd6¶à786Ö{—ÛBLìK€%ÀÖ‚L€9s¦ê¤Ç~Ö¯aQðÈb<ê–[n©î½÷^L:ýóŸÿTÿùÏ&y"ÀÆÚŸe­ðMt…‡ÖZiYF‡“8eo®l­­?Ýû` °Q®›îÖž|í°ØV¡ÈXlZcZO:òHuÁzëyÞWØ}wÛ­b±x`°o¼ñ†7©ÒÂ… Õ AƒÔÞÚ£»hÑ"õé§ŸÆžþñxàŠæ'Ÿ|R͘1ƒ›|—û#`³®26ß·'–K€ï'À` °œˆ)5Ï{ 
ì¼yóT—öíÕê€]£¿Õ!ÅßÒK†\sÍ5Þ°2‰“ŒõØ·ß~[½ûî»êþûï÷&@;ùä“Õ{ï½§>ûì³ÈÀõßÿþ·—0¹ÔìÙ³Õ¬Y³°ñõc….™K€-ô â“Ï5ÀþßÿýŸ×¡šÉž¦yþçþGá;ê)¼³PCˆc½;Sc¸ÖÓëÃ1°[ÏöÇcéõö›ÄéÂóÎSÇhh…v•Në„1±‡ê¾²×]wŠ.»zõj/”÷Ê+¯TݺuSãÆSΨ õ¿ÿý¯ÂqߘÀ‰kVøÂ °ØÂß) × 0Ř˜vÚÉKÝõ²H\pA Vyäï7|ÇœÈq”E™ìh­÷À”¶õzT‹K,=°ôÀ¦æ9PnÌB hÅ$Nm¾úUuNKX±€ì¶z²&À"ƼVòÀ À¾ÿþûÞDO§žzªêÕ«—zðÁƪV›>ÿüs\.Œ>uÁ‚Xöµ‰ôéXlKCƒm —Ÿ”({€œš xÔ^ýõ±« •ðþÚuˆ<£*#b€ÕöOÚ?_(õž^(z`é­gûã±+{`Ÿyæuà 7¨½´ÇUf!~Ê)ÞÿZ|—~– Ôk9‡XÌLüÑGycb÷Ýw_oŒ,à÷_ÿú—spÅ>¯¼òŠZ²d‰zöÙg °}wÐÑÞ|Yœ„¡‘f€…“ÆL¦sFlÄÃ;L!Ee3f©œˆÇÀFÂKI´Ù¢#’ ¢oðsuJSãÆí¸™á‘•ºî¿ÿþ ëDâxb±€‹<Ç|)Êo.þ6ÏûK¹(åàa‚ü|ã÷4郺`c½Õ °ôÀÒKljžå<°€Â}ûõSCŒetn¾ùfÕûë_/-±ÓYÏXŒ‰Ÿ\=°Ø?ÿùÏ¥„YŠ{öì©FŽ©>øà8­”0A¼®(!Ì/¾ø"¶å^â2:±öÛ­ O3À¾…])+‘†·Ýv[+»Õ´c£¶Aa÷úsÔÇ©¦<lr÷I=ŽTH€ÅÍm‚-þ†§7ˆxLñ rc ƒŒ§µáÿãFÆvì#‹ý첪¹ãÚ‡ëm—õž^(z`é­gû㱃=°X,«ÓfuZ­‹µU·ÓžXŒÙ¦1|x(€|šéwÞñúG€,^ËdLæ·€+f2ÆLÇflëÙ» °±öÛ™XÛ™;4ɨ?ؾvâ²WÖK€Mî>©Ç‘ °€LxEZm€Å6ó n{œ¬ ÁW\qEÙCˆëѬSsL,=°ôÀÒ›šç@%ìsÏ=§Žþþ÷Õ=´Õ2:ýõVí…ò&xêºá†Î /k¹ôûßÿ^577«¾:,^]VÜ,ðÊb -f6&Àú/;E€uîã±lMŸ´{`ýàÑœëÛÍè?ü ›U¼µ2ÿ‹8[°¯8kL`0¯¯ØÅ°—e~™´,¶¦¦Ÿú °¸á$lØÏkÞ¸¼>lN …|Zì÷vˆ›ú{ Î ¦Æp­§ŠXz`ëÙþxl7,ë¹bMÕ^xA-[¶Ì[F³cFbxaè%wî¼óNo‚&sXYFÇœÄ * ³¿õÖ[¥ÿåwL …þt¸öì"\øã?öBޱ¶üšÉX§n{ Î¥'Õ®í“E€•ˆ@?ûTÀ‘âÄVVãÀ6Ó‹+ˆb3c;ÀI¢¥¬°^Ò8ó`kkûiß»kßœ.+yü–å!À–š9'qj}Ç`饖ØÔ<‚<°Ï?ÿ¼7ÎÔX¬‹Yˆe–âÃôdLåvì˜1j„ ÞR:fê¦ÇÒþô„ÖúyÎ=zôhå­%À–‡W¼!À:™ÖXÍŸµ}² °2Ï‹mŸšp+ðû^Y&M»Øo»äeqmí‹{W¯@“ÞµGõ»±g–&qÂ&{üjÀâ­“BlÞè6ÛÞÛ$Ç#„}›Å1°µ¶þŠû§Æp­§ŠXz`ëÙþxlwl9€8q¢¬½°Ÿi/l§¶m=ÈõóÀ=è u ÞÞm£<ˆ"áÿ_ûšçÕ•ßä{ñâÅÀš[,6‚ž¹Ð+7ù¬ê+!Àö·ß0;ÛÆ$ÀFÐRYDýH;ÀÊ mãoÎ@,qüö$NöMŠqØc_‘ßæÌn¸ñe'¼É’1±ÐWÆ›¸Â~Ã6ãÈO€ wßè‹fL –Xz`éMÍs ZìÒ¥KU×¼euNÖkÄ^zé¥kì%ç§~ÚjŒñ²Ç®¿¾®×\s÷}‘ž €‹0c3a)¬ ¶ !fq¸Þy­Ü…XÓcê °ö¼/å¼±Ø[%wO—iXY GƬ0ý Rf Æ à,·Ü9ˆeškÉÊ~2Þ|8stµ[×Úü·¡¡ÿÆë´J§0!I©1\ëé…¢–Øz¶?»v,f>ÿœsÔO5¼.IJp[lÑ `§NªÐ -ÆÊJmüÊW<øÝN¡ýÝï~§Þ}÷ÝRÂ:±Xó7üM€%ÀºöÑeò`íe"]ÖŽJôƒÔrKñÐ[c+åîõU ­‡Ç2oe–ñÀö×-ª¹ŠV•›1°Ú¢au²N«MƒŒ[9´Í°XBdøûfåª÷Ô²¯Gú¬Z,á¿â…ÅÒ:³fÍRo¼ñ†Z°`Ú¹sgPåY OìÕ_ýj hlõŒ:jýòËéHzæ™g<€5ÃߨêV_ƒš‡…UÑ÷§q—B¬8Mdr&{a€…3‘‚€_sMY±wííæ1ıÀM[”!'qJãm™²:`W´üž%ÈõØStó‚—±©Šf–Y€ÕOï&š5 Þ¥Óg&´`Ãß&°` °Øð÷д™³=£r§~»¨Q^ªæ/ZZ3ÌÖ°+W®T§è¥o~¡ÁpzÂG¨—_~Yí±õÖžWVž“?njRŸ{®:ñ¨£T7 
d‘cioºé&P‘0ó1ŒpÌRl'.£¼ŒŽÖ´Ñxáº/^«è»ó¸KîCÓ8ñí70¢‘OìRüí.,ÃâÄ£jçÁÿ€U$3úPf:Æï²$OZl`loëˆÏ‰›}€½«¡áÝ,ôWà :5VÙD2°xS­ÃƒOWµ°Ú¿ÓÞ'À` °áïX@¬¤®Ýº«“N¦°­MkX¬ÙÚmƒ ¼5a»iPýî>û¨[4Ìšð:^ÏD, ŠIO<òHµ•ÞgM ä.Ñß½»v-å€}óÍ7•ê°ð|Cã7V¯©Jãj®KØ}öض÷_lhø¥ß WlÉzÉ=À¦ÓZl•–|‘v#Àf`ïÖE»††ßë6;´Æv›z€Õ†V/FëN~‰+´æ)ßõD,ŸŒ•¨aF€%À†5Йÿ3RMxµÿî ïåÃ$?äõã %ƒ)ËÛtOÖʀʔVs.:>¼¢qõµþö·÷i5o¹¶Ý¶·š>B}ÞskghÍKß„óh™ƒbtv=°ôÀª¹ºMiF‰ª=ÁÆå'% 4ëzÔnB€Í6Àâ?¨¡áݦG±)iÚ•«Pâ–¥qZÍ2dp lø±|Xl­F}÷¯°[j¸ÁÄNag)¶³l¯g>F‡cam ìÏGŒ(…cB§Óô죈¨”ºj8>V{m{o¶™úÍo~Ó*/¢#°¯¼òJÙ'ÀtðàTD: ÒÚðƒú ¢l×q¤âºàEÑ×tš±É&…ÇÍ‚ÇÕ®cÄØLضEª$¶ào¨¬YˆuZˆ•]{eÍ>,F–kÇÇBÜMõE7žó÷˜ °mÚ4zc^g=:¯êvaì”)ST{¦nÎ °×O˜ Õcoåy¸¤eB¦rá¿òûÀwV#Ï<Ó ¶ó>ùä“À"D¹\Š`1YÓ±Çè¥zOÜô']—¿hoú§ú:c¾‚¢ÀªÏ8ÞË#0Äé-¸}K€à.Jq™XLŽi¾‘Ì©Á³øv( uöYF§‹{Tw¢²”ÎÂr†–K€ ߥá53—ÑACpe{`·ÑQ@¨<ï*ì< vÖᲦëI믯°®+&sš4i’zíµ×ªJË €­ÔÆ °Ò6Çê ’vßs@h¯vœmû#=‘×§¬ÿ­gŸv…YŽ-YÜXl”!Ä)F¹bV-S+à*ë^í¿ÿþ ) ˜Õ:ø,î@,Öƒ-ìG =tÂlÅ«¬ 2tļó§jOIœFQÒe3„˜!ÄI·¹<áÁQO(dìØ±cÕ¡ÆLÂúᦶГ=þøãjÙ²e꥗^RøÃÔc=¦úuêä…ãy8eÝuÕ–]º¨z<«w¢ìß¿âÖJã[çÎë,ŽS)Å=‰Ú ¼ÛШ/w\mÏ«Çê%À`’5Y+٤؎%®ÂØ­Çë¡~ëdž)#é¼ôÀ:Ûª™Ì˜€Å €µo¿…›“¾I²|¼2›ÉÆW¥uˆq:a‘x¬6ôÂiXl˜ö¼áî¯0zùMâd{aRÜS‡¯Þ|óÍÀ.Z´Hí¾ùæ%Oí*„ wþyµ¥†ZŒÛÜN{agΜYqk¹1®°¿ÿýïU¥”ÀBK¼8À„NQx¼Ã\›0y¿³Í·>ÖÑD¿5Ì °%K Iÿ5¤V»@ëÜc®nãO}ã¡€¯Z»ðúë¯/EÂæ…“& 8Â&ŠhÀv±]A¨ œGÕžW=ö#ÀÖÚúÓ½f7\PÈ0Þ!Ÿ$ófÃÍŠeÈvÛ{ HöÛfÛ~@ è_xá…ÀD€ýÂ;o¬Ùƒá%«N½bíÕ Vx½Ö´å` Šö+’í‘ÿM€…íèg7›XØŸ~åa?ów`+Ù¥Þv™bÇŠýÖV­&?=°ù¾Ys°~Z34Âï­”yCŠgÖ¾épÓ¸¬éa•²ÌeÈ4es;ö—i©«_]ª¹‰+íC€õ'À`[ÍùÔ¾}o ‘ òpâøBió ©•Î­À^yå•´Ê$NãÇW[n¼±:g½õ«à…®û¥!ûíçAêâÅ‹ÕY§žªzꙇ¯×pÚS‡6í¾gâĉÞLʸ›†àùó燂XXx‚\êº`Á…Ù‹á½={¶š5k–š1c†×¯"%¡e=QΪwbf'ê °v´  Àâ0'r²Ç¹šXÓNäš‘ˆ¶ÇÛØvÔàxæ1% RʨM\å˜,l[ظü¤DL¬`¥1°æ )7,Àb?ÜÀò–É ÷ ! 
°~Rù…€Dªåʉ`õ ü´)~¾P ÷†”‹Çb†»´æ‰ßã,{ùå—+„ÿ.\¸P=óÌ3 PxÕUW©Æÿ÷ÿJ+³cÖ#9DÝø¿ÿë;K0¼¢]õ¤O˜¡øtO:öØÀ…ÍÙ†÷»ßyF3&…rIØò!Äìx£W  ÛA@÷† †.kqƒígÃf9€œÚ@+Ù¹)!ÄÞ¬ftÜvmL /EßB‹[b$Dßàçê0Wè´Ãj%DAÂì7Frùx`Ë…dØ7žý0°·ûË|(È›©rÓ˜gØK€mý,!ÀÒK,=°©yqÔ±%/å‘G©°Ï=÷œú–žiU<°æ2:AKÜ4ÿð‡^1 0 ¨5÷ÁLÆåÊ€…Ç×%` °IšìõX±akÚs T°¶MY`aË,È6ÈÙ¹Èo†ËßRÿzL±6^J²íæýX‘\$Vbèåm”ÌÐ+žLÜtxkд"dÂôr,ö‘}m¬L…cÊr>æ® Ûo­Üܲ¦­y °¹¹íRc¸ÖÓ»E,=°õl<ö—žÝ•«ÞSÇR bÛéÐßsÎ9ÇóÀ`§L™¢Æ^v™B,‹¥t‚Òoû[µÕúë{ceÏÔãfGž}v«}¶Ð`†íj†!WòÀÚQŒb›¬} ”Snã¸ìY»\z`kmýéÞ?s›TÃ/Êq°±Þ ©3\ëᢖØz´;3xL픩3Ô7ºt-ì>ûì£{챪†îI-KêŒÑÞØýûõó&[´`èm—]tÑZ+‹¼a'qúbX~Ê*}4ÖöIÀÊ8U Ù•hC`íÙ€ÍÙ„%"Ðobs G™tÉœ…¸ÒX™Ê %®©˜„M€­­í§}oì µV2©â$|ÑŽA€­µõWÜŸK,=°ôÀ¦þ9pɘ+UcãzÈ~Ue=å”S¼p^—µY%òwhÓÆ›¡ÄÓÖ]Wm©ÁõöÛoW]ôLÆðÎb|¬]îÃ?ì…cöâ0‰K€ è½3 °ˆÀ³£õl»P «DÚùål·#e˜š”iûÚQ…È‹ßíy[üfÆ1ºö6¿ýã¶· °±Ú·u/¼Q× ©ÖZ`WÄ6yUÜ78¶ÖÖO€ òtÑKlPáö`oiÜ-[ñº2'yj¯—Í;vl(ˆy晪«^J:dN¼³gÛFÿ¿ªe|ì9Ú¸5ÁXFy˜D€%Àæ`ã¶ùŠT>6Vû6…` °--™³·¾¥Sïy‰Û(FùXlíŒÇˆ‚›·Hí°c¿RXñV[m¥î¹çµtéR§„äc~ðÕU‡ Èê0/h‘ð;òHy=ôç†M\66dLg‘Il‘3îs%ÀÆtgå©X,–ë{G` ° !fq&Ÿ“o¿WuìØ©²ßýîw=ðÄ$O.ÉÙëµaÅHW¯³Ž¸RƬY³<€;wnèD€Í“%ù¹`uèoܘæò °‘ßSù+›Ý‡Cˆc½3i¸FíÉ¢–بÛË‹ÆÛ¤ã«×¨ágôÆÅb|l£žˆéŒ3ÎðÖŽÅx8—„u^;ê°b„`á…í§ÇÄÞu×]Þþ>ø °€Ñj&Ϙ1£ÛAç•õí}úrb‡ž›K€UsõóF3Êh‡ö”¥Yg@”!?yR€K€¥–ØrF!–›u`(zý1>vÐÁ‡–qã7V×]w·ìŽKÂ$N°€Øé:í«g)ƾXC m5‰›'k2Òs!À` °‘ÞR9,ŒK€%À`+ìö}ûª‹ôAé]º¨!C†æ“rÚé b†˜ëC¯¯ÇÞÛwÜqGÕ»wo§¼È¦Î®õÅ9uÓF}ÑÁ‰çŸŒ§ÕUçYÎSÛ|«w dq¯Üwß} k¾VJ&Àb6â>Ú#ûS}ObŸ™3gz Om5IVÖħØõ|²šX'CšK€%À:Ý*ÎD€%À` °åŒÁ•«ÞS#FŽrJ?>î'jØOG8åE™˜5Õµl×¼g ?[ý¸ù'Î冩³kpNƒ˜U›õNxF}=&Ýt›êС©²‡~¸þ»xñbß$Ëè£ÁµoêÎ;ï,倈V“î¾ûnuè¡_z‡ûí¼KîﬓÁM€%À`n•g"À` °بd–—oâõÍöõÅ‹©ÓÎÞj|ì¹çžë­ój§.z=Ø.:Zâ¢óÏ_kƮ‹u"Ã$€ïqǧկ+ÆçâÅÆíæ½m` n,–ët«d3S/]íAµVK€miC§èïþµ¶§íŸ{C*¬¡8qÒ-Ԥ̬¼¯½õzzñ2êÃY‹3Õ/]¡öÙ7¶[·nê†nP‹-*¥k®¹Æßjþ&?ðÀÀb9×t饗ªM7Ý´tÌ!ßÿ‘Â8ݰϣ¬æ'À:Y Xl” Û6.?)Q Y×£æYµ°Ø”´ç´U£0•‹!øè Ôæ[lYXM¾sÈ`uùãËžÿy?¿XíÚ÷ÂêãÒ†˜'½^Ûi3g{÷7¼¡H»í¶›š>}º7cq¥„<XÌ$”~õ«_©]vÙ¥t „ c\nÑÚÖ©«'À`£X§FÇLÉ)@€-ø ñ2:ɵÜl©p†U%C²è{äÑÍêÖ;§`éaÍõsaì¸kUÛ¶m=È\G¯ùzÔQG©yóæ©… ú¦iÓ¦y‹àr Kî`r7ãN;+Ìn^4p•ó%À:؂۷¯ëÔè˜)9"عºÃbʦð¢'×ì s¤ÂX~†eÑ6ÈØ¦6½ÞÅ kÇí­¯ÆÇ{üI%àÄ8ÕK.¹D-X°`­tÿý÷{‹±°~éÔSOm5ÎõŒá#Ê/²æX'"R€}òë_WËn»)CšÑN-†™2¥@TÛ¨Áõaæ0eRƒ‡5Àn’©–›ÊÚȲ L`á‘4Cf±ÿ# 
é´aÃ=ý0.ÿÛãCñöA„ç–ón ‘Pž”±¸È/ÿc³¾¨›ß6ì'¿›ß.Æ´Bl–…ó¶õp)“y½inó-U»í±W d·Øb uã7ª§Ÿ~º”° ¡ÄfÂ8מ={–ö=èàÁ ãmÓ|¾IÕëdD °tÎdÓ9ƒë¦í[¬Ó-“­L‘l¶N™µ¥‰)@cË5V`R†€P&y„F€:3Š¿h‘WàO Fì Hõ3 ñ»¹]Ž2ÅR/9–”: <ŒÍ:Úu2^q<³Ž(KމoüÏ1°Ò v”ÅíS¦ÎÐë%w-Áè^{íå… ÏŸ?_M:ÕX„#Mš4Ií¾û½ûl¯0¾6‹çW °N}x“ÎId™† ÉtÎdÒ9ã9Õ4ÀbÂZ~r¦6g”§“*htù¬Àš ¯~0Р“}Ä „7ÓÚ–¯r[ hÕ­¤ä½E™& ˱ì2ýB M/p±jlÐù•Åíݬµ,o3êÂKÕzë­çÝmÚ´Q'Ÿ|²š2eаçŠõdñ;¶cÙKÆ\Ég¨Ï˜qlªúyV† P:(@€­ƒè5h§0‹cʱý<°fˆs/¬=‰“y¾¢'q"Ä&A /HÊ.HÆ«C€m} °ÀÂsi)þ7=šW*¡Â2C¯ihÛÐ+ÛPN¹™Jñ»]–ßq%¤pŒíâéX–ewÌú†ñš¢v1•,„rýΙ ‘]Ðàµãµ‹£ `3n)±úYT€¼”²«Æ ’² ’ñê` °±B»¹ôL†a¥2ýÆÒš3%']pÄ6PÌ6@€Í¸¥ÄêgQòRÊ®/HÊ.HÆ«C€%Àư~ãF“4àåøú‹o+žXs›ü-3)'YW«˜`Ãë^ŒëN€u²”u®þN9™‰ +@^ Ö(Ѽ ‰Êûƒ` °±,BlÃÌú—1:H’c`)ówûï¸êÂr‹,¼Î¼Îf À:ÙRu®9N9™‰ +@^ Ö(ÑCõÑnHôˆSX'3Šë$39*@€u*ÉlMIŒÇʵX,Ö˜¸ŠÆ6‹m€m ê6@€u²£°N21“£XG¡˜ dQ,–K€e`HUÀøu{t*íµ¡£†Î(Ë#À:™GX'™˜ÉQ¬£PÌF²¨–›*Ã5J£‘eÑ“Æ6Í6€å©¨•®6‹&GÅ:`swIëzBغÊσSx À` °ô¾± ° d® `ã5êP:¶¢çøØ”]ܺ>ª†„ ZéÃò‹§: ~¾P sF=LÙô0ñºçºaÉ&Ìz-ëc™&ü/mà…å«”¹¬“:‹ü˜Ñ[òÈvx)±ô“]ʽõΩÞ6$ìg¶7û–kƒa‡úc© þ—²åœÍåªP· ö:ÚËY™eÉYi˜å<è\°!ÄN&F/ë³ìÛÑGiÒÛ×°ü²*åQŸ 6áÔ0™‰ P*v +c…yŠ'¼Ö¼ÖAmÀI—€žìÀœaàžÅ‰“n)=‹ð?ötâwù€'åa»”'eàÛ|M¨¬žöxÉ8ê'€*uÅÿR'œ'`ŒßÍðeÀ« ¡•^ôØùÁ*6 ]*ëH¨ȧ;%xZÓô±NOðx¦-\͹s*{üÖ~ce‡ýb»®xi!T¹7XÈ/ñîš¿a_¹ñ%|ßâÁ•‹À  rß½Ìá”ãu Çm ˜máÊ©²¶,¶)Ïr­pa±_M;2Ùÿ»DÚ&ÀÊßv"Ê…}‹oÓ!#ÑaˆíüP*PAj7­Ü\æMï °ò`0ß@ù¬À,¶XM˜ç…°RLXáuçugðoôÀËh9[¤€¢xd± v¬À¥í9Ø´AÑ„vD ”oª=Üö®ß&'¶8—™,d3åIW£@5+¡½q0½µ2nÕ¬‡<0d¼ªxQ¥ y@`{Ðzz`«¹ÂÙÛ‡K€e```ˆ´ `³g DPcQ»S Ò ÛÕœìÉÕc‡ ÀâöäQò›e›Wʰ!:‚SgT ß T°PÄ/ÌA`Õ|ƒ$ã DEBí ûö1Ç`óÝåì"5Zè GŠm€m€m€m€[ Âç,%RPÂw‘ÅtšÀ¶4íÖZÖ¶mq~Ôå¯\õžjjÚpî#›rÐOò¨ T€ P”)9ã(jc‹ååœxmëmám5a°†ÿõsÐûF’<ö˜OH/< *ûØQäðÉöÓ† oõlCþ[ïœZöy‡íæ¾&ÀÚÀ-u‘ãI½Íúaüo¶AxYíúËqEó¼pqz~ãº?æ/ZªÚ·oÿNÊú:V‡ P*@¨ȃë­·ÞÇËV¼Nˆå ¤ll±´:–ä7€¡À`ЄNs?äà!Bj‘ð·vk#&‹²½—Øxöƒ6ñ€JÙ€FO&r²VÀÞQÉ#ã|í}äüpnRÔûbŽ)Û°¯2\Î3xFU.´Øh£–ä¡ä9P*@¨ )S ©©éÍ[‹á•1ÄrêïEã5à5¨¶ øA˜_1€ÎôÔ"h%¿9‹±xrQ/?H(”zWKŠãÚ¡º•<°6<ãö8_ü†|Í~Y©[9PÍâäW8§ oÔ!ÄMSSÖݱ:T€ P*RÌ|&Ów‡Ü55ÙQ™Á-5•bEjS ÃFÍštÓmXzߨØbi®+“‰ÇRB‹D~Àkþ†cÀc*á¹åÂŒËA¸ßx× €µ½¹fø²Y`Ë-ÍS`³0VØOÓSOÿég?¯­wâÞT€ P*Po0}÷ÀzW"äñ¬X`ZÀõÇyð“/šùÁߪõ®p?zæØØ*µÓS4‰¼®ðdÚU?3óÔjkO2…zV°ö¸[S—J“B•«¿í•Îʽ¶é¦›}¬»É^ùê*y6T€ Pb)ð³n΂²–—9­9Î#k 
ž­ëYǦõ×_ÿ“7V¯‰Åû’ƒ‹õ$„± ÄÓ‚¼§~cc²¦‡SÊ0=˜]™àHBmÍcûzÚD™Ûí`ûx~c`m¬²lßãkn/°µ‚y=Úôâ¥+TÛ¶í>¨g§Æc;)Àåeœd ‰ÑŠ¡%ãiUà&]1¤<|òt.y¸‘œ&Û0—¸¨‡ÑÃcÆÔ•ºÖ» Ø¡Á¨üæ7nTÂoÍ1«2‰¶±Ÿ~øµZš€(cN~ÈcBf¥Iœ0;1ÊCÙ¢a=°rNß86þè–cH½PWÙo²y|¹fâ‘®÷5 sü‘çþ|ƒ 6IçÄBâRàh]0#êâQNj¶,5aàµÄÃÂüÀ«9­¥‘ã;hñf€#n3¹ŒGÅ$ÇÁ¾ƒJà˜(׬ B†Íö5ƒóÈ¢79áKž¹Ã Ä¢óaŒæ%± ° ¸¶¿I’t€9{ò$@›½&ªx@áEèõ{醲úìr”•^Ô¡>2q<¹Ø_<ºø6aÛüÊÄŠ× l/£ƒ}¤þö’>(ÛÌß+Müäª}’ùÉÓ¡© áÛd®,V…až^¬S®úlÍ¡t®…0ZÑU)æKµhÈf¨€¿ááßñÿË©À«™×ÑJ'‡ Óï8ø ÇÀ85!Ö®»ì“jÁY¹ð l¸á†OL¾ý^†s"¶¶ÈÛ€x#]`Êv³:™‘Ëù–Ëc¯Ÿ[KYIí{Ò©Ã>éСÃèð=÷HP±A]œ V+µ‡²í`—Š2ZÑE%æIµ|æƒâJ]cx6ÍO¥·aöÍSË›3³,©› ר›éa-ß;‘êfWUåúvë¾ù_’2txzïØŠÕ¦•–²A{±¬v¾",¼±•&…JÛýƒõÄ7ؠßuïÓXUÄ’R \$œågGãÙuƒÅŽ „ýèò=,Ž;Ñ,ÛúE Š3ÇF}ìáze(çfûÙõ–<8ÊDx¿œ«-iFFÚN%F+º´æIµ~KÜCóæÇÿå&°bpxØÐÜH(É`Mñ°¨°È[ÍÛ¨T_$Vî ôº}çý¸ù„Óf±>Å^ï|^ïJ“(É5G¨n¹¥fÊýž×öâ¢WZΡÃÛö›ú³?M½{fEå7جH°;Ë©Eˆ¼.Cáäxš(@ `Q¢a³âØ¥øˆ=-ÃáÄá"uÅþ°OMˆ;¿a|›6®l7µ0m\±›q,ÓF7ÏAŽkB,£S°‚A øy`åf“…|—['Vn8ß0BÉM&aĵ¬<¸è ºòÝÞ±c§›/¼ḩi1ŒX| ¯+¯+Û@~ÚÀ~ûú¨]»vÇd´Û+Zµý<•° Mï" Ñ=S#ñfÊoa`­’gҚͺúÙÓ¨£ ÚvÝÊ«ÌK°8GÛiã§YI[¹hwUNÏ×nüP;„¸Ò©cÜ ¸yÃŒYð{¨ø¬ Îö›9»îaT9½œù?­¯wì¸hâ“ÿE3?&¯%¯%ÛÛ@mQ;›l¶Ù5ùïss†€6Ó›»Î “¨+°6ÀÙÐXI¨Jyý¢ûÌü~ö§}.â1•:ØÛñ»ù[5+Ç}Ź„oóÃhÅÜÜ2Å=4bsbÜüxXà&ï+þ–·_2©“y¢ ¹Y$¦ÛýÖj•ý°MÂ1dÌþï©ùÐBä͘YWûd\1ÚqãÆ:=rÒɧsL,'ô‰|BŸ8Œh–I8cH¶ ¬\õžÚsÀÀ?cèI1ºÅÜœ¥í•0´£ËEÚ‰wÑ]ר¼´¬¬ÒQ ™ òf 7n@y[d{tÔ)yÃ,n^œ?ÅT ¹mÛvÀx¹dÌ•Š0›l¸Ã#©7ÛÛ@=Ú uøÙ#?Ç2k:\ø-†|1{Á|œµ =³çT‘è>‰ 4gÕd ¤JÔž„Ð ¬Év;ŠÐV ÇGyrØ–òÛì@ÙÏ,u¶áÙŽ`ˆË¹ùÍ–lÎfŒº˜‘Š8–œ/àV43ÏÁ¯®ŒVÌǽ³РàA`Þ¤aDÁ #ðŠïj˱‰7Iv¸F¥zá¸öZWa΃yó¡ÀÀ¶mÛ^˜ml\ïZ¤!ß?üS}zcÍD ØØØ2Ú¶ì¹õ§ò\Çóкîºm.Ó÷ÍGƳРÆLaQdHš¹ßoaÊŒ+o½êÅhŸ®(Ë¥T€ D @SË ¼Ü8E'.`¨,‚ P*P'zèã6Ïõ:Uƒ‡Yñ*VsñfŠcEþwC[Í1«Ý§ {ˆÑŠÕ^1îG¨ T€ P*@¨ðQ Lž½;X!¼a–xLòb nÊ$?8£“TœÇ¢T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@²¥f²Ä¬‘X£Úõƒüi\¾Áµþẏë±SOŒçD¨ T€ P*@¨@¶˜Ø)ª%°,ƒÒÉuyÔù±ÖŸëeO É([Î9 ,Ë>Aú L€¸,K1Ðõd¬|r¼J»'—¸\±Ê‹Èݨ T€ P*@¨ˆFF@£™+€–jÁKj—À¢®¨»í!Ø•Mû¼]ÎSÎ¥`£èfë‰ÿQOW‡~f+=Ê. 
°ÑÜo,… P*@¨ T€ P0=žâíÂö7µ@À(Œ‡Ò®J+žNûØá~òÈy üƒôP,ŒÐMÀú™^mè ÇÂ6—]ÇrðK€uQ“y¨ T€ P*@¨ȼ•Bvb]ËOŒ$¶ÜEpXxÅ»X),Xàß~Àˆ}P‘Çï­%Ï@‡–#×úË1ýÆ`Äd*@¨ T€ P*@²¯@%€d• —…Wp ¸BÂß~h,¼¥È¥N~ g×yû`_¿Ïà–ãË6ñÈ ØÉ8Tsó8ø=hœ®@.ÒQG”û¢‡‹ÇWê(:—+?`ECeáºVú˜šË5ªBŒëŽrQ~¹ëpHn¦T€ P*@¨ T€ +P `ìl(¨È˜N'ù߆#6”!$^H? 3ëc§L˘S‹ýÌcHMh´Ï»âœÌºúåµëPNy€¿htuL€5ÁÙé.WwÓãk†?ãøåZÎü®È[`%4Z¼Ý¢{¥1»AçÍíT€ P*@¨ T€ P_Ê, ­\¸+àÄöžJˆ­ .°Mâ¥5Ë7Ë’ú¬ c?+¨ËÁ£k1„‘pi?¯°„ðJÝý€1È jŠïš×Xì_Î{ëWžÒlzžMá-5?~!ã2íÝð|Ëu2C›åŇ‹—™·% T€ P*@¨ TÀYs¢ s<¨ ÅõãR•Bfzý<£(Ëï)P†mæ' €Øc6tÉ1ÍߣX¿Y“mm€Åv9WÔW>•Bš¡¿ý)§c'×Owüæ:.Û å†\ÛóQ*@¨ T€ P*@JKµˆ·RÀ€UnÌi9ÙüÂL`]AªRH3Ž+^a3Œ6 €•²m “ú›žÙ¨6hvg?€5½ž•V4)w [³ Ýý®m%·Œ –°nÞjT€ P*@¨ T€ Pšðsìd9,àV Æ/i{é*¬ r". e{/£X©«Zë «®aÁ~ç\î"ú,òÊØ`yÁàZGó8¶fâ ÷{Ñ€ýl€5C½åøö·‹—¹æÌ¨ T€ P*@¨(Ž•ÆÀMš$³Ú¢ ¤°Ø´¬Z[êÊ£ °™!ÛA­¬ÀÔ;Jˆw¹Y‘ƒ€:*€E9rýý¾ƒÎ‘Û© T€ P*@¨ Î Tòxú·xò[.&,ÀúÍÈä03ÇVFå…hâݼÊùØã8ýàÐo$¿‹ ùì ”üò–Xä5Ëñ«k±„é^.„Øö¸;78ž@Ê2¡O Í/ô¶ÀŠçPŽgžc%€­ä5Fˬ@¥ ÀæùJ=±ÝbƒÇ3Ëô»¶²]ô“o¿õnÍòÌk„kúúy³íubåÜñM€åó† P*@¨ T€ PÈ(UZÒÛí°RäÔ˜Þ=ä±Ãgíß¶ìS præñä8€¨rkŠ"O¹°]ñúí}Ê•´Íe ÑÇ–ü8<ÀeÐRE~ÚÛ ÀåúI]pü \#S3¿kk–#ûà8ø›ë¿©ÌíT€ P*@¨ T€ PŒ( Þj?¯fFNÕ¤T€ P*@¨ T€ P")PnÝ"iÀs¥T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨@±øÿw.G?ÓæIEND®B`‚nova-13.0.0/doc/source/images/rpc/arch.png0000664000567000056710000006410212701407773021433 0ustar jenkinsjenkins00000000000000‰PNG  IHDRpó ß HsRGB@À}Å pHYsÄÄ•+tEXtSoftwareMicrosoft Officeí5qgÂIDATxÚí½ ”\gy 9gâ3¶q[j[’­à X×hqÂ()Xãœ)Ï´~ÂØ=òð³:3mV±ƒqhÛêV9ƒ•x³Ty2^´ò˜‡°)Ea| #p)SK43:vŽâÚ šìY¬9KHÛ0±w©½où{›·¿þî­¿[?·ê©sžÓU÷·ºïßÓï÷}ïû·~í×~ío@và€ÀÀT \ô*Š…¶!ë9à€À Gàe#qU¡Ëm, ½îŸ¸î®˜–˜qŸ æ}Î}Λågt¾ûœwËä¼í®Y×½oôõ@àÖÊXÃÉ–Fäê.ʶàÞëôŠ[§½sï+n^É-;ï¦Ûuõç¢ÛW‰“¸î®nhxbVñ–ñ>çMókÞ6‰ºÏu÷¾®9'‡‹º N@ຸלé7–ltÌ—-'e#pò¾é¶¹J’¨!p€Àõ&p…˜y½\Ý[¦³n nðWv}Ü ®ÿZÝ_Îô—+¸e+uËfzCûÉ p \Éökóæ­Ëïæä¬dÓ†DoÝ2 ëÎ3ˆ¸áK`¹×NàÜ|êŸ7lsÕê¦8ý‚ÖFuI{+¾À¹ù“Ü7ÇÁn°çSòæÏxŸó^¿9[ŠkÁ~@à#p÷¾ˆÈ5¼å뮹Ô~ð°ÊÁnç>KD­Þ¥À•Íü%¹¢À¹iUÓ¿­ìú·¬Üy˹mÌ»eVûÏ pƒ¸’íãæ¦å½~p‹ÚßÍL[Íç$N…nž p€À p€À@ÖÎ¥Yô¦uT¨>íböîs屺)Íù†À7«xÕJ^¸ŠKî[UÑsïuݪ·ì¼{_Ô* n›º Ÿw9æªn½’I 
LMU@à®àU_°"UÕ&V'dZ‰¡`ÞKõ…œB÷¾ìD¬d¦åÜòE· ­â7É€ÒŽîL¢ÀiI¬¼'p@áú‚'peI+º÷«bgÏÖ[Õm‘¿ºÛGޏsïç@% \«ô–'pyÓ ªÑ¼ymZíRà*NËœ@€Àµ8÷¹¢Òfä«èÞÏ89› ÈW+rfDlµ¨½Û¦­£Zuͤ…¸¾wv¿\²ÀÍxWp2UÕ蜛žóú»ULÄ­ 2ç žn£l–«Æ\‘~p0×Lj&Œ†™™Ë¾Äyt+pGÞÿÁÛ›ñ}†Ìg?÷‰æWÌžá<8@à8@à˜‹«=jò´-ºQ£:‚´¤©AÜrU?Õ‡[g‘ƒ p€À æáTôÓ}¸é¶"CÝT^(zùÝ~ª Ò pƒ@Õµ@½‘ºº‘1_ÐÌü†[¿‚À pÃ{@-zV1¶ÀUL"Þ†‰ÊÍ#p€ÀÜðR˜÷%—½a°•¦iµáª8 p€ÀÜRU'a æÐz’øi.Sˆ8@à†ð*š:¥ù^Î}®Ûz© p€À öA7¢´Ë»i p€À7„•ÈWΛ6c#ru 1Û™áà p£{€muM¬Ê6, p£áÈ}KÍ;-öµož­¯‘¤~·‡À7¸‡Ð¬°ey(]y啟߼yó™M›6u#Mƒlܸñ…BaEÙ²eË…¤åe{²ÝˆÇe?Ý÷VN @àzgÛ«_ÕºÎ|辞ÖYÛ½÷†5Ÿe›ÀN¢c{å¡ Ô“³³³ß–›ý¥—^ú#°C‡½xï½÷6}ôÑf­Vkž>}º™æK¶'Û}ì±Çš²A÷-2(ßçòË/ÿN$‚§Üí‹ØÎÉ\2"m"[ïÜ¿¯ù†7îèi²ž]²À¹¦Í‘¤ý®D¼DŒ$:výõׯˆ4?~¼yöìÙæ8¾Î;×|â‰'Zr·k×®g¯¹æšç4z'‘Áèý4Õ·‰œ "6r½ÈO¿iôÄãŸY·žNûò©êªÀé4+p"ˆò¹×è\üÃdçÅ_\¾âŠ+¾%Ѭýû÷_8vìX+â5 /‰ÞIdðÖ[oýȨDgff>&QEN&˜f;ýÔ—Z¢¥r¥‘8¿yÔ¦‰¨‰ìÉOY^æ "q"tºŽŠ Ë÷áCà{éáq‰DÙ¤?Y$n?¼îºëVxàæ™3g&BØÚ½$‚xôèѦD/ºè¢¥YØEçf9¹`šîÝï¹eœ…d­ÀÅ5¡Ê|aJÈ4<¸îÛ¥itffæ{e“þd+++S!mq¯çŸ¾Õ,,ѹ 6|ß5µîä$ƒi8?â¦9+^½ \¨œL 5Ç"p€À…¤˜ô “¦Ñi—¶$™“¦V‰Hº•»„&Qà´Ï›mæ 5u"pC8‰)#3%Ú–ö¨Ðihf•¨ÜÆ¿;33ó+œt0i'D¨D¶,Ò¬j3 pC8I*#He$¦ŒÌäÕûëüùóÍ|Ïåµ#- LŒÀ©¼ÅÍÓ¼n!Óè’Àmذᖫ¯¾ú¯&e鸼$‚¹mÛ¶ïF"w˜².pÒÇMdJú¼…æëÈR™¯²¦ýâ4mˆ8æ…Òˆ pm.—˽÷ÆoükéãváÂ…æý÷ßß|æ™gÖɈLöK„RöK'ßyœ_ÒGîŽ;îxîšk®yˆ“²,p~ÄÌG$ÍFèt´ª¢¨2öñG~kužLCà:8‰ ½ýíoÿ®ÊÆSO=Õúùæ›o^'"×^{íÐåG-îûÈ4™'ß9 ¯C‡­ qeë•QË%pRtçÎß‘‘/p"k?üpGŠÖuòzúé§;8Ùohß2íúë¯ \;©KúÎIë¶ûÎíæK¤Sš«9ašn@à`lN*(øIxE^DŒ>ýéO·~ZÑñ%J"`"PŠŠÏm·Ýּ뮻ÖÉ–lKÇ®#ÓOœ8‘(pº¬|'}É{Ù/pö;ɶí÷ÐÏ*~‚ýý’ÖÕ¿‹Î—em“²ÝnÒï$ƒd„*iFCàº8)%BÑ'5$ÛtiN¦Ë|}I´NäEû¦ÙeE¶tž/>*híN¶o¿‹¼—ýXó—ñ¿‡¼·ßÙ~—NÖU”>w²_]W?í‹§ø(Ÿ¤‘\qœ€À!pÝ Üò¡C‡^L8Û”ê ßÔ(Ó´ QÞëÀ#=Ó&LY_"W*p²?™¦X“åýýË+© U¾wÒw–m‡føëÚ¿Ih]•YûÝýˆ¡}I9®Wnúý‡kE€nx厛>¹øÁ÷!TÀ0ÿqÜ*]®`dÌúwäÞ{ïm& œ¼¤)PÅ,$P¾ÀYñ©ñÅK›fm3¤ œíï¦ëØù*EÚ|ê œß<«ÛëDà’ÖU!‹8m6µëkÔ0ô:ö;Ÿjþý[çüž¥Z  ~þöÏœû¥;¡Bà†Æì솯¾ú5¯ºð3¯Ý —«®Úü¼øš/pûçççŸk'p¶)Õ—!?µ‡m6TqS‘³ËØþa¶é5© Õ6s é³'Óm¿µtÆ \Òºþvôo·®ŠoÂhÔ%ÊhBEàÆ9ïµÂ ¹ß‡nVаÛ¨q§M©vº6}ª°ùýåTl|ѳM²Ú‡­SÓõí÷ðNeQ¾WH:“.i]ÛLªÍ«º®ö—ÓßSçÇõ“Ú²Tg€Q œ$Ü•|nq4É®,#¹Ý¸‘"p€Àq®ºêªß<|øð? 
F(çšD”üév„¨²%­#Ó´ÉQæ…–±/‘!»mùÒÏÓï®Û–¦V­³ïuÛ*“íÖµÒ*?û=ìï×/O^<òÈ Ñß½ÊMF)pZ!”4Wxð¡ûZómÕÆDà©}úÄO4y%¿ü&R‘»¸>nq/IÙÉ[ƒ"0'ò&‚аIôM@à8nLNšRs¹ÜŸ>þøãƒ¦Å¿ì  Í?×ÍKj¢^sÍ5gu4 À¨N‘rWþ|‘;ÂY“›¸6¿ ZçT‘&Ùož­·ÖÕeä½]Fê¤Ê>íþýÚªv?²M‘L¿¹×.#?­ˆÊwÐõDDe¾LCฉ8÷P¸äꫯþÚ‡>ô¡u}âxýä%ͪ"pqéAâ^÷Ýwßó[·ný&òã&pZ›Ôo>ÕÚ£Và¤n©|9Ò:§"HVüd»2M–‘ee•85]Gæ  ÔmøûÑ‚÷v•K]F~Ê|}ÈÈwÖïÒ®~+€ÀeVà”ÙÙÙ#Û¶mûîñãDZµ^2¨áºë®[Ù´iÓoÓl ã(p*T6z¥rå œF²üˆ›+•¦Ð¾ty¾¿¨ùƒ.üýø9ÆÙm†¢‹7qçÛ7oÞüd¡PX¡o\o/i.ݵk׳ҿ0ú{îäã*púÞŠŽÈ‘ˆ/V>2½±ÒæÒ¸mHP÷£Bé?,ä³îG¿—Dáä½¢Q9‘Ñvß@à&Nà̃¢¸eË–ÓŽ;Ö\YYÁÌ^Òôüè£6_ûÚ×>»iÓ&éë¶ ² p¶Uû®…"cÚŸL›3-IçGÝTú«"/mÕíéÚIÛAฉ8óÀØ6;;û»333ßÛ¿ÿ‰.ñúÉëìÙ³­Ú¦’SïÊ+¯ü|ô÷ÚÁY8ÛŒ*‚¦BM›‚•¢Pj’À©¼I?7¸`÷#ßAÞ'Eàô½?ð!©Y@à¦Nà̃㒈]’¨œÈÜc=6u‘9‰´IA‘6IÈÉí·¥0= «gûY1²d›-»í›fNæûýÛüÑ®íúÀ©p†FÀʺ2Ú@ธ¨œÈÜæÍ›¿øâ‹(͆RjR£sò{=ðÀ­ ]tÑ‹ÒGÐIÕ`"Nû¡Ùi¾is§ ŽBíFà´ V"pÚÿMóÑéÂ…j›\m¿:YF[èv´/€À!p=TvHmO×÷«U*J:ñß{ï½MqîܹLˆÚùóç[#Gå{KXSù}ä÷ŠDµÌ€˜)²Q.‰j‰d‰<é4iδ¥´D†4凊š.£7wÛkåÐîËö£“é²oùjeIKyÉtù^~º›‚Ä/ æwCà:{Èl—Nü²Sqùå—GDHF¶Š‰ "K‚ˆÓ0^ÒÜ«û”ˆš|i –ï%‘µË.»ì97rôHÄ~ú³Á¤ Ü8£bèO‹+û5,8@ઉ¸„‡Oщ‘Ò‘%AÄIïÒK/ý‘È”E#y"‚èocãÆ?íKs¯îÓEÔä{pß‹ËûªDòDàb¾Ç¢îÓ߇À p€ÀÁT \ô*;I*8™ÊyÓênZÕ}^t¸º[·à„lÞ‰V%°²›WÖœ›>o¾[Õ}¶Óêî³,[Gà88˜F«›i¥81Ras2VM¸‚ ™Y¯î¶[uÛ(鲡&T'sÁ«xÛÎ{ß—€Àœ‹ŠÙió®Y´â„«èËW‡·`¶QpŸÛ \ÕÛvCà8@à{IÚÊNÔêiÚŒ©òUéEàÌüy·í²i¶Íû§ßщޢ׬ª}åhBEà8@à`ª.oßûi<üiö³®ë§ñ¶Yˆ›ø<ã-_ðæûß­à¾iD88@à8ãd@à8@à88ˆ¸7½¹Ðº¨³Èmº·ù+üåL~w¹ #p€À½Ä‡›»÷Þ¸Ì;÷ïk¾û=·t´½#÷-µÝÇÉ e›•‹9«Üð?Z™}ÅëÊðïPä<îÍ/ŸªŠX4|è¾Äù¹îT·½úUãÈî¥Ú¹½÷Ô¶ñ·È¶À oxãŽØ¨™DÞº2† pÒì)Òuú©/­›'ÓýæS‰Ö‰ð‰øÉ¼ož­Ç œ|ö£w²?AÞ˺²Œü”mÉ6¥ÉV¿‹æ·¤ïÀ p-p*j*UvÀ/v"L*u"^"Ov_àä½Ló#~‚¼?ñøgZ˨¤éúºŒö¿“i6J(ÓCßcP‡Àc'p"D*UVÖ¬4IÄ+ÔÎ6Áö"p²MåÓýX¡Ô(¡ŠeÜ÷Eê8`"N¥H-Xù²ƒB’çK[¯'?u~Ü42Í¢Q¹A Ü+®yyÇ£q8@à`àç÷w³¯xRàd]™/ŸC RàzM‘‚À€À +_¡æH00jëvd,M¨ÀÄ œ D1úø#¿Õúi›S“F«Z± œß쨃z8iÖÕ&Tÿ{И*³£LC‘6=“>sÚïÌ(ðN·'Ò%Ëh?µ~N¿‡nWæ‰$Ê:"Ÿ0U'$r”$B*ašÚÃŽ •ҲˋàÉ2)“(ŸìÏFûâ¦%m·ÝwFà8€‰8@àCà@à*@à88˜f“d¼~¢^@à€18M´+h=ÑAU/IL#7›-Çå' Fà8€‰8­^ $Õ -KWy!-ñBà8@à¸"o¶ä•E¦É¼4d Cà€”NË^ÅÍ—òV¶Oœ–ÔÒæV¿ ½lKk¡ê2¶)Væk­îW~ÚíJô¯“}µ8Û,,Èï¢ó´—F…^úþ!p€ÀÀPNú¢u ÒˆœHš–/€¶¶©,cבùZ|^dL‹Ò[Ñ“éò½:ÝWœÀÉvlQ{ÝžJœLS‘”e}9Dà€±8•˜Pói\´ÎؠͬºPDO¢h¶//¡u:ÝWHàTL5’çGäìïn£r 
pü-Æ^à¤É°S‰Ñe5j×”ŠèÉçvg?÷²/+g*zU“éŠ|ÖíªÀ…öÀc+p*AI͇Ú™$ p p™8m¶TÑ4GÒr"EþV™çO“Ï~6X·N·û’Ÿ¾Ê4ù=ô÷±ëól½µ¼üDàCà8 “ p‡À pS#p[_~õ ò|é×7{CàCà8¢ÀmÜxù)y¶ôÁ%€ÀÁNþþ©<û9™8@à88@à8@à8@àCà8@à8˜B®ù»oüâýåÉ} ×ÜríÈ®»¿òãƒ÷ÿ/Üç8@à8@àÆ™—Nn®·³»—žüþÜ]ÿîE`Œ™[:õîèZû«ˆ¦0w×—8®#¹(E4õˆ.(`ðQ·]KµÓÑuö¬Ê[Kà>ôÅä!îó\¢X´Í¸Ï9'ry..nlî!Û7nÜð+®˜=3É\}õ¦÷N͵uwmo+êvè'â†À!pÝÜ#*Þ´ó~>¢ê(™é•ˆ²›^±Q7½h¢{ºþ¼›–wûÕuóf™*ëî!v¾å?üìç>ÑœT~õÞ;›"q“~,ç>rò’躪F¬øâ†À!pÝÞª.êVñäM£s9#m î}ÃIXA¦Y ”uŒ¼ULd¯î~ÜúóFÞTî¬("p€À!pð’ÀÝôOv_˜äëY$nN®§èºúVÄ…X»óK/ÜûÑrŸGà:º9بXÃ}ÖèYIeÌÈYÃ[¿nL¥­îÖ)yÛ*èò&bWwó&²éCà8@à¸5‘¸¥Ú‡cî®/½øëý÷y®mjΛVH—²#p'oUÓ|Z÷¶UrQ½5g¢}e'e8¸‰¸Ãµ×ÏM¡èWqb·`û¤©„Åœ6·Ö½íøýæŠ\Å4ÓæümO¯ÞùÏ/îBÿ¸tªùÕÿô§ p7>¸ëï×§^hý<ô•ÿïo_ôwšÑq‡ð²—½ìcc/pF 4…HÃëϦ͛U¯?\#°ºFèÌ´ªYW›V žÎ{û˜¸4&¯ßwäÔ§NžkŽêuç±§š_<ýu8#vªUö,Ÿú'pgö.ծ߽üäã’Ì—ó~®Û!Þ H‚À!p€À!pÜ0"p/F='¹àBó8ï8@à8@à8nü®­ƒG9¿8@à8@à8.hÝÓ¹¥“³œß p p—ëê¨Dà8·8@à8@à8.ÌÝSÛ.èç†Àõ{S˜ñGŽÂøÜ3Ï<Ó¼í¶Ûš7ß|s‹OúÓ‡À!p\¶¯©ãû8¯¸~o ë’êÂxœÈ›H[­V[#s÷ß?‡À!p—ÅèÛK¹ß¦*U 7“hœû)e´ Þ2¹Àz±Óu;fºÖ@Í{ËçCÓ¸fKÔTÞìëúë¯Gà8Cà²y=‰ãœFàÒ¸’)U1IxµÄ•&ñµ%²¦tVÅQ¶U\E‡º)ÍU6û«˜ýÎ#pkî©§žZ7]¢r‡À!p\殥}Ò|ÊùŒÀ JàÊf^ü¯k¤ÌŠ–¹¼WkÁ¾÷¢qu‘[S¹a’£p½œôwó›KEè8Cà¸lá’öž‘ œÏÜ ®”$pf¹ª)½UrµSë1ÛmP¬ºÏUnýKú¼I“©ˆœ¾úé§8Cà¸l]G$íEàF+pNÔ*œ_ ¾è ܌ݧýéÞ/Nò`Š~ÒˆÈà‰¼…šS8Cà¸1¾-œ•¤½¡’Y€À ]àÜûyÀ™eJNÞªf»e³Î¢™®ýåtzk¶"liS¤ÉTß÷}CàCฑ^C$íEàrSXÍçÄ«h敼ÙŒy¯Í¦«}ât[Žb O\É1ãm·4é¹èºÀI´MšM~øá5)EˆÀ!p‡ÀeW2ëI{¸q¾¹è „]#Ap§£P/\¸°úYd®×fT8ÉõóXÄ~ÎanÜo0ó&õH‰ƒ×ŸÀIs©DÝDä"p‡À!pÙ¤½ÀaJ.­  p7|ö,Õj{׸?!p0'©C®½öÚÖ†^ë "p€À!pÜЯ’ö7­'}Þ4™¯DàDæ$"‡À!p‡ÀýuCÉ,J‘u‰vݼªÍÕÃ8­Â WZ Cà:¼Ö+ýœŽ:Oá{äõþ2¬ßCà†,o$í…¡ \=f^ÀI!{Ä Ñ71 p}^ë=åY4Wê÷~àî7š7²1¬ßCà†…+™uޤ½06gÊgUõ¿`Ííf–Ÿ77g»¼æ‰Ë{UM:Mê[5Ÿ«“최ŒD•T" b@àÒ8wÝêµµšæÒµ>oÈm>_|ï8w¨š$Þú>§ûTÌuÝp÷‚†Y¾œðÝÊv?7ÖÑ·åÚ{ Kà&ÉnIˆ¦úBÁþç«9Þ4÷›}@¸¾%#yš®(·U²ÿ»mjU†²ù~™o6鵜­È@%.%³×`=0mÁ]¯­¦ÎWýÜÍÏnÙöÆý1×ðÿ4÷‘²ùçmޔܳ×üLÂ÷¨{?gˆÀ!pã I{aìÎÜLäCฤkäîÚÞÝ˵'8Oa¨ã+pÚlªNœ8ÑúÜK2_8Ø5BÒ^@ฟ¼DÜ$ÿ›¦‘fT‘¸^òÁ!p€À!pÜ@®ýqއÀ-ØH‡À!p7zHÚ ×Vàdƒ6ö2˜CàR¿6(™·þ%Â&²&ͨڔ*Ÿ5‡À!p‡À’öÂØœú_Òò5nÚŒM2àý—ü}º÷%/\&Ó‰ô“ŽQ¨ÜØ]G%ǹ #8—²C“ô–\jMœYVݘ„ ¡*u›ƒn’N.H3ª"}ߤƒ¹Gà8CàFÃÜ=µí­‘§Dß`Ä·èçYÓĹ*SNò š¨×,Wð£búÞ«ÄY_§Ï$ \`Æ4œ\°¥´äs/õP8@à8.µk‚¤½0§e²b­aJeÙj U3½îÕF­ ¬˜:ˆu“]}A×sóÛ 
œ„«wøø'ó7¼ç»»—kG²Äß¿ý3ÿå_U»/Iä+ÕzÉû†À‡À!pˆ¾-ÕvF×ÄiÎI¹À‰«š’X%_¦ÜgmfñJe½Bö ¡h™Vtpïe?3þrkÊçK7\ýµ°ø¿>/òVØÿ`5‹wì ÿGWÒ%ò&Q7i:Õ ‡À!pÜH¯’öÂøœw“вY‹mŠÏ/˜H[ݸ‚'` ‹ œ·ß¤>p­¦ÚK®È½Ùf…^›Pm:ŠÙ#p‡ÀôZØ'ͧœ0§…æ½ivÔg=0=ï5§®îm»Ð­ÀMÂAìEàTڤߛDã$•H/ýß8@à8®?\ÒÞ³\ 0NW4£P .òÖpÓÛ \Á tX'pæ³Fó¤?\ÅM/»Ï:ó^ZJKG¡ŠÄÑ„ŠÀ!p7’뀤½0^çn 93˜ d"k¡¾nE÷~Þ]Îän[Ô¾mf=Í1·˜^ÖmùûÔo£ešËûܺ+_õ³_‘¼oÞvªNä8@à¸xy“ä½>½ô…CàCà¦\à*.iÁK0_uÓÜ@½E¸·üâï|gïáZÑÛNÉœæ7­ú%)¸)8‰ºi T7­‰ÚK?88nÊ®n2'TL¶ƒª9MsU•fÓ7þóõŒÉ¼3ÛÉù8[…Ó‘¨ÒŽ&TCฮÿv 6%•°z@àªFàμn÷ÝÿÄM/jß8#y p\¼¼Ù‘¨’Ø—Q¨‡À!p]ÿíæcò“ΘÚÝ«yNwÜ|ßg4i¯‰¶•œj?º¢¿MKý…À‡À‘®=®dÖ9’ö‡À—‹Î÷"–9烞N1 pÜp¨›œï…ãœz8M#"ýàzMà‹À‡À!p±g¹vln©vç pЗÀéKÓˆÈhTÈ€À!p‡À¥K$n;ü¤½ô%pZU¢p*s‡À!p\ªçùq-™€ÀAj'HN襤‡ÀÅžãûDà8σTN"n’¼÷Úk¯í»‡ÀÅžãg¤ •ó 8HEàNœ8ÑsŸ78kÏÜáÚ=KµG9ǃÔNšJµùÔCà8ëSÞHÚ  Bà´˜½ ͨúžA ‡À!p©œÛµd© œ}‰ÀéK2 p‡À!p½CÒ^@à`(g£n‡À!p\ßçõQ‰Àqn8Éÿ&#RyÀ!p‡Àõ†KÚ{šè p0p“—öc*‡À!p\_ç4I{ƒÁ œÖBõ¡ Cà8®çèÛN‰¾qN8; ÕCà8ëé|&i/ p0XKó…À‡ÀM»ÀQ2 8@à8¸ œKÚ{–s8@à8¸ŒI{CàËÀ¹¤½g(™ p\Fޤ½€À‡À!p€ÀeHà(™ô-p¿ùûg›ö+#á=} ›:#i/ pЯÜqÓ'ÿÙ‡ÿ¨ùý'#áCǾÚ<óçO!p€À!pS#p®dÖÎ@à Ÿ›ï‘÷ðö‰½ñ"pdSà¾|ª:±תwºTÛÉù p!p>t_sÛ«_ÕÂNçþ}Í7¼qGs÷Þ†.wi I{Cฉ8¥h{-Y³’&Ò&B÷î÷ÜÒ’8_î²"p.iïiJf×ϱß&Ç?«Üð?Z™}ÅëÊðï0‚Ÿ¦À‰¨‰ ùM©"u™“Ïß<[o Ü‘û–2'p$ím/Å,ßw&€mÙ¸#ozs¡)Ç?‹üÒ¿šÙïþ¶wÜ4ðâèY8³;-®™&¢æGÜ´)5Kç¢oç¦=i¯üåüÏêµ›eä~/÷}ƒ 8Žý䤥˜TYó£r¡iã~¼"y{ byÚŸ9òw”¿'÷á#÷{¸ÔN¤Ì8íûÖNôÆùx‘´Cà€‡8DZGà&Vàt‚ôsÓæSéÿ¦|é'DàN?õ¥Ì¯=KµGç×ðÌAà8à!ŽÀq츉8‰ ÉO‘êÏÓ Y8^$íEà8à!ŽÀq츉8¼É ¤é¨’ûös¼(™…À!pÀCãØ#pS!p“r¼HÚ‹À!pÀCãØ#S!pÚÏÍ6¥sÀBÊw†¤½<Ä8Ž=7Ñ'ò¦e´d@ƒˆ›ZfÚ4Ž— ZÁ 3fg7~~מ"25äï.ud8à!ŽÀq츱8m*‰³²6ÊþoÝ/—´÷ô$—ÌûèƒG¸ŽG€üÝ8à!ŽÀűëï„ÀM¯ÀeùxMCÒ^âÁþ/BhžŒNÓùI-àÿg>ìB×\wHdE§4‹éqôû> ë8Žâ|AàÂhþ·Nð«4Œúxí=\+îYªÕ&ý™À!p€À­AnÆ*e¡¦Mê)Z‹ÜðEìßc/’&ÇGNŠô8ÚcÀ!pŠý‡m\nZ’ö"p pëMnÞò`Bóãyê ÿc¯ò–eEàhB 5µËq’|pãØ„:M%³8¸5Mj}‘›¤m–$pÚéïc/ѵ¤‘„2=)g›^Uí=Tü\þ)°‰aýmhäoÓ{À8¦™¶¤½Üš‡¬/`~6öv8;Ïcïçn"cZbI¶¡Í®yýæÙzìqòYÖ—‡²¼÷›à8®›ã%)C$uÈ´<38¸5U‰¢ÅEÔl3©íÿ¦ËÉØv€GàÆóØ'Ix;“¨™¿®ˆ›=wÚ œFÛTø-Ñ”5‹^‹3æsQ¦yËÌÛsЭSŠX0ÓfÜ4%?J“cä÷sµ}]G•Á?îx¹¤½gú¾Éß=püäxäüiæ}η¢w.èôE‡À!p©5«iöu¬hS›mNSó;7kgø¸?7>ÇÞïãÖÀÅS+hí.$£Šìô#pîÁ]÷æò°nxË­.½ª"n÷P¯»éò¹ì~t¹Q \ÒhóQ^ÓqÇ+­¤½Ñ«¢ÇÄLkȱ1Ÿô;á«:Y+¸õËæ\Ðc]tçACà8n 
õ¸›v/Ñn<½fÙ›/‘1‘öP“hwePàÊ.ºVõ®¢òåÜó0/¶¡y+‚ÙqiB%¡ãµ÷žÚ¶HÞΦ‘´×IÖ‚­[©sÂV7ÂW lC£rïX8CàRC›¯Bédšm2Aà²ìu°J\F}ƒ/p~¿Èn.Ô4?ªó¥O«Z 37oæUÜç’ÿ@÷šÙ4ê¦Ë‰ÌZàäÞ çƒ—qHö:^i%íµâ¶Š‹¶åÜûª=R^pDZìS"p‡À /"cÚÜd{¸úÍÞš&¦±ÇQ@ØþkºŽþàKžö‘ÓóFG8Û}ë´¬œ¯†FcÌÝÊØŒûYõ‘2Òà \qÔ}àì±¶‰ž“ŠÜâx¹’YgRºëz,ݱ-Ë;[tÇ'Ià*+ÐCà¸ÔIÊ foäÜd{ù)?%*fû7&G=컎L“íÊycçÛeôœ’ùºL–"p*fqÍfNÌ*¶yÔ{ðÏÛ¨L/M¦ƒ8¿˜½æ~Óc5Ê¡J?Z’OGªË|Ù†VY³£Ù}Óm¤‘CàÚ Íb¨6` f`ÉäëÑœJ‹f;þ²%nzNj%J‡çqâ•;núä¯|ð—9^Càw'»—Ÿ|ÎI~×ÿw»>ðÌó{¿ŸæßK£$ò`ÕÅc-p˧NŒÛõÝ)»ï®ýR‡¿g+ ÷ú_øàDδÀÙÔRŠJšFÉü&T=‡ìöì4ݧDÖâ’Ûmª¬Ù¨œ8]?ˆæT®½ÐTM]À¢7ÜÞÖ ,8©[0 ¼ƒ7¼»0ȼ<ã.pãI&ÔÁ{¹ jXÏ»>òÇÝÁç×Þ:Ò&ÔC­*IQÂÕ~pi'ò Õ=v3êM÷œêæx—Y~îEÇ÷X°_ Ü F¡Zy·M©Vàô\ÐÉŠÍ Úž~–TötZÜ{ÿ¦‚j.Õi¡è7dó>[i+–/yõë¼ùilB•‹O/6 7ËçQçgCàÒ=ö¶¯HRGä´$1lüšG.t³õ3ÚÜÓ& ·K”‘8µË»6´¾Q™9”<5MÓè…ßoHÓ4 kêO½“'?”h¥D×'áÙý.·Ë š¤ ƒÄà_ƒ¶)Õ/©ç˽¢÷{6Â&ËhþGM ­9!“îÝ\¨j 7¼‡ZÝ4{ÚÚsSÂFÉ›¨]=¦ÀðÔ \¨Ÿ‚ˆ›^”Ó©™³ÿ ëqÖŽ½ƒ1˜VÅÛgf˜#ÖŽþþŸ¬Ë#æ)’¢'Ÿí&¡ïêÍv¹à™Η»aæûÿæ«í$îè$=û$Šýñä"Ã8}†è3ÃFàBÕ{l…ŽÐö4=_åCïWXÐÏ~ÅNÎö»Ä?²\wM¨U“ݼmÉÛä:Í—ôPň2.ýc¯CÿCÕAÕͺÀi$îmGNE"÷¤JÀç–O}jŒ£2/&%ñM[à’¢ôòàî´KZÿã¯5ßZ 'ò•fÇIzöÉhÔ]‡jÿ%)‰ï°Î6¥Ú{‰.kåIeÏ_ÆnË–Í󣽡hŸ±}ïÚ œ¶@…FÔ"pClBu™­ëI¨MXö>O­À…BÈÚQ4tq„šYå"Õ¶ôoò²M™®ÿ9% 5×}jGéAHå¤ \»:¶ò·µÇ¹ÝñÏÚ8”:Bæk­î7î¶ÛW;ë6…@/å^k¡Žéy”šÀÉß5t}k.¸A4Ku¯ì÷ÖGàî®íÄg`;†%pöº÷k"ëP¿ÿ›ýRÓˆèu«Ó칦µ“ýsP§kz[)¢³Ïµ´ÓˆlÙ²¹çŽiìW2ýàê^ê‚Jž™V¦ õK«7¹È’ú½Y!³p}(Ú T;¬ú£ŠìSqM¸~"P½ h}Å´ÿSšd‹û/5íoâ{s³Eåã†îëù¤7ÉÐ1ìd_I§ÿ(ø)ôAJ!ÐËùÀ}#ñ­c¿¥e˜Í©r¼~úÝþK42âY©;ò6“¿mܱ”{M»šÈ¡¨¬Ló×óïóIõ¸eš\Ûš^Äþ¨ëY1 M‹ûîý ÜÏþìÏô|î“nò„u ƒlÄb°P\¿“¹¬o£¡‘B6RÒÉPs?tNjç¨ÄtzcÒt¡ã¡ŠèùÒí?´Cǰ“}Å œŠi·)¸ôN®sïv 3gW$pû"Ž#p“Ý f¡ †"pþ_#6ª¢Óü!á*~Iáuû Õ¡áþƒ=i¨ù KóL²ÀisD§-±Ç ´ŒF[“Î?†ì+Nà´éE›fùœÔK7È8â/I2)£O8¸5Â÷߱ޅò=)íÒ<Èvt±³ÍqIC͸þûÀ%5jd–N£½¡óFÐè7X“¿³ÿÏ—2ª4D{ b&Jà’øÌœµn£nÚçÉÞØ;jŽÀõwìm”4î°õ,Cçà®Ý¾âNå,iP7XÓä~·Qô{K:^­âïKµ³ p0Q§Q6`€DåìÃS›¬ü>GDà¬$Ä"LjŽÀõwìõØ…dI¦Ù>r¡ÑX:(A›aÓ¸Nö•4ˆ!$€íR péæÇ4C¡ã Üé¹¥ÚN8˜Ó‡¤¼ Í¥¾XéÃ^‡lërþ(ÔÐ>´oR¨)¯ÝPs®ÿcoSih6sý’rIlSÂ$5}úgG&ÃvûJ8éÈV=?õ÷Aà/pãXz/t¼"y»cÒrÁ!p p«CŠ”„S–Ó4 òдM 1÷G±*ICÍåó “‚NK)-=*Eþû¸ãéé÷o’Ï~6í Õî&íË®:¿üö{…Ò pé¦U3i×÷‘“—ì^ª­$%½Eà’ïrmʵ˜…zÙ¶o·üÅwFà`h7ÍL[1{@àÒø»H· ›ïo”©C:9^{–jή@àº7›hÛ:„©$¦Nªö€ÀqŽcÀMu.nÃ8 bPö®#‰«!p¡9ýj"uÚE".‚Ÿ†x!p 
p¸,¯ÝKµsRkö“ Iš­ j¾lW²N£´ œì/´Í¸}u"p²MY?IBeÛ½J*<Ä8Ž=‡À¥'pË’ë,Š•4ÊØ'[„^#wv]nÉÙšØ~6»ŽÎÓæÕNö•$p~ôØþž¶ ~Ÿ^j(#pÀCãØ#pc)p:˜©9©›ã%¤2—ŒVpé´9S…J›Zý4T*Uv%+O~ÎŽ”×õ:ÝWœÀi ?G+lúY“„Ë>z­½ÀqŽcÀ!p)¯Ý˵'vß]Û‹À%7qv“‚'Ôôé×+EôüzÈ!ó×ét_!S1õGÑkš%û»û#â8à!ŽÀqì8šPG)pKµý!pñøÉÔ{•=»^€ûŸ{Ù—8Íú'#íÊðGà8öÜX œŸrT5P;=^.'Üù¹¥“³\rT-©œ&tÏ’Àiº¸ú½š3âÇ›hÓé¶šK\Å•q:^‘À8ˆÀ%7‘û¥îM1¢‚gËßùRÕO ¾Ð:Ýî+TCÙ—3iZµ}ó8à!ŽÀq츉8ºÙQ„òSüŽ*×Éñš[ªíˆn"Žé N›Q;ɧ}Ù¬ìùiH:8+ÿ¡uºÝ—?ˆA›Líú¶/<Ä8Ž=7ÑçKš}HjóÚ8/89.‘]MªÄ`£±¡:Ëœˆ”î+nn÷å œŽ:õ××ßâÇ›x³9û”(Jî 4¥"p‰œöKЬêr¡œ¡òjòÙϱf“ü&•dët_qµPµ¶rè÷I£Lkx厛>ùÏ>üGÍüë? :öÕæ™? Cà8®½ðsxÙÇ87¡ .'ܹI.pŸ–À}òú}GNýæïŸmþÙ_¬Œ„÷<ôµæOCà8®õw‘¦&Û‘]›¤’F/ŽÓñ’t"’V.pŸ:y®9ª×ÇžBà8[ó·ÑNçڼߨC¸»k{%±/ p‡ÀMÀMÂñ’œpÒœŠÀ‡À!p/p’s+.!ê(úö pH‘{¸²g©öht¡5§njî€ô7Ê*;þé}Í‹.žÉì÷—!·i2µ©%-U4Ìú§ýÜÜ=µíѽõ,i Üe—½¬ß{ÏI¸ÚÞõ±~ÐCà å8·÷žÚ6þ“ß„ªåŠüôã1Ôg 7ZKë?Cà8J8­‹š›;\; -< p‡À7µ'Í«£Ú“À½Tà~eÒrÂ!p‡À!p€À!p™ ×A'“H‡À!p€À!p8y¶È3Cà8@ฌœ;OÏʨT8CàCà²#pË’Cà8@ฌœ+pCà8@ฌ\ë\]®=!5R8@à8Cà8.+·TÛqCà8Cà2"p.'ܹI(pÀ!p‡À‡ÀM…À¹óõhÄÁ¬ï+®Øp8Ë5”³Îå—_öCà8¸! ÜÜRmGtΞᜱ¸n8Cà8ëøœ=#"Çy‡À!p .;wPšR9oCà8ãoÀeDàæ–NÎJN¸I+p—»páBó©§ž ‚À!p‡À%ž·EìçܺÀÝÿýͻõóæ›o^}ýõ×#p‡À!pIçíݵ½’Ø—s8n$§Ñ6û^dCà8k{„œp€À!p7=·\;"pþ‡À!p\FNÎ[99Cà†*pµZ­5Á=ýôÓ‡À!pÏ@à¸q8}}úÓŸnEà„gžy†4"‡À!p2w¸v zþ<Ê97TÓѧÒ|úð÷>Ÿ8qCà8ëDà^*p¿BN8@ม œ›DßBR‡À!p‡ÀuøüY®›[ªÝÁy7³8CรpKµÑy|šó8nh8Á¾¤‰|8Càº>ÏÎÝSÛι7p“×m·ÝÖê§/‘7Ji!p‡Àu}/G<À¹7p ¥éõ…À‡ÀM³ÀIE9—Ì7pÓº§6â&ýßB8n‡Àµ=—Gìã|¸ÀI8¿Ïƒ8Càz:—÷‰Äq>‡À \à$ú&ˆ´i“*‡À!p×=.'Ü9 ܇À Eàä%?%'ЄŠÀ!p×óù|4â ç pÜÀ.͇ÀµrÂíˆÎé3œS€À!p‡À—sçôiIîËy‡À!p p8)«%åµ8¯Cà"p2UùÖj5Cà8.5;9×çÉ 7°œÈ›Œ:•Á "tRJ Cà8ëû¼~,b?ç pÜ@ÎŽF•ѧ"s¤Aà8ëó¼¾»¶w÷rí Î-@ภ܉'V+3ØÚ¨‡À!pç6 pܘ œDÝDÚz-¡…À9‹9·—kGÎ/@à¸TN1<ýôÓ b@à8K9¯åüæüî'7š¢Ï ïÿÃ3ÛçÞw00o* ½œ”ÎÒRZ>‡À!pÁ@à’n³F¶–#Ž\y啟߼yó™M›6>7} …ŠÏ[æßûƒ7¼éçŸõ§oٲ傿þe—]öœl_ý9ö»ï°cZNF J¿7áÚk¯]}ï·Gà8Càz>¿÷ˈTÎ1ȬÀE¯Kœ Ý133ó1‘§‹.ºèÅK/½ôG*[‡zñÞ{ïm>úè£-¹8}úô@’Ξ?¾µ}Aö'ÌÏÏ?'ßᵯ}í³"y³³³ß޾ã“NîöFlŸ4³/8}1 Cà8.\ûó’Žó 2!p"<·lÙrúòË/ÿŽÈšÒ­·Þúƒ£G¶äéùçŸoŽëëìÙ³ÍãÇ·äîúë¯_¹æškž±ñŒäîw£÷ûDJ'EàlÔ Cà8K©Ê Õ8Ï`,ÎEØö‰Üˆ°‰ð¼ï}ïûÑO<ѯô^+++Íw¾óÏnݺõÓLC‚À!p pã-p.'Ü9iN圃\.—{ï[Þò–ïŠltó’Nñ:’Ñgle~ ³ûÛ”¾bi¿þàþà…Hâ¾™Ö 
CàosçûËœsºÀmذá–o¼ñ¯{yˆËèE[’i˜wâĉ־5—™tƿ뮻úÞn¯¥¥:yI¥‰«®ºª‘F$Cà8@àÆ_à$•ˆ¤ᜃTNFJnß¾ý¯z]ª—Æë™gž‰÷ôÓO¯›&#'mÔ-ÖMÚgHà’¾[·¯Gyä…H⪇Àñ·@à&_àÜ9Z’ûrÞAj'µ?¥|T¯¯NN„ÈOSaבmØ”·ÝvÛê<‰¨É²2]~ÚíøR%?»]]_öâÒhXùë7ÝFèåê¯î@à8¸É8)«%åµ8ï ‹^Û¶lÙr¡Ÿ‡xRªô‹‹hYÑ’y¶Ï™|Ùy’åt;òS>Ç%µM¨šïL×Õï©Ò§M¯úiTq´ßW¦ÙfY8iºí÷%£S%ŇÀM"Ñé1QŒãð#vþ?>ý×ûcæïàoˆÀMžÀœm¸''¤$p{¥hû(N%­“ýˆÔÅ œFÈìg¿D”Šb(jh§Ùï+ïõ÷KjŠíö%}á6mÚÔWŸc+öRÝCƒéü ¸, \ô*xŸó3{–jήÐie 9[ ç,XÛ'µLÓ¸¤W’Àµ[_"^Ú *‘0ùi Qó›]UÖüH™J6Ÿv"pòÒú=úx0­¾Îœ9ÝT®ø‡ÀMªÀ…®k½N埡„¼—Y“{GDÊXôªGT÷®#‰«¹i •²èUq”Üϲ‘·ª›^rÛáÞ„À¥Û„ÚÀÙ¨›˜¿¾,/ò¥Í ¾ø©@麡‘§"~þt¿ivÍÃôŸ³ßW¦Û¾vòJc„*M¨Ü4œ\or=É5¤] äºÖ~±2M–Ñå8nN„kA%ÌL«Ë{éÿ¹å5o:覜ðU¼mTÜt™_òä°Ä¹‹ÀµfÄZA,޲™Cà8€‰½FŽFäoø¼'H¹€ •mSdŸ—sѰ†Û_>°|ÅÍ«ÚæÚ«ûƒ LT¯n¨"p p©Fá–j;¢ë$3£gaBÎÐb ?\ÃJž©žÎlõû´U½}¼fÜN#p žæ½ï£ `@×É9þ0l[pѪbHàŒ°üŒ.hÒÌ{BWðšksfCN—±Û ô›ó¿_!´_7=?ÒƒˆÀ!pÀÄâr£À= ]àCàèëZyLÒŠð·Èʵrwm¯$öåo­ëåü8ä„»úêM²0V‹pQ p€À!p™¸^"÷£þ"¿zïÍÏ~î0dÞöŽ›šòÌŠÀ™Žþ¹!ÉL~Ú.jCà&¹Väš™à91|äY?si=l²ÛÊ€÷WðJ`•¦á¢Fà8`:سT«í=\é} ›psùß*Þ´F(¥È€ö_BàCà&‰¹Ãµ‘Ä=ŠÀ!pƒ8¿|V>0¿ê•½*xÓ‹*1‘¶ª)Ÿ¥Åíóîs#æ{Ôc¢wºßªIä[rI€¦x½ý~ æ{„—¼åËø[ˆhBºüÔOýí¢ŸÛyh pc#p/¸_eN8n:šPKF¤*FÆ´"‚&æ-!³ÒV4âª/ÔM…;ÝVn(©8Ù}yßµn’ú=«Æ,7ã~\B%‡’D:‘#ª€Àô„Dà$‡À!pÇEÄìÌǃoø¢æm#Ià m.g¦We½rþ~=+…¾‡Ý+yR;M»À¢pKµÑµsCà%m!Q*yÍKŒ WìFàLäk¾ADà8€1¿vÎÎÝSInzúÀMÎökñZôú´-º÷ófº]¾Ò¡À•½ïSëƒæíw1AÀªæw(˜å’Ö¯k-W¿?‡Àôpí,K^8´Ä5¬¼¡«rž$­™®2§eúÆÜ¼™ž쳑$On[ºý†‘±Å€ì)ùÀïc…´d¶ÛŠró@àúÁ¸?À!p¹š³ƒ*ê)í—&S`×Ïñˆ}7Éò¦iFŠ ËøiM )í@àqýì‰Cà8 #¸œpç†]àCàCàú»†ŽFDà8 #Ì-ÕvD×ÑKЈÏPÂÜÀzÕaÕK`®£3"r.¿aÃËn ë™+gø3¯]óMo. 
•W\“knÞre³ðÆëæý½?Ýš'?û݇0Èßã¾ß8œ‰D¾~úE¿À}`½:€À@ÇQ¸;ö,׎uºüììÆÏßñÞM‰¢Áp‘¿ûÛÞqÓØ ܺT^Ñz›7­â œÍéfäoÑÈaɲ/„Fzù×ò\èÀä ÜÉYÉ ×i{¸>x„&É ÷±¸€°­:¯‚¢W̾`«*¸é¶¶iÃTC¨šuL%[!µ¼n€ÀŒáµôXÄ~KKàµt•‹†ÙT~t®ÑÀÅL·¥´¦öjÉ–â`¢®¥»k{w/מ@ิ.gê€Z…À•´Ò px=ï$'‡Àu*qÚO­â‹ù\ô‹Ó®Þ¥ÀÕµò‚È*€ÀLìõ´\;" p\ZWtѰ¼7=XºÊ¦1óT;¸ª•9³ ÊY p‹\KrM!p!ö,Õj{׊€Àd„¹Ãµ‘Ä=ŠÀ!p@Vî¥÷ç%7‡À pAª2Hu@ಅ[ªíŒ®­Ó×.™oI“ø ·_Ò† pÓxm»§¶Càz¨œKRvWf9+¿.j‡ë4¸ð8€ ¸¶–#@à¸^#`å€TimTÍõVäˆËk2_3mÆ‘÷ÅÌÉ¢¿¼M\|?ùå¶8€I@*2Èõ*pŸ¶À~êKͺ¯yä¾¥æ‰Ç?ƒ¨M€Ài_IØ› E»\’Ý’Wœ¾d¦WLbÞ’‰è•MÅ…¢&êuË—ÀÙé#‡u?*脳AÂ_ÈÑk_D3«üÜ;ÿ§æÅ—]™Ùï/BÎCãŽGì”À‰¸íÞ{CsÛ«_µ†7¼qG¦DNå[/qU'FkäÈ}ž±¦"勞¸²™^qrXòÊtŒÀͶSõ¦WtŸ4¡Bîˆ\Ðüg;|>û¹O4¯¸bö ç!Œ±À퉄À}ól}UÖäZ°R'Ódž¼Ïµ,ßõÎC‹\ƒF#_ëdÉ«aZôJhY³hKhUœ®J›ßÎlÇŸ^ •è@àƒ,ãrÂó ܧ!pïÜ¿/VÒdšÌ“eüy™kûò©êºeBÓºÝn·'¿‡l3MÍJjÕy'KžÀ5´} g®ìš=ófÚ¼[?×…À•8@àƒ Â8˜¶ÀÅ š.ûYš(ý¦Ö?ò[«ó¥)öÝï¹e5z§Ñ=Ñ“n ò¾Ý:VäBr&ë :_¶«ëÛeìwMú]'¹\Ù Ú¢•³67秊=Óyófù'‹ozœÀÍ›«Âg÷ÍE p0Q¸{jÛý”"ý œÈ™HO§ÍŽ"j²¼íg¦<=•0]F$LåI§É5g?ûëèv­ä…Næ'-#ò¦òh¿‹Èâ4õËyƒr6gÞϘ¦Õ¼FÛÜûE7¿dR’øÛZð§ûyà¼ýÍÇlgžA €À“‚ŒòŽîG͈)t¿õUù/¼ÿ¾O4O}ãë=û"4"d6YŠ iÄ+.Ч2–$Z:Me+´Ž6ßj¿¼nNåÔöëÓþ¾z¸]»‹Ùê—ƒª„\ p@çH8'oÊæîúwÿ'›¿ó…Ó=û*IFàâ–µ"'kí.³ûëVàDÔ´©Ö§iM¸Ë.{YÏ2˜U[ôG¨ p}k†xëòÉæç¾òÖNæ%ITZÚn?§}õäs}›FÎðM´À‡À´GFŸJ1ûÝË­èÛC÷#ûâé¯÷|þk¶ÐhŠÛ<jBµÓ{8~¿M¨qÍÃÒ´Ú«te² pÀNŠÙ/מ‹¾ ¿°üåæ×ÿóŸö|þ«(u’NäH>[)Ò¦J‰Ú«ÀùÛU±´rfåQ$øg›bu¿VNu`0H‰ÛIÜ8é÷ççú»D†´oX»J *V*GqÒÔ‹Àévõ½/Šþ2þHU‘3ýü°qÛh³é9Ì´œIçÑ·×¾é b@à8€N%îžÚö=ë1¸ÜWR»Dl´XR3£4CJ3ÁOŽ+óüÜqqÓ42¦‚'Û’mЬÅE í|Yßßn(°ü.í~§I¸J ,Ö¢©Iª•l|‚ØÍØ´ €À!pɸ ßòî¦ÃÙ(s•D(B7îd)‘o=]Ë+YÉ3yÞB·hrÅUL~¹ŠKÊ[µÉ{ V óÞ~tÝŠùNófy¢}€ÀÙÄ-œ¤­±géÔߨÀ½íž/ p\âÃÅ Û¡³%¯¼å 1·¦ —–éRárïL%†ª©‹Zð*:TÌwj¸ùEtb·ÀÅ py‰“º¨‡jñ‚Ü-þýÌ_ƒÚ‹À æá"Q³rHˆ¼Ú¤91«t*p1Ó ž昕j¡ª4ÚŠ%÷]ê\ø€ÀÅ÷y‘>%¡BÏÒǤ“‚Õ~?`°·kéÉ/î>ôäßýkŸâ1.ñá¢EågÍ© +yN˜Ši œm ÕH›‰Þu"p%·;¢ËâçxÒ´Ãlþ‘L«à4“Ì[ûöÝ¿ñ0B…Àµ}À¬ö5‹¸œ¸¢ÀÙe¬ÀUL_ºÓ„º`G¿2ê¸õò&CíE’lÔMò8ùy•:¸´›?:Ù'Ð1{˜Ó~fùÀÉ+ «ö!pU#ju#þþêeozŽ‹¸Ÿ$팋piÞ$oeJÖí&’4«¶[>4ä®Àɵ-ÿØÉ?biäHF×MAÒI7©¸1|à-x©K5@à:è,l3«‡°ÐU¦tšbó4I³«Ÿ!Ý&Í •Àñ›p5a¨îO“qvZd®7q“k×ïJJà›Vô?h½½7 û>.Þ¼“¶R¨i[Þ¬ÛIž”DÐäæ¨Í­¾à©†êÚåýR;"LòÙÞÈCÒH`p§uN;)¡•iýS†ÀMÆC/çš[ó\Ì€Àµ§“¶´ÞýB×¶|ŽÝ¦FøBQ=]&T‹P²/Ù0xóÿë´«…ÜÚ:uH8Ù_h›qûêDà´ËG’„ʶ{•TÆ.§3À…–·ÒfßË _ÖÕi¶^¢nG>ÛÚ‰ 
b¾À%õ…U¹±Ÿåšõ›Yí22Mþó뚪 Ù®z}ë::O£òì+Iàüfaû{j?_Ù‡~Ÿ^Ò !p0Të¥\(b'pzãÔzŠ>º|R3 0Xayé´9S…J›Z%Rê:a—ñD…"p¶O­®×é¾âN#üz³Âf[´éX»vL¤À¹6]‡Í¯Öwó¥Ë7˜.£^‹\¤€À oªÜØãF¡vÒ„·¼Ü$µI%®Wn¼IQ? ³Ý$zí»¦÷к¯øÝ%Bç¯Óé¾B§bê­×ASöw·±&Yà ^n·†'qõ~ò¬ùÌtò·7´‰ÔoÆ”›žþG¬}Þtƒp ëëMÒ YHýiz3µ{Žëw‡À¤#pzMvK’=»Ðöü~Hàìç^öeïzoò»pºÝnåuâΛŸ÷§yó ¾ ™Á¹@I¬‚[Ïæ…Ëë:Þr…˜ï[ˆù v€©8?‡öY‘›šüÇl›Wõì÷g³ÿ½úµÐ¶ýˆ›6Ó†¶©ÿE“F`0×I8™'×_–N»ˆÄuáМqÜOæCç•´ª˜Ä¼y“þ£jKb¹ée“¼·d¦×Í6ên™²W¸^·[6ßY÷¯ÑÂy.|˜v³ò"76¹ñ…:òÊ O§k'¿ÿ\¨ITƒê ®¹V«8øÛÔ$ÀÈÂ!p0§ÿD…®1M1¢‚§ƒ BR¥ÓÓ¸^öe.NÎä~bûæM³À5PÕ€-Ö[ô>k ¬ª•(SkQ«8˜åJf•´E›óÍ%ô­˜æ]].o¤nÑFã¸ðnj’qƒi8mFí$œöe³²ç§!éTàl×Ð:ÝîËï3«÷#»¾í‹7õ8m¦ÔfИõŠ^©«†ª¸"ô%/‚Wr«áQ ̳ͯé«"p€À¥ƒüW«iCÒ¬…ŠÀ ^àTf´X»J ¶;D¨"K''"¥ûŠ[§Û}ù§£Nýõõ÷¡ µ³õviWN8©Ë9I›±Ñ7·Ýo_‹ú» pý‹ü7eyCà`šÎJöóGp†– à uy°]0ìrV¦âºItº¯¸Z¨Ú=$ôû¤Q&lnÞ­S67o¤mÑ5ÉÚ óŽFŒÀå¼m«¨éúE3½húÊéþª\ø€Àô#p7üßoÝ?zAäo$yà:\/oRäm¾7'_e7ÍNÏkäÍ-SÔ¨›·íœÙ¶íOW4Ó‹^ä­ä÷Ë@àz¸èï¿"÷Ž^‰îûû*p€À!p¬¸Ï§rÿçd@àúíϦe®âúÁ +¹.€À!p€À%`G|Ù¬å2bÌ.§‰=8Cà`„*k%HíR?=Ȱ*# p—î&ß&ï[\i«|hÙ¸}pQ7<’ ÇKÄM3±kn%ifõeÏVsð+*è²2ÏO •ú-*À‡P!pñ—ªW}Á¯„Ðð*3ŒÔբ׈@r´7äœDÚÚEÖlNmZµÛC5MUúdyM¦)ë„mÊO_þ8ëïÁ²`…ÍMóë—¼å«Àø œFÚTÎD¤$ÊÄà7¡¶+y£Ï5ЧËÉ:~m™æ÷¹Cà8®«Û¦SÛÔé×uÓrý\HMpÁì×~'丑léZöFLÄKÞÛ,ê¡BÔ¡æO-:­ge04MûÜÙZ‡‡À¥óp±5F+^’\[kÔ/_p•üÚ¥±§Âè bѽŸ1’·ZRKë®rA—î࿵ \R=A].´Œˆ_wQI£>! 
pᇌVThØ f¾–®ª÷SÁ[0Ó^ÿ»†-½å–Y#•\gH³e\-S€C§b(Ò¤5-4¡ p\z–urä•ȪÖiô(pu·Í†)¯¥ÂVðGººÈß|§õY¸o¬ë{f£lé 'púY–±ëH¨6­†Nšdeš4™úM¨²­Q$ FàƒIïW4Q8¿/ZÉô{[ì#g›G+FÒCëj_9mJ@àºCó½‰Ä©t "j2Ý šˆ™ >P!SÁÓþl:*U…0.J§£Ru=‘:;º@à¸ô%®aåÍÍËi³©¦12—Œ`͇ú«ùË:1+˜(`ÕáGq¹é¸Î$Î^Ð>j~j6•³ÓTÂtàƒ¦ 5ÚÔ"£”78˜Š>pcøÐ+0x¸ôÙ²#O»Y¯—¤£ê÷†ÀÜèx}+r!L£ÀÝñÞ­s†‹üÝ8@à8€®Ù°áe·È¹ £Aþþ‡À!p0÷þ p€Àù˜Tª˜Y‹~ ®¸eÓ+nÔjÞ¤©h‰/›+¦Yà4•ÇBŸÛ)éO}Ÿ°l#fú¢+«esÇ-Ú俜€ÀÜK9Þ ^Òb .ê¼™§’Îå’Óem-Õ†ÙÎB'gJ}•ß/ç—ó¾§¢"¸øNEWýaQ÷ám¿äO@à8€¡ œ™Š¹¢‘™º¥[Ð:¥n~Á«ÜМ±/¸mTŒÀUÌô’'“Vàì4-¿•}?ïw›1ë•ÍöÊfß%¯JDÃIÜ‚ù}rZÇUÿ.H pÀ(ÎJ[É«˜P7•FŠl5†²‘¯Àå-YjxߣÑNàüõ´¾ª¾Oø ¡ß%\ÓôëM¯˜ïµH’a@à8€‘œ)._54¼Ú§%/J—sËÕMß¹$ÓR\U/ –†À-ºm.øe½Ì2=+øûêRতX=n0h«øƒ ¼æÌœ–¼‘³rÌòk.ЯnMÎôGËÅ4—& œÙN5®IÓ¯ïêïÛ|Î'Ü¢™,úýí8`X·®X¼“åªz6ïĦä¢_Ë™A%•AO¼tú|§ÈzÌïW5ßuuƒŠ¤i­ø‘ÃÀ~ëÚ\L™/@à8€Q \¡ÝtÛçÍLËë@ýl×sëØèZÁÌ[ýiæåì¶ý÷Þ´œ÷ýJq9çÌ÷,Øïúüïíï×|ç‚ÿ÷@à8€¡ \ÆŠ3v) pÙx0’“ 8@à@à¦^àÞôæBS$†ËÛÞqô$p;Dâ`dìã<8€IãÿQSú‹i3þ‘IEND®B`‚nova-13.0.0/doc/source/images/rpc/rabt.png0000664000567000056710000012764412701407773021461 0ustar jenkinsjenkins00000000000000‰PNG  IHDR°š6¯»sRGB®ÎégAMA± üa cHRMz&€„ú€èu0ê`:˜pœºQ< pHYsÊ&ó?¯ IDATx^í½ ¸U™ÿŸÿ H ›dWV‡‘5ƒ"Š‘‘AЈ#‚,FAAq (ÃOˆ"£ˆ@•%È&‚D–@v1 (*:êàŒ3sþõmî{9÷¤ªëTuuuu÷§Ÿç<÷vÕ©sN}ë­®÷SïYÆãƒ(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(0d LLÎwÚH:.ù;+Ioó¶Ù>þ¾¢Z 6€ `åm`ÿ‘g͡޳fȽœ.      
@ÖM2:q•Uæ¿ì_7ßêM/*}äÈ™Ûa§]F¿Ûvþ¾¬ °lèÜvßs¯?sìñnÿú‹éùêe–ùÏ +¯|mòlÜêÅ*@@@·ÉJ+­tÓJ+¯ü'9_z¥{þ÷/‘ÐÀ°l §6ðèâçÝYç\à·¯yÍkþV]uõï¼oÆÿˆ#ˆ#ˆ `Ø6Ðï6 ±[OÙöw«®ºêG†ï‰Î£    À€+°Újk|íÈó§~wXh?N76€ `Ø€o;î<íw&LØwÀ㜠    É„½u··ý§§À°l`Ðl@<­·þ†¿Mžê[ Ï“½§g:=©}VɤµåÇç´žòÛ „>=5*G¨C­ô`×~МÎGÀ°l@6°pÑ#n­×½î©ä¡Ê2;Ý÷,¦•„WAï̈æQ~{‘Ð'ˆȂ(ÐÇ $“6ýâÆy €WÖsİlhÐ’pZ×¼Ù4PP†[e—}Í‘ûíÿÞÿ BA„À°l`l`ò:ëý!yòo5ÜOÎPPúSñË/?áw÷?òä@¿q‡Œs<°lˆ³‹/½ÒM\e•ùýùئÕ(€(€(0Ä Œ?þ˜<äÏ8=qN:¡6€ `ƒaZëO9~‹4I¶IWƒ¶ @• ,õªWýí©ç^`™yÀ°l`(màÚæ¹ '>\å³uHË`‡ôÂ7ô´؆^š…+@–¨EÙ¨Ça;Ø606 eä4œ¦ã*°Ø@“`›t5h T©“8ဂÊ9`ÇØ6PÖŽ9öx—Wl;‡(0¨ °ÀG¯ZÔ Öe, ÀvÕ›Y7)}qÅ5ôÀ>ñô n½õ7Mú­ñ¿ëÿ*çÎ<ë¼Vùæ*ËíeYlÅwÅ¡À +À½|`Q7öW§ °,ÛUfh6´«í¦n?pÒnÿV°]½7)K€èöC‰ò±±¦Ø À°]õaØ‘ñªy«ªº+…Ý‹õýü‹.mEV-Ÿþ†¶›Ö…ØòŸ|Êl§nÆMùíiG¯v«‘ŠÕ9&)»ÏôÈr²ê¢üö×aÐõ™™ØŸXà"æÁBìdl€`Ø®ºElÀ nÕý÷#GÌtoÇÞ-ŽÑÿf›SåñS˜G]‡µÍïBlåpàŒÖ±Uw[îö3  ;-)3wL¶2ÝÜÕÛ‚ÂQ ^YIV%>l_½ìö‚òÑa¶€`»ê°9+¸ ÁRÑRm³ˆ©V¿U~ÔUÛü®W«n3›aï´–C$ °ÀÀ0?0ñÜO<éÔÖ’ ¤t òîå·Þz«[uÕUc³Î_¦ŽÙ³g»)S¦D·©h~\ô˜/ùËn©¥–ÂÞ¸çºfïx×»ûnâA|¶tûœz°Ó’Šo†P ! ¼,Ûí‡å×kcŠV±Hלl>ûØ9sæ¸ý8¨¯C÷Kbòø4GÆÔù2~Ïê}†ôê™ À6çæ£%½S`zRõŒÞUß?5°Í|0\{ýì/Ç•W€-æèT O=÷¢;þs_p‚Tƒ×iÓ¦¹›nºÉ}öÙníµ×ݾáF»‹/½’‡iMÓª¯u]å°,[ìw€-ïCT|äº)åÕ °ŸúÌ n½õ7“¶›º½{âéxövùÙ;(; ppµëœì¬Š).B¶˜£S%„̹ð7yò:£€ºÁ¸ .¸Àýò—¿M?þ¸;î¸ãÀ0šo×·ìî.z„‡i—¦U^ë:Ë`X¶Øï:á,ô.Km{ò)³[ÏYýõ³?rÄÌV4¶Îßña¬k öÖµÖr<òi€5¸cÛm[Qv¶7O¶˜£SÅCE“3 B-â*8¤>ýôÓ™é§?ý©{×»Þ5zÌøñãÝ‘3q.~ž‡* ;ÆX€-ö»ÀVæLLJR7à*?µ¬"¯8#ê™*ÈU´Vï{xñ˜cοèR7÷ªë[Û•G)Ì#_ByÒö«Líóý •©dÛÔµÙ@ÛÚâcÛÒêÕ6ÛæYç-q¾Ú§ˆ³êËj{¾PX;À°7ˆ0ÀVù;_¼,¶˜£Óɶ`óÃŽ3¾õ ƒr÷Þ{¯{æ™g¢ÒUW]å¶ÞzëÑ2&­¶šûÒigD=p;i;ÇÖg'j À°l±û€-î;d¡.À‹++íå‚jXEXP!8†¿ÇÊ'ÐU·bEfõWß}¸Ô6°å³.ÉþxZ몬¿Êûöwì=ú×ñÚî×­mJ¶Mûm›ýµèqX¯±Ö~Õé—á×¥ãunVN‘îô•u< ÀöUÄ€­ø§¾`ql1G§ì·&aòǹn¿ýöî†np¿úÕ¯J¥¯}íknÒ¤I£ «™‹5ƒqÙöq\=vP‡Î, À»ŸØ‚ŽCvö¾XAš€--b‚d¥Õw. 
jýnÇ…V–¾‡jûbVÀçGPU¦¶L+ŠÖcpmuYR ¸ë÷ À°le¿Åƒ_[ÌÑ) z›»Ùæ[¾1M SðY\ýã~øawÄG¸e–Yf´üéûì˲;CÞ¥€`Øb¿ële¾N߬EDÛ=ãAM‹Òê9¯ía £¥*3ܦˆ«¶¥E|cV Ù.Jkõ( ÎuŒ ×fYÖßjÛÁuQ¨H~€`Ç›‘Ü3+ûIà‚ØbŽNì±&YÚs¯½GÁRyôÑG»Å‹»gŸ}¶Ò´`Á·çž{¾RW2>V3k†ãØö’¯;vÐ ]X€-v?°•99} °‚:T»Éš¬›q˜'ܰz6øÝŒÃ.È1]ˆ‹¬µ3ìzlÝ¡c¢ÃÝ|¦u`§%eæŽÉV¦džj>Zûõ–ĘÄið'°êRâY‰%*ñÉQ€-æèäýx 5¹’&Yù1vÓ§Ow÷ÝwŸ{î¹çºš.¾øb·á†ŽÖ«Ž5Óq^›Ù_­ ôZO€`‹ÝÓle®R߬E'³ºõêw= `Eõ»Ç¬ÊT½²~bAm]M‹gÕkçd伕(«`Öœ<ë`‰ü–Šü°y·Vw÷°ÅvprÖ98MªdàºÙf›¹¹sçºçŸOf ®1xâ‰c–ÝÙ~ÇÝó²CÒµ€`Øb¿ële~F߬žíêÖ+x O€j݃Óf*Në‰MëBìû![÷bËãOeÛÊ¬Ž™i€íà~ ;ø‘W›Q€íàF©àP¶˜£“°š[C֞蘴zÉ¢ìXû®«sÊ[N¨ϱ^¬¬|U· ÀvhKl¤€l1G'íG{åUVi⪫®êŽ9æ÷ÄO¸^x¡QéüóÏw“'OÙ 7Ú¸'¨n<ô(s¬ °,[ìw€tò³ ÀòL)vÿT¥W/6ß´#s°l¤©de`#`;ÿ¡6€=ýôÓÝ¡‡êÖYg÷å/¹Q+ ~æ™gÜ'>ñ‰1Li¦d͘\Õˆr:·§N5`X¶Ø}ÀF: ùÙØ!™k¡ÓçTÚñ,“)•šLÉÆ¤Öý—1°ùO„næ`‹9:í"°_ýêWÝí·ßæ·Ï>û¸í¶ÛÎýèG?r¿ýío•zè¡VûF- ÕÌÉŒíܺñP/Z& À°Åîe¶2/€`K¿`Xöåñ¯J|r`‹9:1;þ|wçwºóÎ;Ïm¾ùæîŸþéŸÜ£>ê~÷»ß5*]ýõnÊ”)¯L>•Ì ¬™”‹ù;·¡*5`X¶Ø= ÀVæ*°li€­`úÓŸºË/¿<3UÑüìg?ë¾öµ¯õ¤¶Ó€leŽR°Å"{×]w¹… ºý×uk®¹¦;þøã[Ëéüþ÷¿oT:í´ÓÜjÞò?šQY3+W U”Õ¹ÅjÀ°l±û €-å>¤À°¥}¶f€=øàƒ[cÞ,éøßœUBìÖ[oívÛm·Jˬ²}EË`+{p”*€-æè”Ø{î¹§‘ýð‡?ì^ÿú×»ï|ç;îÅ_lTzòÉ'ÝÌ™3ÇŒÝÿ€ƒÜý¤41Ù˜ÌÃZég–®ÏŒb6Ýz° Ø[o½Õ}ìck¥SN9e еhíøÃÑ|:Æ\u»ûå^pÁ}Ðl¥?ô… `;(Ø,Ä6‰“µ.ÄŠÀÀÞwß}îþûïwsæÌq›l²‰;äCÜ/ùËFA¬ ú²Ë.sm´Ñ(ÈNž¼Ž›sá%8}ðÆ€m°7ß|³‹Igœq†K~£òª¼¢ùËsøá‡»7Þ8ºMEó«ME9î¸ãœzkô£Ãlm` »uÀöÁs§Šû¿W«~ï3«²è~]F§]VÀªè¬ºÿúÝŽ}@Õ~Û§®ÂÖÙÏ£íJµ‚YåÓ¶÷¼ç=­ÿ«î¶ÜÍ1[Õ]S®¶7û³ŸýÌ=øàƒîóŸÿ¼[k­µÜ©§žêþøÇ?6.|òÉn„ £ »ë[vw·-XÔ×ÎjÚ&—ÀfßÓoO–Úq§£Òö;ìè6J`16ÿÔ7oŸ¼ô‰Ï¯r‹³ívSÝ&oxct›¦$ãÙ7Þx“èüjS™cN9ýŒ¾þM`Ëù5À°eMmFràœ¼ƒ§UÙm`ÐVš–Ú&è4@4U´FÛô7̬«•¡cú) ÀæÝZÝÝÀv`™}ÝÊ+;­kX؇~Ø)R;cÆŒVDöÊ+¯tÿñÿѨôøã»~ðƒ£«·¤‡vËî4Ô©`;¿§›ü‚‚¶U}Øîú–À6ôYSõoQ¯"°lœYXEDÓÆÆZ´ÕXu/ö#ž\?âꬺ낫ÞnFI»Yv—vz¢‹Þ¼ðÉQ€íÜÊêB¬ñ®;&Ëèì¶Â n÷e–q'œpB*Àêþzì±Çœ–µÙyçÝ{ìá´VëŸþô§F%ÙÝi§ÆŒeÙÎí§j'€mÞ5©úS^µ×€m´«À°e tjràqy°mVPš°ávå 6Ìã¬À€Í3Mö·S€íÜ vÞ¼yîð$b¹ök^ã®K^0%7©{ßrËå¬"O<ñDk|ìúë¯ï>ùÉOºgŸ}ÖýùÏnTúÖ·¾5ºìÎøñã‰Ä6̹`;¿§ÄáÒ€m´ŸÀ6ìÓ­ßÇ.D`£ €-°F`Ø(Û$SØÎµ`·L–Ê9*»—Fàµ(Àþâ¿hõ¨8öØc[ «.ÈùË_“þû¿ÿ»Ú#·pÑ#}=þ­[ã^• Àv~O÷êÚQoo®Ûh7 €`»j l€µ®¾áÌÃYc`ýñ¬yc`Ãý:–1°]µõ*€íÜa 
ö€w½Ë¾ÔR­È«¥"XìâÅ‹ÝSO=å4ƒñAä¦Nê~ò“Ÿ¸ÿüÏÿìYú¯ÿú/÷?ÿó?î÷¿ÿ}ë7€íÜvº,l3¯K7®5eVs­ØF»5,ÛU`Û¬ ÒfÖ_u 'cR›uXû,¶µ›…XùäHúåúCusüjewi lW} `;w€Òº¿nÅÝs>À&c`?üá·k]ˆ}€Õ;Ï<óŒ»úê«[¿ûí·Ÿ{òÉ'k…Ø—^z©®Š¼Þ}÷Ý­¶|ãß`êT°ÝÓs¯ºÞ}ê3'Ы ¡öÝ h`íÕÌZuÕI/m¾Õ›^$ ¶t!îá:°š8ÃêCž"°‚K-¥“¶ÔU>[n'\VÇ…Çj)•«”¶¾l Ù­2ØÞ>8†`å¬vÚ6m§yd«±E`³ï\~y·ó[´fög!¶IœÚì¯~õ+÷ÜsϹ³Î:Ë­½öڭ߀_|Ñ .»•þú׿º¿ýíoîÿþïÿœ úG?ú‘ÓÄTlg€Ô Ü/€íìú^×[ƒØœ1ðçXä>`+óC&&%å.[R°6•© ið5ت mT’làœ6‰S·À±)å°•Ü¥ v€¼n¿ãÎAlÀª»ïW_Ý=<…½7ù+ˆÕ¤N›'ëªrÀ­e²´ŒN€}þùçÝÓO?í>ñ‰O¸ 7ÜÐ]vÙeN Yu2pUwa½D»ùæ›Ø>‰H°ý°·/¼¿˜ú[Äbóek#li"Û§îÀ¾­)ïÛß±·óÛ¨6ù0luøÇ…åêø^ÙpÝõ°•ù!leRRP] °lÀØ´:º°3ßY—ñ÷s=ì+ÎnYˆm°çwžÛf…Ƭû±Cu;­´’KúY¹£˜ýtÒ¸,À b-ýÛ¿ý››‹º2¶3hM9mÄ $}˜| õWy Nmâ'Ál8†V)ˆ´zôÖ8[A®_¯žV¶Õmmñ!Ö@Tu¨m:^ÿûõ§AºÎS`kõ ‚ëÉ^ÕÀVæÅ°•IIA( 6W À–Š `/Hœ7î[‰‘ÉŽªH*«ÊòªhS#ËXn¹ån9ò£G·Òõî¬s.p›m¾…ûáo‹vÞÚ¬ºö¾eÊ·Úßý;á„Fg!ž3gŽÛ"[EaÕ•ø(ýíoë¤õYMy“M6q?øÁZÕ¬$pÕGãg5³± ºöâK¯tÇî Ñ׫WNr]õí f î~‚Ì0) £žKD[ ê÷Ô®yXÎ}/n;¾5« ±Êñ£±*?Œ¦Zû-Úk­6 ‡›vNuÙkê`ó\üèýl´Td¬AI¹“аli€Ý5y°®õòÀÿ›+J*«ÊòªjWãÊYj©¥~³Á†·&2"½¬Á¤ÕVsÛï°s4åìÅ_ì–IÖ…õVãKwK^Þ\1…=þ“Ÿ, °›•î¸ã·ë®»º·½ímîç?ÿùˆµˆ«Àö׿þµÓR=še¸ŸVÑr9kO^§ ºnMp„›Ð¶s€•M CPÕõµ®»Õ´hªQõÐ"´El# `³ 6Üž5‹²Ñaí楃ØÊP€­LJ ª@¶iÝn©=]êB<+1|%>9 Ð…øg÷ÑÅÏ·@èÄ“N-Cy{Ï=÷¸Cÿå_Ü\0fØË/¿¼…ÕÙM_÷ºB«±®~z÷ž{º+®¸bÌ6íÿîw¿Ûš­ø¸ãŽku5VWa¥?ÿùÏ-xÕò<ý°O=÷b+B¾ë[v]ƒV À.9£ªò"ÀDÞW44¸Sd3Œ¤À*â*pô“?c°UWÜ¢ÝqØÎ^Bµg¶2W €­LJ ª@(€šT”ªù$aªi·$ŽÉ­k­U*ª7H€7èçÀVsÏ”-€}ÙQºmÁ"·õ”mÇtû‹u‚bV“"iÖàû5ÆÔÖštõM~ïÜÞ Èj=guV4tñâÅ­É”—Ï<óL 4µ¬&qx†iÇÍ6s›'ã_Óö© M 5mÚ´VwaëvÜ/«kóþ|ÈMHºZ°†‰À¾âðí ~ü褭—êwÇínkÝ}õ×ïNó»’°êvœÖ¥¹Š.ÄYÑæ˜¶B¶¬±ÄqleRRP D¬ê_Ae­"`]ɃϘ†`ŒŠÊ õ¿º‡YM¾²Ûn»®¬º+¯¢¹ÚïGWCÈU×aåñ»«.‹[ý>Àª £<þ~†>øàÑ2T^¸OÛçíÊ(×EóÒ…¸¾;9­&¶sÇ¿S€Õ=£et>ºì²îë „¸Ï>£ûøã»6Þ¸§>¸*Ÿ´ÉšQ£û™=÷Üs[?YºóÎ;[ëo³ÿûeb9Ä÷?òdkl¬º}ûÝŒØÎí¸Ÿ€£ÚÚ«n¸lñ{€­Ì`+“’‚*P€Àü {>¨…QƒS'€µcüq«!À†ß•Ê/86¶:Už"Ú/@UÒwýõÛeଲ­ «îp¢'ƒX+SëŸ[§0Zæx¶‚[´ƒ"ØÎº*ö±Çs‡Î˜áþ5ÕÓ“ôÁþg÷ù$*{T³!¼Î>餄þüç?w³Ž?¾²Ç.·Ü˜|ŠÂžsÎ9£°j+XMKZºGmx衇ZËüh¦sA´ŽÓo×Í7ßì~ô£¹ë®»®5ÑÔ7¾ñÑî¼ =<&Ña °þ´¶'bì¡“¯É<¬|P  À ê, ò|P,°á,Ä*Ë" y«È«¢ªÙpb§´1°á6u-¶®Ï6£²Ej­<‹À–ϪŽ`{{“°;\UìwÜÑŠ¾¾˜Ü÷–Yf̤M‚XE^ ^ýHª@v³¤{pâ5ŒB¬Ea-Ÿ¬¢ºi €íÜ€4ÄúÓØÞú!ÔŽ]R `Õm`fU èå2:‚>E*˜iKÚt °6V6`ˆê:l 
ê±X‹®úKÙÿ~6ì]˜Æ–ÀVu×”+€íÜáŠXE/ï½÷^§ ›áüÙÏ~æ|ðA§ett¯(ú©ñ®ï¿+{r0ÆUðzú¿˜=UDuû7¾q À x…ýæ7¿Ù:Fp¬ß¶Å‹§&¶s;^ÐèO`Ëù… W `§UÙm ×ÛnýÖxÌš…ØŸ *` •×À:kâ°]íº ° ¿åjl۹Õ°ÇÌœéVL"ªÿïÿý¿\€7ož›œ¬õv¼fEOµ=XEc7J¢¹gΞÝ:îöÛooý†T³R?v!Ö2HZËW³Û2¢á0Ú[£ÓAU(PŸ¬M´q¤¹—i4)º©±d6{±º+ªªïykù”Weøãgc Ú–ÿñËQY~Ty€»OO´–áòÉQ€íÜaÍØóÎ;ϽaÍ5ÝQãÇ»½“t '䬢´®²ÊèÄLмžö…/¸'Ÿ|²m2€Õ„NZSVßüãsÛm·µ~CÔÝ8+õ ÀjYO:Õiæaý¦*1 qçv<ŒàÂ9c7²W R©ÉY—wfCZ÷\ûN˜dPk@)Ÿœ,ù<€µ®Ã~]þ,ÄáMiPë×oåø]‘`óì•ý# °;p!Àª»ð{öØÃm“táÕXÔÖøÕd’¥<€U·âØ|óÑ®Àþ’:Y]m»€u¿¥—nÁïgœ±D7a›DNÚ.5u§ko˜ç9ì·Á†§® ÀvnÇÀ« °¸D(0¼ À¶ë>ìGbÙT”Óïæëï·r4a’à2œˆÉ¢°iÇøeêØ°MŠÈÆlS9ªWíd‡Çh_Z»bǯV‘¯Kc`‡÷N,xælçNk°ëMšäN_j©±Ý€#öÀ½ör_K"µÖ}XÑTÍ0¬q²íºþjß¾{îé>òþ÷»E‹¥æýÉO~ÒŠÀ>úè£mSSÖ_"gÒ¤ÕR¡Õ¢¯D`;·áaÎÛ![Ði ; ˜°U@e<2f=Úvz°½ý%`;wàB€=à]ï* °Ÿ8ôP÷É$bkðzH2v~°Zçõ3Ççžx≎’ÆÖ†½6ÒîË^¬ÀõÈ™Ç䫯üÿr7j?i}\à¬ó{ ‡CC"°½õC¨z©›D;×â°½¼mÇ`;wÐB€½æškÜÚ €¾ä-ksHY=ô_þ%u ì¿'-½s„Qx=)9vêV[µfVäM_÷º¶cWÛkµ}·ÜrK `¨y© ëÀjmYsÝzʶQ0KâWìXθðÕù½†Ã¡!Û[?„ÚQ — °l)€`{yÛ°U8¨i“8…QX­íºo¤û¿ímnáÂ…£Ëè|÷»ßu;­´Ò(ì^œŒcÝ{ç[Kì¬@­º d¿þõ¯w±°*7/5`ýërÿ#O¶fÞs¯wºe–ÏØdÒ™vv ÀtUñÛE/Û [™21)iNe¥Q Ô  À°5ÜhUWA¶sg7 `¯½öÚ%¢°êüïI$vÓ5ÖpW_}µ»á†Ü”d¼¬ UûÔexû6r<ð@k¢¥ì·_kMX‹ÂæMÀÔnÿÍ7ßÜŠÀj¢¨¼Ô4€õ쬱±D`‰Àcÿ– «†ležÅºII‹++‚P X€­áF«º ¶s§/k÷NŸ>:ök ˆî›t ¬>œ¤Í“èêÖI×`›¥8yâ»ÍV[ÍÝ}÷Ý£³_yå•n‹‘q±Û®¸¢»ä’KÚÎ °‚Ó˜¤‰âîºë.7þüÖÒ[`Í®|Ýu×µàûßøÆh$TÝ}{åøÎ½êúÖìÄûpPÏÚЫsϪ—lç÷tÓ®)íéî5`+ó,ØÊ¤¤ º`X¶®»­ÂzØÎ£,€ìÙXX-£³G²´ŽºîÕ¯vêR,UäUÿoóÚ×¶à0œ%øï×[¯™½"I»o·]î,ÂY³ ßtÓM­¬f)ŽIý°8öKÚ/Ûù=] —†leN[™”T—,Û$€‘þ̺Œ¿Ÿë`;wÔ²VÌ÷¾ûÝ­(¬­«m9ø`7yùåÝw(ÕDOïLÖn=çk_K½fÍšå4”@WÑØ+®¸¢Ô}öãÿ¸°÷Þ{oT`;·‹^AÛ¿×®W63ìõ°•y1leRRP ˆMgç•À°¥ë.Mâ4+1X%>9 °;»íVë,+ ûOÉr8'œpÂè,ÄšTéÉdM«%púéd­g­õ𖼓’è­@7™ÃýcÅÍÊÛnû7ÞØXilê‡.ÄÃîx§?Ûù=] —†le®[™”T fåN*À°lw[ÝE°;j°Ÿþô§Ýí·ßÞ7zçw¶ÆjÆá÷ÙÇÿÿþ¿1«‰”’úÀZÑvKÛì³ûî-xÄ*rû“Ÿü$w)œ°<_ÀjŒml`;·^€۟׭¶BÌB\±ÏÀV,(Åu¤Ëú®Å×wÕŒlG7gǰ;»;íò£í¸ãŽnîܹcV³ /“LâäG`°yËÙØþo}ë[n§‘Éœ4+ñû÷Ý7úX+CmÀ ¨cÛ¹môØþ¼n½°ê`;v"ÆÐ-€UŒ4øÈ~ªüDìÔ¤Ædž‘j>7'é–$ÚpëZk•ŠêÅÂùº¦±Ú°ÕÜ3eK`;wvµNéÞÓ÷…ØW%ù¾÷½ÏÝvÛm-X¼çž{Ü'fÎt\pÁhâ"ÛZ6Y+V3+ ;)鎬¯ÀoÝi§ÖÌÀYPl+(-’š> 18“8aÿ† »†t!.ëA,q\7v檫Nzió­Þô"i°5XúÕ¯þKe–ørAQ«Œã«ª€í=XÆh§ùتîšrå°Õ9ZÒe“7¼qd'$3ŸtÒI-€Uwau¾ï¾ûZËØä­ÅîÿØ‘Gºc“Ù‹5™ÓQ À{ôÑcʘºÉ&nƒUWmÕ•Vöõ×_ߊÀ 
|‹$¶:û¨ ˆÀöß5«Ë6¨'Ý6ØrþCÊQÝØYº>Øîàÿ®%ö$ÿ©ÊO4ÀVV) ÀvhL³’ã•øä(ÀVÿPøÒig¸‰IÄtäÇØ½á opçŸþ€}à\‘¤uX'ÌF¬µd7œ8Ñ]tÑE£e`·L"¿Ç~üã©åÀÞqÇ®hjò:°85D`±êÆMS¶2W €ý=÷cÙߨŸ¬¸¢»ûˆ#H¬º‰ß’8äÉK‹*€|°ÝyØ<ºøywÈaGŒB¬~˜÷Þ{o§ål-“vÞb wÝÈÚ±êN¼cÉÇuT«¬í6Þ¸µ,ÏÉ6M$–¯ÙÕ¾¢ €íŽ”}ÈçG¶¿®WÞõd÷¯'é0äg`ØÒÑòAØ©‚Òðhìqù¿Ñ9ØH©Øî:G·-XävÚe×Q]6éþûÑ~´5U݉‹¤Ù³g»m’%yîX‡=2ù¾ÛÖ[»7Nž¬—@N“<½ïÿq‰r¯»îºÀ FË$E€5“±Ê¹úê«[ãm-¼pÑ#¥X8çÕÛ[½¦Øé`k ÀF: ùÙX¶´?0«{$×ÙIº™4\‘8ß›äÿ6Fç`#¥`ëqÌ.¾ôJ·Îº¯…¾I“&9é¢E‹ %óƤ×Â;“åt dç.½´›¤ù#“d_‰Êj’§d:x÷Öm¶SÞµ×^ÛØyóæ•Jl}vkOYùØìkõáÃŽt;î´sTÚ~‡ÝvÛMÊ«2‹æ/sÌÔ7oï¦l³]t›ŠæW›ÊóË®,í´vjïUÀF: ùÙX¶ôoÁÀD`óïr Û© °õƒ‰–Ý9ð}3>ö½ï}o+2Zb•×@vß$êª.ÅX¥Í“íw¿ûÝÑò `¢e]ˆë·•2Î9›}ä ÅÚÿg$“±%“¥u+¿Ê-ZÇá‡î6NƻǶ©h~•[ô˜ãŽ;ÎíÀA¥Ö26^õ1l§žÄèñ“ÿ’u•~˜…xH €­ô¾¡°>U€lä…`{%×Þ0Ïm±å›FAv¹å–sG'Ëãh)œ¢i«õÖk5€U÷bu5¶r4nUXM"U6°½³•";Û`]äGcÅWM–¦Šýͯr‹£!S¦L‰mRk˜B‘ü*¸è1sæÌ`#Ÿ·d+¥ À–2œä Iš“w°º ø @`#¯Û{(9ëœ ÜÊ+¯2 ²ë¯¿¾;ûì³ A¬f!öV «q²§vZ«Øo¼Ñ•Mlïm%dXÖ·(Œ°‘N²Õ© À–µ·(€–”^u·² æ8`#m€m”h|¬Àcéd2¦äÒµÒÎ;ï쮺ê*w÷Ýwç¦m7Úh Àž—ÌF¼Á„ îÌd¬¬ŽW9ŠÀ B;I6 ñ¾ðf!n¨SÀ°l±ßuºG: ½ÉÀ6ôYóBµHžß§J+`«T“²jQ`zR‹ —OŽl1G§Èq™¼Z’f·í5 ‡¯J@tÆŒ­µ[.\˜™ `Õux›d<컦MsZûÕŽ1€½þúë]'é²Ë.s{î¹ç+³)¯¶š|—9WŽéŽí°,[ìÞ`í*°lYš˜»D'زòr ôP¶˜£Stͽêz·ÉÞ8 Š’hê¿øÅÖú±iI»G½}ÚkºóÎ;o‰Cï{xñ(ܦ=ß´_Û-Ÿ}÷ó¦¬åï0wû9 ÀV|çP ²l1G§Û?àEËW·Þ}÷;`ÌøØýöÛ¯µTŽ–Þ±¤åo° ë§9sæ¸vÚiôøñÉ8×#gã4î¶h[Èß [`X¶Ø½ÀVæå°#ãUÛ¬"³ên¬<ÖíX]‚í*ðÕ¾œ1¦K²ŸGðv!¶r­L•ÑOÏe¶²û‚P`ð`‹9:M}¨›ï[¾iD—[n9wÔQG¹Ûn»­•.½ôÒÀj,¬Ò÷¾÷='Ðõǹîú–ÝYgf{`X¶Øï:[™¯Àæ¬àT€éGO T}€Ìi»m ó„«¥÷tŒ¹Ú¦ú,ií`+»)¨˜‘´}f·¿¶¦°Å¦? 
Î:ç‚d ë*£ ûú׿ÞqÆ£«Hì'?ùI·ZÒExäaá6Ühcwñ¥WöÕƒ®éס—í`X¶Øï:[™ËÀæ¬àÕSë&¬íŸ¹þsD]‚ýˆk°*³ß"®ás² ;-)svžu+ÓÍy™Ø5)0+©G‰OŽl1G§—`[·ºÿªðÒK/ýÊdL[oí&Mšä6Ûl³Ñm&Ll£-—|ýa+, À»WØÊ\%6`ÓÆÆ `m{Àêù«<'Ÿ2»õÌVð À.aÇ fåN*ÀVvÿSP °‘"°Å~‚¸…‹q{¼m¯1ãcGÞpº÷àC,‹3Ý…Óì€`Øb¿ël¤ÃŸ €í` NÛ¬º °ù†8’€–ŠŒMQ€¼l1G§ŸÖÚªÞë×[¿²[OÙÖÝ8oQ×…W]s€`‹ý®°‘C~66`%}û;öó >ÿ¢K[Ïg›98mœ¬­ûjy¬MàÔ>еyä{¾•ÅçˆØ­’ò®ˆ/“œ(ÐUØHyØbŽN??hûp\k€`‹Ýël¤ÃŸ €ÍXƒUY„ LÃq±XÁœ‹Õ1úßß`µ|Ž•ã—ÛOËéô `óÍš(PŸl¤Öl1GD¯¦Û À°Å~§ØH‡!?;°Të>3¤¶„NÚr;ÜZ¾pâ'©öù€ªÞV~¹þŒÄMn©}lþ FŽÁW€¼Æl1G§´q¸¯) À°Å~ØH‡!?[§ûõ¤Š·ÕÌÒõ¦çZÖØA׀ͿÁÈ1ø °‘×€-æè ú„óë{`X¶Ø} ÀF: ùÙ:ØñIŽèC,;Às6ø>›ƒ‘cð`#¯1[ÌÑðЫé6À°l±ß)6ÒaÈÏÖ)Àª†b‡`Õõ8œè©éÏ*ÚÀæß`ä|ØÈk Àstªø‘¦ 4ï¦ °,[ì7€tò³U°!ÄÀvóùÐ䲨üŒƒ¯yØbŽN“üi×’etÚÛ€¤ØÏ­·ÞêV]uÕØì®h~\ô˜Ù³g»)S¦D·©h~\ô˜9sæ¸ý8¨¯Ç(°‘C~¶ªÖ‡ØoÛØa}–°ù79_6ò°@ϰ>,õ¼åì­=y·ýŽ;tzÃ7+|~l>ûØ/ùËn5Ö,|-šdŸº_ÛXœ¤›Iipk¢ß‹j8/)ë¯ïØûÝ}ý‚dPŸ5UŸ鸓m `#//;øûÄÓ/´Ö›«úaÓIyMkO'çÒ´c]ü¼Ór ƒžæ\ø½ÂçÀV°ŠÀîú–Ý _‹&Ùç~ÿ| ö[IšFj”º&OñÑ£õüjÚoþ ´§W«n‚>(ÐØÈ«À>À6mJþpöAyørÍ¿—Øî,]ˆ#¸d‹U`b’Q³·|9º7ÿ·µŠç_¯Vo­Ôõ‚ 4A6ò*°Íz0Ü÷ðâÖìƒú[ÅAe°ÍºÆU]WÊ)~]X6í¾a l¤ÃPO¶M’jæ'iúHu,Ë蔵¼Ésò`ób è‡O†Ë'G¶¸ÜMpèFt€mÖ5î¦ýP6“8ùˆZt<+“8á2ôX­ÿ*xÄÚ€`ËšåÔäÀãò`ób? 4Pºöü‹.u8ÃݾðþV„q»©Û·"ƒ¡Ã­õÏ´OIùÃH¤ŽU:Vyì»Eí¸4Gþ#GÌ-[õ„y/Ú¬^Õ–¡1]vnú{æYç)+OíW¹‰™Œ¶SÛÔ;Vm ÏQI+Ó-lWY€Uv>víL›´óó¯…òùzøÇ¤k¿µ;íZK?_Oå÷¯®§ÎÕtÓù‡šûvaúè¯éªý²'k‡êó÷„ƒÿº·b?Eg.šŸYˆ›c[D`ãœìŸ´dbКZVÏ=#Ö[ƒÑ¤g Ï‚î߯ú}î…%°½P:Q CêXƒ(=ô0PÒ•ÿ`P„ Ä"zøog0nÓ÷ppƒ]Á“Ê·vXÙUiPíC›Ž4hÛdD*ÓÎGÛìüüòò4Ð9ûº(¿àÊïlí–N_Ú¦ï:^í Ï¿ ÀšF*Oç£òU®½P°2í» i¥¶H»žþ~ÓÚÚmeù×Zyt ³êW=>ší¨N@ €ýënZùúIwåQ9a8.Ýw\êÒ€ÍÇ÷¢Q[–Ñéð!Ìáy Ô°þsKÏ%óE˜x°ûÏ6ïV`?  
À¨u¬ÿ4b²œWåõ¡Iù zBPòAØàËòÔùõ„y  Ã6i»~\Ã( å XÛÃó3Pk§AVâ,°2@óÛ+€S[Cø- :ŸæÃmöR@šhŸ¯Ú¾D0صsô#Åjsx­Ãöj¿aµ·ä~¾0:­c|½­ÍþK‰°!ŒѼÝwr:Ñ€`Óì‡l£¢ÚÖ^ŠÆüÆèy§gKšÏ gõæ2nçç„p¬2ÃÞ`*3ì‘fu[[ü:Ò¶¥íO›oÃÊÕ¾:Á€mô}HãP Y Ô °þhVdÐ~ð-ªéÿ€†•|¾aäTõY²Hg°†›vQÌ,è ·§•nk°i°Ÿšú E#°ëÒÈ×,ŒìÚÛê4xÏj—Ú–uÍôðJ{›=¤t¡ãGdÓW럧¾·{±ãܧ™ À°l³|ˆÖÔ°öÜK†ÚŒ=#ì%ªþú¾„¾Û%늬m>”ÚK[+Ñš6Ä*|¶Y4ëeõXï%¿Þ°ý~»Ã €ùöâßzÕñL`#î²  ¼¬@“ÖÿA·ëjʬýXûbÿÇþ(À`†+ Âñ¡ººFáC¿S€Õ›_ÿ!kcƒý¨pÚÛòp›9ÖÅ8Œ”‡¶‘6F:ÖVÈ×LxÕu`X¶«ÞÏĤôÜY_ ¶ €µæy/µ­×“Á¨þ†={ìÙeÏ{©>¿üžc~$4`õ{fÉÔƒV+×êõŸgaï(Ëã?휬gÝzÆ°ï ²£À0+Ð$€µ·–ö㘩+°Ut Më†líÌê^vSîV6œ`BBédŸ¢Øðø¬‡•½aNÓ7-*Û×:«Ì"kç!è6@ »Kµkg·Ò”Û;À`X¶«ϺIé‹+®¡€yNÚ3%|akÏz{¾Ä¼\ {ùv °aO¡¬zýçfØ+Iõ†¾W¯ž‹lÅwÅ¡À +Ð$€ 4UD`íÍjø61„ýÈg½q´—ÿ YD2|›öV6`}èŠénN®¤cl›ó`NëbŽ ÇÏ$Û›_¤­ þ[jÿ­px]C€ Ä6ζÀú]¡u|ÚX¤°vŽy³Q¡½ƒÐN´`X¶«M߬ùí"°±Ã…bVeY¤6œ…¿[kí·«ÿ×Þ·ëAÕÉïoÞ±lWïM GÁR IkãRíÇߺv:Ö‡:+;Ïi?ìáÄEþnØ>?ÊiÝp´Íïúš¡ipéo]ÚAh8žEmð5+°áùXöpß[„ÚŸü!l—= cÆÀ¦u'/3Ö«”u]®ÊuŽùÉ{ ³¿ZP`X¶«>LßlÌØ<€õŸz®„QÕp›öë»=ƒ,·6ob&¶«÷…£@[f&{•øä(PÀ*¢þh¦m³5^íA£ïat,íÇ8m›ÊÔÖf ™¼uåW:>+R«íÆ´²b5P=*ǯ#íX¿ýª/+‚œwl;HòËõ#ÖYš‡×Æ–ö ßjÇ\3ÕçkĮ̂ïa¹þ6ëÖå·É¢ÅáWµGÛtLÞ( ²Z¨¬SO€`»êõ-ÀZWÚ4ÈÔsÞ#ip®8 Ÿß~¯¤´ãíåx»žU±]ˆÛ½¨Wù°Ó]gçYü´$ÃÍy™Ø5)0+©G‰OC¶N'•ºú0ª¼vyöÖ"ËuONQåyQVyû`X¶«nQ_¬=,ªgˆÁ©½tµïö¢Zõ».ñ–Uï Úû±îÌö×ÀÔ/3«÷VZ½>°ZûÕ^V£mþ‹ÛØɵÉT €íêo…T€¬®,Nwy§»Jílr$ÿèÿo3VYg¯ÊJ›É¸W“SôJê{ß°,é”ËÖ×k½¬ì9iCÂÙ÷„þГp¿ŽŸ¥á6¿ ÁlØóÇ@Sõ¨|Á¬_¦¾‡óT蘴zÃGaûÃcTn/fâ×ïs9³Ë< €­XPŠë¾l¤Æl3À².аÅÍíÍkøw»Ïúç\—ÎÔÓÌû €`ØHç \¶¾X~»{÷ÛÝ+€Ý*©øŠröÎQ(P¹l¤¤lï~¬yP¢=6P¯ °,é”ËÀ&cHù]/§A¯¶œ©s tG6RW¶Ü-(tÃúÏX€tÊe`ØÒÀ–»é8j°`#¯'ÛN8àÄ5ÃÊÙ À°‘ÎA¹l, À–»w8 Z °‘†À–s„tÃúÏX€tÊe`X¶Ü½ÃQ(À±¶ÿœpÀ‰k† ”³€`‹x…ó°,[ø¶áxE"°‘ÖÀ¶w„5-½¿¾àÒ.oÁtôŒ×­:× €`ØHç \6€`ËÝ;…D`‹Ø›íkQ­7ˆKË”…!­I®=—U–i—~eëâ¸Î YvÊ”)ùÔêå˜={¶+rÌœ9sÜþÔ×÷ù1ǯõ'õâ›Og °léß½`ìÌü–8zF²eNÅeR tU"°‘ò°Ù …¼C¼Å\/à¡Ûí“&E8—~D°Ñ^Ü iuN˜0QRtZj©WEçU¹Eó—=¦È9Ô‘÷ÃŽ,í´6Á6ØH‡!?ÛÄ$ËÍù٠嘥ëÓ;¡ Ý}–°…î 2¨lä…`³mT7Xÿ¡¥mJM}5­}8£ÑÀßÔëH»ºë(¡/úú6ÀF: ½ÉÀIT·W«n‚>(ÐØÈ«Àf;rŠª¬9:únÑ ýoÑYAc8ÖÓºÏ>ñô ­.ÈÊ{ÃMw´à׎ #“áþ´H¯êñÛä;aYí³<‚I«[Ã6[U¯å >-Âë—«óó»\§E±àÀšdl¤ÃЛl,[Öòf$æv!žÖ…neÌq(ÀFÚ›îH &«úkŽ–¦ Mÿ¨ ò®Ƃ:‹ÔZYÖVß ú4ÎVåÛx[F}Ç›Z9Y#eµOå[dÖÚ,°ÔùùmЪòUVZž0ÂkçqþE—¶ÎAÇúí³63Ž`i°Ðì‘l¤“Ðûl,[Ö 
زÊq\ϘžÔ¬Ä'G6`Ó& Jë¢+8 †ÑZƒCƒ8A^15ØÓß°¾´I| NsÂÓÚ§rÔ¾"Mõ»C«þ¼.Óaùyc\Ó^À6€ 4ɈÀ6ÚU`ز:59ðмƒ‰Àæ)Ä~h lº#™”~$3t¾vŠFj{žYçc]‹ íoÎsúÒ6ë\ÂíiÝŠÃ<~ù1p“'ïœØì`Ø@7m€m sòJ“X¶« ÀvU^ Gî(À¦;†YKÀdM’äõ ljƬuMV~?qÜXý"öB^ìxɰÝñ/**uÖö;îܺF¤ÁÖ@/ì+²™BŰ…ä"3 4C6~ l»¬ö)‚i ëw'ŽéBœ6޶Œc™°êʬ¶Ùx[+·Ó.Ä6n·Ý29ŒÊØ1Ç`7uÚÛ $£[%Ûg‘†Bƒ™½°D¶ªS' t¨? ±*MÍ.œ„þìÄþäOz³èÏjNâdãTý5VU¾?nÖ&vj·kVûT·€ÕÚl4ùåÛ,ÆV¾ÀTíö5dƒ`¿\ëJ­ógb@¤NÑ Ý›uÖÙouå£ï·ó©¢½l‡Ž‡£@+ÀöñÅ£éëÛ`ÃIlIœ4P5õNΕE! .³–ѱH©¿Ô_¿ÕÝ`³Ú.Ñ“6ù’¶ÙìɱËè¨>i ýïGŸýÙ˜«p4) ngiã¸ÍfôÒ(œ.ö“ž~ÿ°•ù@“’r—-©¬6 B `+‘"P nØl h×­7Œ¾ÊL›ØX‹|äE@Tv§ËΤµOmiW¶ïü­_å¦Õiݪclò¨Ø@V¿ዯNÎ)L¸ª^%;nÚÛ‹.BH•å_ŸÒ šU—’-Ç•vM `m¼»µWÇøùóÚjZ©>•) Ú‚o+SíÒÿeÇëÖi“le®[™”TÓ“2fç•Àæ)Äþ:˜•T¦Ä€eFÒ!Y½N‡—ºš ýwQ>h¦Íˆmk×3Œš†ûiy=2Òº[Ýþ Éúßïõ`pÎDöŒǪ ¨ý‰ÓÚÙfZL•éÏ$®ïíÚêÖIâ¡u=Ø¡½ô<ñI«r'`y톶Qlä¥'ÛLG@âº`ÅmÀº­êo%µH¥Ô’"‡@릂™Ž » ç]—4€ÍëoÏŸvÃõáÐ@=¯M~”WçëkÖÓV‹ÀJŸ4x`#Àƒ— €¼kÚÏgÀöóÕÒ¶°‘€-î$Ç:‹äC[l ^°®±ú޽´.»>ÀÚÿa]¬AZltÓâfuÉ í!`ý±´MX‡ô±nÚ:¬.ù¼lìà]Ó~>£(€Ý*9Ã+úù,iû@)ÀF^N¶^ Aol {6`à$U¤Ñ»&±òÙR5áøÓ˜k—ÁTy!Ôª,´³"°! úßÓºEÇt!ni«_‡u…ö#Õläxð²°ƒwMûùŒ¢¶ŸO¶žlä5`»çLÇ8»äAl :H›ÄÉÆvÔ†Õp^[þ)-Škc`ýñ¡áõKå4 Ûc+еò¨q¿Íþ9Z”Ø—+h7p×__$‡ÛbÚÎŒŽNÓ®ÉvÎ$N‘C~66_#rÔ§[ŸÖÔT‘l¤luÎs“4ÚÆu#a4Ôïòj]ŠÓ–ÎQ9ÇÔòf!–Æ‚F›V·¿ÝŸÝXÇùkcy•?ƒž£ãµú´ß€7m¦c•Ù.«¶¨ÎvmµÈ¯_§¯•ߦ4}›f‹l¤ÃŸ €Í׈õ)ÀÖ§55U¤)$ Ø4Í™¤=ØdY˜…³ç†“9 ´e´%`ÒêÊêFk ™×>«#`}W½6ÆÖ/Ç„Z¤7míèðU†•›]ËH[(m[»¶¶«ÓÎÉ–*J[c6O¿º÷°‘C~66_#rÔ§[ŸÖÔT‘l¤,°P·³H}Ø\Óm \º¦Žöf­£îa¯€tò³°ù‘£>Øú´¦¦Š`#…`‰aw^9îßl)º#‡lïì€tò³°ù‘£>Øú´¦¦Š`#…`{ç4Nh 4ÏÔ9­ën×*­+oõ{l¤ÃŸ €Í׈õ)ÀÖ§55U¤)$Û<zØIΛİ:m€tò³ML²ÜœŸ(P‹l-2SI• °‘j°8Šu:ŠÔ…½aØ@Ól€tȆý¥@ÀªÛ€  4A6ò*°8“Ms&i6‰ `uÚé0 úK(€–œÝúëÂrkØÈ« Àâ(Öé(Rö† `M³6Òa  ô—l]/Z›(0}$!FŽ,ÎdÓœIÚƒMbØ@6Àâ*¡À@*059«CóÎŒlžBìG*Àâ(Öé(Rö† `M³¶Î MBš`kšjP JXœÉ¦9“´›Ä°:m€­Ò« ,è/Øþº^´Z °8Šu:ŠÔ…½aØ@Ól€Å!BáU€ÞkÏ™÷±,ÎdÓœIÚƒMbØ@6Àö±CÓQ CØäpè…,ŽbŽ"uaoÃfs¯ºÞí¹×Þîýø»ÿ‘'ݰ?œ/[™÷11)iNe¥Q Ô [ƒÈTU+Àýà`ÒFì´ßlàÆy Zàšüf¦ &:ÁÒSϽÈþ¾96 ÀVæY¬›”´¸²Ò(jP€­Adª@ª`›ãDõ›ƒN{±l`IX¸è7}Ÿ}Ç€ë{ìá6Ø`ƒÑm“V[ÍuÎ@lC €­Ì³`+“’‚êR€­KiêA ` À:·uÞƀë›ßüf÷½ï}Ïýò—¿l¥Ïþón„ £y¶Þf[wí óÙƒ,[™SÀV&%Õ¥[—ÒÔ£ÀÌ$“ŸØÎWœ4Ć׮‡v„?~ü(˜nºé¦î /tO?ýôéÁtÿò/ÿ2t±e|lïl€­ÌU`+“’‚*P`zRÆì¼rØ<…Ø_§³’Ê”ø°D7zÝîz瘣}÷´tñó­ñ¬ךü̶’º 
ŸsÎ9î™gžÉM·Ür‹û‡ø‡ÑcÀŒíÞõjw/°•¹JleRRP ÌHÊÈT €­@iЍL6RJ"°½q˜ tÇúÓ4ÓñŸûÂp]{íµÝìٳݯ~õ«ÂéÛßþöãc¿tÚ¼x«ñÅé0äg`ó5"G} °õiMM)ÀF Àö§ üpݰúmàÄ“Nuš€É"®“&Mr'œp‚[¼x±{öÙg;J*GåYÙ›m¾¥Ó<\çî_g6ÒaÈÏÀækDŽúˆØ­’ö\Q_›¨ Ú*ÀFÛ}ç±þ¶¯œy¶[{ò:Þr8ÜÑGíž|òI÷ÜsÏU–}ôQwä‘Gºe–Yf´®=÷z§ÓÌÆØP÷l€tò³°ù‘£>¢¶¾æP ä+ÀækÔÊÀvÏ)ÂáD[l ¿m`Î…—8EB-**°<æ˜cœ@óùçŸïZZ¸p¡ÛsÏ=ÇŒ=rfRo2ަØH‡!?›¯9êS€­OkjªH6RH¶zgM±þ¶uÝõÁU,rÈ!îþûïï´¦ñܹsÝf›m6fýXucƾªµ/6ÒaÈÏÀækDŽú`ëÓšš*R€€­Ö±DOl ¿m@‘N‹¸êïþûïïî¾ûn÷ë_ÿºgé”SN3>vÃ6v_z% [ÑDOl¤ÃŸ €Í׈õ)ÀÖ§55U¤)$ÛßÎ6°ÄõêµI“^ž¤iõÕWw?úÑÜo~ó›F¤ÇÜuÔQcÖ›Ýõ-»»Û,d;Y6ÒaÈÏÀækDŽú`ëÓšš*R€€­Öù&Ðèo0€1c†[c5ܧ?ýéÖÒ8/¼ðB#ÒOúS÷ö·¿}L”øÃŽ`|l ÀF: ùÙØ|ÈQŸ½Ø[Æ›¤›IC¡Á7§¾ª>l¤’l;ÛÀ×¨Ö `¿úÕ¯º›o¾Ù}ðƒlìœ9sÜoûÛÆ¤+¯¼ÒM™2Å›y¢c|l9[`#†üll¾Fä¨Oú6™© ¸:Òðh\óã*´i6RL¶œÃ4 60˜6àìí·ßîæÏŸï®½öZ·ûî»»]vÙÅÝzë­îw¿û]cÒ7¿ùM·š·6­–ûÑìÉØg¼}°‘C~¶‰I–ÄãƒP ';Mðú“Wtwqi€5¸u­µZ/*’_ë>ùÉOŽ»ýŽ;»ç-d#º°‘C²%¾átù‡¤¡Ñ`õ Í, `×M*¬ @Cm¬àæ‘G! °wl»-[áÝZ´(vðr ‹kŒ ÄÛ@;€Õú¬÷ÜsOk\ìë_ÿzwâ‰'ºßÿþ÷Iò—öÙgŸ1ãcßÿ¹ûym²lQÏ¡žü LLXà%ñih4øz…Ö°Ó’ +ë6À´°Þª%Š`ã[ ­°Á·€ÕDJêJ|ðÁ·@öŠ+®p/¾øbc’fOÞf›mÆŒ=þs_pO=÷" ›²l 硆C47ŠëÂ÷¿ŸÞ˜ÜÓX ¹Þs*4-–(p÷€ºK;=¹”øä(À¾Ctq±xˆØE‹¹ûî»Ïi2¥wÜÑí±Ç­õbÿð‡?4&{î¹cÆÇNf|l*À°Ít•|€}ðÇ?¦7æ÷ÆÔpÑ‘—UìÔIJͳn"°lXÝø.lž½²D6ޱР|( °÷ß¿ûÙÏ~æ¾ño¸õÖ[Ïyä‘îé§Ÿvüã‘~ýë_»ãŽ;nÌøØ­§l뮽aÑØ‘h,ÛL—€í^ð¨›~}™²»°Q† À°¥ÞްQ÷W×2°qùg>ÿ·ãN;G§m·WåÍ_ô˜íwØÑm·]|›ŠæW{Êó•ÿ&NtÄ$3€sÜ}Z…N1{òÉ'»üà­¬ìƒ>è~øa÷©O}ª²gœq†ûÿøÆ¤Çܽ÷½ï3>vú>û2>6¹ÿØ®¹ À°PäÁ, ÀFÞ,MÊÀÆ9Æ“×YÇ}ç;ßi­ “’k•ÏÊ*š_Ç9FÎtr­£ÛT4¿ÚSô˜Ù³g· ¿ è Œ8;F§|ÚìE]ä¶Zw]7áU¯jMà”°Š>hùþçv[mµ•ûáèþô§?5&©m;í´Ó(È.3~| à†y|,Û$¯ä•¶°l– À°lwZÅu°ù­œ~ìâÅ‹]ìGpYäS4¿Ê.rŒ&œYuÕU£›T4¿ .zŒ €³?À³>ÒV“"½wút·ùòË»ùɽý¾å–k °=ö˜SÄóꫯvS§Nuïz×»ÜC=ÔˆPë…Ü:Éïš~G”vH–ÝV;`+v,**€`+2¥¶Å°,[ÇVqlœc Àæ³/gKà ýrÞ!À~ñsŸs믴’;}©¥ôÖ¨•bö‰'žp¿øÅ/ÜÙgŸíÖ_}÷ÙÏ~Öýö·¿uþóŸ‘﵃ ¶_®QÕí`+s,&&%U6  ÀVf™m `X¶Ž;­â:Ø8è`تfÊ‹»÷êÖ)Ø7¬¹¦»n\Ëì“O>é•ýØÇ>æÖ^{mwÁ¸¿üå/=Mýë_ÝÿýßÿµÖµ`—³*~¼cqë&'½¸ª`تl©]9, ÀÖq§U\çD°lÝ E}q÷fÕ:…ûÍo~ÓMIº¼–‰À `Ÿzê©ÖìÄ÷Þ{o«K±ºÿä'?qÿùŸÿYkz饗ÜÿüÏÿ¸ÿþïÿv·Ýv›ÓR;,[‘kÀÂ¥X€Yˆ1œR†SfÊ몎aâŠ%‹`ãœd€­”(/îÞ«[§´1°»n½µ»Â‹Â^œLâ´~2¦\ðÎB¬g£µ.Ä>À>óÌ3îW¿ú•»æškÜÖI¹þð‡[p+°ìvúÛßþæþ÷ÿש]×{Ýu×°ÌB\Ò{H= €…CJq‹á”2œª`´L9]Ø™ÉO«ŸØ8'€`ë)ê‹»7«Ö) 
`¿÷½ï¹ÍV\ѽäAìÃÉÿ;­°‚Û+™Ñ÷–[nq¶ŒN,À>÷ÜsîùçŸw§žzªÛpà ÝI'äþð‡?8uí­:)Ú*pÕÚ´wÜq‡»é¦›Xoù*ÆÀVæ*°pH)éÀNO,{vžuebcјKš ASæ§Ün»íæ¾öµ¯•º°eÀ°_ŽéÀÎJ V‰[ÉÌš,[5(Q^o5O÷¬et4 ±Mäô¹W¿Ú”t+Ð~'I&KT}ü#i­[`ó›ß¸ŸÿüçnæÌ™n“M6q—]v™û¯ÿú¯J’«"¯>úh ^Õm€k{le® À–âœ.ìŒÄ²s'J€¸jzu²”ˆå«!@j›·Û`yðÁ;¥n×SUùleŽRs¢X6|Øw/5]§,€UwÛMWYÅ=72 ñ®»ìâ6Ÿ0Á)ûb’>¹ì²n—-·,°/¼ð‚ûÝï~ç-ZäöÚk/·Ç{¸x #ˆÕ8WMÒ$@þéOêîºë.Ö‹ºúvÀ–rÒ`ØRüÀÖl8X«sŠÊÔVyEÊ1.rL/ó°•=8JÀÆ9Ý,Ûtð¢}q÷ržNY+<ú¨£ÜQãÇ.£óýïßmºÆî¼dL¬&wzgÒ¥øÌ3ÏŒk]ˆ˜°‚X¥K.¹¤Õ­ø£ýh @IMж \5žVkÑjŒ.ÛÞ6ØRîë1‡ßSÿ‹Ò|ë÷¼ç=}dª‹z °[%–œÌqPÍ'™:{Ú-ÉÃàÖµÖ*Eòu ž°ª[†«Hì)§œ2Úþ° ±¾«»±¢¥‚Nߨ©5M‹¦*¯ŽWÝ ÖmYÛÏÐi‘ິ‰­€­æž)[ çô°lø°?î^jºNíööÛowë'QØ—YÆxâ‰î¾ûîs .tû¼å-î³ ¼Þ›<÷7}ÝëJ¬Öd Ó¿øE7yòdwúé§;i^Ò8W%Á±&’z衇،¨+ز^CÛã†6öÆ4_ÜçùÅòÙ•b}ä²ù´.»@Ë–Qçq½ØJï„~X]t¯Ÿú.à5ƒð!Sùl|¬ [ûô] FT Ž•O7†þZغ4[]utYîÔÀØJoÂ…°qN7 À6¼h_ܽœ§S;€¬žvÚi­Ô°Špj-Õµ“îÄê^¼E²W^ye+úÙnâ0›°Ú¦2=ôÐÖøX]U×à0 ZõÑÚ²šÑXǰñö@¶°ëuÀPìå—_>åŸë·¢óߨ-ª;lS§>{·Ž`Ð…Ø.nžYëƒ"©28?«·':ÖŒPoTÂȪÿ†….Ä­ßÕY#©²_åA-€srX6|Øw/5]§<€¬n’<“}€ýÙÏ~æÞ¿ï¾îô¤+±–ÛÙ#Yãµ(Àªq»¤ ˜vØa7=™LJ€jÑVÝ™ŠÌêX-ÏÀ·C¶2€ 8ÄzKš¯ÎS£ï 6) %ÿÝt‰ ,H•5¬õÕ±´0ë»x˜ ]v؆¬šiëª.´ŒÏòùýéµÍ 3,'4€-ò(`㜀m:xѾ¸{9O§€Õª ,hu!¶u`µ®ëFÉÌÄ «(ìUW]Õ8W?]qÅ-·ëûùçŸß{òÉ'·ÆºþùÏn›ýõ¯ ÀFtN»þl¯.Äi`æšüýÖ32+¸åOëíàÕzr*€¥rÂh® ØÐCåULe)¿þêØ¬RŠ@f7ó° Ø<`MQ3ôp0¸o|lÔ-Ø(™Æ`ãœ^€ÍöÇÝKM×)`5)’f öVëÀþýzë¹ù‰Óøõ$½÷ÝïÎعsç¶À3L;n±…ûç·¿=uŸòÞ}÷ÝnÝu×±`ËÛé0äg#pHØ•7 2é»üúp¬jZoKÈ`ZM0éBÜ2ÖIÊ]F'߬ äè÷1°z¢7þ< •á© òµ{ËŽ­%›jXläýÀÆ9=,Ûtð¢}q÷ržNìç?ÿywH2K±Ö‡œDcï¼óN÷‹_üÂ=ùä“­®½O?ý´{æ™gZ‘R9¬’¼›'“4Í™3Ç=ÿüó£iÇÍ6sãÿîïÜ7Þ8f»åÑX\¬¡`Ë_6ÒaÈÏÀkóÖ¤l¸š‰Í£#ˆõZqÕþ0²² À¶…É´Yˆ­p8ËX ÀÚx×°k±ÿfÆ&yÒ`¬þú“Eùohºl&qÊ"t3çô°lø°?î^jºN¬¢²’а{ðþû§¬ vÊë_ßšµXiï¤Ë±¬&wÚaÓMÝÌdûÛ“1¯ú&-é#€ #·Œ-gƒle^llâЧ3„«²l8!e³D`³`Íú¤ÛtÙö7P{›ÎBœ–Ï¢°~¿x?*+˜µ.ö×k«ÚŠþöËXX6ê&ìZ&6Îá`ئƒí‹»—ótê`~øa÷{ìá’>k-ˆ´Ür­®ÆaöÝÓ¦¹‹—^º5^Ö’²“'MrIO´ØžwÞyîÙgŸ“4þV‚-[ÎØÊ\Œ‰II‰éVóI Z÷–ä>PzðÇ?nôr0Yc`å‹û>z‘lÚ2šÆ#,Ëâ"°D`ÛÞ0Š~ÊH,µ$­<5•Á…ß}#¤ªÛ7k lí—§Õ©má¦ÚÖ9°ÕüЗ-€sxX6|Øw/5]§NVÏæW\±¦]vY÷ÙO}j ÀžpÜqî£ ˜úð‚ì~I×âÅ#ÑYEf³6ÜÀ–³A¶¬ÑÝãú`å[—_ßذ·¥1‚qDZM°l¶aÛßôÙ‡í¼z9‰“º hÜa%Ÿ~[U7Úa.'`í žþ–ýÈ+³Ç²è‡ãØ8‡€`›^ÃÔ¾9^┺qάžç$Ô‡Õº°“'Nt>úhk 
ì·“hêîÉwXDb'¼úÕî£I´VùC°}_ó“Ò‚SK[«¬¿Íþg¸ßtßvØfz+ý°aoLlHŠXýŽø½-íëB¬ý6Ó°•©úýe8ýãýãšÈ½Øiw˜¦.·®µV£» 4Ñú­MÀ¼nÕáÏ)) çì°l7`‰2ãî¿P§ý8¨5Tf™eÆ»éûìë¾ræÙîÑÅÉ$H%—Qñ«`™9Ó›Œ…µ(ìç>ýiwûí·»-VYŽèAªàõíIÇzÈÍúÌgZ°«¨­²ÁZûU@)ÍŸ?¿5á£&… [ܦØH‡¡ælý°~o̬ž“òÏ´>Ô†ßÓ&c²ub³z\j¿®~Úžµ¯I¼ÀÖ¼ŒN“.~¿¶Åö»ãÆý¿‘— «~b§¤šnû¯:6ÎÙ`Ø*àˆ2âî·< `±~Ú~Ç݉'ên[°¨4ÌV°‚ÕÕ’nÀXmÝlíµÝfk®ÙŠÊZ„Õàõç?ÿù(„êÿYÇïVK¢±~Þc“ïŸJ†¬À VÓ’`÷±Çk±Ö©U·CMütÇw¸Ÿüä'nrZËöºë®sçž{yºÇì_¸èwÈaG”Ö?¦Žªó°Íô]ú `ûÕoJ»»°SË>4Ϻ‰ÀÏ¥¢åØk’‡úZãÆ%C~ÆU¯y¶Ê~O6Ρ`تfÊ‹»÷ÒtÊXf7ØpãH]{üB0UÀ ÷˜:Õ]1¬Û%0{^2+±¯{&‘×ÇÜýò—¿\"mÿÆ7¶&q²ü¶,Ï=÷ÜÓÊ+UV“C¥¥^¬àU/ô·Ÿì€m¦[À>Rʯn ”iG—6ʰX¶Ô&€]1yP¯:nܳ#XÍ`GªIƒ¥–Zê7rôät²5Xî5¯q‹/Χ¸‘r¤‹|ŠæWÙEŽÑ„«®ºjt“ŠæWÁE¹þúëÝJ+­ŒÝqï¶I«­6&òFbÃï‚RAoL7ã<€½å–[ÜÁï}o«[Þ}÷Ý׊pþìg?s>ø Ó,ÄrÚ°š8§Ö-Ò]–¼K‚9]ˆÈRÙË›É&qºzܸ/Ž<<4èšOM °qÎ6 Àv\wŸÅê”°‚(ÍHüÔs/–~1ìÇ;Ìm–D@”6&5`ÕÅwó¤Ë¾ßuXãaßöæ7ç« V«¥uŽýèGSóÀ RÛ¥:&qzìÉçÝvS·wŸuRáHzSzþì÷Ï `¿E°Ž{À½#Ñ0¹U:.§Õoʸqû9¹o4Oʃ?þqßù·½ô­û­n€í»ŽÑ) `Ó¢°Š¬î˜Àé±GÕZFG>æk¬áÔ]Ø¢¯»%‘×óÎ:«µÞ«­ {z¿ï·_k[Ùd«zóRk‘XÍú£uSò°•¹J°ZËzdvÒ+Ó‚bí˜ Ì#9øàƒ£øC/²Uº° †ÍγîJ»}ÖÙ…XF"ˆyƒ!#1CÓq‚_ß(wÛm·Ö›˧ïaWdÝÚ®r|hÖqÚî—§òµÍ¯'­{»cõfEŽÕfk¿ºKûu©]:Nos¶uÆ»°³ƒUâÀVâH°lSœmÚQ¬¦i™µŒÎ[¼±°šœIÑTEXL–ÊÙmÊ÷Žwvç%pjðzTi=þãdÉÖ…Õ1ŠÂj¢§¼I˜²ök *ù-<ð@nª`ûÑ>ØÊ\¥Xë‰é÷ÊÔÿEýá²Û€mk»3’½sò¬»o6Ƹd°–Oݕ̸}c¶.È‚LƒIm3µh¯àQ ( 4øÔ÷pRmK„oåµz¬·_‡A§¶)OÃÚ¯m*˺Lûoxì8«í±7OÑ›9ÌÀæÝZÝÝO6Î`Ø~tÌisÜýÝn'u÷½óÎ;Ý·¿ýíѱ°ï[f7!IЦ Xç&ëÂ~rÙeGáõôñãÝÁÿø­õ^ý4idžc“cNf÷Ç~¿é¦›ZÏyÁiLÒ ú»îºËÝqÇN@éøýèGîºë®sçž{nËÏPV{`+ó3 `ýȦùèE}ãƈɓåk°C °lí@LÀFiõ@Ð6?’Ž›µ<F•oà˜VW€õo Exõàña4¬ÇÚï×æÑªrô–¸S(-r<[Ùƒ£TAlœƒ À°ÃêÜÛygE`€oI"­šˆI“8}âŸpïK uÃ$ë/±#˜Ýc›mRáôÃ3f¸M Wܵ“5bŸ±Ðêç3€]´h‘‹Ilûßy¶”ûvÐÀ¬È2×zD†=1}?ÚàTÛìÿ°Ëo°òç}Ÿ^.ùýˆòÇÔÀ¶ëù©ö…½?ý2¬·§~'tNa¬ˆO_6o—ºGE`·J,9ù]¯æSgâ€Íz;nO{âo³lZ×â"¾ £´aYªÏ7r¯›CÇùÝÃpYC,r[Í=S¶€MCSýV­š¬cYäSôuƒÜq§‡6ê3l`Ø/çÛ`/ºè¢VÖŸ…øÊ+¯t;o¹¥Û-Y"ç;É3õï“Û‚Ê´Ùo¼ñF7yd†â£’ˆí§ΛI8«{©.§3&Íþ­`ËzK7ðëÃîA¿‡¤|ßü®È¾ïíûñ!K¤}·!~ HYPÌê³^—i=?ýà™õþTY~ïPcëÉ©ýáÉ"~}™¼½ØÊ¬_Õ °1¡ûªVUFaÆ£rËt!. 
°vùÒí34ëB\Æð:9€­ôÖ)\ À°q6Ð/F;;»žíváÂ…­(ìj÷wîÄOt÷Ýwßè,Ägœq†[1‰Æ RÛAéÔM6i­«Éž6Hf2î`ï¹ç›X¶°ƒPü€Ø´^ŠEÖ÷“l[;€3„Aªpè¢ß+3ìù™Õá6®;qT'>Ö±l‰Yˆm§volÒ¥Pø0z›õ7#3ó7 ~;²ÆÀXëzÐÎèØâ¿ÔƒpçìÒ…8?K6ΖÌfëôºµ'·Æ7ÞØ}öÙ­%o4V(€½øâ‹[ûC€}ðÁs—´Ñ’7'tR+‚«±³ïL¢¶_ýêW£Žó—ËÑøUùwß}wt`Ø|–XÝc~ò#Ÿ1Øpò' ýó{<ÆD`•'¬?N$Óï•XÇbYÔVe¤±F7 µ]™l €µn½2e`—2€ß¯Ý¶ù³ çl¸”õg·È¬?¦V€«ýá›—°Ž4à »§µÕê$[ÃOzƒ«`ãœi€<ãî•~×é+gž=:©‘ž¿z}íµ×ެ"ž»î°C*À bó’ží“’H­f1¾7I›®µÖÇüÛÉ'»n¸!³,í“/ (-’˜Ä)݆éB\™“2P+è“?µŽkl6XóÛ­Ü<€•ŸîO´v!ÇÔúœàGWÃ^˜Æ4leö_ob{û¾i AQ0«‡™šö§½igH6™’_†cøŭ7H€Õ9úÝ–­K±ß—Ÿl…ÜGE°qN9 Àö;˜Ñþ¸{]:ݶ`‘Ûvêö£ »l2^õðÃo-}#€7ož[°`Á˜.Äyë±úû÷ß{o÷õ‘õbwJ¢°sæÌ³ž«ºï¶í¶™k¼^ýõ-€UŠ$€í²{2P›·¾j ¤ A 'VX 6…Ýóg¤ —ôô£¡l…wEc`ËhƒœÓBÝ‚M@ÖåXF®'n³2²ŒIùU‡•vOȪ#loxœö«nAwÚ9ªÝiÇt»c`+¼qJÀÆ9µ, ÆÝ+ƒ¤Óœ /qk¬±æ(Èjb³ÓN;­õœ×DM66f=V?Ï¥—^:º$f5Þy«­Æ¬ç*€ÌV¬I£ÒÊ6€U÷æ" €`K¸ E:€õÇŸZ*œ IÁ/?àå÷¾”Ý`õ[ãó† {4ß<`­üv3 °EL<'o¯¶Û°Fù,±D[áS¢(6Î)`ØA3Î%î¾—NO=÷¢;þs_pË%Ý~åˆ*m•ç÷¾÷½Q€Y‹5ÌóƤ밺k,¬ÖÝy‹-Z^åÛ.;;Ù®—¤m’ï+&`«mú¾ö„ ­‰šü²¯»îºVVNhÑÀ°eý…ºë%Àâûw×÷õí%ÀªÛÀ¬ªŒ›.ÄõN/oT¶ª»¦\9lœó À°€^ܽ2,:uÎnå•WÙµ×^Ûýû¿ÿ{kll‘tÎ9ç,²Ÿ_j©¼ j¥ýD5ñËüÁ~ÐXM&U&ÝtÓMNKñ„Ï=÷ÜÑs–kž']ˆËùÝ> €è%ÀN«¸ÛÀ4u¸5ÿÑK¸¢îîß<l·íË`ãœr€VçžóÎþÐøXÁÏÒK/= o~ó›[ËîÜ{ï½…Ò7¿ùÍ%@Ö¢²›®²Š»ñÆGËSùØ[n¹¥T`Ç^S¶·~HVíl÷}ð¦pKâ¾þ.ìôäQ‰OŽ,›†¦ê–¨W‹|ŠsóÍ7»wÚÙHq6ˆNÍÕIãc÷xÛ^£ûªdá|à­è¨ÆÑIgŸ}¶{Ýĉ£< bOO"²í³Ïh9×\sM `u•I,ÛÎ Àvh§S“ãÍ+ƒ,ð\ ž»°yöÊþØ8§˜l>ʰq¶ˆ®Ns¯ºÞmò†7Ž‚ì„düê§>õ©ÖÚ±E’Mâd<½¤±°É,È7ÜpC«œ«¯¾º°?N&¶)›èBüŠm¦KÀ°uX& À°uÜi×ÀÆ9Ó, xÆÝ+èô’ûÒig¸•V^ydßð†7¸9s渻ï¾;*m»ÑF£; b°ÿ´Ì2îÐ:þª«®j¬ ´“ÄØ—m€­Ø±¨¨8€­È”ÚÀ°lwZÅu°qNù†mì¶ÛnªÛy—]¢Ò«_ýê¨|VÞRIÁزËó÷¿µ[VkMF¶_ù5®/6¿ò=fË-·royëît!Nh oð4xtñóîșǸW½ê•ñ±o}ë[Ýõ×_Ÿ ±>ÀjyE_úЇÜwÜ1`‘í$]|ñÅNcv“ÇJ+ «°;À°™Û”Á΃Ôº×q{f×ÀÆ9Î =âÔ=06]úýDçU™—^Q,™cŠÖqI‰6=Fc‡Õiæ¼ãî½~×é¶‹ÜN»ì:f|ì‡Õ2:Z'- `ç$@¹ÛòË»7ß¼5[°ŸïÊ+¯lE`ÃeÓÁìô¢ÍàõøÏ}ahïE¶·~HVí,[‡e%K¶Ž;­â:Øáp¢ûh?vÚï6pñ¥Wºu_¿Þ(0&¿½îÔSOMØ·ØÂ½.?û•¯|%u¿¬À¶hš={¶›_.€€-æû{ÃMw´ºÍªû¬9‡CmûÈ3[)ܯï!ðúãj‚iy|çÓÊ×_µGU†uãÕ¶äò·êSÕ¯ÛoÛüè¦"r&¦Ê—6VTgǨ ÕqþE—f:ž…>4«r©( ¬ÓœYZÛ§6«µ%Öù ë ÏSc¶˜­ÅjO¾ÁÔUKÛl±å›FtÅWtÇsÌ€UWâï~÷»îÝï~÷˜5]œyŒÓDQØFœm°‘C~¶Jv$˜µ.¬¡dµ–ù&V(GÀn•yE¡bÉŒÝS€Ô€spÌ4€Õ_?òªý‰ŠlúÉÔhÛe–óFhU—Ö"ª:΢ÃV†ZyÂè¯ö‡kåtÇ8Á!ÀZ”Ø?–1°Ål-Fwò 
¾¦gs[Éëüú׿ÞÍš5«=þøãÇtÞzʶNCaÅì€tò³U°ùU’2ˆXôC&)ÀF^ ¶˜£cQİk°  ч\ûߨúpg]‹8œeVå« Ö~¾ ª5^Uðª6i9 `CX`‹ÙZ› ï`k«ñ±‚¬¥—^z4"ëÿ?aÂD÷¥ÓÎ\K.QÀF: ùÙØ|ÈQŸl}ZSSE °‘B°Å_H«~wa`Ì$ErÖå8œè)Fʬ•+õ!Zí±q¸i‘å¼ö¼ûçž6Ž€-fk1º“g¸4]¸è·ÇÛö3>v¿¢»pIpµû€tò³°ù‘£>Øú´¦¦Š`#…`‹9Àþ$N6I“uµµnºi3üú aà*hL^EA5f´Ê.Äayþy´é2¶-„Ó´(.c`‹ÙpŠ^Y6 —^ïÿÀ‡œÆÉb'Û é0äg`ó5"G} °õiMM)ÀF Às~ÂetÂï6y‘àVP§¿iÝqxŠà†3ÛøÓ´±ªæ¨–‰ÀZ”ÕÚ¤º­[³MÚ¤r-ù³Ç8È!ÀZ·d§±Íö˜òÉSÌNÑ ½°x`#†üll¾Fä¨O¶>­©©"ØH!Øx'G¡º ûë®*š*@ó£—ŠÀ¸¦­kå"Ó¢žayiÑδõeý(«þ!XíDªmþñiãpâEÆÁÚ:¯~[µMå(éÿ´6ád³?ôB/l z`#†üll¾Fä¨O¶>­©©"ØH!Øê¡³( Æ”Y6¢®á²=Y‘ã²up\oì ÝÑÈ·6ÒaÈÏÀækDŽú`ëÓšš*R€€Íwnºá ýHn7êˆ-ÓºöZ·gë-¨µ¨©¢¹Y©ÝxÝØ6¯7vˆîèŽ ¼Ôšá9ydÊoàÓ™“Óå<ù @#`qhDØHµØúØ&v¥µ.À‚TµºF˱`ë·€ͱzm€tȆý¥@À®ËÛ«þºªÞZ6ò°õ:J8¦è `Ø@³l€tȆý¥@ÀN£Û@]Õo-yØf9R8¶\lÀêµ6Òa  ô—l]/Z›(0}$!FŽl½ŽŽ)zcØ6Ð,`q•P` ˜šœÕ¡ygF6O!ö£@`›åHáØr=°l i6 õ©óÚ¤Éàš21]^[Ãýlš„5)ÀÖ$4Õ @• °8ËE=òc3ØÀðØ€&tKž9K¬+Ú€fVמî;`«ô*( úK¶¿®­E–ìð8¢ýâLÒNlh– (›·\‹S(Ð l¥7t¤Y9'=1Ùÿ"ågª„>ýx×4 Íl³Ew®6PÞ-4ÐtµëúÚnÿí ïou™Õß°Œ´mþ5StŒ•QôzZ}VŽ-i¥rl[V™íÎÉöåé’VvÖ¹øç À6àÞû&ŒOšðp¾úœœS üö¡OïïZ€(P‡lyg¹¨cJ~´ÆºkÛMÝÞ}䈙NTJúßj¿m·<ç_téèOuƒÕ18c´ å¬iœ§_nx=í«_ý<ª[e´P•¯±¤Vþª}ÖnÛîC¥öûçä×qæYçÙ'¸( ›a~+ÓÚbçMâ:žÚÔ(€(€C®Û]‡`A_l >0p´É„!|ù°ªk#øòAS&ÈÓvíü¨ "µÍÊõ'-ÒqÊgSg l6`Ü`U·ÝvŒþªL+ׇTw+×ÚÛ>}·óˆµÍPC}7}LCµ€r‡‚ÓG@@:`ës®cEòqM°r6 £žiÛ|} (PH¶\dîX€¡[N!åb[uÛ€àÍŸWõ‡ÛoþxQ}÷#§‚Î0’ªnÆiåú³«.¿lAŸÊñ—»É[FGyÕ¶p‰œ°=ª×oEdÃ:ý1¯Õ6˲AÍ»Nªß?WµÏŸYßÕžP¼r›²€ío¥m<0iÅÜ”–l”l#¤Áe•½7Õ“!…0…þjD°2bÒ©I’Q)ÉPí#COëú«ãtŒ},šV†ò„X+W7Œ}ü6¨åñ?Ú¦Kç¶¿¿®­­LÈhŠI;°El ZÈêj\f"§A¾6le.Å $¿[=-äOûùéÚî3€í¯Òç`ݺt~!À Ê`·N’ÆÇÊ€mœì)°hÛÌø <÷i³þ† ì¬ÀTuø€jeÐZ;üÌàØºøðÛ%¹(¶`«uÙÉãܰl ¿l@Ý{å3øÝšý.ÆŠœZd6í¯ äkÀöƒ·Ò˜6ÊŸöƒPj˜ñ@´Ê d•=¶¬rC~\Àj›ÿÑw¿{þ÷¿ .}×ÿáÄPaXArVt5Œ¸†m ¡xÈ/%§o °ýå²ɹa‹Ø@õ6 ˆUw_·ªn̶̎`Ö¶§ý »Mêõ`ñ‰ (  Uèû›/¯¿ TÙ'-¯Hâ ,©, bÙqY½&C€ ™CœàÃtØÝY|¡mÖáy¬ý¤@,ÀúaFb­}öæF†¦·£úëÂíÖ5! 
B-¯Êõ“òú†Ë[›~²´Û ÀVï0ª“Çya+Ø60ˆ6ÀÖètôU¡_?mFufaP*ô½­¦ÀVÿ+éÿ|³zMúå™ÏoŠZÙÆú®Äl¼nüêÿ«Ã¤*P`}ƒ6ã²î»EÖnŠ°ï½•!#Ôÿ~òß°uª,é :¤œv `±6Àâ TÀï )`´€‘ßë2ôùU… ù « »%gõš4_>-²ë7´ò}¸Ö6ËÃ0‚¼ß³—X34…îýèlÚYi”Õ…XûìMÍòž˜iÝŠÓn.£‹“ëä‘[Á°A´Ǩ òåmÂ&ýoC}ÿ=mük5µjÃíYA'‹Ì†[•c]‡Ã™~OÏ4È-xêdïGʬ½ »Hë¯<úXÞ´YŠ}CÇÙªm~ÄUå„ Àö£Õu¹Í,é :¤œv `±6ÀvÙѼâ ågË··ˆ¦¦ÔR~z8+q+>»Àj{ØÓ¦Àž-F‘-›c™Ã š HÓÖˆ²·&>dúåøoUÂÙÍBƒ“1†ogüáÚ-]ˆ£.ñðe`qòb<òa+Ø60ˆ6ÀŸïÓáÛr:aIkÝÓzGZ/ʰúÐGo¤q@V÷d¿.¶Ã Ïáé „“9•Ñ©Š2ÊÔË1}ª‹C:ˆ)ç„]û60,ËÁÔe÷Zr§®ºê¨€íS¦·Í¶ÀS´2HÕþð3-Ù .½~ *k§´^“>؆KfZÙþÊ& šù3°½µjG¨JG¿‘:°³^Ù€–‚é÷å`´ Ž’ixò)³Ýù]Ú3ˆT[´¶l¯®iÕõ°UyCUŽg¸¦@R ™ÖSÙØX`ëöë‹—µ¼aD7,Û¢Á–€*ådQ`€`‹ªAÊæšdë­¿›{Õõ£°¥h¬°ImÌk‹­ÑjùôÝÚ¼ãËîÏÒJkËJײå6í8v€œæžšº!§ +¬¢Å*—™†«P’2P©Àø &>·pÑ#ãˆ4Í1¢=À6Ð;H-Eû ¾B€­Ë¦Úi¾¨«MݨçâK¯tWYe~#ŸÒ4 PPP`¬K½êU{ê¹Øß÷ÎÉî†CF™\Olà¥V”ÒTªÛ­¾ ¾gÖ VÙ´q²~7Y‹Ú*Ÿ{ßË—øíô÷û‘ß"×Cåúu„«óð»ëÕeuûõª,µ]å èÓÚ¡üVŸé¥•î÷®Ùv.×Þ0ÏM˜8ñaü@@@>P`¹åVx,°SĹ&/öÒ/6` jípi[òÓì x!è*¿ ÏÔêÿ1³µMÇꯒ±ñU>ËBžŽi×ýWàhuøõèÃïvn~÷bM¶¤²´O€šÉU;¥‡i ü‚Ø,­¬ :ÎoC¿ØDZ;ç\x‰[i¥•nêƒG6MD@@Xyå•ï-%èg‡…¶aØÀàÛ€`,Œ:¦u‹5ðôaTpæÃ§AåO¿ÐIý58ÔwÖ§ßVÁ¡Ý( þ²ì/¬× 4`UOø[–em¶h²µÙ¬¶Ú9¶ëBÜ]±³4?ñ¤SÝrË-÷5<@@@>P`ÂÊ+_{Ö9Ð…˜.ÄØ60P6`p.ù’^‚Ïì|ð £¹‚!®¥A°êóSDg”µ9 Dó6Œ†ú@í·Ç‡Ú0ʶkXö#GÎü[ò¸>®Ù4PPP Q`ÆÞÓÿñOD£?Å5æ“ >•ŒXFÓÀ- `ý|úßïšlÝuõ7vɵ5­Íi³·Z]g+Ëo‡ýoà¼(ê°ìFoòb¢ûT<@´ŽS¸ÆS¿žÛÖ^Ã5ŶևêÖÞýªÑ0¶{õå—_á/ÃäØr®€606Û…Xö ¨§iMƒÕ¼l^43Ææ² » Àf•å·€}ÉÝÿÈ“nÙe—ýã0>ü9g@ÁT@k( ä TõfW‹ÛGp®Eù ¹š}Q³0Æ8XäÇŸëÌuHƒÎv i,…“3I kº„c` ³fúµã”/mÆcÛ﫵HjXwÐf•^Ó´6«ë±µ¯Vƒ2‰Ó—N;í¸âŠç¹Àé£ §&çrë"°Ë{çsà Ð)r*e?~üg;ü£/ ‚ÃÊ9^Ø6`6àÏÈë¤^èªË°’?q“ß 8´#›uXeÚŒ¾!èÚ̽‚;åIƒ¼FÓÆZûüY‹ŽU¹‚i+Ë–ÉQ9~·j[VÈŸ9Ù Ü7M«p‚¨~½ï¶Ýnªº¿­Ì³“c«€4 BñéÒZoª¦V) è«±ôU ú]róSÔŽ)z¬•mÇ¥uûõoTåK»qÓ¶é1²¼«7øûÇ/¿ü„ß©+U¿:!´hÁ°Ð²"ˆ“jKËøÇØüÉœü}ÍÜÙLÂiëÀj¿« 1œImÊÖ¡2ü™‚õ»ï~»mW§Ú•!VYvNa{³´*29USïÍç-p'NüÅà?â‡ê Îôþr+(ˆåÓlŒh©ú.èSdVIÿëfi÷Ñ…´côWoDõ7€÷Éï—¡2}°Õ>ÕÕ_«#ìœäí ½²Se¯H¢°ÇÌøà!nª£A»€l(jY³úf•c3 §íOëŽ\´=ƒ’¿NýtŽ“×YïÉ3w«^=w©·+ 0<®:Y-Š–8HC0‹ž{Ïó .šaÔRÓ‡GƒÆ¬Éì"¬O+p†Êë—ouú«¶ ¨-Ÿòh›_O¬ˆhYTñ+¯òÚ§n[°ˆ(,Ë©`ØÀÀØ€u«´.ÇþxRETcÊô<ŠÖúkäöãùιð·ÒJ+Ý4 Ïóa>-zVwõ; r•…ßêZ?¤% ü$¾ÿ±mþ8Ríow³XÄÖ/§ƒP9×Åáw«+¬€Rc.pÚS7ÝlóžzîEœ4ÀÆÂ¥t²€K€š5ÁR»}ýp´9VÏNêèæ±.3yòº¿Jžëx>’µù 
¤ùîæ7[¯IùÂ1Ý[5„Ðz5ú‹¨ žv¬ê÷ƒbê]¶ÉßïÀ,Ÿ•áž‹XÚf]©­÷¦ßë3M¬>ýµÿ­Çh¨™_vØ3}Ô†AšG¨Èuïi^3\¿Úæ_Lß ³–Ú±ˆ«u3–Ñ¥EvóNVå[´TUFÀ*ŸoØlžÊì÷Ú×¾öcûî·ÿ‹Ýt(›® Ø6€ `½°½ Õ‹ÚäqϺ¯ƒçó¤ùîæ‡[ïI}ýãP 4ó³m(`ìÐ?•gð*¿_uªmJú ;Ñœ’ï®üúÙCÈ i ¼Ó¼ã|˜LØ´à˜Á©öYO«SõQi|‘v-ÏâxFi·ÌŰLJݼ1³¡2Lßu|,ÀúÝØZ›´êª“ÎýÜçOúÏ^8Ô‰S‹ `Ø6Ð-Ð Z½¨mâ³—6u¬€üô0ê—æ»teU–“ÕÓ±]ƒåsgùûi-ˆõ‡þYŠªËÎÅÔ†+¦X¿·¨Î7äµÏÿ¤c7„s줭ÚR†™:6 X²›®4±‹+Cˆý¤ÝL±ÇZ¾Ð€ÒÞ’È0ý(°³¿-Ö‹¶üƒ§Àøbçù±cþÔ-'‚rqP±lÀê²E^ߺÛÛ~¿úšk~yðٜшaS›ýî·Y=CùfŠÎlœ¼iþ¸ÚàoO+#«w¨íYàèuL6lÏ$*Ë×Ò‚t¾ŽloË´·'vÁ±2ýõßDX¿që&`ßý‹®O¥ïá›ÿ´­NÕgýÞÓ"°fø~7¿œ´FÛ²º?÷Pzªn‚É[êÛqçi¿{tñó3®.g‰zp̱lh† ,\ôH²<Ò†¿]a…j³•6tM´H ïCË?öSVC ðÌwoMM+cÐV:‡Z†“Þ°]3óü‚eôa˜\GY—^3lƒUí³¾õò÷ûŠÛŶ~ñÖ‚¼õ’d¹¶8°Áv£è ‘Ÿ/4&íóaÕº+°Øs¾- mŽ &ì»ÖZ¯ûflÄk†3Æuà:`Ø6oŠºÎúâÉ[uÒ¤g’‡øVCû žOƒ&¿ZDùË:Nþ±?þ3¶Œpþ›ð8ñCÈæ“«>}:Àæu!çã±q¹V¿ÚÐIÏM6ÖZº/ìBP¦ A°?Õ7ʪÀ1ïMOV»ÓÞT•9GŽ|ÖÕrm¼É‹á"÷8QùN¡6€ `õÚÀWÎ<Û­¶Új/Nœ8ñìä=qðÓœa¢À´èòÅж´avþØÒ0dÐ+ˆ)ùãY-XÕ®£ÍÂë/oiÐjpgeZy~WàNÖ_ZS ›ÖµÚ‡hÓÍïÝ) ý@ž‡µ]ÇØG<ã×vf!îñ-© ^”"M²h«ŒÔºË b¦ðŽ­§,ÀÒ}8Vaò™SW^yå{×XcÍ?~äÈ™»qÞ¢²,µ‚ `Ø6ÐPO¡ý8è/Ë/¿Â_p½4yp­Ëã{¨°Èg跇˾ÈÿõƒKáP>Ëoðj@kÇØðÀvk3 ÛøPùþ~»¬LÛrucö#¢i5µÑ?Ö3\ÂÇ/GF‘¶ÌŽÚâçóóøu„m×qþ •Ÿ½*cìõÉÚ4Õ´#\§ê1§v“ØÛ‘˜¶êŒßõ9æò €)°IòÏq‰ƒð 9 ›oõ¦wßs¯?sìñn‡vù«¾“ÐÀ°l 6`Ï=svÚùþ :Æ_ö¯ê)”<›MÒê<®‡V¬áD~rLïÇ´1¯e»ÄªÎvþy–^¸°yç›·ßtKk—ŽM;7†(vz9P ë ÈQ˜–¤ý“4k$½md›¶“ÐÀ°l JðŸ7ÓGž3»þ´£‚~P èŒÁiçŽQµ.¾ióâ4M“² ]åy0D±J5) PPPPP` ° OËž¤­øaÝ{Ó&]*[v·kÀJÿ°Kq·Ï›òQPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP†M[”>f};_›qrØô”ó-{Ýåü9@@@@@ŠX>)G€¡Eæý¤õâ´n_••ï’Ttø2kФ¿E?lÓB:ùh=B›W¯Ö/´5ùìü츲ºë8+£ÝK[Š¢®åt~e®{ÝÉ‹(€(€(€(€(0 &YIðQ¨BùêX«KÃÏáÉ¥¬ÖëóõÐ÷˜àÕ?. 
Ô¥§AòûkÚñyœÖ;g•‘vÞvŒ_ìyÅœ{»<l§ r<          @KXA†þ·4m‚ °1ìôS'Àª½´´vçEt ð‘vþ1çîGR³"Ž‚WƒTo)XðµƒÐ´ö˜¾v|¤°1W“<(€(€(€(€(€SÀذqê^l0TE´®N€m't,Àêœíüôí>‚E©u!NXÁtŒžº&1ùÂö˜¾êšÜ¼ØÆÝ†4PPPPP Fv«ãÛuÅÕ~Eo‚"¥vÝC€Æ§ê°ãTFÚ'-kÑe+Ï8ûeø€çGcÛéiçfãgÓÖÀ2&²š¡imñõµÿÓ^<ĬÎÃ4λ®ÖiîÛ‚E¿óº۵ʻ¦1öL@@@@@V `ÃÉŒ|P ÇÏZwäP6ƒ*A§uÏõÕþv]~ÕVœ§ß7¬+ Ò ¤ÒÆûúàíkQS›ÕZç£2mÜjV¤9¯k¯Ò×Ú;k³_¯uUNkwÀ¦]‹èf½ ÇõZ~¿KtÚ˜`Ù•éâ_KìyðíÉ©¡         ø ´X¿+« ÍÿX÷ZˆÉ´‰ŒÒ¢ŒþMÁŠÊб>ü¤M^äŽÊP~g &X #ifÑA‹ÀúFu—¶Ox¬]Z4SÇØ~ƒ»<€M¹4«´óµÏ²à°^û®k^»4͔Ǹ›Îj¯?AUØ}÷_&(¿¶…/ÂóömÅÊ´¶³¡¬sg;          À(ନ`ÂŒ…$Y|à e2ð #h>À†0ãÃrxœ•—Å6öTyüO»(£lÖå õ£™á1~„Öö¥l»Yí°óÎçð¸°^]G‹„úšficZ¦é¬º ,× ðiv⃯Í-¬sL³%;@@@@@h)``•Ö­VpQf­Ð¬¤mÏ*Óö‡ÀfÀ™Íz> U °Ò)+jõø]¬«Ø,=²L7­^ƒK:³´±ã³º ûÐé·!ë……åI»>6Æ7kÝ4]¹eQPPPP†\?2h]j53èÈŒP6‹ÞêoÀ†ÑW+Ë &ŒðåEL­>Ù˜ª6¯mþøØª¶Ó¬tõg>ž6"t–6i ^ç0OLd9­\ÓH×ÚìÎÿkà òÛ–ÓG@@@@áT @;•5ð R$U€a€FqCPÍ[+×À*„–X€õ#·UlÚdNÖµ8î´óô'eÊøÛq¾-©¾pMà!»=9]@@@@@_‚ ¡]´OТ(€(€(€(€(€}ª€œú¸ëÖéÕ°±mÏƒÌØrÒòå•]G6¯ œ_™ce{U¿10N+W/jô†          @* pM‹8ÊÉ×v%ßá#xú®ýÖ Ùò«Û®ÿ±®ÇV¦_nÀj›€.­ ¡ÌYP¢c÷ÉFä Ú­µßÎYÝ™­^¬ïaç°LƒC;6ŒjçÁ£¬Æjf•¡ÓÑ9…Úøã—õ¿¿_åéÓîüBMUF– Ä´!Ï.T¾ÎÑ×Ú·3ßž”ÏÎAuÛ>ÿœÍòÊmZ:Ôï(€(€(€(€(€mHƒ*sòmR%¿Ëe˜ß¾ ˜¦ÀU½‚ í·ò*þ¤Q!ÀÚ˜Uƒ}×ñY‘3šÃq½a¤ÏQíW»>Jj“¬YëGùtžvŒÊÌܬˡsÐY9ºúîw±¼úÛ Vý±³~—pibßîüÂ6L›.þ¹Å´!Æ.¤•Á©þ·ë§òý—¦¥¯µo¶ß€6«\£ÙZ¯&,˺ölG@@@@ˆP@PNdÀ™vxÀú3‡yT¾áþ`Ó Ú %ë”BX Çõ¦lڸ߬(i À¦A ݱX¿œÜÓ@^0&¨•Fú„K¿¼¼6(¯iFÑ­œ˜6X=íìÂÚêÛ†¶©üðÚèÅ‚o§i܇ö•U®¶§Eû#n²          4A‹´ùm18оp‚,€mI!L†€B…uµ¶Ùßv3‡e âböIØœ”·S€UäWeøHkC<¦]‹p[xiçgÑgëÚÝîÚ¤Ù`^;cÚVFÚ¶´²´MÉ¿þ¶Ío¯ßÕ:<¬6°MøÕ¡ (€(€(€(€(PR0²•;‚‹Æ•Xæ Nýî¹i«|Ú¦¬Óœ–ìà ¾Ó–ª`U‡ ±×+t½lܱ¹ÌƒS›—§€•]†×>ìö À–¼é9 PPPPúU!ÀœøÀèOF”%›‘ %`MµÉjp)pµI¥ü2 ` " ¬ŠD?cÖô·ëam ë1¬]v.YçjBª´ _bäµ!Í®Âm>¬†Q~ßFt=ýïþqj‡?Xç’V®¶3 qÖÝÃv@@@@èlÆ_|´¨¦J€µñ©¯únÑ3•kìG- ¨ú-…Q¶P^ìnY´Tû²"t?Vg8³­mWû¬»°Õ.;£ó,2‰“ÍÎì·=m[x}ÂÙ™ývø/¬\{¡ }Y‘Hp;_•é}^b#°þuöÏ#,_í°Ù”­l‚(ëC|V¹í&(ëƒÛ•&¢         duÙLë’ZT-ƒÊpF[Õ™µ,Ž_‡ŽëEwϬxóڢ㲎-ª]^þ¼ëÓ®­±mÌ;ß¼6äƒío§wl[Óê ‡“Ŷ‘|(€(€(€(€(€ P mÒ£ªšeã3}h˜–ž¬ªNÊA4ÌîPPPPPP ÏP4T~7>ÖåÓïìwçíF”‰¡ê–œ× ÕPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP 
KÿäE­mžWòƒIEND®B`‚nova-13.0.0/doc/source/images/rpc/arch.svg0000664000567000056710000004404412701407773021451 0ustar jenkinsjenkins00000000000000 Page-1 Box.8 Compute Compute Box.2 Volume Storage VolumeStorage Box Auth Manager Auth Manager Box.4 Cloud Controller CloudController Box.3 API Server API Server Box.6 Object Store ObjectStore Box.7 Node Controller NodeController Dynamic connector Dynamic connector.11 Dynamic connector.12 http http Circle Nova-Manage Nova-Manage Circle.15 Euca2ools Euca2ools Dynamic connector.16 Dynamic connector.17 Sheet.15 Project User Role Network VPN ProjectUserRoleNetworkVPN Sheet.16 VM instance Security group Volume Snapshot VM image IP address... VM instanceSecurity groupVolumeSnapshotVM imageIP addressSSH keyAvailability zone Box.20 Network Controller Network Controller Box.5 Storage Controller Storage Controller Dot & arrow Dot & arrow.14 Dynamic connector.13 Sheet.22 AMQP AMQP Sheet.23 AMQP AMQP Sheet.24 AMQP AMQP Sheet.25 REST REST Sheet.26 local method local method Sheet.27 local method local method Sheet.28 local method local method nova-13.0.0/doc/source/images/PowerStates2.png0000664000567000056710000076145012701407773022306 0ustar jenkinsjenkins00000000000000‰PNG  IHDR§}¾ç½{ pHYsœœ&Í:4âÚIDATxÚì]\ÏŸ“P1À–CEÁ¿”"¡(*&ÊÏ"Ä1QT L°›0±E ;±,lA¼ÿÛcY.÷ŽãïûÑeovvfv¾ûfÞÌμ§ÌápåʲM.ó£·¡N]–¤‘?¹v-–Àðoß8ù‘Ô·‹ÍZØbïå¹QŠŒdÌ_n«„"ÿÎaI]ªU«æ»ºÎ•úv±Yçýá¨(³Š_l)žQJþXMFq^mv•Í&Ê‘ž^"-3=q8ßû€c®^¡šÄeË}§O ‘ž?‹ûp8oX¬F¸ƒ„ÀNÇG¾†ŸdKme¢‰˜ PÝgp³1=ýh1ïÁ•k­;áÄ¡ª®K[±| Å_Ð"_¾ÕkêH&7âà lÝÒ¢y®®®+û²H ­D Â:¤º<»Æƒ= ]›Ç/‘yy¹ÚÚ•%’T ަ 8q»Ö,†·³ÙFÚèz9b0Ë5–8Iù…Œ«Š—øðYýÝ£ ;T„Ô%o6¤hÖ¬]âÑw¾ˆ{ð·bÅŠFMZù­…s8B ··w_{çð£K[ç EN¢Éƒ£ ÇŸøåG„ †ƒ€hø°ž7‰Eš\ÖÄ©%°ÜT•8cTòÙ¬³Í3á¶Ãq8l]¢9IO?/âNëá³éMwk©È`È"=>OO—Dþ€'8†´µ€ãõëש#àhô8&Äï]‚mÛÖ|l5¼å›cì šÏ¦¤®µ3ˆTî˜Ø„舠§b¶ìéj!ÑÉ»— ‰=ç}û÷቟}sÙ˜,Mþ„Óÿ–¥Ú4rÄ8±u7wÞ€wßò¢Ò9ã=§t´lœ¾s8ªªåéå<Õiðô‘7.?pîáHûV¸6~ÊKKç¬^½6%åJTÔvGWûãqûEgí8ÞäÕ׺ï¯ù¡p*¤YYˆS ·mÛìâ8)×Ïîö$~Aˆ7u¨ßÅËõuÚÛu¥«K¾zÚÚºIIi<‘í]ýñIXD4ñÓDÓ>6‡l çyyO²±%Ò¼2§oP­ fŸ›ëTn²a«¨@¡•(èFŒVuUà8y²;üƒ“(¢Øb*-Š|"z^"ò}˜ã«”¼Ü!åÔäXFúËb 
fÎ×€rwoD–É+{O9™’Lªq›¸/‰³4Ìîx”?ªqï{ä¾_@ég˜ÔARe,3-”Û˜8dQð*ð¿å­Ìœ“͈c},—¿™8;n|eXϵèeã ³ˆ£Zaˆo×EᜅÎ-•F‘?++foÊÈü‹³A]ç‚Æ“ÃáíÌùÛU-­¦&oH£FÞ¼y#0oþÈjjjÙÙÙ #3Çܹs‹SGH]“&MDÄùþý{Íš5srrªT©"åø¯ÔÁœ<ÿ   IÉ;wî\·nÝpHRÂ;xð`·KGOvY¤f;yÆaÛ·oOHH€á¾}û ñÄÿûßÿžþ Á˜Œ®:ŠD£Æ2ò0F޹k×®òÊ_JJ Øzzz’Ž ’‡$Y?!síQVä•Ì«t‰RNLLd™ùDZ”Yý Lë/k×®•(>óåk’е‚?ǯÜmØD÷@òƒ›ï~ååýîÔXèBˆ@v):pr+ñÔ(û¢SæÆÇRxx—Çø± #ݺjÖÌ™Âb½tCK·uìå{pKîï_µk3Ióæ…ã.Cí˜Ä<¶-l¦Ÿo9àïþ‹·¹Ujy´Zex’ ±;<'¹Ñc/YÚÛ¹ÈP½cמS¯¡àm^›wî7´îGéÚ¤°øôºÃè;zŠÀÈO^ÈR©äQ!ª•«BÌÄQÆ)ðÒe½Ü‹tÛ½¦™š‘ÅbU¢‡ôqññheˆ? O`¸Å`ç_99U È1ÇÎðG¯zþç\²|¥Ã†ñ±01Œ ä ŒÙucNnn•‚eœ{œä!OXšÇ’nj6kÉüÑ$ç/'UÕ ñ-­d±XôË…ÃÙF…ó\Ûb<ÍÈÖÕ,²,õÑWŽ^ƒ~Ñéð<§0òÆFƦ»Æ9&¦t<üœO¸¥‘9Ã#cݦ-“Æ‘üaJò‚Õ´ÅÚ?“çšJñáßÞ njF6b}Õo8,5ãDqd:O’Ô‹‚÷3ED#»’k~,\ØÜ4ºô!ƒöSuÚǔ蟠ÅïP¸ï->ØÎÖ/ŽŠ'Æó.£@‰ù#È#RÑ “—•ý«šZÕÛwuêUX‰úšj£×¬Ûê1)éÌò*m&ò'µlùŠ>£ˆå°c×_Þ<±Ë¢-ç5;êŽ1$¦Í–F]áØ©H?G’oä‡éc»5G-gMp¯ÅÑ8÷ô×Ò¡u⯠Ø4£O¶P`8™Ð£‰[äCHv× Kù‘•]£šÚ†-;Llÿ#_ù¯QƒØŽÒõ²áꆳ—&t7[3ËmN ȃÁJw2ÁÍ+½ †/‚4qUPi›¿*–<'çö k%‹Ê)ܦ$Á"vªÄ7¾!uwá•wIGùÔô[OZ•u3XàüqÞoîèÈCù÷¢gÙ¦¾ƒT;¸rØ÷ío‘c‘åñùó•*)ý²»ýGRb¦Ì™å£©vŠz¥ø°{~ÿL»}ð”IÇ‚^´›òjÏhHGòà¨V­:yy2rD¨ÇÂd8¿z…5¡;X îR¤¢û„¦n? 
Ó`Û¦™'·Ûéi–ˆþ"CëLPÊ-IßWn¸ù!ñFxž?¬½è#÷§w<Ô¥z‚7¹Ï™3§0¾R[">‡{ׂἑ'ŸmÝ¥g-"ÙK¨à¸Ddŵ¿o¬­¢7¥ü¾•'Yçaƒ¹ š"÷¥c‰Ñf?¢o7¾ð«oê¹#úÝúyp~èj,¶þ7áKSfüYÚ;_ˆ‰l6íÜóåݨ.¹Î"cLkŽ!«ŒËŸdïë¯d;†>¦ÝA ;ãóƒˆÖJ’ŽìÖ“JƒUà?Ð`MËèÿ‰Îýê±ýz£eÉ—¼¸§±Ÿk9bµ AE—)úÆ= ±ûÓĭUE”ž®"ŠŽÙ‘ÍÛÀ¶©£|?S(©=»v.òD±;`¸",òDy2+p£šÔùé==†» ‹9^*ò„òÇÕPüö²’Ë“ïûÑ5ÀLÆä‰~Nþqn ÎÓ,&1‰1µŠÊß+Õ×aßs’[pÈ’Þ£&3‰Ì¼À:Õþ>˪$6æô)^AÁÁ¶.^ ­Xüj(ïæ@GÃ.]ã ¥¼÷8-¯&×&Ðó v¶Vj W ­Ý°Ñl#iR5¯¶zMaÉt V.…,YÞËyi_OUØR|?_bƤ€Åüìï†: DøÆÝû•ê6+èAOÔŸ?ZÍÕôj 5ë#ºÚqmâ4UËרY?¦?¹ÆpÙªP,ˆœßÙúÚuŠÙDËuþ³m‹æ…µc+j —{‘9*ñ¯§¯Ï4æÅÀ,2IÙ ]áwf=AäQð˜èÊ0wDæÏU¶ø+iT©R…ù'§£GöíÛ·¼?r…âùN>D~œ*-þ¤ÞŠVÁù ÷ññaùüùó¥UNY‘WÑø{úôéÛ·oµ´´ÄÆÌÈÈÈÎÎÆûä ¼ÇEÁŸÐÚaM“4•#gò l¿ÿþ·Ö2äuëÖíܹsqqqvvB?,R¦Kx› Å®ò‡É[¾|ù´iÄXÈÃJ&æZH±yóæ±cÇR䙘˜$''˼HK—.Å[“Jbç_Ål?1y6mÚÄ;ûûS_ŒŒ®]»F†É>|8e2gôèÑ<{!ï¤T$ÌŸ²²²‚?é¡««K7º“’’Ò¯_¿#GŽÐ#¬^½š"/((ˆN4}Æ ;qBÀ׿֭[Ã]Ô®A8§v0aéWŒÿ¤DÛ¶mÕÕÕñ60:y …wïÞ¥“‡#àÙ]‡A!µ%33~>xðç÷ïß7hP8Ù6jÔ¨G`Qäa VŒß¥GZZZóæÜ)º{÷î!AfšÌIà>ÉØØ t„ôbÔ&SPªW¯þíÛ·Þ½{GFFNŸ>JçôéÓt}^ èh}}}©½¸—.]b¸SàŸæ:3èÒà„"·nݺçÏŸƒ*?»wïNUhݺu±\RÖH¿ÿ¾œ"¼~¬š2eʸq„eæãÇó«»Ôf]€©©)’BßhHíG„8KùÞ$€j`eeEé#ôK]ºtÁÍ&ƤI“¨sÑÒÀcæÈ4hЄŧÈCäÆkà^XLjc0¼4˜¼}ûö :TjRåž? 
O__ŸnC0;;̤“'zzz7oÞÈ õòò›|ôÉ‘#G2ß=JÁÐÐÆ‘tm³D‹ŠøöœT(þàÁ¤hÍú÷'V¯HA k×®ÔÜ©¤Eenp¾"`ÌKÿéããC枃简€ž†7öèу'Dà½üq9A*ö^þóæÍ˜¦ø›2ÅuÕªqnÅä ÷S©}¦Š½±’‡áLÊPß®ø ÝÿàäÆÚã6ß¹~ý)*â”3—Õ¼5BBÀf³š ŠJ^çÀü–Ì«a®¡1Ç çÙ¿_›ý©úpVí¤i?ñd¼þ¤ 7ëð„žÊÒÄûßüÇq`½ÃÙcþb&OüÒ¥ánPÛô"¼«ýL@Õ-±ïJ¤\¹k÷öÆ^SB¹Ÿ:~THuô霧òÜ¥®®þíÛ7eÇb29ÀŽJâéÊ¡þISµ¤~ëDc̘1§Nzýúµ÷JjBQÒ,—ß$òÉJ¡uëÖÒåxýúuggiö§_»vMºsæ_ðËå÷éÜ—þþý[ºoݺ%ÝFFFÒmcØÎ—×öS:ŸžRû=Eä~3ÍâtSÿ¸þRê¨UKâ9ŠãÇ‹vÓ"°i‘Ô¤œ‚?F³¢¤ä!©–{þ@;‘_ÉÅÚ¿>}út=¤ËˆrÉ(–6þÅÚŠöS(0y [>xð€>1bD«V­x"y¹¹¹Ò­iòV®\éíí-ðjÆ ±«±ä©©©egg+ø+:y:::Ïž=ããååõøñãcÇŽQu-)èäuéÒåÒ¥KT‹ÇŸ M³fÍÂÃÃyÂeH^ÅìÿxÈÛ½{·­­­ººzhh¨°ºÆÍ#D ïÞ££}ûö”çFDúޏ|ùrÇŽGŽéë+Ø9ö ©h?‹ hH©s;;»¸¸8aÍ#ý'ö;Lµu<žZð~Z©Ç… þ¤„0òø•x ÏÃÃjë@dy¼>*Æ¥ gÕ¨À…††žþX³f ý*Ež.Ñüɘ<úŽÎÊ•+#sWeÊË¿>~Çä¹»»¯]»–áÆAEûY†€3JêhI†mW™‚0¯ÿ:@Ï>Þ2Hî£E1ÿ‰J‹<Œ(;µ+É(Œð|À†Ÿ›fm¥†Np‹wïªm®h?Ë(ã² ;ÑœB ©týŠþO‚Í÷ŸElVŽÚùÙÑAb[„ÃÍ»ógÏŸÿnÖl²ÔÅÐ×t…"´A¨N¿‘$»Å%L0©òw.Æ« »­©©«.ÿQþV¯Y8Ù£>&‡ÚE|û–¯®>‘Éír?*«ÆaòÍšÁÀïØr•¢0 —Ûh´ŸgÖž¾­—Y‡&ÚÄâ‰9Ni˜µ8yíO¸ÈÕª4þ¾'°Ô­xÿcRî)-;ض}1Ç®®®DÒ ž OÈ•)(ì;’»œÀÑÍ… ´hviŽšM`62ôdÆŸ*wê(%ØÆØ˜{e5…^ïø’•{}{ >N1‡Y 2çngA¤k(•rŸ˨bZíµ9òBB˜…¥¨‡ Œ=yQ·#ͽÖï y¹uTjüï\Få©ã—Eö9ªìxb„CØLßü´Ÿ&3OqȾ ÷ÕvUÖ Ò® &˜¼´õlÂ5ž‡Îb¹­¸ê­O|^ôšç:O:¢`a)àK}Ppˆ­K!©EÈ#&~,T*£‡;†usÞ‹®§¶;BL)¤™†Ð4Iøãó?¦B{eø_"ê\­hä2 ŽYܾ€pÝ:Z¿ œ|à„;‡e8…й}ÕªS¦V³³«¨U~|#]«q zÄŒZ^Þn}¦>|–WUCµ2wÄF'OhŸúã×¥³ÇS3¢}êЉån+˜øG”Ÿÿ±2…zÄ£…Ó3 †¬,n…\Ï«bŽ8/·‡· d‘¢L‘G¨”­t$¿·s?T8¨Ð þÑB$åz¸Ü/o’‘–E-VEb1?Ÿ£¤ÄB4ùí(òág ²áÄ\¨ÛÐ@†Ä ›Û±/®ƒ…˜•Nñ[CýgúIƇjKµÜæð¾2â™õF¡[;ˆŽúú#sƒåŽ?%%7¬€¬\|–ª÷AƒÝXnáäÏØØ?ƒ ½%ij’Ôp*Þ–”ÞL?$¹ü[ÅndކF,TðÍÙ°e¹–Kï™Ýiç½ÐLn{5xð$Ñ7†„¼óõa¢ÌUžO¡,¶ç£“týZÅhH]…Ä×¾¯ïœûBÛ´®ZêäýËóg®W®êܹp!ÓÒ¥f̘Íðæ6­½x‚ÕhÑr„üãßÿìÜyýgÙjI嘋-Kç)ßÊ7”Û Ë72LëÛ7Ο<ñÑêÔ%´ÙÌOL߈ÿù‹øÈÕ«#UÁÃ\&·j×’òva7оW­*ªR…Å<õšˆ¹–«VÍgnŠ-7O‚½6¿sÄG^(Ìž “Û‹™»÷òXl. 
= 4ü­µ×seü›Íf{]ŒóíZ-Ʋå¾Ó§…TÔö›xbb¯I9 ¨Š% س8œj…6¹ßŒÖ$}õ0Õ€swqé™Ùû3„Í¿%¾wï>ÅÂË—T`þ\¹Öº³fñäS6ƒ¦§Ÿg³­à(YÿWmäN ]t¶åØÕÕõöVÂ;©OS1…À¯ex¬Ð2[Ñ× B^¦}VQ«Eaøê1I!//wغ+±^· pcßWNRÍ ¶(‰H¥¿zTT¨»²o.Pb‹È£Îmõ„co‘OÊÇ_ÂÁõ]ó¦$lŸ»®i-îÍ¿2_ïÔ¬ŠikFä˜ØpÓn^;ýù9v³n$ÁDÑ—&Z9¸^ÜŽ]kÁ- s‹p®*¯'[òvlŽJŸˆ6-FÏ“ØÎ5·1l“7†1ZÒ¸ó0·ÎLk‰²¹˜,’<¢=ç™ û–lMVÚé%02b$ô…-YWæ«u ` ZN¢_õ:Ö“¨ÛggÕë¶PpY…´oLª˜ øíÂU0pCõ3˜¶ŸÔw¢\rUÄ—k±þ’÷Äá¡=ê`'KÂÈS tæ_è6“)h00;Hÿ)lø,YÃáÌßn‹ÈÉíÌsß´iv (é½<™ò˜'þ퉰d篡ċ/fèf"ïÚµ‹¾Ý¹lbüøñÌù“^°”MM—,ššš ù@L&ü§TØÿ Î5jÔ`î¨N~íg‰uÅéÌ#3oÐRRRš5k&u©rrr6lØ $ÕJ–?~Hïß¿e^åõû_—.]¤¾÷àÁƒÝ»wç1ùÃØ)®Ô½ Øõë×gh¸° ñ÷ìÙ3†‘wîÜ)uF¼víÚ`«Ì„cÙ²eRóÇ^^^Ì­N–!þ™óWœÚÉÈÈpuu]´h‘·¿yó¦äʦ««Û¿ÊÎP™à¹±*æ6nëÕ«‡ êàÝëÌ S·n]ìå]*pèСûöícr»ó?¢qöìYh±ED˜6m´RX(-qþ¤³4&”5$ ïõë×7fx#w÷îÝvíÚakKk×®uww_¸˜‹øúõ«†††À»@Wzþü¹£££¤ä-_¾ˆyÿþ½0ò:tèpûöíŽ;JmÊ©üé/” aybÔÐ`"š5ø‡"Òlè¢-Z«È1yGíÛ·/½A™òfÏž%iQ<86hЀ®UzzzbðW¯^òPñìp• þ˜ËÔ8< LÞÈ‘#é­"¦¤ ‘_.qeÊêŽ ž?ÞÊÊ ÿ<~ü8ð7}útÐSzöì äaU˨hX[[Ÿ9s†&ÂH:³^½zyxx ÒtWRRÒ’%K0y€N:•õñlNØUæ  C-@Œ A°`TŒ›DC@êÜÌÌ ªþÀPé¸r<{{ûÎ;ƒ¸ÐUAh}}}DZ½sçΨQ£îß¿o€ºº:–¶½{÷N:>Lï;¥s›]&øòø;תUëË—/Lnf¢Åßß?((ˆJÙ˜uê”ÍfóòZ¶l9aÂê§ŠŠ %7NNN µÀbttôÆëÔ© }ÿþýôéÓ”¥­ùóç7mÚTYYùçÏŸ#FŒ °µµ…KÃHTùžÑk¿~ýŽ9‚Â33$oÍš5ÂL´yÇÅÐ샾ðñãÇx*@àË*7S\ríÿ(òñ•Ò“É-ÐâþŸjÙÂ&ä²³³©iIºXß¼ySOXª@‘Ê Tš4iBÝ{ýúu‡ §NêÝ»÷Ë—/µµµá'G]‚¶qÅŠ˜<##£k×® ì,ð 6Tîù“tÖ"fSésʘ¼Ý»wCˆÉãWvö¾yyy˜HÐ\àˆÉãG $Ž’&¯ø«V­ZVV–D·€Â cI3Âß1’““MLLDO3m ZXXHç–§t!oþ€¼+W®€jÇ\maHt“TK‹ Ìè $ïóçϵk׆¬[m@žèÄ·oߪ¦‚?n³Ã?‰,ËA¯½ ¥. 
‹‰É«Y³&d;wî,›ä•“&M*‰dñàF ýŒ1t'Wœ1sæÌÅ‹ÿ+ú‹¤`H -@eèæ †:L´eþ !yèŸÝ¿9`Àæ>Úu˜‡ññãGéü*øã‚>¤“?öíÛçîî®à¯¼BVäÉ•¿§éT*\ þò楽»[»q‹YÿÂUhÂLwö(!KúŒšÌ0þš›»öÉ$òÛO_?þ©\8{pµe1Ó\½~£¹#óG++ü†¾*Yǯ­g&Ð&7ÒÉCF„=*\¥“':>\¢W4ytãò0[kF•é!ì¶…˜?M€p:y8äòѽ“ƺ”iþDXYç©1¯§ÿ4dWgžò©ë{¶b¹¥A— à`Ú"UÙøÍÃF5ÄÆìÒwXð’¥~>3ŠËðÎ9ù)°§ó¦xí7u¤&8$Ú²¶h¶ç,âC¨¾¦Z¡¹î»÷õÛµ“%ËWÚ8Lv¯R%Þ嚫֬³²-,~=-m†L#¾£ó§¸uïAǶÄ(%xé²ÞNB{2%ee†¹÷vö™üëÕa±&"´Ñ–æÑ&¥ˆÚw?–º¶Kkê…<ŽDÂwûÈŠ6Õóó~Ó r³êrW⊠cwÜ‘vý¨Ÿ"ÈÃX°paÀl¦&•¾ÿȪYƒ0ù–š‘ÅbU“S§)·ÞĨ!QÑí1É=7/OUªÙ×"ü=Þ"Là̪AÖSÀ‰j‡™¼‚¸:ô•æÊ+$ÊlS#›ø_ªoIû ³wü»÷îíÄ|·6µÿ% jH|H}qUm¢ß  Žßœûµ”èSHβ·EFyNr#°’~ß5Ó.ßð=™±f£ŸYOý†]z†Ý2Ð_>œ„ü¸\CÏÏ3`áø™³u¹íºöâÉýÎ'äÜ^Í\-Ú0Ç¢Ï|2ÚW}Me«B§yy‹?,[ƒ´í9º½´ÍÒùx2mÉëAfMëñÌ12ûÆrËÈ_ô1*pêèˆ>©žÍÊ¥dd þL;›:Mpî |D|«ÅÄ´ GcéXr5Û–Uzþ…Mׂ‚ÈdÖ6©Ä-›È@©ÊÂT•ôl¤Uuzíäî«©KÛ¯ÍÝ1l|è æògk¦] ݧ÷™‡fóŒªç|üYEÔ¬Ò̢䥞9¨o=PDüq4òî§\hclÁ¼À""P®üãÝï" ¢Yä=½uE·cç’ü7/î]h6ŽøvƒÉ£¶V2Ô"Œ—išÚÍÅFkÞ´‰ˆŠ;³w“žW‘qÞh‡á"⟈Z¯7½p§ùȽDDþ_Í"?ß¿zÚ ‰®Ø·þ_3Q˜ÝázÞÜ%3CzYŠˆÙB]¦ü­të35â89<˜B’w›]ô™ùh™Ó3 óècÜþÀÙ¤¦mx—2°8ù5«ó¼ÚŸóbìŽi^“Š‚Àø F ‹œýæa5#}zH¯Îí%\kÒ²-_ÿvÔ¬Æ$Í„è­3¼§0Ëý‘ZC=Yòç~Ì;ñ()…çK¥ÈfPwS¬Oc•ì}Ú^];‹hÎß~¦QŸØP|åØ~×ÑÎz¤f("~ÒÓU«ƒåsû6{{z3#Sõx"r-Ñí5Ôç9À’0F³ûÀÑÖ&„Î’ùò‘µ±ž¨ßy®QhK“ã÷Nã¢ç1I`Ìüüü;s¹ÝùöÕ3}}P1È“÷üuá`¨¡˜ÎÀªw#’ÞhF›ZLu¹þpô<Å…©L¯@g†ƒ  ‰«e«öÜQ¬ÞQ“aJJJ…¹ûú”§ùë2…K—.™™™•VîUªT¡/WðÇ…––ÖÛ·o™Äœ={ö¹sçJ«œ²"¯¢ñÇ|¡¿¿¿× Dfff:uüñ¢mÛ¶÷îÝ4hÐÄFöððxôè‘§§gXX˜< yæÌkkë’ÿ•Sy?~<þüöíÛ---›6m*,fbbâ˜1cZµjuøðaj͵| [ò*ZûY¯^½¯_¿FGGcò¨Í<èJÂ×—˜ÍÁ ´cbb† R¢e“í²ÁЬÚÛÛ»ººFDDy¹¹¹ªªª Ž,;vófÂ&¯víÚŸ?–aIðö||, ò*ìøȃ#µ] È;qâÄñãÇW®\9eÊ8âé$ ¯eË–ÐRš>Ñ ‘ÒDsïÞ½!#b(>ÛVÎÐÅ?ĉ —mÛ¶!Úvµ   P8ñ¾ÙU«VÑ÷çyÐx‚p\»vZrwÇS066†Á¿Ù¬}ûö…††ÂP’ ¡ö×ðvÎ’F¹ç”˜R9r$eM>Õ´OŸ>0>‘ôF‰l%”?þ`ÌW½zu)î’.;lßQ:/¥x"MR¸¨Èö¯¥k ™o˜¦£C‡¥2/Q¡ä/00PØOan;xn™7oÞÊ•+yâ0¼—?DàüwA¦ü<·ÿøÁY±b>O555±7 áïÏs¤,Æ4ÿb ÖÌ ru?DwËÊÐE«Øh ï•UŽñ÷¯€&%°Àüü½þ --{ÌØ»¤¿&%Ñ]H_„ŽÂùL¤€0µE¨T“ÀsÉs»1¼ëáÔŠAã(8Ê”a½œûçiˆ)-,‰öûn§­ÿä¸ü@Ô´AÀf¡“,˜¹”oÈ®5‹ô])Ôaª8þª£§Iþð1ð_%Ï÷zËt›'‰''Çr¤¸W2s¿ÏçUê²$!sVMÉKjˆÝ-âL]È“ XÂrbJ¨9âìâ ñ;<(1ó–Ø.û³¢°ÝîÐû² ¯†Í{@3ž šÜ(a¬ÌËéÀõÄÛ7=ý¨èì†7‰‰¹)ì}ÀÖšõ0~~MÒc-v­¼ª«ðr^£‹”Àaò·Ú“8êÐaÊn'Ço}á"Ù,œ¤[à¥FÞ÷çs—Ź 뀔tžÁ@À@åÁ1-Ì\ÿEnþï¤ß?¿ÇÈ~ò§Ä½„ÿÔ¹N@žç ¿vÿ¾å®Js 
/|Mi{sudjbQµ®ExÄ Êékm±Qœ_ô'–k$Q'Ù™Õê´B(÷Ù³ç::-EK<ªle’è¡û|€½ÓáÑý{ôz–ñ!Õ·/¯˜w&^þ¨îÍ¡Wækt X{7Ç£=ñ>ú(äY „ªq+N+þü }Q,ÕÊÑÉÌ AA÷¸qãjÕªeee•;vÌš5‹ºª®®>vìØ²0÷ûoÙ_Âäûù 6†·«ã9Ê]m‹- îÁFäl»BÁÂÈC¤Å|B÷–Ê­)’¼#GŽôëǵÁ]ü*ü¯HLžÌ-U(ø“ݺu“Î Ež··7ÿ²`rL€2"ÅÄ"7„þ÷ßÅ)@غ‹ÁNb£%Ù=aÜ`jjJÙ‘5kÖìÒ¥‹ÔY_¹÷˜ y“~#ò'Òmj¡K04ÂÒÝ{,z÷@7„+áSâÝGôëŽ~§³©wü W­¬à}Z˜ú.]¿!;5#ûO>GY‰¥àÒµœô´øe òÃH²ŠIêÂÛ2vE@žûCxªH÷ÿ0œü¯ßÂß¾‹PÄÛ7µIl‡úä‰e6½ˆ=õkÖ|ôð˜%])(.w² GEέŠÅ—zš6}øÍúõýÇ©'üñ ±†÷ñµ]þ!W nG“‡‹õ dcÄçñ®~Rßçɦ<ïÖá g_EÝ?¾«M»ÆµÔ«§¼orÿÂá^N«ÉåUL`<ºç1žhr[‚-Kò„†»J}ûx×ÚÛ¶-wq™&iij43‡c{M•ö®„³o‚m^»ùI0~ ²Bˆkjq¯o¯¡ÁÄ`V㉴\¶¼Ev¸‘Ë…⮊¢ðæ­H½ŽB¯º¸Ô›ýšˆÍ] aå¸Ôú…ÐÏ<s=s‚I}ÍÁ‘³ÿh›uÑ×ì%¢ñäk?ßÅr8çY¬~Π“È}½^•µŠ•}+L­£gêÓL22«œ6 ‹õÜüoÁn9÷y·sÁÏœçîÇ{—^G1êvï1¢ˆ'±Ï?²¯?~Eù#Ääñê/¤×§ª*È#Cbë+âh`IŒø£Èãi!UŠ\åžZÔA¨¯¿žys±¿íËMÇgF t@wŒ#Ü–dý̬V‘}ñì<¤F΀޻—ÞBGE¥*á¼iÄÂ}Ú¢  ¾c¼©†Ç™$?¶$}_=¸!&rDðeÎßͬ ó/¢Q›o‰å‹ôl­pj*úǧÍð÷bß6­¡.dßáBŒÔÕYþþLJ@y‚cZs åÌœ(*¢øÛqáÉ(Ëœ?—‘’i¹ÕS„âÞÚ5í<îàzEÃÊbÍÃ4Óaä(¬)IÙ…SGD˜–ü2aÓbsŠÄMbùòà˜²t®±ßÉÈÈMÎÁ™˜B//¯U«V•w.Ûº{pÜ‘ó„mk‚¯©‰&]zDvoŸGŒ¨-"„ ¾•¤˜.ÄÅ´Ÿ¹9?Y6¡¹ÛÑ}ÂðáQßv[’!´jˆ“S¹¥Ð•RAwlpá¹v÷Žn»ö¢n1ÂO„›ÿ§š’|{$¡¹*,Ä¡Ð=36ê½c‚å´ñ¼§Û¶­€u+Ë–¿Ÿ>ÍU¢7€w6NÙAÎÏò/ê/mÛZ#dŸ®TD)p.ÁÈÛ5"|±«[¡š“e¯V­¶üŸåßÕ?•”ÜŠÕ »±£V­tžB±~¢|CY±ZJMüÊlÉ¥3'ô½é2.*•¸¬RŽˆ±;_ÎÕÅÅðºõ8*ÊO¥K¯è.þã0dP¡½( @YîýrXU­Š¯£®´`M½hÉ᜗îvºA87µî“tæ¶h!¦¥71xùê"™µÁ’ÃÏ“‹RZÎ²Öæ)ù<4/™¥¬Uô!mô²ð5²ê±û¯yÝgî‚óiœöj%¦|²X¬·_²ØDkmYÇ çG©ylj‰ õs¯S£a‘oàçÔ‹2¬Ã˜Èø!\Ó–ééç áL¾~ñâ…9Ƭ2õ6Þ•8ËÕ†8û{ùÎß.TÉ]c9ó؄֔oò+LJôc{Rª‰Ül¤ªVÞ¤>”žt¼Ÿ '^qÚ+ƒ@Z-Š·Æ¯›ÍÔâ®äâ—ClKÙ›×—H|±Fúc£Š^ï@tbÝ`Ü–#.Äþ»€(rtñ+‚.Tزi ‚_57·0Oa*{TwÊæÚse¥güdkVxhÖ­Óóy´ñˆ2Ð,¸Äð™­YûÁKNkíÂpѹôw-0+_©KûJJn¬.û—†zˆ^Ý:ùp“Ñq;/ù¹k2ˆ_áã¤sÎdæYçì¤"ŒÔ©*mŽDjïnFDæ v¯³+2c€ö5·¦ž'º7a¥¥sš“U—ÇQ¥gÍsÌÍû[¼Gfí=ñܼ}SâÇ—S¨VÏmN,ëpÎÌæ¬(2ý#—fÇé:ùRQež,}êekÏ}Ï¿¹ àµ”X³c ~U, ìJÇyG9óhé—šK$O¦“G$£5N‹.|šeQW FZ èBÛÜma*âT ÚpúAæ[s# äåsŠ€šˆ{ÀQ'ÉŽÆz‹ñkõýÚìšF E”¿dñã:t¶Ñ£WG@u¯÷-¯À‰OaáórYôÅ̰¡žëJ6ëBQéž2ê~ Íj@'rÙ–ÌL9`Ù#^‰çÄn@ÿ3(] Qó šµ¼Q¹šD²Çe6¬é'yß7lR»ÄÆ~eÑ®,{xE¾QU5Á"—²ÖÌØðøíÕÄ>”poÚ+äl½P먂¼‘’I³'NX¶ÝIZ&v`MEXÒCs ·µÛÆÏávVéé„Û¬3ÜE÷C,:ž¾ÞÌ«ª"zù .A¯(£¦„Ö ×0¤@o¬®âËá)|oMeÁ¡"_7¡NZÛÙ²àgYuê¨HïÞ±Ë 
Î9Å{dÁfâÃhy¸Ki€ÍöNO_éééÖ®:w@x§’M¯æ_ŽlÜ»ZIÑõñ‰_æaVݸ›ºlc1óÏ^‡‹í,æÝÃ??¤„Ö7öZ“ôÑݤ.ÄY1¤ÉÔ˜WYNµÂýÙ,ñ9ÕH55Χ—Ý’>»ž„Œhî¬ÃŠ|ŽŽ¾xÞ·i3±£NÇžw²Ñ£àY»DssÿÀõŒÐÍÉyÛõO.FJ_s*Ó-»g•Vã¶ðKTùI\ÿg§6f˜Ô-¥´ úïèèˆv&äϲ ̨åþMAï·ÏbaWîw’ãGîEj̹ç{oÝ 8ÿre>š7ˤJgOÎ¯Ž¬ªõÜIáñŽ~Ù%ئmw–=DzÀ6(ä8'„H3dDÈQ&¥ÄíVû[öÐ) $’êx‹jÕHÕ¥Ì5Cs ›ºã¥:ÃßÌã#¼m3ÒyËO?*P‘¾gJ‚8µ@ ’ wEÄohø›¡kßÒ»;,K,–þùõÒl*21PÈÍ-2·õ;?ƒíw²GµWó=Y4X OÛlL¾§`W|c?e-UþSÃl!‡³°0°fOúÔ‹ùÜ»œ¹üó1È™< )ºíŒº:NˆæÉdÑ$R©R%I'u.ébW€Ø”µ´´Ò¡©—¬¤K^Þ"z_uq úÁ™åË’m¾©•••0£L¢«Kìã(++çååŸÁò½êdŽÕ«Wÿùó§lSÆoŒÌm÷‚ì¡RÚ}×¶m[©›•âפœ3=þ| ¥üçÏŸ’éýÊà5ªV­šÌe§|éÒ%333™'kii™ ÿººwï^@@@ll,œÈ™#ssó‹/Ê9S}}ýÔÔÔ’H¹$•Ïò†þýû—PÊ2—=Œû÷ï—V]ä={—[¦ÙÙÙƒ–ó“véÒ¥$dOæÚP¹¿={ö”£Ò ..îÀƒ ’sÖŸ>}ª\¹2±ö’†ššš···~ü8]ööîÝ;iÒ$þ±¨ÍÚÚÚ<Z±ìa…–.á2<Å‘#Gbcc©È">>ÞÖ–0¼Çãylܸqð2À³`Õq¹{÷.èô’ƒB.çÏ0vêdïñãÇ-Z´(N"ÅôÜÁMMÍŒŒ ~ÙàdÙ«S§4ÕX!}¸gÏž ´ýô8tÙ›”>i1pà@бŽÕ¨Q#ž¦Áݽp¿ ´\*䨱cëÖ­™±™³³3Œ©°[ð3f@!½¼¼@l¨±Ãž={à‰ S…ž®R_eÌIˆx~·å ðÖ=< ,pﺪªª óuppعs'"¿;‰ˆÚQãÆy>Lá^7„„ØŒ=z„e‘ÎpbggwàÀ*h‰ <0¶Áƒ ¸³‚*=·’ùêÕ+(gÕªU¡çÏŸ²·xñâ™3gÒ“JKK;{–Øs³˜u)''úF¸zôèQB333,{¸¿@èO ãí$¨¨¨(777è–E‹_yG…ýî'Lö@.dO†yáÉ,{<€‘¨CtƵ#þ‰~hii½}ûNø'ôA¥»•dzGçýAn¡‹›5kÖ®]»x ÑÇøÞP] ¦¤¤P²!0$KMM½zõªÀ¢‚̃ÆH ÕPÑo€ “ÓK+Ž$^š6mèÏÌ“†BüJÐ tìXèV ¤Ž®ÎÉÐóðL6À(ÆB”8I”š@Ùƒ„Ëô]¦¦¦”ÚÌó6ƒì-Z´èÁƒ­[·¦ŸSð$ÁŸ %{ÙÙÙ'Nœ ¼Üƒ¸ò(xðö3Ôú¨i'z[@×ÉŒŒ®]»&Qñ¯Ú)¡5 ñ+.@ö@jÞ¼9œ7hÐàýû÷²Mt$þFš’=é ¼ßÔÏmÛ¶Q&þ*…Ç<ú6Ò¥ú(<„ã^&ÂÓûÁCÑ—1`ýV Ä®¥6íDARÙºìá/æ ñ++‚±øÉ\ööíÛ'LA*è²$hG '§¾JÁБ.¢u9=žø…ô++¿|ų̀x|È {#GŽ=–8qâÄõë×—Ý”ìµlÙ²äV«(Ä)†Žhk¯dˆ¡C‡ÂÆEñññ²M”&L˜°aÆ•+Wz{{ƒì!rËþX/¢Gˆ/lÿ”žžÞ—/_ùÑBŠ¢òÈ ˜²WœÅ+<Tp¬U«~:…ø•&d.{W®\‘¹ì¡‚¯ð {ÿûßÿžx¢×ú²ï÷¾e%]»þ'¿ÒÃg/[éhË0å›ÓÒž>Sª¤db¤§Y¯Ž¬’½œz7ûWÎÙËW»wé$“åä&$_ÿ•“ÝL»‰¬jøþÓWÓÒT”•»v2P¯Y]!~%… ÷_Ö¬]ŸúYK»uüu‹ÃÍw¿èÑšªåk0£áèù$­VzôÍVÜ¥÷ô4o_:íÏF(»0ÍK‡w¹«¿ââå»Ì/¤ñlºì‰À‹l% áÒîÆ ‹¼dyoçI<²'tVì~Wô2uÓµ#æ0ÜUTö„¢Ûc˜§c‘=á°"S~~ïºuWÑÒ3¿}È#©ÓeOžeUBY¿.Ù=iœPs!KWôršÈ#{ÂÐÑ¢“ÀqŠÈžp˜õ ñ\¹8b€ME?CCÃë×åí3dË®hƒî}¥¸ÑlCjF–¾f5,‚ìI£ã½ûu"r­ïŒéb£ìI—þ›[—ûõ²x5þ|›Y{Áƒfm !eaoöÆ»;ÙHc­K¿©é?õÙÕ>Èžt5p2jƒÏt^j‡Î\lÂL’yк³¹ˆg—¯øÑ¼I;³X‘äªbºã{+¦ .åË–Xˆï ,u2©w»XšT:DHN«j$ »¥ õËË„¾¦ZjFöÇ+;ëuv€sÔÖÝãîH€pzL«Òék÷{µáoA¥F/'÷ —¯Xté,º…–:v¡/פÏáH'{ô‚ñ¿…‹‚‚úŽ‘ÞX 
«’Òé«÷ztj+ðqœpùjj—Núôg—NöD?{©õ~”¨'†®®†ðóqŠ)ºí~ž@.jZ ‰eŠ’§Ð|nrЦ\\ðË…×W÷Ö3l òÖYÍó]:âhèk XxU·Q3úÏ«w©ÔkÂçüñÃD™Õšw³hͤT5uÚ ìmÙaj[Dá<è°Õ€¢ãFÖW(­ˆô“î¿4ï ËxâòM¶n+|þ0ñPBÒ#·3pK$,¼oW³Á–гjõš)“‹ø æÊÞŸa!¡ž³$[PŽs¯ÛX§èlÍ=5MüàHYõ.|ö7çÕПÂ}v²n|C‚ÖÒ¨5nEÿ mhƒÆ:˜&z‚•s͆Æ—ªøUiŽ7>‚x<ŽX¹r%œ¤ÄoŸº?á†fÌé"&Hß4 ¸Îá@ vqýQà«"–#[OêgI¿Lî\ƒžW²±ÿÆÔŒKã¿Íx§¥Ùûf'&XØ9ñDðôkÌAŽ“0Ocƒ/¯²¾P£“ß×ËJj•kèù1/Ûïì"VñŸîfå¼’½pا†Ñ<¢´šjaï“dú}× >ÓLê²|®/]Šj€Q²PA¿7¬˜[¿wܾq,)5aÚ„¦ÀyhH 2…ÄÇeÿ˜[˜rµBÄ^Emëš[ל‹œ]§ÇBBX§!‰ 9PG–†àÒzš¢Ñ±…ɾz“Ѥ×wÉ•K Vö„øy» 3·îå=zX*Ù8FÙ© œ;Ç!“åÞ;vÕµÍÞ͹¢¨AdÊ_º¤„CRpœ¶÷áòa­¦Å€ŸsÂÚ è†ÅøÇÍ` ?8.']!ª$Sk#Òõ~Z”Yâ¶®-{6ÈØvÔQÛ­hxtÇ£7â@~“Æô|n>~*ÿ%™€jé/}by´¯Škê}l+5sm–òQ¿¡MÒÇ9[ŸžŠÊMÍ(´­@É`¸Ñ÷3y}h¤&qmò %Óßìš3¡%׳än„×íú_jÑÇÚQ&ÊŒjpϳé™ÒsÔ¶n“S­°˜yJ à„õ׆cÜ\zi©[(Ù¸Žqyü½è@€LÐ1.›:'z<²„“ÍSŒpàËû™Pe"_NÞo„ uÅ{ÉçÚšt£t$O蕆»PÜPF+ZNM¶f©‹ŸPE”êèG9¥â»çåÍËÚz…ûßöm _<Ó×fÈìáT¸WØö‡yÕ •L;·ßÍ3êTTsxÏñ¥ ±;ô&¹Q1UUT~|~W£v±Ìo¶×Tvi`¯îÅùõ²âtfWü”‡Ùñjn/R›ê åj†"UåK‡vê¹Nê©©ý|ý¶ºzmIËfgZ! nSÛ¸HŽƒl‹ÿìv}û”Eñ£°|Û8¦ÛÐåζœ»s eÕ4˜sÙ—º Çì[a±Y!3eþ {[oݹGßz Õ† uq£·pð~P穱<­rÒ‘Ýž4ÙÃ0oÓøÒ£ôj굤+’–Êo%%Qƒxâç]1CP)êÓssÔ.ÃvÒ¥yõx´»+ï'®-µ.§½W«^Sº4›TÍ£w}%]«¥*~9 ôNoÚ’àdîȈ¶›ƒStÙQÒ D,œ%ô!¶„µµŽfKôŒJBü£ˆ^îÚëïÊ4«* k\OÈW)³–ìŒ÷ßs$[!‘ñàzŸn]ùß Y9qºQGÉü( _"Ö¤¼diog‰R~œ|fè ~®ŽuIŒ/ÞüPRV–¸†]œ^êÒ¼ÁÛw>¢%øæî•~=,…Õ0d·tehÏ®¥™–rÖ~`_$w0«Ê*–œ¢¾iòåøþäñ\BL$DÅ6d KZ/5jL4¥‡ŽŸn¢'æ>¶}õL_± j6¨‡‡akÃ-†8‹RÎaP‘ûY§‰–^îÌÙöâ~÷Û}ðXkc+Qéÿùݲ¾ZUU=f¾{ü|C,ùùù)Ï?‰îaž§&ÚõéI¼¸Âe‚a#BTbÅëtî&:füÖPÿ™â§£´Ö×"OV¯ßhnç(º†5ò¿hk±õZŠNs†·1£›——šþSµrÑÃPˆg/ ÙCrÑÙ€Þâ¿eë1=:<Ýݘ¨œR—yÄ@±Ciô"%%%èaÄT){að[ñ5<ÓO¢4'Od2Ý/A%¨¨¨tÖ3vÐëm]ºïªÂÁXEûöíïܹ£¨¢oß¾G-SERˆ_ÅÇK"Y+++H¹t”bUˆŸШQ£>}ú:tHEÂÉ$Ñ8sæÌ¶mÛx¼²ü ¿?S…ø•W€Ž4cÆŒž={Ê6ÙI“&U®\ÙÖÖöÙ³gÐÖ«Wï_¨Ìƒ¦¥¥•AÙSˆ_Å–-[bbˆìÈ¥3öööÑÑÑfff—.]’:M‡³nݺ¦M›âU„xeG´â!++«Zµj%í)U!~Xö(ƒÍØMÈÞû÷ï4h ©»,___ìÒäÅ‹ˆ¶†IM»4§E‹?®U׺uë ÒZL```Y–=…ø•E˜ššb/| {999UªTÁfg;uê„]š€ìÞHÍÍ`/¢Óæ8‰Zs{úôi|’ŸŸ?oÞ¼²é O öîÝ›ý ‚àQ enÛN!~C† ¡{ÀÙƒw ÷Q”;¡=zÐçEAöx|<€ÊZ»vmϯ0˜sæÌqã¸û(gitÁÃ`±XtÕOÒ­ï€RŠgVùéB²Xö ŸÜ½{7ôEP0ŒÍ›7›˜˜@8®^½zWæÆ` .Ü¿?´)Íš5ƒV aÆ;v숋‹ƒÎ ’0`€££#„sßBee(¥O†……AŽÐëBœiÓ¦A:õPˆ_)€š‡ä_Ú2pàÀÙ³gó¸ã‰â™\kc^zèm@&L˜@ß®DBØ ÊvëÖ TPúWÇsçÎÁ(qРA{÷îë=`;IàŸÐOŸ>}Ò¤I 
Oò쮾qㆹ¹9Œu;tè@×'½Il† ¹Ù´iŒT¡ÏÇ~êé:<©½½ý† â§€PÐûhõa¨Cý|À?Åú3àÿÐC/a‘éºkçί\¹‚ez­Û·oº…'äòåËô¯)Ç™‘4YÐlñ@z¿Ji P· ~·jÕ ÇYUˆ_–=wwwèCx<ȾxñâäÉ“ô@Ê¡4s€ìÁûôðáC$ÄŸÝñ²ÔÙAöð 4 Å—=a +ÛДà“~ýú9rD&éÓ»n,{Ðóc}A†žÒâWVP½zõŸ?þúõ‹Òß~üø¡¬¬\µ*±‡­iÓ¦Ì]:‹–= Ž”‡WÙ:=Çnýòòòd»(œ›7oÆ'Øÿn:u233ež ¥«ƒìA· #X9¸§VˆŸœ²G,lx6²F%‘¤\¯^=¼¸¬qãÆ¯_¿.Q¯ { 999rð„çZJBöøgéžZ&þzâWú ;§´©’ˆvgٓã<Á–455eî$””ì•Ð-…ø•8FŒ|>gggC××§OŸ¥K—Θ1CnϸeË–\ºtI]Èž¶¶öׯ_Ï;g``PÒÙìéèè¼ÿtTü T!~å0ÀÛHâîÝ»mÛ¶-ѼÔÔÔ@cbbÒÒÒä)~x¨9gι嘟ŸÿêÕ+D~½OŽÏŸ?‡£ eO!~ò@dddTT”ܬãy—›7oÊóq¦òÜd ¤¤$+!ÌŸQæã@…øÉPz)=#⪦¦Ì2b’"¾:Îef°P†™VRâ4¨ÏHZ@Œ]]Ol¦L*ùÛ7Nv6£‚=|øav k˜øÑü+-XS/Zr8炪€÷~¸£ôöK ¶Æ&{ÖødÑÇ)!~¢¹v ¬Êûø¦N—, 2Gþ‰ŽûÀEEm+ Øl«ôôóä º;8â~ÿ|;²ñà]ÚlV‡¦HmØ™m–—ZÙΙa‚–&q$̈UpK.Bªl2ÍÛ­Ö¦oÁÖ$ôõ]rï¨O[ªã%JRgÊœ§ÝAçåígHÛ½¼!Q¦’)Ÿ®®®-µj!M?² nÕ˜C<Ô¼0&>™yá½?«½½ýÅ{TÂXëæê^ºeÈ{Ÿ ÒÀ²$RBÊÆcJB0™yîÕ»éö·\çô&ÕËï7/áC¥Î+eE·ö^"os¬ë"RÚGÄ=à« ÁÃøù5õp`ìu1η+›=/éøSBt“®ãp™Š_…y| 'C¹»!ÃÃ#Ÿ§1Ñ?ÿÖš•ö’£¦Â}iö¼ü­­]™j­éÇbæ’xéNW³öTjó<@üxrI;ºøžÉèz'wWšpcx=(ŒŠŠª”Yþ>1ÇÊœœ}° IŒ(êq–•j>=¸ØkA$¼×_O/ŒtP¥)iìpïdúkbÙç¯côhÚ7€ééÛˆ˜¡æ1œÛ,Vr`LñíÛ·š5kÊ!/*Guuu~Ó’¢F “½_xHŰb>‚BüJëÖ­#Îß¿544Šÿ®ˆÖ)䳇çÕaÛG&œ£Ü§”ÐL¡BüJ“H@s(ŸÉ Ìí É0S9×*ä8`ÀöiÊÅ*ÄONÀ²çàà 'f8-òQ%0ôôôJh‰Büä l`“‰S”â€Ç¡Ü 7oyyyò‘=ì£ävo)ÄO®ÐÖÖþóç%{%'‡Ø²“œŸN²Õ•VÒÆú÷ïøðaDNç–hF ñ“7”•¹u>|øpºì EEœü/¹ßòs¿(**çå~»sîüÍÿö­ ÿ[}Y99EEeE%E%e€ŠšŠ’šªªºª²œ¬,6V®îçPÎ*Ö}»Ö6jk¬ÖÄ,±ÞýÑî{GsÍæ›ç®ÝÿtìøîO}MØŸ“s+cT–xô« ¦¦ú ©¤3bê—´ohå¶ol;ÕíÑ£ô¶mÙœ¼ï“?µnßP¡žkanˆŒ|}ð¿üo‰Æ=@Óf‚-u8ûMÚÅXݎ͈8›Mç\½ûν-*ʉ ííwCÞ\BùrK¸ý}/|Q¿žeé±qá5Ïóñ³ç…J‚§âôýv¸¸»óûwë Ðˆf\åÕ@®Âh´äèQ>+±ßlÝ7w§ñ À泯½XÛ ,7èLƒâ³ç˜c%llâzÜJŦ_*¦g‚R¶‘ÀA X¹2ÃËK›O˵x¶ÏÛ óËSðàŠò6b`‹µã!g ÿÅv?Šdëˆ:@>jälY¨h'Ê Ü#}¢°JTÔDøÇ;TPÏÌÉêÒ°ý5cزEj×kø¦¦OãOÛëF@ï.ÞRƒº²Ö§}®ÏbÍ™²3 ÿ ºS4Û´¥aÆ·2e¬¤îÇe Qxyùž;»¦o¿’¹õ+Þ-ùõOì ˆ¤–µiÛ“<§V¥EºâeÁY€ÕÂæÍŸ¦Ooõ»~jÓoÂô‚WÉÏ”Òf^Yí´×`‚p`X!Môí7÷í››ÄW=ðàêÄÿ3põ‡yž‹èÈ‚óÉ!m dExqúôßùÓ0Óî7iy í¡u„¸tI±G±óC¿?ZZ­Çk]ƒ°²Y¼Å÷C? 
jŽ~ÌÜŒôcÀ€¡_­Á§ô‰å¿†:o¸//ŸÈÏ¥3Ÿ x1ç~#¾sè‰S]­Úë$?gÑV]òrHV¶ÈÎ&~5PdqJ]Ir¹¹Ä÷B)§öÒ¯CÛmÚÕ)ùH«—:;Ó¹‹ ¾–‚™Õ5A4Ç\-|/ ­º6„Tï^¤àà+8¥¦°fôŠÃ(Ÿ 0Ê' ý$SSÓ»wd\f7çͧ§ÿJCµl6«KO‡ŒÃ_‰Lß\rA†>,÷ó„9›ŸÎŒ<ÿzŸOzôc±X>W>úYiˆ™RBB‚8¯ý0SáY…l6¹æ¿tâÀ0Ö’$!¡š03ÁaÞË6>æLíaé‘ÈÝíÈyÀCÀÍ3{ÀúôSÌÇÚ êó}IZfП\µóï+âÆ$Ö‚r¯íéꣴšœ°Ò/ýÌ¢ÆýW`7ÿæ÷ÝÉyŸœägÇñ<-^]Þï  ÂžÅ:LG- mËӼX²ÙÝ—t!Ý7×vã/¹Ó8g _-ùx1“¨‹‰=gË_?„?çÎGÈ/Çp¯6ƒÿómîÃkxCW]=A|ßèÞ!/Øõç>f¢  K?.÷È3—(ŸØä·–­É{Ö•á CQ4üÉà^!AÈ}E•ÒσMÅÃ["¨ÚX;^Ì%·_¿!µGô±Ù#ÓÓB¸jŒ}‰·Bdf×Á8Ç‘–5’thØZçÙ¿ òYòùPŸ¥üJ)´±ÔÈ…”Ï£×’´ý00…À]˜y§ž&Þ)¬¿¶ôý˜öþº¸¸8;;¿}œŠ*8çzàR31!|ª&XPàh=jåã?¢ˆë6MO¿S¹1ó­›•2±zrkbâõ…6Úå©ÝYˆP3ã!n8?èßÿ$sî‡l‘žNçéRãÙ¬Èôﯟ$˜õ0è†l¾ÆO?܆hH’ó2æ§—2ÁnV¿¢ý–Ì‘ ýþJí2éÄ3q‰**(éé—Eþ|èoïUF§·ö’¯ã§l¶ˆ¢EXúñvßÍt0i€XO³0íÌ¢æýWtÖÅ]@ÞÁ³Žž˜Ý`P(yžëôrr+'«ƒ÷é±ÉbÕ¯bËÒ*v¸Ÿ'|ƒ“ƒ…в^]´ âE¿K›Ø`µáûxäßGç‘Zï]Xs¯Í3‘ÖÍ:u-ùíúÞªsï_˜©¶ø0QeÏb¿àˆý‚ s.1)]O§u—ôŒÏ«ã?®¦Éß"OÎhJeq8kVi“j*2TSb—®Æÿ®Ÿy€{ëÛ(àÔÔG&®çá×êdÞþù÷ƒ_φâŽ?НÎÌBh&J²‘ñ™­­þßKQ’(õù:-ÇÿÞðí|©†ç!NÕUM¿µk×v7Ðð#ûà‡™…xmÞSSÓ¶–6xÉè„£&ÎËŽ„­øÞª!Òè–õ$ÞnDÿ„7yÆš¤¨„†.w×Ö_W«ôŠQýLÍÙD|¼Yzúv(jÃíÀârx_î*ôv²dO¹ ¥kË›D¤ ]OžkÒ¼õ€C¯Ñ!R*>`¾Å/1|G¹]ø2O)&"wÞ%òSÒ”šQ~¿ X çæ#v0bk7‚fš°©ÿ‹ý 4ûe|¾™†h‹C¿†óë'7>U.°\Td$U½ât[UÓoÖ,Þ¡ˆ¡áQ%¾rùg¯”ÕTmÕåô¡hê'IÅ#N3â/ê—;‹”‹;$QŒÉ³wÍxÚà^%þx€ê á3Îl˜ßLëÅ%~õ¦_¯ö7KÕ8§š¦MÙ²º´Õ!¹uX×õœ!Ð\H¹\âÃ@€m ‡B't€h_¶ H¿ìEVlý®s#ooZ™Jê.³£v›irëóB<Â}¹µQÿïÅâ$>ŸÍZ•N€ÀpbÙ»âždÖ‘LÏ. 
J÷,ßÄnª0j$¤|Ö4 ÷Fó¹(kïÁScÇN@uy§qíÝ·}옩9ï^Œ\}ý\иœwO.ÜJµd[Y|²-ôëóZ^èFø;¼¯éÀ©Ž#Ð’!u†³~Îm£3ȱ/çþL'-tQ¸$¿â4ÁZ«ÃÈež®EŸzëh¶2ï¾ÅÔ„Ø4ŽRO3cü§>ÞÔÍô´šÇ]: Ú¢g{Mzl?ÿ5¡akëËhbšÁßaFl%/òuêæ2®$ó Çv¯‰ä "dÔ’¬MBhôò{ c‚¦Î[¾îê¡°¸WE3ß?Þ,® Ûü‘¬éÿšŸC}ü0ë€WLfëQÛæÝzß,ðo±Š Ü»zjãÖÑóWìmO 9NQ72>îúaEЂyBAŸ;h7a€ækÒ©¾<{2¢U§zÚª`EíÛn?†¼£ûÓ‹øK²¡™ °0µCG¯õoK?6›´éç’çöñ¹uÞiøF/°4ä¾å¼ ‹¯çl?¨uv6Ѥ¥¹‘Úýás*Bã-QBÜ|ÞR˜™OûGÈ5„Àù×Qñla&Âa|ñOߨOK­A0%ˆ;B’2c¸Ÿówœ¥]òEîQÁj¬_OØ9Á¹r$+Ýôí†G«ùó&É\$|KE…oÄõ.•êÚB,ƒÐÕÕMKK¢˜%üò…éÓ§oÞ¼Y¸:d ³än/¦º„L½ºW:W¾FJ-//Ÿ——'BœÒ,ί½ÛýåË—JJJ_¿~¥=æ-[¶ìÞ½;77—ö˜ëÕ«WXXXS5&ýÛKüø!ýb6hÐ ??_1ÓÛØ~ùÃ&¾}û&¡˜Eë;+Áóçä˜'‡Ã©‘Š’••­‘tëÖ%ÛØòåË-Z$µD?}ú$¡˜•••1 ¡ßÿ£éçà ©›’6l(‰h[¶l‰·nÝêܹ³”«ëû÷ï ¤œúÒ¥K¥F¿uëx“>>îîî´G»`Á‚Ý»w3ô“8@@I‚~Ë—/ïСC”¸‡Pff¦4ÓõððˆŽŽ–fІ††’0L@!¢—{¿ý^½zÕ´iSÚ£5j”$r[SÜKIIÑ×'÷d=þ|ÅŠ  ‚Ò ÿ”ƒêâñãÇ´ÇùôéSI4³_ž~ OàÓí¶mÛ$”á£G‚jôàÁiÖRãÆutt^¿~­©©éÇņ $¡žñ„ž³³³”ÛÃ? \B¯X………’˜8ýåé! úI^^^Ož<‘fŠmÚ´ùï¿ÿ¨Dcbb4hm4##C[[[réöïß_èUô`ܸq{öì¡‘{PQ?~äp8íÚµ“D†yú-[¶ìWÉj‹-@÷ëØ‘Ü ¢¦¦&¹‰~÷w $ö¹zõª¯¯o«V­@¡ºwïÎíøò勲²²4¹g``œœ|çÎN:‰ÛÅ‹mllV­Z…¸ó–Êó/O¿¹sçÒ§„fÛ9<ˆ$9i)ø¹k¾øÙŽ—÷=zÜkÛ¶-8èM"W]]ýóçÏÒ)æ´iÓ€{-[¶|ö왘Q]ºtÉÄĸMkÍš5’ý:ˆAìÛ·O ã.R[‡)Àö¦M›¾zõ ˆ<îíØ±cÊ”)t¥EpR㤸eË–ÿýWLîឨG 6üðფ¹÷›Ð4:ÐëhŒZœI#zöì í»srrTTT€{sæÌ ’DìÚµËÑÑ‘¢ýýû÷ùŸ÷(³pæÌ™xŠE|}ϰ‹y™Gµ€,¤5Ô°h‘êèèŒ3¸×¥K—7n÷¤¤›üô:t(‰ªªªÙÙÙ´g’âVnq? 
ܓĢÎ;ߺu »¸‡‘››« @ÞôÜkß¾=T æž–––h ¨‚¼~ý´D?º™™ÙÍ›7±€µ²²Š­n Ý»w_¹r%ðmÞ¼y#FŒ°µµ…OÜ“fÓýèGï ¾$¸7dÈcÇŽQ?ùu$h²©©©zzz´$ô÷ßß¾}›âÀØØ¸ÜÀ½þýûã q\ëÖ­›5kæd8** ÛUbñâÅË–-{‰Rª%Ê=P@} 6,Z´HxîM˜0fp0y¾÷åË— °bÅ 0€:T#M÷7±ý¶nÝú¿ÿýOüxúôésþüyÚ³ÇÏ=Œ… ‡Çn̽‘#GâQÑÀf³ÓÓÓ{¿ÜãW‰{?~ü;ÐÙÙ™Ê0XAïß¿/WŸôôôôòòÒÐРŸA‹I"!ƒÖÅÅeÆŒ†††À=ìÓ¤I“7oÞ,_^ÙEÛ·oŸ?>ÈÿÆÉ ¹""" ©€uwéyƒ€ôWÀþžô£…{£F’÷@A‚WÀ“âà‡Ã‡ð‘_¼xô%À=øá¨ÄüsuêÔÁåÊÊÊ_¾|e­Zµ ñž={@H‚|óöö^½z5TgÏžîÑ^{@0É@- ¥<±™ Ü y–ÚÙÙQSS§N6l(`ââeÿ㢖´ÛßgäÏŠü:è!’è ñ|¹ð2~̽*WEOž<ÙÈÈÈÃÃÆ‹²Ü†{˜{Ó˜{”¦÷éÓ§¥K—bO’ÐÊAwÞ‚:)êààЯ_?Zj RiÚ´éÑ£GqU¼}û–z¤’——wttîÉ –X¹[¶lÁ’ yxùòåèÑ£¡Î9‚kú>鬰û£éí@dTÃ-xx³"îáñ È-ä¹ì#̽ãÇŸ9s† 0xð`mmmü³¢-Kx[ƒðÜ£€¹gmm}å ï° ¡­cM4Ì%K–ôêÕ ì%ÜÊsss¿}û½ž­­-~ ýgD˜¡ë×ÉÉiãÆxò/þEEEAAA  gddLš4)$$dúôéÔ[ Íz÷î p—.]|}}AÊQöïßÿ«4ÚßjÞZ3tŠðaªË[Ú¹æ\jjª0&+ôÜü¼€ÅøôéSìVRR‚.¿¢±ððpà9¨Xx[ƒ ,+à¸[·nýäÉm ž\€hë ž¶lÙ’R>555Ëná…§ÀXÒ6là÷ øž}ûö…D>üüùó¡C‡‚!'ЀÒfÄÖØl Á,XЭ[7* È^)ï¥`èW€{ÂCãY#è¿iÌÀرc÷îÝ+ ÷0€{xý×»wï #-Ô<ühçÎT0j>‡`ÞPŠ"Ö`«{ì Øo`.Ù@ªP6%@ðêP‚ @Ê;w‚è­D¶¸øMMPMAr‚2Œ}233ÁŽ…ê…$@œúùùÁÀ¾K||¼¾¾~óæÍ´Þ\ îz Å‹Cr ß ª`Á\ ß¿áªøÆxé­0ƒ"ô&””Ü2pAA®®.´6àôôÐ…Y!iaaÜƒÖ ½4waìU (‡À4ŠØ³¸¨ü-‹õøñã?~€À˜ »ÿ>ð 2 ,æO›6íÕ«WÀÏC‡{ö¬òõnÝ»wǃÀÍìÞÓ§Oã&.ü(n~~>è–P`Ä‚·ƒ7fLÉvÀ=ˆBb•”^ª 111@ÎÍ›7Oœ8ûLæ‚Ò®©ñ-ŠÀí9sæT™+þEBˆ»H5,,Œ®ÁU†~Òh\e‡$±g¹îá5„NNNÂÄSåZÓr¹·cÇPgr¦ Ï¢¢"ƒ®î‡ ¿³OÊ lV!ƒJ¯Ÿ•ØÞÞŸr :y||¼@¿™C ª±±ñÕ«W©‘ÆÝ»w÷€<ü|¨xY9U(б1÷¶lÙF£NqOB«súÑÞ¥ŸÅ {áááeG¡UAÛî‰áÑ£GËÒïÇuêÔá÷ÁK̨åWeñóçϳgÏbm-))‰òÿðáƒÀúÒ²6'^uM©ˆT+§†[A,»f ,öîÝ;vìØŠ¶t@-AÿH­8W xAþSƒ® ýjÀ|§†7{÷îMUÐ…qãÆñÿœ?þªU«€{"G¸xñbŸŽ;â³¢1ðªT%f”¼BâáMÌL<õÏF`K‘ÀŽX}}ý””< 2ó¶\ Ü•„jEf$ sÏÍÍ Ï7Šд1÷À.ˆˆ`èW‹Àß¶hçâ.’âÇYXXX¯^=JîñsïøñãeW¥‚ç›7o\]]Ëi÷@Wk­\úyzzRõ6!xq>ƒ_f–‹J¸W­[œ€{´½¹œ§w"—¡ Ä¡&ü´,ºÎýoÒ¤ µÐùÊ•+ü»HZlûU®å÷øóI-æDÜ…)Ø7w£â]ðø&&aPîA–––Õ½A ²G×ÉÀ=\ †~µ£FJNN¦7NªMóooü;h*ÚÁ‡+±ý*R8©!JJå:t(po̘1ûöí«n«-Ë=0¶E[øÜ£Ô]1¥ ë0†~â"88xÆŒ´s>ÆÆ©Aùr—ƒb›ªº›»ñ¾[J.aî­X±âèÑ£!!!"oóôéÓV­ZQ? 
@‰-*à^QQ‘{|+Ç/Á½?‚~K–,Áë6è5ß@/÷ø¹G Ÿ€T½N´MUÀ=‡###ƒ/6ûï)i#ΰ¡··÷áDZÛÉÉIÌ㉥C?ÉÂÚÚZÑöêÕ+##ãÌ™3¶¶¶’Ë šãÁƒGŽIûm!t]¶A#÷w ¶’-] ý$Žž={vìØ‘vúmÚ´Éßß_„­ÂàÂ… `5oÞüÅ‹‘‘‘t‫;vøøø@tˆ„­kÈ$]û\333555éÕáúÕ$±DÅbIˆ{ˆ»:»„çÁéŠvàÀ 8wž‡‘-Z´èGãsÐ?OŸ>Mc}â‚¡_Ít-Úã få¾hˆ‹‹Ûºuk‹-è=¤$--mÚ´i­[·~7°UAoñéå vrïO¡ÿƺ@3) ë>|Hû\uêÔùùóg^^½Ñ:;;»»» *QÛ Í 'ú•‚$.µçLÎ*ѤI“† JˆØÕÚz+$>L/ýöíÛGo§ùþý{Fú1 `˜ 2D1Khù½zxxÐK?É]ÆÐïwƒ‚‚‚„®¡¦kaªhד»víú'|h†~µ•oÿ0`ÀÚã¤ý&,†~¿'¤y)lí„™™™$ Ôùóç3ô“^¾Ë¼›xøù³u+ýv-õi‰óΣ§¯_½–S·éú7-ëƒp=ñQvNN̵۽ºýMc´œ¢¢‹Wo‚ w+Ѳ³1]Ñf~ι™xïGÑOCý–Ítèªáä§©Ù¹½hªa‚ bâoÇ^‹ïžôĘ̀-C?ÉbE@€Ý$|Öµ¢ž1y¤dBIïÈÕÆqÇö¸ÿoju#üA{Ÿì`Ñ›¬2 æd;{ø‘­ñ§öO›:Iœ ú’w÷¾nòœBym½+ɨ8ÃEΫûׇىxÈ×ÁSçõ:v©ËxÐ60¹õ*‡Š9ãųN­šjj¨Š­ÀÊþ“ðdZ½&mÉn"·8ÚˇwÍt&J 9Õ¡› ®áv¥jøFô¡ÿMv¨nœQÑ›µ3•©GÞ¸Ô@¯ýа}TÙ_=}Ðݸ­²’C?:¼¡×¨©ÅÜ+–CÆÁ¸°?Ìs¦°S±‘GNµëÚ s¯\˜ qf§>èÞµ³y>wëa£¦ú˜{å|!=Skˆ_»nA£jÂGûú}Ö'B¶U'ËŠh7où–ƒ=JëÞVWøh×oÚÒ}¸c1÷Ê<…Üž‹Ü2îl!ãÜs,ºm—˜{å¢Kÿç×—-ÍL…*{fÖ§²-LºU i«ö©¹èãÓdS†~4)÷çb{„ì=Ú91#×X[¡Ê.ùÁûàž0qªêµXè=¿ë3ß}þò®P¸'LàŒ"ÙˆõÁs…ë5ÖmØÔcÄd¡²­ÑZvCô•­UõLãþS€]ÂDÛwü4ák¸'LœJÍ …©aáËÞ q3(»Žl¡†ª C?±püÒ ýŽÕ0–X¬¿ ê´*Û -£Zy°è¾2pµ×ºé·ÉÞÈíVGâÕn)C4îaàÚíÄn Ž‚"‰§vÛª-@lA‰ÿ«‚zê•ô幨¼« …ÂöÈ}SÇ—¿ÖD4îÀ¦½~'©k'AË„I´8´*ßÌ(}i˦mÿ¸:M®yú \ä]ùþÿìÛËÔ:ûPa ?ÜC¯Uù£Âæ7g¦¡½»x§úœ¿é×[ckv4ŠÍ",UÅ-ÃŽ½‡L{ ÎÒ&_=q%þ‰‹g‰è¡-’QÎjcƒr…˜8YZ½n½ç¬Ê®™{uÿÿÞJTo*úXB'›òWºm ß÷wÑÁÉ5nY®§Öo˜UZ ¿pí¶¦~;‘#ì6p ’:Ê—~>W>úYiPTÔg±ð•,‚P-ÅOk"?¡˜²¤-éT¸¶“îþ@ }ÀgiŸ~1›¾{Ër̳Š ”œ®“¶O·ÄŒ¼8„ŒµåI­2£Š%ÿÑ—ãÙFâd©÷—Jž®Yb3ÚIœøå´Ë_½AÔw$}E@ÀBî{ü‡{ØÔùcb›¶kV³G™16eq÷Þn—,ý–Z7XÊ÷3ËÀñÀ±Îÿáí²æqù ƒ*s¢; Ö×ܪ½œ+Wö×DÜ-:M¢sÛÛÌ,ÈD9ÚÕCö#&úœ|=eàà›±6©;%¦Eõ­Ê©÷U^|Ö|VÖg¶$+Ýr¸­‚RùVš†–¸ße•ÌŒO9‰{\ŠÀxInníµªª©%f 6•všÒ£ß¼ WÙhPª#ÀÅ…Ì™óø oßðÎñžgŒ‹9ðé ÏS +igK®#?—œ¥ÆÖüÆ£.ëÑ™â`Oøî>ãÊÔZÖu‹‰#Œµ»é¸Úd'tv{E‘u+uœf[sÁÓ5/Ÿ8‰[`÷~…ÌØãg)†¬³©/¯(àóàU^û¦òü> ¨«”Ò•#dÅ"»É ôÙ…ˆ¨L¿‡„Ö¹÷–s=n¦R¾¤ŠŒØÕ{4yöáÁ°Õ2Z†ê[­l$ä dlQjò¦MgÞläå³'å™YvæËùÉpÙ+×Vž¥½l©Û¬ìgÒÒí``Ьš:j©®ÔwE”O?YnSsæ3ÞþvYñÕßtöáW…OwÎŽ&—(îÜÖ{¦•Û¨ˆ7…|$Qt‡U·AD²X4ÐONA±_5[ÎX¦¥#xZ÷wxxlÉG¥Fp‰Ì;ë"bq¯¢×c²Õ1…$à§¿ÿZ}±Ç]d奴nhP’¢'n^[–‘9£o±Zûd†•à¹/¹¹Â0³õÁ°Ä÷ËÑOÔgÐ)Üzv$/™b ¿ÎÔó×&äÏ67gɺâ¿L6/™—Ëμ¦3;ç ’çPñL5—¿›†ߥ_u=-­‹lÚ¹º&丆¨ r9ê:TÇD¥Š6½3ÔgÊÖmû 
f“{¸áâ´øãR¹ý4:6qf§„JOâö?4?åÜÔü“0¹ÜîC•жð/‰S KÒBä>%5ðéKNu¥RsÀ*ï&õpÌ=^ÿEÈâvï·—ÏÉy&ª ýP­ ÿ­ß”ûô•’»ò¦zƒÙ»ÎªÔm2¤§Ws8R¬é¬Y¼abM³ÑÞf%Ñz{Ͽٟ>¨RêJ)-®( ÕÕ-gÄ‚óähå ¸Tç¿î'®X­}R6|£FÚB ëLbF$pÏX»×ÊF× ½rHºûªgãúî9÷¡ã]"Ó9¥ÞÓkÖD &½¦xA#Uê„MƒJ¿^·YÒÕë/ï[Ò’-Sõ]ê9ŸÉ¤-"#ÏeéŽkYyß+îY¬¬Qì.œÛ ÃÖ£Y«âqÊ­á ‘GåÈÜŽN+wŒM@gá»ÀØPOçI6?ý nÈÌÓéh‡ %r»Ë¼jJVA,DhRµº.ü¾Žþ>ÍîµeäSÌŠ%f¡šGCíR­06#O‰Ûøâc‚|v¦\ˆ,ÄúÒ Á®¯o§Å´È«[Æ Pôþµ‹Æ£KîëùñóGÒî%¹vÔø£y”gEjíàþ}„*€šmqy#ठ"pä\îñThviÃDCµ2Í¿nâzwê8ÃüÙçÛAÓÿ_ÿUjd6d“Ã9'ŠÃtk@tóíƒ|óÊÈ=®ã€BN8·d·B ,WA VÒß?±‘)ÇËU$öçfæ\9ÕÎu§\-u=ãÀ¾%«pådA¦åãð8«ûüí¸²»méâs+™@J|µºzJ„:ÔHëýåW½L=œêòKúB„FöšCi9„ýú&¥w 3æ^¿z—~qƒVR,¥Á^ˆÜ"漪ôtƒ§‰7Zw©õ™™òi•š˜8ªœ.ËÊŠdeK^ØÖg\µ7Èë[ŒàOHàÊQñ‘ñôž‘V—ZH¿T‹E˜H»øEKôÀk‹òg­²Ñ¨ÁÖµ+ô¿ÁlŸ‹.’iß™œ~NíqƒECLQ=âÁƒ—à³jÑh}>v Pê°jÏ9³dŠ“ Nö¤Uáàܾ÷/Ÿ6]ÎõzqÇöX'NÌ?¾çKâ»Ø.57;{†;w3‘èøþ9i•º¦÷Æ™Ã]l‡‹g}–´í?¡è§OrïRãÃöðø%º,T<(Š'úÆ/?±ÐLÍbB+VDÜw«©QñŠÞd … ·Ueˆ=n5Œ§þMrtAŽxâ_þžÒÎ ÙUÐO?¹ù¯Ñ`;~èVËêŸÕ‚†BekÑÙ¤y" F—µ¥ûÿ¦Šs¿žå̋Ʒî rœ)wcìJMWÔ­[—øYÀ£†›¨É øüoÒx1ËÞÓÒ¼6Ò/%í0Kw8‹µ•Ç(ÂË={.¯0 #’Ló¿LŽs÷ÀÓ|ÙÞV‚Hk­ûÛK¨ Ó]Ä©÷Q¥¹‡Ñ´çm‘ˆƒÈËÒÓ«bH#õî=Sï~yr#ÆhH…G¡ÄGG™‹ZÕb£F–#@<\ŪáávåLjÕÉO(ŠaÑ·O[4)ëŸðïi“žv¢Å™|íœÑð!µ‘~¨Ù0,ÇôY,~µsF@ïÃÞøWº°8¨ ¾Dâe¬tM'ÜÈ—ÖI´z ?SsÿáÅk'ö9—³EP³Z8wç®qëU½ff˜]¿#¯êµ3¥¿RÙ1DÓ&O*Y¯’ǬPyÓ•ÿ‘–'Jº‰ÑôrV“h7Ò<°q³h:•Ç=Àä±ö"–ýÅãÑRçž°ôãr‰4ùRˆ÷,V#ÊßÊû‚ñìk AJ,oà(ãm.’5Ç"1&ÀémãÞÛ7ÝÈ%ÌäÉH2¡YK¦ÊJ ÏngÙ¿Zo ôõõ­èéœîÇ.ÅW´ ½"hÕÉ­rƒÇ@‹›iŸêËÊW+þÊ7(RaªÛ YEß{TzŒª²â½Ó:öDc Ït›~"öVÓVí«gc™Êf‰D,»¹)ª E?¬^²Xä6¢„÷Š“/¢6ïßµÖÿÜhÜÆ6[1†ØèÜÅÓK·!¾ÕØš’,ÉøÃãâo+5vÍû¹ˆM•´ Œ!=ÌC¶„Y dœ-•‘‚|5þšéjQNA‰_zDï ñöš/¤rHA¿)9ïü9çËÁ#Ǻ؎à”÷ù]âÕWç©F"q6>~–’pïA;‹R£ÙoRÞ¦>“̨šÜÃX@9uFVEƒÝ²”Ž÷èêY㎠[êiõ¬n´Ãû’ú´o›±Îõê—ªÌ {¶zΙUÝn¡KÞËù9;'êè‰Î¶ö¥k8#éÚ¥éNSŒ„æ¯ìÜÛuT]§y£f­ùýwýbÛ6­;´ieT£ÜC¿ñ9ŸånÜêè%ú)«ê*Êÿ›Tfð]«yWé´d˜@-ÑVQQÉÉᮕÔj‡:µ3fþYM‡4Œì‡Šm¹ÒØhŽèK¡ÔUUœ'•Ñóµôºê‰ç¾bš››ÇÇ“ã‚FÃÖ’VÊœr]¡¦¦&¡˜‹ŠŠþØZµ··¯mYbèWѸqc¦h‡ä®Ceè÷[ö0ù•O¦zú1¨ oß¾e³Ùééé´ÇlhhøgV©’’Òׯ_ú1¨ýõפI“$óäÉ“UUU³³³ÿ´*522ª…¹bèWÑ£GM›6A‡íååEc´qqqÇŽûÓêÓÀÀ@B×ú2ôû áää´oß¾ hiiÑx­³³óÿý÷ï¿ÿ‚ úøñã?¤2W­ZUk¥=C¿Úˆž=É©ð©S§‚Ú±cÇ{÷îÑ-p;€Û›7ož>}úo_“III³fͪm·j2ô«í {ö &L¸}û6âNëãG Q=<I±Ü³µåíTîeddhsO•uuu…vvãÆ Ì=àäŠ+þùçŸÊM ìn×®dó5ùÜÃó˜{4´Jx9 žUÇ…úU¸ÇЯ֒T˜ 
”?æÞîÝ»'Nœ ®woò„Ù®]»^¿~slÅ 0GCCãÓ§OÔÏ;wRnþ©0<xøðááÇãîß¿M錄$Xü·mÛ¶=úåVó0ô«] ¸‡qòäI õ¸·uëÖÿýw@-pþnß¾ýû÷ï )îy{{ƒT„ÉϽ*Ü9ùíÛ·-Ztàqç¬ëÕ«Çï@ b̘1­Zµòóó£X•³wï^ "pï—Ôt˜_{о}ûðû@órttܵk價7yòdJáœ:•·ß÷áÇ#FŒøï¿ÿ <ˆ=ÁÌ366^¾|ùÑ£GÏœ9Sy@SÅ++«ØØXpPÖ?~üPQQyóæªªªÔ*$99ùï¿ÿ†²`=|ß¾}Ø___ºžFA÷ôkL£¯=àpzzèàù=1÷Àüóðð È ¦¨‹À“uëÖA»tqqYÚ¸qãÈ;R¦L™‚ƒÍ™3'22róæÍ î°5#OsÔQ02W¯^¸“ ©}úôÑÕÕ £· +¹zõê©S§@#îZMj›‚……ÅСCñž=< õ;ØùL£ÿ%Fð\–€¿²²2æ¨ % A;¥Ä&¿Ö:oÞ¼M›6íØ±côèÑA\ðÇbM]]Ø5}út µ'p8Ø­¦¦vóæMÐýðÏóçÏóÇ:êÒ¥KöÎÎΔ² iÞèé鉕I P¤)7–x”ÄZþ~_–¡ß¯à^§"௨¨´éر#4hì³hÑ¢+W®ÄÅÅñ ä‚ßĘ‹ ӀЖ––xQ(–ëׯ>ÇŽëÙ³'ØW˜~À±ÄÄÄøøxjv>y›4iˆÙúõëƒR Ö#5ï{r;øçñãÇ!X› gÂ#ìY¥ªÌи~soÿþý aBõ±!OéâW1ÀÌãÿ â¨ÌXÿÑ¥K—»wï ¼› ™™1cÆëׯÁÈôõõ…`¢@0¬s^¸pÄ(Š îB6€6@°Q£FõíË;¬m'ü/B¶ABBO<MÛÞÞÒ¹=˜‹?ðë3ô«IdddÃ=P»wï¾ÿ>¦Ó°aî]»&dA+V¬îܹ3X Ö äȦ«« B{óæÍåæ6???44tß¾}ÉÉÉ`sº¸¸XY ^·½Pä90sÙ²e ýT åNs8h¾Ð¶wƯòH5j„©òñãDzOýüüÖ¬Ycaa-^ á‚߇¼Ú½‰‰ÉÂ…¥®ª‡Z¶léîî)¶Í455*u×ìB.²ñåË—ñãǧ¥¥©Ú¶m æ äçŸ#‘““›ÉEEE†²¬]»ööíÛÀÌ%K–ð?‚ÎJ:6È^JûeèÇ@Ÿ>}*»u¯áQ†¹'<^¼xѱcG‹µuëV°Ó(_.DÈÞáÇMš4™?þâÅ‹q7Á? éææÊ¡±úzâÄ ~0¡S{/$$$<<œò‡ÎâÙ³g ý*‰ÍŠ‹²þÀ½)S¦€Ì„|‚Ê@ùûûûƒ@mOc0ôûÓÂíÖ­[ü>xµ”ð‡p–’’æJÞÞ»wô1~î!îP$ØiÂg tT0ó𠡬¬ì›7oPñv§¢¢"P&)b`uêÔÁGMc€A ¦à»wï(]777×ÞÞ¾ZÞ œ:uJÀ”aè’®_¿NMØ,_¾ü_.úý‰€&ËÏ=hîwîÜ©rµ4DPù^¾|‰ò¯€¡àííÍ¿X0vìXa²äååÕ®];ÐÁJÄÜ+§‰Ô­‹¹2äùó甸~ûö­øÇwèÐZ „$ °˜{`ýг¨E@1xìØ1¨püsÞ¼yPï ýþ€˜½ »7oÞL5…²Æži°µµ¥¸W ‹ò/×.‹œœ«W¯^¹r¥ðùÇÜ£6ÈÓ~7èŸØ1{ölÞ˜{žžžxÙ˜˜ÁvŸ>}úÆxeORRÒþýû«U ý~IPÜv•{ÌZcÆŒ¾NÙö*A Ãp8œŠFD¡)ƒQQQ¹M÷¨MI:ÊaíÚµÔ(>Q`놘°ã‚ú(xî+# ý~O4lØkw`Û(((”ËUUUq–zPC ¦¦¦x’XWaB‹øð!h­À½²ËÄiæÞÌ™3{ ¥ƒL{˜{?~üÐÓ#ïQ’òéL ý¤„´´4ø;nܸ={öh‰ÐšË<‰‰‰ÆÆÆüÛj1ðÎ@‹I|þüY]]]rÜ£°~ýz¼JrIÔ©SϬ[·è§££óúõk†~¿äåå?ÎÏ=<óNïÖò©S§Þ½{wÇŽþôr¸·téRŸÅ‹Kz^N™2¥lÑèžhÁW –ÝÂÐïµ¬Ë IlÍÆË8åäx—Ë‚NýºäJÜ>|¸›››t*¸õöìÙ30%šŒUTTìÕ«WLL C¿_ À´ÍŸr+t%Ê=ŒÃ‡K³&¡ç+º¨¨Hüs‡«„¬¬lfff›6m¨ÓÁúý’X±bÅéÓ§ÁÁb±‚øÍJXJ”È•4>|ø€+S €Ž>˜ ¸ëdè÷K44Pc,--¥À½¨¨({{{ZÎ¥`#m&ÍèvvvR»& >Y“&MúýÂÀW¦l•Ö¬Yô“÷0 sù?ß›7oD8Ú˜¡_ÍzhŸ?¶mÛV`Y¦$°}ûö„„ ÝŒ[ @=;{ö¬Àæ&IWìСC;&e>&&f̘1û÷ï§79†~’èHxb×ÜÜ<,,ÌÉÉI¢Éáø322¤¹†¬#$ÉûèË¢  ›šÒ¹ªžÃáàmVŠŠŠŒôûUÇ'$Í=%÷ŽB’(¼½½¥Y™²²²Ø!ñŠäß¿gè÷KB:«(Þ¾} 
²HYYY]]]jEóòòúIGñˆ@I¥¦îâ䨃äúýJ樠T‡-€ò)å2*((DDDH¿nñÍMRCEä0ô«aðVYK–,©$@µÖøVžâaVånVWž®§§ç}.hI´Ê2 S«Â§.åäúIÎ΢¬”—‘!èMH´lH¨tµ$­ºuˆZR´jÓÏeæÌ‘óÖôb‹KÔßrÁ’¥_Xp°Ö°Å½ØLe1`P3¶Ÿ=‹u!=„R‹Gž JM@Ú^k[¬œÇó´\x5vy7pÌ›7#00˜’Q1žÍ¢›¤§ «=]õï`± ÚëÚÙlV‡.V÷oÄþs—è§]aÈÆÇ<Ô@^ŠôÃËÚ»yø¦/áy¥&ÄÞ}biÒjU©s2¬H¾á ßXò€¬s2¥˜ Æ”}%œ{8äŸ6ÚéÃf9þGè©ðŒ„¡ëoÙY éægÄVI?13%zùò;âä6k¡^‘–E;÷¨3pŽ.앚ï?¢ˆË Óôô; ú€~$?3^³µYüºŸM/^ýW#¥†Ã›_jÎ^”Æ‹§à&!Éw¾»¤ŠosC¨ˆkeq 9õ¹þ²ý Í.hîò€#«½ñgËÍÉE¬\vÏEé§]ض–¨-¹ÝýX·TÓ5/Û‹Q÷ óôIO?Ïîäôüôä^o’g@ƒ™pÜbñ›ÓóÚ¾L'šqÛ›ï/™ñ #SÉÔ+„ÈøÆÖVL>±ð^£¹œn†n‘&Ú¬çÉïó8¨CûF©‹–âÙâÍ\¯™Á?ÁIŽÁT7ß-Ñ„Š;#Çê§µ âÅê‡l{8ÝÏ”{¡Uƒ…\‡r'îunŠ}H£‘P³TaèW£p?B¸C£Ù4yÎ\øT ²2 ‡¦Çp@?Y‚º mƒŠ{ú~cæ 9ä¥y9ŸÛÌBü½?+ËÐDOü´Ú°?çr¤h܃4ô#"¹À÷lÝ=‰†§¯Oy¶·ÛŽžz#äp¹s3#fЇ\á–±EzúãË÷L6Úu"ææ¹p,æ]$ךrÒv ž}t ‹WoØ$Ü­¯ØÄqìȊ¬ˆýâˆ&Ûum±=»òOL?ý75qÝ'`ÂI©/H5ú7¢|ÿy3>â/¤ù1J¦eø³]> Žý‡GÏ’¯…/ßu=rÛVqR‘Ó&/ú‘iæüv·Ïª ÿEFÊK31È0ÜÞ1ýÒ]„|Ç;XtÝ6ˆ”߆µ¦2ƒÐ ‘õ]»>dÉd„ê™¥ŽŸ° 2‚¤º‡³½\£¶Ëüt¼ƒ¶žüÔ¤þÉÐ04ÙyvzÊ:'—þÛB£ENÔoÉ óT£OMˆMã(õ43Fä±½í'€ºçÒ©Aj*ï Q=½Ö9ïž\¸•j?ÈV¤äæP#=¤£(kïÁScÇ’£Œ7ÿ=Š4 ÍÚ·¾z5ù]ꓼâäàïÞ}ÛÇŽ™*.ý€{A×óg›“kóMû“{+fš°‚y7^éÄs‹Ÿ<ßp%zì…)jÇbE÷]‚Î-¡â¡&ß)“a†®DGy¯ œx–j×ßU£XXroVdé8qéÀâ­*Ý"»9ˆkƒ²ô›¸´7fy]Rç<|Žw_tdø•âNT‹,*3"ƒöΤ‰¤ÕvXdÄ0!âxº·{i¡!×Xîq¡€õ’o3šÚ¿µ¡ïª÷zœôí7ÐïìÎû=»´ær¦ÓËxçøzÎöƒZ³ÙÝ1oÅ@aŸ[çý'Ÿ_ 6ëé>©yÈ€Ÿ¢hzúÑD¢ ò‰¹¸½›4t'Ís Ø9m½ðŠ–˜ldc‰¢Ï-Á? 
âôÌCWX åDÖ‡ªššà æ(ïߨOK­»¢Á;ˆc“ÿ´9 ç#̪×ê"—kd’c­ó»°‚ÌÉ›ú^¾´ fÃú•Ò “÷Oõ äú‰~ôKw^v57GººHÞÐc¸-+þ^É@(ßîš›·hÖLŸÛ/±sýfúG®_0¾ì ¤ÖžŠ\<ÑÁëŒAã×&ï(~}{µZçyþÅweð‡Áð÷²gZaUúŒ÷ìî~6.lÃšéØ©ŽÙ’Ǹõûôe Á`Œ?wêÆ¦É` ŠcAð"LCñéÏÁíÁfæÎFœån¥t*-ëâ㟋6ÂTŠ~ ášÈ°ü7†owsHEÓoBÐ%¯–Õ>ãé䆸;w‚Õ‚ÙènyçÁ-0Wñ&r™†Å *&pH¢aú%r[#éF7}»!_²õ/=GþõìÒÀ“"ѸÇ}ñ¹hÄãwàHJ˜&Û¾Ÿ¶àʲℸô 'ˆpN–i›nNóSÂVb;ÍÁ¾ÿô˜XåÝüT·éÚµþø§>hæNküëPO[òXKô˜àí½ÿ¥Éšè"/ÕaÆL§â0þþk-ôUüÀ´6 ØvÄc£zLcð‡¢ÌăŒ¾' „“Q%¦ó¬Y³¸/5Ÿ5Ë›úÉž0ǒ頻á€Y†ÀqúJ/†0rñ'ãí= {?µe¾†~ Įʺ¢¨¬¿ªýJXXeG|ùòEYY¹òD8l¢òD…ð‰V¾9]üõý®ÊËÐOz¨¼mѸӿÊF,¡S*O7==}õêÕëÖ­£%-͆Bµ]ºJª©)TrrrrùùùŒôûÅ ýØmmmŹ%Wà{Þé¢_íDAAÁ7è=ªœ¡ŸÄïPRRúúõ«tR<{öìo_«xÆxàÀ'Ož”Zr...ô^‡ÊÐOJÀGÍKø,ôš‚‚‚Bn®T'“ž>}*eÈ(Ÿ¿êׯÿóçÏÀÀ@)¤Õ¼ys]]ÝÇJ§€ ’Y[[·jÕJ¢×¯ `ùòå ëJáþPŒW¯^õèуöhúI©ËTUU9s¦¤Ózòä‰ô æÐåË—=:tèP©%º ©%gjjúáÆ~¿*¤sd béÒ¥Ò¤Ÿ4miĽRWÑ2ô“ òAÊ÷HR= VšÜ“úI §N‚¿™™™ššš’Nkÿþý£G–r/^¼(µ´$t×,C¿ß—.]óºmPœ$šÐ”)S¤O?©aܸqÒáí’¾—Š¡ŸôÜSWWÇ·®ûøø€±$¡„|xøðáàž>}:ö433»yó&]IHsœZZZïÞ½£7NWW×M›6IŽ{=Z¿~ý¶mÛ€{R®.†~5̽ |üÈ;ÇsïíÛ·LHH3þÎ;Kaä ,èåÖ6{’ÈêüùóY,ÖÊ•+¡«¢ýÖh†~¿0÷TTTrrr°OãÆ)îM™2þîØ±C´ÈkjŽQüÉ•?~€.pçÎÚ-=¤ÐÁ;v Ü«V­ªñÀЯ湗šš:wîÜ#GŽPþüă0:uÁ¸¶ÔéU•ìÉ!C†à¦&M÷’’’ŒŒŒªû↠@ÃÜ¿?ä¸'~N€Æ -àÒ¥KØGB‚”¡ß/===Š{:::Û·oïÛ·/õ$ä³gÏøÃ?~üxÒ¤I?þ„V[Ñ&4àˆP‘å§ÈÀ܃.²]yH]]](,"Ï%!ZŠEEE3fÌ€’zzz.[¶Œê€¤¼ï‘¡ßï€×¯_Sn??¿½{÷ÞºuK )–;T^½z5ÄÄ[´hæh¹`gJ¹a0q>LùúøølݺŒ:ìS­µ2Ÿ?†"""Øl¶››ÛĉKÚqݺ›¸øµ>4C¿Ú_.¨Ÿ·oß¶³³›9sæ‚ Ê ?Œ Oà¨aûöíƒ'<<\MM Èéáá!¹l?zô衬¬ ¦ 06–<}•¼äzôhYÿ·oß!á›;vÄž_¿~KÌè¨ ³fÍÚ¶mÛÂ… ½½½±êˆÍÚ¸`>C¿ßPSŇúÛõë׈"UkÇ@ãÆÏ;‡ÝmÚ´wÓ¦MÇ' ©ÜÝÝAÊð¤fÖqQ6™dèÇ@zÕş¨¨(§ÕÝ1ЩS'¬Iþ÷ߨçĉ u5j²±°°°aÆ Ó‚z)Ll ÷úý)ÀÜ6lÿŒ"â®5¥Ö»UÐHÁ&䟅STTÌÉÉ©S§(œïß¿GÜQGê )°0AG=xð`åÑÖÈ’T†~ j˜{Ó¦MÛ²e ö©’{ Ð>|ø@-Á¡w¨üȶ\ î&ˆJ77·rƒ÷€º <ú1øSÜÓÑÑ¡&+:.%%%E__¯‹À­««[ÝÃ(ËÓ¸lÐ¥y6 C?5à^ÿþý££ÉksÊeTÓ¦MñÌ>¹Lüi̽fÍš½|ù’ߟáC¿?À=þü nM ]4aîq8þéÄ &DDD0ôcðg¸wñâEü{š™™÷<<Ôvñ“¦ò[‘SéÇ@XÛØØÌš5«¦2СC‡ZR݃Œ­ƒÐÓ}î¦V£Èsè"uÁª¤ÝœÁç3vÍ0åþ,˜Ðôñý$¾WÎuŸ]$Z¢ ýþtÐ{Yyu‘––V³ÅŸb@J­ÄŒ<®8þ6üÏ|B§@7nÕ* !5Ž{m0öÙäH¾¸þÎŽ™ð+n=ç&f„1ôcPmœ>}ºS¯_¿~ÍŸG<ì~°!ÞËJ›a¥1ƒûÛ—ó C?¿Ú·o_Ësø5i½’‘¤®%fè÷'âòå-EEy66%÷œ¥¦|?{þëôi ¤‘<ñc…ÿŠéÓ¨©×=yr B¤Ú¶wïçnÝz6kÖYÊU1U[^¾ƒnȹ$'G¯í{Ù2Ö–çé»]Öo=·ØÇ“vPFw$øÌó˜:yUв!¶\ÛoBGâc‚‚‚ÆÆ3ôcP5Ö-;G«{÷:•ºcPO¿þôi 
†Ý½›kj*©‘˜ü¼çº(/ÿ×Â…ZÆŽUGÚtÒŽ²¦Lž/µ ª…œ{œyÙ;dÛÞ)×wxu5™q1!Øfߺ‰ì>ë>û’TìlAÒ¯SS•WÙª[Ï] Ùw*€äœ×kΨ=G8É0ôcP ¾ç¿®/w¸Wy0SS á®]_çÒ›uk—͚ݡ¿ª ™¬–›»EAaštªe|y,•f÷€Ðå‹v,Zn‹º1.kWŒ&—Âü=ÌO¶©Mb†W&iÊ \¤:R›Ø¹au–ì±C»eÛxDÈšåÝzöc¤ƒ ‘“¯¢V;7•ýüüøÏ×®­ãrO(((Ôáj¤ÎR¨™ÎÇb‡ —{€IsK‚»xzb‡¦LÉ£Iîžü1xÌ]D§íÇb±(÷î+iVÍ*y?q}“Yð&1²‡³bÍŽ³&ˆËàξ¶HÍbÒxáÕ„åÝÊ$ûö2µÎ>T$ è¶µˆjqÃ×WûÀ £F¹‹Ÿ~Xè g.]‘EÑ¢7>ý¯‹†D†^üïæ{›Èr>Æ×ÓÔuà²bþLç¦&C]lÁýèêþUáI+§ÙÜJ¯ßýo+„êaÏð /Tù"Qí¶œ –Û±XÑd #9°}íé[¯ÂÃÖóº– ­ÍÌvI½r,:Z£igËv ÊÐNQ˜Œ K„G¢aV ý푸G"22püøyôÖÆöÈ}òŠª†]ºó{²¨»Ðe8vÇohnl”ø.ýkâV%#¯ƒÿåôÔˆO}&ûW ªoµõƧQ\âýsçËÑ#q'ýíh¦*,DHV¦>%c?pf6”q›8‚ø8¡å S‹¬”t²>sЧ“Ø“µ´Â¡3,T߯,Ã[ªñß™&,–û¨¬[ä-çüDe@⯇™we‰üú™³ëmû‰5ìÎnüBtÃl¼ªø5—ð þRL¯1SëüE^!ÜÉfH¹Áœ'7Öî–˜q Ý]¼b©dô0³åÇÖ‚M®~á|¾/Û€·Dî}þ^¨u’oÚ6ú-0WÁ4JþÌ#ƒUC<´s(`è!ì3\’ÿʨªÉX žóŒQ`bPö}–j‡Æ½ øÊ'8îI]ÌâÑËeÆ ›àà‹*(9ßedÊt¶ýäŲ9³ï©ˆÇ ½{WëY­W 8E›6oimÚµI Cø©ÄnÑg\‹Ê_I<5‡ü›q OÄóO»›¨”šÇ:§A c±¿Nùôó¹òÑÏJèa`¹”xèƒøˆ±i¯+å§™r±Q]÷ÈϯÚábÑK.™ÅjCE¸}f?HK¿yº%g0  ,VÉHãúõ#üؤgóYGtT»B›R`¹¬µ@³âB¹· ”üd±"b~ëøñ°ÁƒE´ÁΜ=;z´š8ù;VE˜`‡£/f¾×Åv8þÙkÔT‘S<øàËÈöÊà84zþš‰eä›±ÝÆÄÓn¨ôŠZ•OžÅ^Èb]çCI*ÿ+½n,ÿð Àµ”§bEÚÈóÞbi-Eïy£jÍÇD"” ŽU/ˆ@+üɇÖuÚЂ„»‘&¦¥|Ú…D·yKÛ_G€õóàL<Q]¸Äs‰>óèt´ 寨P$rÄä^%HMÿ°gGX÷á•ÔÉ‘}“nú4ôUÙó¦N×›²ÁX»²šbOdãÎÂ'¾O\áé·pÕÞ±NKW)ê–Ç:JþÈ`·€DÂ?¹ƒœå¾U §‹ýËð)çbÄ(ŸtãÜùSíÒõ¼$ú#+Ð!8t‡vé¿‹T¿¢%Í\0?\ÖÐ:Ä?vuÿâWzÙÐ<;ÅÉû.#_q¾#Ò”zšö±•.I¡´ô<]¶<îøÃg~HÑlH’k[ø^ C3+Ò÷/¥NsèÍXæ¹UÛön½ñ)ôÐN!Ç-{[&¦-Ü9ĸ»Û‘—_rönÛ@îrý„UëÉà ¿1M¼6CI¹ñp›Öî߀ Ú7C_R²¯„òóhÓÆ¶®nõ<|;¢%I¤¤¥ò“ÃêÅ&åŒÉ¼’éþØ;MnÆ=³Õèí»|øùwŸ¡Eùy¹Ÿ3¾çææËÉÏÍÍýú%/÷[^îׂÜoÄOBVAQ^QQAQQIIYYYIEEUMEY]UþÈI7Bhö ]½z«çÍ·¸/^DÎÈÊh :ú“Žä…,'§×CV,Zî5øèÞ¨¡cíËÆðýËG¤¥# ú-I#–0 ¼vƒó]Hm¢!Ùôô,6[M¢ùiçö€ Ïv2ô»÷Þx§”ˆN™‰W4Ûë÷Ri‚Ü]Õ’ oÕéU'¹‡n¶kOö+‡†ø9‰?en[ð<ûPwR³;–ÿ‘Ç"rzÝc!žÿ»Üh³Þ¼@­$I¿cÛý‡:-´²~%:Šißµjê¥TH¿‹Ø¦\%Ó—È“'¯\<¬)Ën¾¡2^»Èá þ«5?–îæ¢ªAdù-Y›púéô{çÉ›’âvvZç{ð¬Ö*ã&3vY:ÖL·Þ‡ãááƒK‘¶=ýnN›4áÂÇšMšÓçñÐÀꮪýX¬®…fÊh¡¦ï‚Ù±X×ó½Í™{¤jãÆz ÄwŒ4¾[0‘×— üò’ÏyˆTÚÍÉóVŒQ5¨P·N=ÒîZ&#Ó?’Ý»^Κ-b¾}û¡¨È7¦ªµd6¹è´Kr“;».šåÇ[t±>Ü;¸'9ôîd¸çàá¶VýÅêûç ½†*"¬Ñ«ýð!ýÐGÞåR.À˜ÅíJÿÏÞuÀSÕÆás … !!"#™EdDÙR*%í¶Ðø ÚÚJi £ŒBeÚZê*-##Òýþ÷N×½×µ.÷^ç×ç;÷½ï9÷=ïø?ÿ礂¨ÅRöÛô$Ú8¿G!H”±Ú8W$ó\J)E 
ÌIon¤žžRzúk¢‹¥mÀ/@¿ñÖí'$á'Êna(z±~âh ‚Ó·`?ùÂ7žÎí¤òßb=51Šˆu$üTq+秇Ð÷ßti~ûY´h¶œ¯Ù8qü焉í¶tšÖü¾Ÿ^ËÀl#íÐõZn%o/Úüê $$þ«±£à—ê+cëm §‘Ë逕"UØMôqÃ>R§cJR'…€TVÞ”›»‚6ò‘#%-<óÂ…B—æÏ|™0qa;.¦5?ÏžjQ $’õBI¤Pø¿÷òMðw¹Ä"´¡ß¦™®¿_ºj ¸¦3fL@Òß­¢-´ùÓòeÒ‰I×ö~ý<¹¥k\\–7›SS¥ ¢ùÕàk1å±À L¾]Ys½o_8Ñ$ÚË—­~ûn§¢B“—/tê4–- øR`%)ÛÔ»¶m+X¼xFû.b¹í?E…ù¯sw*)7©²­êKJ)¿zõ¥OŸÇ¿%$äëâÅ+Û}¹Íï_’òüÀÀuÞÞ {¡î 8½¿Þ§‚UUíkÌÊÃ7ø.ôý …h~ÿ¼½©ólœ:…ùì–'O:+õv0P´•ÀÏ?33㺆æ;æ;¾~‚DÚÚþ+%B4¿ø>bñwN=Î~ó»²Z±·¤£ƒ¿€†F«ÿºŽ®vQPð*&æ9¿X´s'M=}Ô@ü[eA4¿æcÍ-8öëRR}Æ÷úÇ‹€h~Í¢ù @ íš1ÿ‹¾' @ºX»víŒk¸0a:RzH‘x(Á8DE)""$.Oóþýì—¼¸„$…ŸÔ6¿•ŸÏ3…È%¥ÃÞJH ” @&€T°oßqº°îý]\‰¼i=¤ß>¶÷Ô©ï_J»ÉéÎ^è§§Äê Ϫ‚k :Žpñ˜L!Np%@ -[_»#¿âx’˜üyô×À¡Ý9þJµ‡.Õ.ߦ¨x²ö¿%ŸR ™G—wúÙçi9-KÓš»Š>=ÿVÙM¸øQÜãüñã©YåeG'¥t“Ó·4ý{lãëô„”ç/¤•ô- u‰ÆV?oɪÚĽ§â„M$9Æ"d2…iž}z³u$0ŸrjÒµò¼ç²†j[®>7’‘R’'ò’¶µ¾ß?^¿/PRêw%üS bí4Q¢ö(‹²oo¢îÄþ,!9Ž™ZX^ðúÝ%µ¾7®„ÿüÕÉÞuŒp›$ŸäW{~‡à߃<ÊÂ÷„DÇ¥#BÝÆNusH=6òØ™s1å$Dg ãây5{û“3b2ó+++'§šÍö£.]Bº(›˜ö­e’²‚à ­¯óeÖ¬›C›A9I§÷GÉôV÷YëSÁé*Á/BrÃ[~JxÔ ì´JYYbsœ|ÄýôµÿR§dÊø~ò㪬æð«¹×qýR©¬ iTÈ ó»V ÎRoTÒ3ÃnÔòнémI´»t¶V@Kyê -K•‹ü<Ã)ž,ó\®'‚äÉõRê‹=@YYUIŒÈGØÙúгw ¶_½#µÊuøD¬õ ÊÙ9A.ä=o¤POö–%…#ædòâçû ²lÕäÚȈ^0ùj«o¬ÍþnOlÓyÝ©‡ÒϾ"‘D¨{´ÛK¹5k£É—F“-Ô#‘æS·8§P(§½†agäâëœá¯©_R¤"ž$D—.›“Î3 _d"Bš[ùÛ’Äê r³(Ç|ά°õMäp•(øò»JâÏ_.äˆ ÏCô7â!‹åm7#ÄýdŽÙÞôˆðW; …"‚iïmyðwР>Ø ÊYÛ!ùÑ ™\g¡ª·)ü=ÕŸ`™çhÅև͗uíõ—_>|üŽ]¼³^=£¿r_º]Ö'«×Dž× Ù•~ø÷Èo›)4–qh*éÐT,duXâÚIƒáböÆõ¾ƒW!H<~*‹ÝøQ'V4øÌÿY`– &-û¾1¾Y5»¦äí…¿c6ÝH»E Êàd…è*ÌG{*%`MÄ’sã}n¼_j+χ|ŽÞö™°g3Ý]zó#þ½lŸ»¹ÍµèÅ)u‹qqeËwù…ðŸ¹+ÅS_!ý•éÖkæÑ–÷TÐ…MX}=hV“·Ô¼|å̈ác8Ûö²Z[¯Ž¶œ~|º0©4zÔ9õ”˜Æä9 æQ‘¡·K?[YÝÆà§“~˜*±êªÝïBúïûFòzWzÜiyS7 ~®yÉ_¯©.? 
†x%Ÿi¢ëMm#`|yTÌ?»µÊÒsCbʧHSšÞÜH?._OakÅ©ÖWÎÜøÄSáͲ™ZB¯o­B=ÓxüK—u7.¬¶E:<Û•‡y/hƒÜf?ù-J ,úû©Ì$²ÎÓt§2…òJÜd%…òw»þƒ m§o÷!Eø”R(}‘Ä{4ùHûLIɬ£ qì®=r;ÀIæëdA&3Ù1§»Å2yK͇Ãð83.PþŸ@‘ÃÃÅTÇ’ÉØ¡&ÈäÚÆ@éÊôáM?µošnTŒÎ£—Çon¥ª¢A²¤wµáïÈ,K·ÌÔÝv%ó/SÎ6׳fºÍ½ðÅÏH"`bï]±o±;n¸jI"¤²2½orŸ²âÄÜS_üÌêÄ9Vi*ûÛoÚ-ò4šÏÜmLž³!?ÛèÄj`CrËB^‘)`h²Ï/³ž_ó¦6$MLw;cÎ×ÒÌX¤ûä¬5 >¡-С#]Í„DÊéJ¾M]׿mêž„…OÝœºÞI©”•­=¬dÀBrdpV¤É* zÁW-9nÉÆË‹UH¸X|Ê|€?jmÖ*Ù_ØØ¡ït8ú, þf¯7í$ÝW½Ç‡l¿éHµËµ¢P™6ÎÜ(Ê{=ªž£=È´‰rÏ{îYÊ<4P€þàáÎD§D}X½t–¡ Õj¿8wæ\œæ#ÈÊô8K1ªÕˆ²$hŠüO6óÕ´±šZŽßÜ–í=«¦Ô~|§ãd²;n\nnÈ80U®³CÈäãX ¡W"9|0gÑ…ÂeFÄh[3á{“²K–“ß)Hv[¶9n/À,Ê«#ø˜æ<ć Œi¶X“X?¡-á¬^ÛŠ{#Tb¾]³¢yÌšž"8á9:R »>“•V?œF±•¡†Þ8žššaˆN«æÇÆÑØK¥ïŽöWjg'lÛŸ^óâÔw~ÈÎUì¨õîþùk9jÍñÿÑ57*O¿ª¦6ƪ—›ú  á"´ÍmDù#GÿJZo‰«ñûG~–ä'hN+ÈŽQ(Çꉨ;5‚25¢>IÂz—ÑÀtJ`=_Û“6+·„š¢¼sÅKþÚµ"YXBÕvâ¸ël]ÓØ4–‘ÉԾ椛ÇL¦žšOéU÷ÊZ*´¿~ù‰]˜iÿõ;%$º!¼&¹¸ŸnÍ£Ú8 ™ÿ¨óì¤ððóÌ2–uÎ'g7ü„6­ò«z{8 ´Ë~CÒ®<Süþô¬å*[׉èÿÀ}d°.ÃÎÐH¿æªUJVrºXùåÌëÐóWs)zêôÓàÝüHS:i›AÔ€¦‹ÇÈ3 .ö—eFÌ«cqæz5ûÕ,8 ) ÿÒªí)k†´FÚði¹Ã·¿·]tTA†äZmŠõFˆ<Éh”÷µ2_ÿ¢O&?Ì>æl½â’‚¡ÇL;íþòë^®¢ › “ÌF{góÓr5”½å±Å©ïR?ª óŒ™!Ï¢‚Ôì5e÷ÙïXd·`Éü–ó_ƒÈ:ÿ±<ÅÁñtCbXâÏç½_l=ve‰G2‚àb¨ÁœwV7°´Öà8£ü0Ø‚ò“UEc´7n¥N+¬!ARƒ eã=v8),ðYµÝ”³=h×%ü_¡Ÿý0¹èãûшIÔIp?ÿ67¬2lÙp>À¡X¾swЂñ´OªúUŠ ÷x{Ìñ?¦Æ!OuN1o¬ú¤ÎEðkCo¢ÃkYijo­å ݸä¿©»­ý£á¢¿"òø-bä{ÁË’Î ÃßAVvɱ×1Û„Öê“d÷Q?N%ÅÁ'-s$‹:>|ú1Å´Ùè$¿â$2yvM;æ9wvÑÏã"Ù£6pÆbTkÓèB*þ:nã'Õ]w„RT§F>\ÇÍk^ÚP  eþ.+žuÒ~Ë$ÿêbƒ(p£Ï•Âù}3eU-¨Ù=T7. 
ΞL¾ÆU5ë8 ’¦Háïë9n"Œe}9ÿ7—(Ó'´¹êQ¨ÏÁ'Ð#ÈÚ4áKoQ–Ö\nª}!ü!4õöïˆïü%ÛiŸŒ»G®“Öq¾˜iFýë‚öýýЦ¹ù¬¹X:æÈuJjo¸ø.yqÛ%<ïÖø]h´ã‡/‚k7Ý}þ”)ö²²PKdI¥^‰ ¡Òwó>s õþ¦pvëOú» ÆóQ»€Ð!ÉôÝÖŽ(¥Õ“×ð»˜æÝ1K ¤ÇÂÃÍázÊ cÜ÷Þ?1{ áεoçÓÞÞ>**ª1ÏùA¡€´ ÷ y„·VVžGgÐÕÌPÒ£o>_fdBÎÏ“•ù;’pìi$ƒOpZ—JQòÒ¡ÎÑÇ:¾‡Z!¾¦²T[œ©J;Þƒ/ýù·ÐÙ<5b½aí˜Ð¼¥@ï±È@ªR‰Îœfšÿ‚Ú˜:—Ÿ9l VAËy÷ÍÝs‹F€@#Qöâh Oüã¨U‰bà^R'½ý¤ͱ¯Û‰8YšÖªŒùš}† Cü—Cøé§”6y­—ü>|Ž]ÈÖs-óÍ¢Pöb!Ø.Îr$ "-++Tò«¤P—‹>Xßuàjø˜]EÑ„Ÿ­H& ÃÇÄBÄTq2E¨«Ô倸spJ&6ò‰F³8Ï’ŸßIJýZ\i?LyÞ¢Xn‚?ŸJÝbþœðm­YŒù&„> Ÿ©N½ªÝ8†]`.êvåך NžÛ3=·ÓßD3ÎHX@&Ó¬úüW•I/‹•dòßu¢¾‹ÃYd ͘“ü§’¼A¨=š aÕ¿ópÌ¿E™Ï¬¹©9n&“é·›Õø;2Íyåç<ÍuÞùmp1ãäûýãäÿ~nc­d73+*´&DVˆõo`3«ÄµÌ°=ùp?€rØÿ,í‡!‰ÑõHÒAF¨ LùAqºû‘_]˜ãåÂø¡NüD}%@€õ’Ÿ¬íÖʪãŒW80žþÛi‹ü…©êê)‰¤ŽƒI¤`{W»¨s×±oc ZÑì‚]u­3+ZßtRØ©C,oí ƒøf"&«˜¬¬‡ä¦åO€ùÑ+6•qÊ8–·«±X`.>`…òw‡ºµðtënІ®¸EYÁü£îÂ[”ú·<5­û¤îxŸ ŠO‹³lÍš5ìÊýÂÂBeeåoß¾±éy¤ÖN0‘ÒÒR¶>’>å«W¯fK÷®ŒŒ _^^^Ë…&©ÕÁ®tèÐAPP°¬¬Œ} ãê‡÷ýóçOK+"‰ÔfÆWºYC÷åååЛ÷¦Í.D¶”ιsç\\\¸§ëü8‰®]»Â_¨®®®¼’fÌ:,[¶lóæÍ­ý+-DuuõçÏÔ}@?~¬­­ÍCÙË.®ª¨¨˜2eÊ‘#GÚý‹c·KHH°Ï•äÒW惿ÅÅÅbbb¼U:ãÆ£žZÇÊ@«ÂØØ»˜5k¯ŸvÞªäÇžÊÍWS½uttþ©=ð¾|ù²~}Í®ÁaaaŽŽŽ£FjÇïÛ¹sÍF¾ß¿oß%‹“Ð|UUo¥oùýëHN®Ù™|Õ.]º”””py‚‹ŠŠÒÒÒ°kPTÝ»wçf/²”ö£Pyyù?Rµ¤¤¤h?‚kõôéS55µöêDÒv ³«§—›•âïß¿óòòzõêÅCi†èãã³qãF‚üþiÐu#üüùsÆŒû÷ïçæ4‹‹×Ù¼ìÜÜ\eee.LjHHd)mHEEE+ Uò@Õ ®®^\\Lç´âN$na éêj»,VUUU.÷çÓ “ ¿x/œŒŒL~~>o¥ùðáÃS¦Láæ¤.DàR¤¦¦>zôè_«ZÕÕÕ’’’àÀß?¶×—õFP÷+‹‹k¯ê–¶õA~ùò…‡Ò‹/îØ±cDDÄ«W¯ÀV~øð¸Ñ¢Eíòeåååoܸ¡¡¡Ñ¾ËtçÎG=|ø0O¤vâĉ»wï^¾|ùìÙ³¹6‘ùqS§Nå­c+çxàr®X±¢C‡ÒÒÒݺuk÷óà1`ojmm ×þüÍw÷î]MMMúWUUmO/«¯¯ÿþý{…wïÞµã23g8¢¢¢ÅÅÅ\žÔ€€Híýû÷!µÜÌ|ùqŠŠŠ'Ož7n¯$xûöí¼’TP¨<÷³²²ø³ í»:uíÚõÇ'Nœ¸uë‚ÎqÝ·oßêÕ«srr¯^½ª¢¢òòåËöñ²Øb›eË–µoæÃÊÔÒÒ’Ë™¯oß¾™™™BBBÐÖž?ÎýKçÄ+ä·råÊ 6ðJÆ‚ÜÁ.–,Y²sçÎÍ›7·ï©FFF`%áâÔ©SX×4üÅÆ;ù@öó]¸pA^^߯€Gáìì ÌWVV6cÆŒöZ Ož<ÉÍÍ…28qb\\w&òèÑ£©©©àVðPï:A~œ­¾ºrå ¯´É°°0씟ˆˆHçÎ_¿~­¤¤ÔΪеk×ÔÕÕSR¨§ 4_ú†‘ßÖ­[{ôèñùóç/^(++ƒ=…ÀÙ³gûøø òÜËm?|øðâÅ‹p}ðàÁ ´K³ ££BJ° ·%#¹ààà &€s äÇ»èäÇ]¸|ù2O¤óÎ;<´`nß¾}“&Õœ1vìØ1ìBQQ±²²òÀÓ§Oo7õ§OŸ>¯^½Â®ÏŸ?O»èßë˜ÏÏÏÏßߘ¯°°p÷îÝ{÷RÏà¼~ýúƒÚfÏî–# ÀÕÕ˜A2‚Óþ˜¯¤¤ÄÆÆæÞ½{À|º’{fKfgg›™™}øðAXXh¯{÷îà ƒOùõëW^Ìj‚ü8qqq###Ìmçfx{{§¦¦òD–Î;ÌþÑÑÑüÓãÇ©'¿ óIHHðh‹¥6„3ˆZ|ÃX ´«H×­[jÉÙÙê!|çÀ|+''FŒw¾éСCOž<éëë‹s<„pùN 
MÅøñã.\8pà@¬ê.[¶líÚµg¾•+WFFF>~ü˜D"õë×ïÂ…  ÈÉÉAÐ}y:à òã €uæòŽ1‚W˜B¡àš‡P#—ù¢££¥¤¤xå´Z”––É ÇFø0$$$èè舊ŠÒƤ"ѱcG`¾ÀÀ@lKLímÞ¼ÙÔÔ|/pç±hòòò ñáRÎâû÷ï`jA­ÆÄÄÐQ~»iûÛ·o÷eçÎXøÁOž<áÔÆñ^^^Àv‰‰‰ØÆ¡ ítuu%%%ß¿¾‘……Æ|íùqÀ½²±±¹yó&w&/33<>^ÉLy'Nœ  Ø.h`ã°¯@&.X°àíÛ·ÀšmüšyyyŸ?îÖ­6B‰$û–-[Úó)**B„Jdƒ†„„@¸““S›mH´téRHüîäÉ“q2^¾|9ÐY³f­ZE=uŠögu òã€ë÷û÷o.ܨâË—/"""¼²ƒ†½½}TTÓ¯Ö¬YÃ˜ÃØ6»»ví?wäÈ‘\øFÕÕÕ PŸ?.!!ñæÍF™ëééÉ”ù0ËÌwøða;;;\cÌœ ¢mrFExùò%°QXXÒ€$ 2,/”ÝrR   0Ó¼ÛÀ+++AšÏ›7oΜ9ð¼ ÚBìÞ½;È\lCÚVÒÐK–,9sæ 84´[Kƒ3sæÌ™¾¾¾—/_ ={ölÇûÁäÇuPUUÅVbqUª®\¹baa¡¢¢ÂýXQQ^ó444ð‘?:€IÂ.€`òóó¹a/ccc[[ÛÕ«WƒðªopÛãøèÑ£õ=¤¾qMl´LWW7==vë}mmmŒùPè`1‡®‹ŠŠðh?†´¹»»·°k…¿‚uS_¿~‘øAæÂ[àS–xàiA. bZ@@€nÒ&d)HyÈ(bvíÀ°ÿþ}ûöÉÑjJÐÐGP‹‹‹ƒ’Û»w/$ AÏ^àòódòkÿ€FÒ·o_îYÊC³BôõõÓÒҜà Ìgmm-gÍà¤ûûû;88´Ù+lAqÿþ}lùí„F/–œœÜàîþŸ>}bñmFF‚î…¦ÐÄÄ„î[ÚµðÇC‰oOÜ¿¼ã˜u T0÷`R1õÀ 2¶nÝŠ-Ϋ²²2YYY( é¼RKKKAž‚t†:†uZBâi‡*q‘žÄÊ•+A¯‹‰‰5•ókê˜ýmä¹h`ãh‡grrrÀÞ9::6r§´òòrooïC‡;vóæÍÀ[XøXy¨ƒ`=Án6U(Ц¼AàÃl`ÈXȴJ@u:t€kCCCWPP`ñØz¯_¿:t(˜o(îYí#$$лS§N…æ@K-PÔw/Ð|hh¨››P85ÌŸ000®²¶¶®ooU3pqóæÍ«W¯‚c±~ýzãÀ‚3Œ²ð5*òkW3Ì—žžFs÷îÝmð‹àHîÝ»÷òåËõMàgŒ1,ØÍ8,«ñcBMMM|>$°ncÆŒ))))--7|áÂ…ðÿVHHhŠÆÿhM°z ƒ°»Z²¥NóNú į8R¬W¯^d2ÙÅÅD ÝâÖbîܹs›6mµ:mÚ4]Y±nÝ:̵‰‚E2***@cœJLLZŽ„€sÖÂúy½xñb||¼ººúèÑ£gΜIÇ.@WðRÊÊÊ@E Ž‹ŠŠàc¿~ýôôô h ôôéSŸ»wïúùù-Y²¿w3 ìjEtt4ü¸/yëÖ­ Û,,,lmmëÛfÈa òû§¡‡L<ˆ`ÁÖø 0s¹¹¹û÷ïç~GRBB,XaðÇÙøXŒùX/ƒ8aaaใ#šl¥¥¥%þ-DÆÝ§òóóçΛ°jÕªûKÁD‚Mq€•2hM°•ly;Öç»–••Á{?%''ƒ5ðôô¤[9þ4 à¼Ã“)€]€2!æÁƒ]]]ñpÚ­d@hIœ8qBZZÔÕìÙ³éÖT NCÑàk^¸páäÉ“P@¿~ýúýû7Ôx,|õáê¢¢Ž‹ŽŽŽ¾¾>ÝKÕç媫«¡¼|ùðVqq1”‘¯¯oŸ>}tuuAÉkÂb©h—.]\PÖŒ ?MxˆÐÞ =ƒž`×b;04ýû÷‡¿Øzg®Å‹/°™­: dÖ0<<ŒãŒ3è¶u8Eã(##ô°@ǯY³DLJJŠ»»;Ög &²5öb‚¢7nÜĉ騾šƒ¢ñÏœ…ÿê ˜j)¾v‚uÇ †=z`s|°@3'>ÌÌÌgâÕ«WØ }QQQ\:§¦gÏžÀ7@ÒtÓs@·MAÑ`ú¿ÿt{úôix¸;Àôrrràʃs“““³`Áhø XÈ1_„!"ȇʉŒ)4Ýàààõë×7ï!ïß¿?~|RR¶¯#×¢ªª Ì\AAv » ö”ÇÍ›7Æ ÅÄÄð¯@@×`X!í¶Ôˆ˜·oß‚"¤“•@K ½ |›´ÝLyyyPPБ#G:wî „=þ|º4ƒŒÃÿ² ‡ B922ˆhƒvýˆ#¾}ûR8==ýÙ³gMCC’‘‘H[ºy‰²³³¡6»wëÖÍÍÍmŠ…2ømÇŽóðð} zëÖ-È@PÌ _­¬¬ÀaÂv'aü ʧOŸ>|ß¾}pO£‹|ñâňˆx8°/¨7(>LM È@[š.Xífìòž,2uêT.Ò%‘——7iÒ$`¾–<çܹsÀ1`÷ìÙƒç@¾Axƒ÷Þ½{AW¼±EÝÄXÍÑÑ‘éÂ`,º¥‡`ÍAÜÄÆÆbkÈd2ˆ30î HhAƒjYƒ‚ů·dó6pA._¾|ãÆ à€Ÿ?8$0–?LA»E;(*H381p 9Š H …®˜r7 ºGì_äìÙ³YYYC† 
5jHIlý%å½(°˜¸x=xð p¡¢¢"-óÁOCuÂW ¹,@»ÌŸ) ¤víÚ•„õªU«EV04fÚ1vÖ÷Ú'6—ŒkQZZ ² Þ«÷fdd€m5°nÝ:,ÄEKÒÌ'##Ó˜¥L1räHpSú÷ïO»;Wcàëë L°cÇ|µœ¡¡axxxó¦þöíÛ—1’våʸ°°°€Œ^¡ë½víÚÊ•+!cu¿¯ ð%È# À 6à›…b'Ë3Ž{Ý»woË–-À¶¶¶@êtƒˆ¢¢¢KP4þe±Bø-IIÉÐÐPH9Ô¸ Šq0ÛÁbrÊÑ£GÁ/éÝ»7°#—l”J€ ¿ö0 д֮]Ë:ZHHȼyó¸öL˜–m<óUWWÃ-~~~ºSIc-4À|JJJMS B³°°022²1‘+**@Q 4hß¾}XH Ú8 …± P±))) îuš)00ðĉ:ušˆ‚N‘€]‚©bŽŠŠ:|ø° Ö?ôû÷o³ðsW¯^Å–p€2cÜnê*|Û½{÷Ó§OëééááÆÆÆlÜ0V^^þäÉ“&&&ð[xeXùhðtÓîéÜR®Ý)›ÒÒÒ¬7"Á¡¥¥µjÕª1cÆ€Li›}p€ù0áҘȽzõ¢jpu6pÀâŋ߾} T!((øøñãF&ïßswwzýúu CL`Íþi5“? ìc#ŸÍwmP1ÃÏ©¨¨`›0ðññݸqƒö[Œ¹—/_NK9~(pEÅ®Ýã€}AäaÓtY4´;Cδ’"¬Œ;Dƒ6¿D0ÑeJ&<A\4ÐTO0_Ÿ>}X3h#YYYŒ~²²²Ú>…ðÓÇg=wñ- \¢1} àx(/`&7OÀíÛ·A¾÷èÑ#88ÃÃάPWWóæMƒ ?œ%‹­t¬o¥Gee%ä?œ|LLL¤•‰S¦LÆ3§Á‰‘ؘ+k§­1ÀT#di³OxÀ·KêÖ­[ø<Õ66=z4x!ÿæN›ùhïß¿äÈîO¼˜˜ívÉt{&´»öüm6@c± ?++«ØØX¦¼>`À Ex‹Fî×µiÓ&O;vìÀgi2„é j z€ùàádÑ΂äää>|øÀt¿··wII ›€€NŸqqqNNNÑÑÑØ.ÍX…bmv·Ö«ÌbãÓ¦ó¨€ãûöíËxhFãìÙ³´õÊÑё؇“ ?ÌáãããîîNw|]DDÄÀ¹<åK–,aÊ|ÅÅÅ...111ÍžlÂv@ö®X±h‰ñ+)))¦SA”-aY<¹ººÚÔÔ”.àV h|ÚzõêÌ7sæÌÐÐP¦Ë3ôÃtÏ;wð9)´…Z|öìÙø&/@à |±E÷–––´§ã¶À|‡š:ujãoÁ¶£k¥=?ã1ækêŽ?­𨰠(©¥K—>|ø0wù¨:a4hРFîzÌYôë×.¤¬¬ÌÞÞš:ã^øG||}J‚™`>îLpbbbii©ˆˆâííM·ŒëEd:0yðàAPf˜ kðô†f(qö¸´éÎ|ùò%ˆN°§À|ð‚½{÷:Vn›C€ùLLLX,Djû9ÛÞ¡üúõë7ܧ¦-ï”Æz7>ùýC?~<6ïnìØ±ìÝú²•àããƒ_ÇÆÆª©©q£Í¼ðñ9::ÒNn¤UNªªª/^¼`ìE„÷ŠˆˆÐÓÓkä•-0j©¨¨À¯]]]qæ+//>|8ˆl`>Hff&P{ ÷h€ù=!@œ:Û´/0G¨·A`ÌÇCÇmäG µ€oøÒÈ5œÅܹsñ=ص‘J€v£5Ú©: ã€ùhcêëëƒ ¢¢¢òìÙ³¶O'0Ÿ>Dôë×/¼‡ßà“À|²²²d2™.ýmŒÔÔÔ’’’.]ºÐ…ƒ{ÁZ¶€ù°="¸°BbÌbãótò#À1`{ S(Úý¥¸¸~⪑•±xñbì"''c>°‰ À·f™4iRtttãOÖm%ó?Ó¦•••X h;;;;‚@ÌÀ|X)Ý쨨(0ÝØdß¾}Ûf)gƒ€Rvss;}ú4wVK`>H› ã™òkÿÀìòìÙ³CCC¹?µØQ¨½zõâ!æÃ+è01,--…·ÀÏCHHHý:þ|`>.Iíƒ0ò«ªªBjÇ#ÁJºººîÝ»×ÓÓ“«: m¿ÿæã«1GX72÷$òMMM#:¾1À–‚㟽‚üÚ9#"" ês?ù¶ºlÙ2Ök¸€”ƒrMIIÁ&•——ûúúsÛÖ©›6m266¾wï^uu5|Œ‰‰Ù¹s'Ð3_jj*·e¬˜˜˜®®nFF\¿zõŠ«˜0‹±In46ž‚BÞXä¡C‡r:Áùqù.Û,2¹¨¨hÆ [·n522Âv*æãÎÔb'€òÓÓÓ‹ŒŒ´±±În̹~ƒ¥¥e ÷i%€Ó@;­š ÌF€ W äG ‘––F;…’kÞ=ïÎR{ôèh)ào ÀÁƒs§Æ"5 àÏŸ?ššš^^^p „͵©ÅŽ(ÊÏÏ÷îw¦PTT´sçÎ?þäæBæ‹ŠŠ²··'L"A~ÿ ^¿~Íúì1.¡¡!~ðÏB¡€Y9tèv&0÷'ØßßßÅÅåË—/ºººjjjܜԞ={nß¾=$$„›ç.6õMŽàòåËùä÷A÷§4h&ÍäÞ½{çééé5õœ#NÁÇÇ'""´Ýñ ܉ãÇsy¸¼¼<ð H+nNdhh¨§§gXXa òû' $$Äéd\ ÎC””,//gºÍ4wbþüù7näæi´ÈÌÌ çòDòIJԷoß&‘ ¿t'qs-œœœx7“)Šªª*%XLL¬¬¬ ô 
O¤VZZšËûf‘Úu\BöäG€ëФü¹ yyy<$ûxÜŸÈæâÔÆPTTÄÖ•Š ¿ö.9‡¥A(++ón&ã' òR çã#ê0ÑÆ‡¿7>$È ¿öFr—c³R“äTÔÕuÄ%¥±ðÈÔg™ŸÊ±ëªª_^>É}üP¼›„Û(§."ÂmŸÎÇ/ßݾÕ¡#ï~º=•ÕH¤µßˆ}BÓI¡üù˜ûìMv:¥ú£ŠwuÍÅ%§§ÆGKÈÊ+jhKÊõÆod¾ù›É•y/rr³Óºv“ã~~,üA^1žæùïžf~xõTߨÜn'§]OHNOº-­¨¢ Þ_B¦¦è/ÜËÁ“ú«ì'µg¥Kõ”ss.$(Øö‰Ì|ñæÎ­ë|zkèÊ*÷­©Ã¤~Ô&’ò§êðëì ¸rp°WVèҬ¸ôÒå+ß>Rî¯ßKUS SÍØ¿û’uxf~ùøʽàÃ[c + £„%Èg@þZtìð~«±Óø¨&@¡ßøÇ">??µ¹Â?¸Î-Ajx”tSTXØÙÞºõXyÛÎÝjúÆ=•Õ©Ÿ»H™òdL‰\ ø×¥`hІúñÕÓ—É çÏmûL.)ÿµ3$ØÜÅCD¬+|ìÖ[Ý®·:«LTê§ÿàú5¼@)5ýÙ÷bù; nNŽm“æ“—¢ªª«û›`›ð´ua¿«” üÓ1·Cj3¼òWy쩃sçÌmmòþZôóÀÞÝ–c¦tî eTuTuXćhÊÚá\?/EHMmúí()‰®ŽÃ,[±ïØ¥>ÀTV©/õ³¨´…ëdVu¸CGð3àµþÔf釗9¹ï{ÍÓz™yêÊõ_¿~i¶A?uÐ:’u|Éž ð¯†ÑÑD–ý,¾söÈ¢E 9âUäG a{qôh˜Õ˜©"`ë1¯…OÚ TýÏy¯Ë¾|d# m ¶t›ÊÇÇ&µe}J}Ôá$òwUÕí3‡—-YØÚ™\^Yl7i>\ÛOöjáÓú[a™üý3ùó«¬q£ZevOÄÅk’òÊ`ÎÔ†´ðQ àÝ1òŽ>µîÌÂl6ü½´ì`è~k÷™ÒÑ㥇âê ±Ç²7ÿÍóêâoŽ6Vìâ¼ -ÁCÇMïÈÇgåÖÒ“§0:„DVUýŠ?téB/v–{/eI9µ-|”pgQ¨í˜WqãØž%‹ð×väG€“xúþó›ùà~¶œNÑ£—ÒK gTØvß+šýœj0A›ÁtZ»Ïb½äçæ>yõж•~~­‘ÉŸ ÞNº§n`Š1{Ñ­‡,üƒôGî Z³f »»víÚ‘3—k jé3lìŒEÈ»ÔÔ~J²Êò-íÁËÍÿžóò•¼ªÊ|l†Lï¾ ^Ûá³Â»%u8dçnhe6Ù4?§¡cg@"¯غjÕJ®-w[99ß~?OO0Óï/ÓC’ ?œ´Æ#'N )«$ÚÚ¿eïé•ö¡¤èM¶¥é ¦Þ»108£5hƒŽSƒù¸ql׊åËØøØ]{,\'óµvúÁfeä—¾N‹娢M7.Þ¸#ßßžÖÚ VPë_‚ ûÃŽÏðlæ!æ ¤ö=ad; ˜¯µS jòá‡â’wO‡˜6¹o „Û[ÿ¤ÃðéK ß<¾×{éâ¦Þ{þz¬’¶a”{_=ãÏä|èÁy³ÚâÔe‚üÔÁͤûRÊš õà³ùøº©èlÚÔx ø»š—Ø´W×9wþæQ6-Ô£—ï~ ta=–Ã^H” †m ^¾dQ󞀹m™ámG¥‘VΨ«Ý¤cS3Ä{©óµ ããïªÜ?`ã&_ŸÆÖáʪß×ï·¼¶I°™0ûLTÌû&ì>¿)hsËÇ;š„ÁNîóŠ…Ê 4Uû䇢8ž$ÆÜîëa¦Ð’d„Xë-ŠFê™èlFZœÿ7§PîІÞÝÐuð*¸Xÿu­Yw<¼ìé j'ûê[_‡Þn¶6¹¦ºS(»´?Edܱ˘+î8òÄ…+šÆœ9–$àνæÏžÞ`Ì’ò_i¯ó•µ80aLYÛðNÖ5¹– N\OH‘QÕæÈj2k÷Y‡#ÎL?¦©7î>x¤™¯Æ1êб£Œê¥¨Nö¶¼åTd”šáÎÔáÉP‡÷ÏŸ=£Á˜…¥eYï¿cÓÁÚªz&q™/ÍúõnÌú“½‡ÃÛ˜ùj(Ÿ¿Š¿çÍÛ 6Cxòèv“_­µH+÷Ñ«™Tõ5Y@Òx’¹â$fúò"}ËžÐüï†CÇÍõ°£}FNÒ©5!ç„»õÙº{SUvLf~e%¥‹“ƒiïÅ@~´1÷GÉôV÷YëSQOŠÄMVn3]¼¸Î\b- kḃhlZ;¬{jZg,P Þ€äI¡„Õxâ2îÊÊHn®2‚ärɪڄ‡É|#d„ó¥à; _Ŭ]ì’µÞaå©PWZ6#¿ H_WfHFþÝkê<Á?`£ŸoçBÄ&§+jèp*—Ä%¥£Œ´4nÞí¯>~’êÓƒ¥¬g9|KÈŽ¥ › 8@”ƒkÂÁ4+ê™?zòL[£áWîfæpŠùjëðÄë0xØ iÙmÐ[ºIË]¹}Ïy˜·i>:H©éç¾ËãÈâ .#¿¿ý•RC~üulnê&k#ª€K(¨2•äÓ'‘æM‚O£)”3È·+$‰ÔH½<(ï7Yaë{îfnnž7ñþQºù…>)7‹ṙ3±Þ´,J ,&‘¨¦^Ö‡BÞˆÉÄš*žC;>Z9 u[zèb=ÁK3å èÝYd×hî)†O_ˆÊ5®R"HòúnZöôÉ~3üQªû¢+­p.·LYq—ôŠ9äSÛUõ'A:72 SÞNJ2¸ÞSo6øû;N]ÌÙŒRÐÐ…d4o 
LBB’ÞΦèØéç/GѨñ¿È˜Î2†—o?4†ü>óûJ+q6© ÖaÿÇi®Ã½µøøùúÖáüõXÎ2‚öÕ§¦?"Èï/|‰Ñ•سÔeQÆ|3)Ú^©³… ßGÔpÒû£ðw̦i·HAãð‹ ó#è:ìx̼½cf°ðã>“H=üM§ß¬wë~í E>ÒwŸ¤Qž’HêKô…S>8ïÿA¯wº’vqKœŽ¼Þ×Т‘‘Rõ\m柚µïVUÙ43÷òe}†\f«q3¸!»¬ÆNoÆ]1wpœùjÈê?ŒY@þÈzAgÛ žÉÕ›±¬×œ¹z³¯Wô’½zõª¾:\M¡ØxÌá†DššÄâÛ¢ÂÜHAVÑw’†Yðê,l&¿¿lŸ.d¨cÚj¦ë(Ù«i}/ å*Ý]¢=jM1zQŸ5>&.„ âKYn«Ä±òôˆw²L¢©]ôÒrÞžE"QåÕÅܵ‹Ò›çÙ'¿#3Ôv\ù{†*¾.ÂȬAº25Û¸ ½…5_ ‚:O„¯2h(“´Lë]üw>*º~Ãs#á·†Mœ"Vû±¢óõ«Ù<÷AH¤Kƒ¶˜©ñ1Ê,û<%è*ÚÂÔ×K|m†°Äæ2C1¤¤¨¨‹˜X³Ó¯>Ȫ¼¢¢1‹‹µÍm™ô{í^ÛË]/ªÞò8¢~ò?&=ÆG\„oi_:¹ÆÊnWV¹‰“Öñ(í>ë •©ZK~UÏ 4ðt]2lüþÑ.ihd‘€’}èY™^mô«¿Ÿ¼®Vwe$Üë8}Üšq`Ϲô©LÉÛW–”‘ºˆ”ÒgríàByQ‘PëCg±n×cãí¬Ì™2´¶© ]òòŠÇ×¶îVU|…´¯™œx› ?H»P(•$’’³†DZCAA¢vB^CÿÖ þ«Y÷¹g)óÐ@š¯˜¢þ˜õöÝIÙº\q[Ð[=`mÖþqÌwÉr yŒlGŸ©¹Ö‰Ë6çëÖ£ š¼ÿã<Å™Êꬥ¢ˆ×T_JÏ&µ¡Î¢ä‚Y))Ư>å½ï£ß¨‡LYü_wSœU¥°æäè³~Šñqgá­)Ƚü2!†®ÚÛ3»u‡Ò¿êÉ4¼[Þû&ö-ع k=Ì—_?AX×í@ÆééXgòµ÷e²ÔôŸÌ+»yñ ¡Z35ªuÀ©ânÀÀy§moïí8dT 5·Ñ÷=÷¾L¹þy9O_äêõ×dàïò:I0q÷û½Q_gsäÓe#Õ§Ék…çN‘8jæÅŸ¿3  ùe¯D ‹S;Ã÷Y ‡ÌËÈ ªx{|Р`~ 2̡ƭѓ`îJÊ4Ð)ßMúoæ¢úÇç¶7>„wTäÖ&Ò3j{úÊ1j·–z‡CE½IõÛÔçeÄÝ °Z´ósƧ$ÈÀñ¡iØë|{6|ôCº¼º5ÛaÚQ*§~Œdô>æê)#͘ÈÏó{oݧˆQu©ò•þG¤(u¬?TןÞyî‚Ììng|:Åß"#|Â<(ãÔ¼;þ5^±š °ïOjݦÞX©«=.üe„ÏY¬Ï8¹dŸ‹ph2=oåÕs`}öó×Åÿ:ð¾ë¶`›Ì›n4ÈœÚõrÄZxÇ×ié;JrN˜ ŽUEj›Bóvñ­O¿—Hïèä—qÅï’Wßµg$\ïGWªï­¥ËÏ‹òô/…¡{™¾ÛSМÙTL~º@ûÒÒ}U;‡³á˜AàJ ³p÷(+ê}2÷ì¢ûû÷ï>áKÞ;É^ lIŒ`'护c£wÜï)-ÁÈ3À|5æÕ+ZiÉýCô]µ¶ ËLÃ|ÔšÇßä‰Jª«Yªê”_Ëš_÷Úãüé‘å§ñ$°‰Zd|ÎÐí!”Q4ogvFþ}ÐOÈ€¥wã¨gÆî:q¡ n‘¯÷]: vj0Á"„˜º-ŒýÞŸÈp T-ŒÛß  `ë–Áopü*¨IhrèuÌAPq‚ÌÈBVùþGµ±õUĪߕ e/ͭݬh =ÜÕqßT°¨< Ì­7ßô¯±ò‡.R“dáwÙY‚½Î®ëƒìv\|—H—·ÉBE¢Ö„žLÖ 0¯Ã|LÌj_®¢¦¶Ṵ̂—±ŒIƧk¬ßô¤vß<ºÔâu›ú¿5zîÄ ¥RÎÌ‹äPiú)þNÌ#a!R†@òµ²‡ëYˆç47ºCn”£r0;.Â׺>OS^t*µˆ©ƒâNÛŸ;mGJ27ÑU†‹[éò³¾—ªþ]ýÏ“»QTÓo÷!~û—|]:˜O‡JÝ¥%Ÿüí÷¨Ì}öª‹„¢”ÕÂZTÇ{~ȯŸ”#âÊJÔœ{aöÊ,çþÖBüô·0Å÷¼W:˜O„ë§ÕØ rfu 3ÍJK‘‚_T3ÚeÀ º®Ú’‡ÿ5¾>hë6yÆi'ÁzwíZÉp>œ±g,m¶Ûä•Í´®6…¦ PúôB¦ª ?ó¤ÖÞ÷=2\WjðµOe™|¡ì¢Œð’&©+³ |Y„ôÒÔL7TwûÒŒÏOt{Ôd»ãê-aäa)ò«–Úê#¿ŸEE ´è/Ÿizî^èÊÑÐ¥ÎêB_>×$àÚIo¼ÜeÕ<2¶¦ãt.^ØÅ­Oe’„.oÕ.}Ù*-©»Ñ'A ?äê0ïßV×ìÏhIS]A!”:¿®vç¹ûÐ Úšy?9l } ª¢©ÛŒ1{Y93iÚ:Ìë°ŠB¯Û_w•ª‘\¨W"ë°7ãóB(¸]YåŸ^êJ«„a_ƒ\³ü›ZêíbmïJ/6a¨ Œu•éKQß«‰“òkÿ½¥üG^-œG»ýb¡ k„ªÈ‰ü20Ó¡ ߺõ2¾òmŠÞEIßj95Î%ãÕƒs—¶½òI7ÿ£p§ÎÀ|Ód„énaÚ x÷f¤e=äg:@÷BL¶3 
01ú”¿3oý힥ëªíbð_F~£*ÅÇ',›¼3‹§û¸ÜÒ?4‡KüECš±«ÕMá‚ûeØ…eµñ•ÝNg¸!HmÂåÆõ0Ç_>;°q{–¦Ü¸0|Ú’†û½i´iMS ÂbcFþF”³é¿š|«¬AÂ;nlCuxìßUIU%‰'€úCôû½…>£¦âüU'1fyK©/µ‰7.YÔC~–Æ.ÝNQT×fU]™Skfm–ò+ŽÉÈÃ߯۵ÅQ]YRÓÃ{qðêÚ[Þe¥ŽfQ_fÞ»vÆÝf¶NJþôE?RÇ2èüG±N…ñ5éò“ñ¥0¸G_pé`Àú½‡ÒÓ©cpfv£Ví´êמ;Ž[ueÅFmáQp=Aâ̺G!ˆ\OaÌ*ÐVë¯äTk„ 4<;µfeJA=·Ô±'Å+¼Ym“XQô•²«´¸°w‰Šv¹¶3Àqê"Î&žô§ÚcÒ¤FF^¶|ù£%„…9›æ¨#ÛÜ[thÉFÆ+ý|#.]×0²à`úFGNäÞXë) ð.+EÕÐ’ƒ ΚÖÈmÃV,_y;E¡­¨…)^?z £Àj å$·Q_?Þà†ò¢]Dž\;ßßžƒ‰|ŸÚ¾™Íä‡Ï^:u.fœ™ ¿xÏ4’Û¨KòͬõI¤Þ‹ï¦=/e¿ ë@ÒCrÓÑ«­É…‹þHݰ˜¾ÆB¾(YfìqÒ›‹ÎìÒq5+9—€ ¤”R …‘¿_)éé‰b¢‘Y­|ç 7x35«æ 1¼Kj>s¦Qì¾cð_:f~í¦Ï¢ªAÇ/ÒDXà Ó[èðøÎU± ¯ú’Þ2lÜLŽdQô‰Ð–láOåo'»s7ãû §Äµ=âÏñš×´ufcF::¡oíÌ‘û¤IgÞ:š ¸u?G¦· GR›siò„†»é€¤·„ìÚ¬Õ¢-ÇÍðÝÙG×cì¨Ý™ Ï‘D~}•5b('}DÞ#¿x*Õ•Ít0Ù¿sŽÀÎ:¼’BñÉ XÖîS«üŒ)”­©‰±/Þ¾ùUŒlœ7XkÉÔÐÅ9Þµ 'ð•…?jèMÛÃ˼+‚xýܾÈÒH„1Óâj§;¿Nïl71!íº©ž*Ï•DÇŽíuvìÙoæ2±-·Ce9߯a> Ë-äˆí¸}æÐ²%lè´tµ1ÚÒçì°ÊdJõ—WšÊ|¦zŒÏ|ò¼RD?¤›k©ê°Ý þ;Cš:¹·iöþ®øSô©1̇aé›·† sok²Ô{ÙÒFFž;}jÒý‡‚²ª||mÚÿyçìaŽœ&ÍÛä÷…üüÆ¥ó×2öÑb&P»Âðu'3t? ¿bë¡Ã‹õ;R¯BBðÀiç(Ø!±‘Ƕ­ûÏL¿F¤\óî瘽Āo ¢?Êüûùø7ð­éúû +è“HX—騉å Lô7<]$CÑÓõN_¼"¯m$ÔjxýèNïå:>Í<ÌVXHhÜëÏ_¿ÝIÍè«oÒJ‰|›yw ž¶Žƒ-ÛŸ<@‡:ë=*:®‹œJ—®­¦V.Z0_g){fÙø¢]¾­;ìú«äuÖCG{DAª%±Eˆ8w±¾YëQ vÊ£Ns{»ˆ»Ù[‘ ¾ÜKÏé£cØJ‰Ì}˜`b¨¯3²ù[ëa'Ym nS£kPQü:;ÝÅÑ®…åþï’ßµ†”–é÷ê.q`µi/n³ß”EÙÄf”Îö³”ZpWoÆT tf×ìÈÓ¡^ æ ðK±ž0Ò$Œw¥U–•—ï;phûÎŒýööIÉ÷oNöÖ:ì8ßXVJÒÕÖXVc§ñ °gz©²ôÖÙðeKé8Ú°©»bæ‹J+°kT-÷}^ æ ð÷ÐQ´Cþ1‡Ùr5–.¬qN]¸\ù»º¿Y“ePñÇ×w£¯Î›?\]DÚ¨5Ò‰dÜ{˜™|7ÑÌe"?§¦µÃß¿b·lf¨]H{iÛm½ÏÇLJ§ÿ\䵟å:MŸhWYX}!bò´i`LiuõVM³£M[P\Rºk÷ÎVÃ%{5íD0|ÙI7øùøÇ89ÂGí%­µDXHhQm—õ‰ÓçI4Y5õ!¥_>Ä_=?kΜnb¢ˆ´~k¤sE-•Þ}þ %y°ÓÄ&/6¨ªˆ9ÌÌ|È@ÝþÔ:Ü YŠùÄ€/_ì?jìà†/‡o|¹?¹M"!c]F å¾ùWAÏ«¬´HœóìÅ‹‚|òÏâ¢êßU¤…:w“PPPÔ×Ó¦š Òšfúšm“HcøGòäenvΓüy%…?þP¨tèС‹XW¹^ý454Tðƒ±„´½æs<“]º§îg>~ñüÅç|riqaxxø„‰ùĺIÈõ’×ÕÖ’—­5=Ò : ±5 ÚE„nöà÷¢Þ¿{÷ãë׊òŸ»vî\൰‹˜¸”´L_µ¾zý4ð˜Ú®m=‰ÔÝ­ÎÖ‘ÕÕÕ©_¾|ñåóç²âÂ;Vÿþ-ÐIP\BR^A²—êIÔd¯Š‰ÖжI¤É=øG’óâUvÎÓOóJŠ ÿü©¦©ÃòZššê*ø.uêÛÇOi™?ä½+úöµ²ò×ݤ»&&ÆÅºJJËöUQ1Ðé×±cÇšr5‚0§ùñ6ôúk6¸?2ÇôFÃp¼‡:ýêÔl‹µcÇŽ5«Wsy‚»‰‹ÑžÁ ^éÇ¥ ¶ÀèÂ?.ÏRMÕ>šØ‘Ž³è)-Õ“f˜`Ô¨QkÖ¬!Œ$A~°ŠŠŠçÎsuu%²‚7ãÑ£G‡&ò ?Ø &ðùñsz—,mCCÃŠŠ "ò#@€=øö훘˜ØêÕ«×­[Ç ö÷÷ïÓ§Ï«W¯ˆâûGpõêUggg"ò#@€mxýúõèÑ£lß¾}ÆŒÀ|`ù$$$¾~ýzâÄ 
l555¢Ly·oßÎÉÉ™7oè{2™LdA~°òëØ±£……Ejj*0¦¥à+ð¸1æ³²²‚8wÀåååùà”²zõjPÀ|ÇŽƒàà`àl¢dy)))C† ñóó›;w.Á|ù ÐZ¯Ù0}ذaØ0ß„ Ž?þèÑ#øøáÇ1:Á`8‚***ï߿ǮŒ±‹‡N:õСCÀ| ({¤¤¤“'OîÞ½›(_žÀäÉ“MLL¦M›öäÉMMÍììlbb'A~´"ÆŽ{êÔ)ìzÆ ãÆ΀k`>Œ£££wíÚ×àƒå<þ‹¼gÏàpÒÛ&]»výñãþ‘v=¢¿¿ÿçÏŸ{ôèA"‘€ùŽ9¢¥¥5‚.‹jìׯQÖ\))©„„555(µŠŠ ¬“3''‡È‚üh]€¹¡ý8bD­ùbccu\]]!&6£¬ÕâÅ‹ArÍ™Ss„éöíÛ A&¶Æ’¡gÏž¥c¾Õ4;‘JKKƒ02e ®!à/Öáǧ¦¦bá *$$$6mÚD:QUU¥¡¡±bÅ Ðëð±  þB¹¼~ýZTT”èä$ȶ€™™0m(?l ‚u0Þ½{ØÎËË » ®±oedd®]»æ…¿ÅÅŤ°f˹|Ó§O§ ܶmÛêºÛpóEDD¸¹¹á³á1Û ¦¶wïÞØ0áÁƒ±¯ª««ååå/mllˆ:И9s&¸ )))Pøùù_¾|‰ »tŽ9òíÛ·pýõëW"—ò#@ çÒ¥KŒáÀ|šššt]O&(à‚Žóóóq^MM HÈÏÏïÂ… ´÷Þ»woüøñÆÆÆGmü†œØNZÁ‡˜˜1~¢¸¸8**jìØ±x ü>AfÍš5IIIzäÂÇi‰3++ ž) @ÔŠ–ãúõë'Nôôôܲe ²oß>ìJaÔ¨Q@~’’’ÚÚÚó È6ÅÞ½{W­ZÅô+`>--- ¦ÔËËËeeeášD"á4“››‹G{úô©¹¹ùÂ… }}}öÍܦM›6nܾÿæÍ›{ôè‡WVVªªªBüúÔ€¢¢b}FSTT˜¯{÷îß¾}£ûÊ FÒðsþ||Lnrr2ëWØ’D¡P¿’Æ•kQQ¨Õõë×O›6 °í-W¯^õòòêÓ§¼#Ýé?‚›7o<þ|îܹ>>>||5Öò$&&VQQáêê ®ÉÙ³g¡´WŸ K€ ?8 °MŸ>}bÍ|€ÞâââºuëÆzç°°°0üzÏž=kÖ¬yôè#ILEA¸`Á EP™6lÀäÂ\LHkÀ€À»@«NNN@º )ëKÆ|t´tûm™™™¶¶¶@Ït[›:¢ »êÚµkCCC üüüÌÌÌxºVDEEØ&3552eʘ1ch¿µAAòòåK(»Ç¯Zµ Ä^ 8 D#È®6¦¨¨ØÈø–––ðwöìÙþþþØžg¬1þ1)) ¤ÕÞ½{ñÒ‰'ûFGG#è òÆç€†Ø²eËáÇA¨?T ü:è L†¡´´ôäÉ“‘‘‘wïÞå:räÈ3gÎ@øÅ‹±%,O·£izzúäÉ“EDD@­ïÒ~% à‚飀€oß¾mbbâîîîáá¿~#;;ûÂ… ÝÒÒÒôôô†é‘——§‹f‚.¼xq¸}ðàÁK—.2dþ•ŠŠÊõë׉EÜŽÞ½{¿yó¦yÓꀺ0þeФeã`4AO€àãããõ&pØ•+W€·mÛÆh…q ±ê–ùàúرc¤ÔÔT}}}0Ü VÀ"QMCA÷@LÊ|ûöMYYùþýûªªª:íó6Šääd1Àg@rÀXÀúÂÂÂÀØ2:a+ˆ‹‹Ó3ÊÙúÄ"-à§!ñ—.]‚_ïСHX;;;QQQÖYúðáÃ;wî@‚Ì@šC¶`©µ¶¶fʯýPÐMŽÅ”+x†ØØXÈ øixiii:oà ¢ùäG€!77wôèÑ `€ùZø(gpáçç܃1"-€@ uï޾·€‰‚6ÚŒÏOHH 2&RÁuÆéÓ§ŒŒèf߀dœc àûË—/mçåççkjjÃÇ 6tèP`b|ƒ›ÆcÁ‚À»À@666/^¼øøñ£¬¬,°0ü PfII ðÈ\øºô`7BœgÏžBýòå |ödÔå ²X 8òHølìØ±¦¦¦@{BBBx´¸¸¸[·n%&&—ËÈÈ€óakk ¥Æ8‘B&¡ ÚA~´¤¤¤LŸ> %`>6>ùÉ“'À£ YÊÊÊÖ®] Æ8³ï(šýd3´!@±@“;wîDÐþÌ={ö¾|ùø~ˆµ´Â!!!1ãWtsg–/_?aaaq÷îÝøøxàKà¤ÏŸ?«)))¯ƒJ¢zõêU^^°$ÈtÄiLzàùïß¿‡7z÷î<ä&° ˆãeË–ÁóGA„ü­¨¨€À¬½zõ200}ÌÍ%a¤ŸÔŠ~ÞbýúõPrrrÈd2=Ð?heð'@26~å ‚üà^`3ñÀØ1J¥&áõë׫V­†á²zõjÚÙ+`©#""ðø–`¦åääÀÔ‚mmù‹TWWƒ2ƒ1@DDd w=xðààÁƒ/^æ7nÜܹsýú`úA >{ûö-°ð ˆ'---]]]¼ë5v š ¸ÔHL¸½1ã¸ÛqçÎ{÷îeffBnÀ¯ ·AÈóçÏÁ144šß¿}½¾8®_¿~öìÙ›7oVUU‰‰‰Ašëc> :áW èÁGß7¥¨¨ˆ8ÏÕÕR´×øÞ€øÁ τ܃\…' 8rÒOœ®GFbb"˜È;vlܸ±©÷úúú zh‡…@Áœ8q¢IÏ™‚(=z466;c¡’’*((æCе 
111Øþ2 ]ࣚšh¬ 4˜˜öÆ(ð%Õh§r8p`ëÖ­ÀˆÀ¬LÇê脨@ÚÍ›7¹fggƒìƒ¯€™ UÀ£ÖÖÖvvvX?­ЦŠixÔ­[·€f0]¢väÈ‘; –>hâË—/›ššÎž=nlðÖ P(P½ðvÀ sAòBª@z‚Ї×d½qOII °utt4”Ü |innoO š*A~°3fÌX¹r¥)ŠÆÄwwwOKK»zõ*H,${S…&?È•Vj€5¼páP#ØVqqqÐ`©mmmÁ¼b<==i GÁ–„MGÁTâÌ™3Dp+$p[YY™‹‹Ë¬Y³pŠÅ¥'8ÚÚÚ Ý NËS¥bÉ’% ÆM .Ÿ¤¤ä˜1c°I°¬K¤3p¶‚‚–-[@q6&=@ºÍ pÝ@ ** X<)) t'”;ˆolÎ0SÉä°°°ÈÈHP«À‘î(853– ?x/Q€ýemïÞ½`õž>}ŠMyhªžk<À߇ÄgÀo :tôèÑnnnXé™3g@Ð`Hw9 ¦ ]5ØzÈÌÌ\µjUBB¶j˜ŒvË´õ( ‘ááá3gÎdúP“Ø"°ø222m³;¥ BCC·oßÞ¡C‡E‹aS^磠‹öîÝ;(¨gÏž¥Ý^§…àççgœßTÀ :yò$9_¨0àU¸ºº2yêÔ©Ý»wC½òðððóóÃý$ùøç>uvv¶ŠŠ Óoóóóµ´´¾|ùôllùÑ?nذèÓØØØËË‹®o­K—.KP0Þˆ-3¨o¿4,ö-k vìØ<tÏž=¸¤ÐÑѹrå ëi‡ðË—/3®|‹/Ïšáì9ò³P0†ß»woÁ‚ rÔ6Aº³¨ºrå <ìmܸ‘:ÄŠÆÅ‹—,Y¢¯¯OŸãÈ¡Ívok3ßôéÓas/!!ñ«›ù Œh``å¡cÇŽÑ+555é3ðPT­ªª‚mS#ž%tæÌ33³´´4ê y.3Mzäöýû÷#FŒàLvÈ(€­I§•—%®’0Àjjj°£»>œ+§ qnذ¡¶þ—–Å„ ¨3:¸åïÿýæä?NF r¹ŠŠ "?„¶‡óçÏs »Á–÷öíÛ?Á£aii™””Oéò€2â7š3 |||¿!gpÎÜlݺ•Òvý“€mÙîÝ»322¹‰¾öÍ Šùüüüzöì¹hÑ"šH C1´ •.HØÏŸ?§oííí³²².\¸@—9sæÀHqíÙom$ÁiECXZâââh惵æÚ¥K—ù! ´ ?~œÞÙ}ëÖ-(±ÕËâBhh(ìó>yò„ˆãÚÐ"€¼kjjʹbûöíÎÎÎ?|qÀ€ááá;ëT`Ö²€TAqž““µf24pÊm Îi³²²rHH}Lc¯^½^¿~ÝV"âF‚¾=pà@ddä£G¨Û+VtîÜù—ú^ˆüšVVVÉh£q'::6Á>„fc­-.ù`7<==º¥w²×‰©S§ 80;;»õ Oð ,þáÃlÈ|...¿±ù²µ²©‘†“'ORÌ×ð‰­œ§‹Àˆ;v ¸€\}:sæL®3œù! ´<è!)55µ:™¶¶”ÎÉ:5ô·6Ð )cccë;6¶wïÞ/^l[9Eÿ@mÕ‡ÌÇ`0h=8mÔHCUU•¢¢"Å|8üÐjEØ’’Ê YRS\QQammȡ呓“C”ëgjj*ç£M›6Á:lnnÞ¶HbÊ”)0.W®\ñõõ…f®§C‡…}pÈ|m7Ë ó8pÀÄÄ2Ÿ££#Œ&Ï”F>>¾ÇRç\iii››Ý¬ô¦ (víÚõÛ·oC† ¡¢‰È¡Å`llœœœ,&&ƹZRÈyîOÛ ? µ–pZRoŒ>-[¶,33vMxU &d>ø 3 –ÃGý¤š¡VŽõë×S†·oßRåPó(^@䇀P>}údiiI3Å­1}Ã8~üxEE}H,µ›S·@QQñèÑ£Û¶msuumt5r­ ‹/nÜM,-j\÷Ì™3žžž;uj3œ‚Èw°bÅŠïß¿CCLLÌàÁƒyƒ! >¼G¬ÆèϣƉ  Oww÷¦Î¾? $XVVÉzÒ¡#Þ«'ÖÚ2‘ ÙÙ¥‘\5[óޏÄoE¼¤/+kÌëÒeà“'Ÿ7v^†ù! 
´;4ùU$`‚ãáÕ-á§v÷’Ÿ6æ¯ãpü*Ê`„f’üô1,šãÖÜ'>Èqe¶Ó5ßÅ¡YXÓOÚ ¯2˜'©ŽŸ§¦¾û;^ÉõOZTÉaü¹Áñó,ca,ÆÐ¡í¦¡lE@@h&Äö}^Y=fعKŸ¿fÍë!Ä Q ;âuúÌÅrÆO·¶Y:÷·ü¨d0:ËÕñ·Fðw½vkÑÒÐøäG‘–ÛÕrÏqÐh(}åÆ!﮸‹ÅÄ@=ÌšXkã;à¥ìÞîi.‰ÎîÞ[=]Dûð¼ßúðÁß'¡ äMŽCCŽ‹ÖY6~¨Ç+O-ë?ï ´Š|‰ÏêMøŠa¨B" 4B]âÁ8&ó*md‚yıfj!rÓS?‘”U›¨N’^õ± ÿÝgYYyÚ}nîã¾²òDK÷µèìéóŸ+Î,}~-ÊÍ~#+Ýå¿Ó‘ºôÍUe?/ýÇjŒ¬lÿ&ŒVÊêÈH.v¥ì ¢agEªüÍá'ùùE'êIŠòѱ€‘ʺ–ÃTRŸ©$+Ÿvv |t--·{÷ ®e‰Hö \Šô•ôu'%Ãq2¹/ÞÀÏ…=΂âÏ…¼úÒy®á<¾¶E~xY&¤¼q¼àÆj»¯ãÄÔVѸ5A~V}ü±†ùáøî8NÄkî„õ8ž=*:–2“Ì!´ß˜‡£*‰€€ÐlàèpcEùX’“m¿Õ§ò!aȪjSü¡dwÑy¢¦Öoé™”Í3 åhö,f2ƒ „¤¸$æ’— ´¼8Vyó øâÇÌM=7—sÅ6 ˆ“óøÞSºi™ÿÙ]Hž»ÊiIs9‹1݈ðd먺Å>gâ|X‘¦¦B?£S); F]–V0™ÿ •†Îd%‰ÖT°x»F£tI{øñîß0‚'áÓ —¼ý7ƒ1Û%¢Øfº ø| ~èm;ä÷&pg˜¶HÈf¥TúNUûXyþ8žh §µæT¢:kL ÃFl›3È ÇÇèMI=`ÏÎÖ8#q…é” tâécDÚè›ZNVW¯%ÃÿA„äC›¸b0‰ Ëó! 4/J© l¾fºíNª2”#šÖÛ7‰¦iôèÔSii¹û»Žç‰ÌÜ Ù1÷=ñª}:Ï 8 *ˆã'3¯ZÍòQF¤•êM5ÑæŒÌБJ å~ÝϾá<éÆ:•BÑ<¸õº_TÉ&}—;K ß ‘ò¨O¨Û&qÞj‘ñQ&{2Õ oºǘd•õ&‹zc_e%u­ ®=­VͰvÝXCYÑ0Süîx5Ynåan­!\Cz£F2%Ö]ÚqGÚ!ü…<ù¢ùᇪ 0 €—UðVÅînW€a}ä9¼=|—€—U§óúoê¯÷¿ÉâÔ#¹Z€\ð’€j$Ï€ìWCivàÅÀUDOçerêçZ ¼YØVÐs£;Ú¿‡NØmÄ}j±Û…m+t-)îlP˜±ù“¢9!2òÌBK†ßjyæ÷µëCfl¶ HY¢ÖJ„ ©n}-‰–ŒŽt3µÑdÅ¿ c' Fü>ÉÌlx=ç>Ìr?P\znºA÷1qÀY³Ic1Ñ%c´?¯‰«„n0ƒ6Ǽuý/í=Ìã+ Ž1ž’ ¥zÁ_Õ¶©“ 1ÈOdJÝš¢j lÞÌ©ÿ‘}Øs{ÎûÅ\.{×ç­Ì´ 8^ã¨R{D<g ìù oæÕu´M|ÒͲ*°g,¶ã%ǘ̇d†âÖ}=æ­(x5``Ƀ¼¥Ÿ¸PÜ©ï|a¿­n ÆåÕUkÕ…&„ÁÄ{p†y”%óôÞê¡'ï82ÔC,™—÷Ö†Ìð5:6>¬fkeHèÿŒ}FÕL™æ?ü‰|„svÛaczïièEßE\!ÍHƒ¥ÊŽswëdJr Ô…ó2é÷Œ²9‘Z©%Å×üq åJa¼'æÝGlb27±íÅ9óBÝá:Ó¡ŽQ˜îyoº'Ù¸ÿ“ù?Ú~7o•0á5#µÀ%fKᬎ{ÍØYDà€;ü´N—5"Øy<³[+ù! 
4%¶¦à¢–* † ms"þpº…ße`¥¶I‘Κù—¬Õ—ÒÍh¼»2lja噪’S\’9¦+怤[Í–Š‹ÑS^ÃÄ·ðþ·XssÇX*ÌÔ­iÈ3v»Ï`d1_dHÊÑãiàM8t|1?5 Η£ß"ÿsÞÒ)Ó‚çæ´B]ŠXéWüñIØIBCÛIÈ|؆T¯9¸– [F$§vøn‡Ìw>£RµuêL]c8rn=¡YÝ6‰¢I?²(^£!Šª "?ë¾t×} ¦0—ÉÔ€Ër³åÚÒ<ý" #*Ob®%† C+…ÿÌ;…0[”fEÅ%BóÅl‚ùÊa›¸KË9GwkÐu°™˜¥p÷Ýÿ Ì+,9oYð3¶Ñ Æ€ÝþëVÛMjË C\l|ˆ0[{TÜ9|>©ß’˜¯©%o^SrŒ‹ïÑ›»M㟃ԫ  pPÉa†«v ìæ $ÖAjÑ*ÌgHõ¥"èkcþœàŒ«çL™ -A~¶aú®ºöG¤¥0ýMéûÍt<4ÀÔ~˜†ó …2×Íþ¨1™w2ÎÖYwZZÝÔrÚ0e28º2´À7qÒ4k=ø(- ŠHÈŠÞ¦ 7”±Oo·ý´ÕŽ6-ÂÆ ,¾–e¿Y¡){ý’?¹Y™"ý[ÝîÀ/Wý'Ô¶¦‡X~åo²ž)(È·ò«Â0VéÍ,Ň U³…ù8sŽÝè ¼ÀoëîôÜBAqÙÕvvCû³h¤Š™{÷Ueeå¬Y³Nìq;ÿpÌôU‹'ÂG§ƒ½BcÒ†ŽŸçi=rœUø«ä—™5yˆ‡EæË+=öNR$ºÈÞζ©9sW{Ï×È‘š¬v“’]é¸A¾g§Úþ{¯IÍU?Ï…ýeÞÎ.˜“ç,3=QKë‡MnSÏ#Ι‰ê†oÁòó"ìùBCƒå-®)œê0[XÑ[ƒúê0™¬¸/0¡;]Âè4q aK²}hOXĹS¦À9yC‚Of1“¹˜Wö„Ä+V^)šF2Ù§¸/±p`E„\u³Úp–°HÄ}uËŶÆ&¿7œ Kö,esöèIÓ’ã.бpSÇ‚¡¼.­ ž§ôyàÙ¶iÒš•ˆm*™\dCí gQ¬Ò8pŸXÇw"×jÆâµçn¥ÿÚÉ+¯µ>%ïÕòžìp4Þ±ÃÞÆ>Æ¿~Ô[SèåI‡³ÚøbA˜·kø¢«Íü¨7¸žPð4òYbn±–ägJìCàa½žº t@hJ$P$Gƒà5 -CFV`¾ÈÆÌLÁЇÍz,1R 4FjyŒQè+Qg§" Êœ82ßÁ“Qà3ôÉ.mÿìùÊXsʸ³׈k­õçÈ|ƾ§'Š!á/»al=)ÍWúÀZ²d´,àÜÜi|uöV‹Î3†N×ßts¿Ù(‚M!óõ²;è=šÍ,øçé/úçÖ¿+GµùU|úñËóÑùœáUÏîߨg÷õwÝÏ;8À{ÊÈpÒ'^ŽI#¨ÛáåW) ÷‹–PèAZúø=ba[”­/ô‡|÷üMñž¢áËþ¬ÐãèÝòz«Á w «Öð] ô¶ÚRùYû„°›ÞëIt ˜Ï+¹D«¿¢8þÃú ºËÃêÑS¥BsI~iI’-üо 9_m¢î,.)?™ƒ·ÌXTM¹öÕU'Õ…¾>qeáƒY$ Œ’u5YS׺º¸kóF¯Zò#w°ôPAbK·&ŽŸ©+E¹šÅœ³d#&Óˆ5ÈV%=ò/—sÅ‹;Öã"oÅiºººS¹‡CZŠü\Ò¹b„ôŠaã95D›ö3'7•_.b1yΔ1p×ýz?%ö3:¯Yw—”„´X§ ¶Ž4§>c«7«Ó®±êâºë’‹±³øöATq~ïj¶6P²¹H¬ÚÝ}­lãM\Öv8‡PHLmÂK˜»æêý׫©%ƒÕVV‚›;”fûdB³Ó$!Ö·Ä:æ½€´3ƒ8…K(YJ6AT{·+é[ru ão‚¢˜Ä|su¨¼"˜K4¤¨íŒ\gj£&úñÎæ#¤::Ð&N¬R>\S²lAòƒBdC  Æ}ÔK˜ÃqJ‡uMr8žÝt!¶ 9kÝc¨þ¨>e/ŽakcßI6(”>Ü+;^ó ?ªÅ¿Œ˜ZbŠºÕu¦˼d×Ý%»¸_1ô¼fèÉmÉw´ïsmþ£ 9r5“¹š£ým®èÑkqkÔ˜oæ[ûóDØûë“êbZbŠú«=Ã~tÖpTýÄFþÇ«çöÀ¸Ó‡5oíqÎéêë.áëê¹í>½¡'ãöuñ4‹ý[â¸%ª±íhŸ"?„…»»ûhaXvv¶œœÜo¼Ú:cD£[·nvvvnnn-ŒŸ”ÔŸú ((X^^þg~`­0¹0sæÌŽ;FDD4ª¯¿qQQL´Qã|ûöMRRòíÛ·­9ù!ð>~$40 ))ISS“—¢VUUUTT´iÓ¦õë×Ãæ’·ó±ÿþ=zôx÷îGÓÞÞþìÙ³òæ>ãÂÂBƒ!$ÔÚ×Y#òCàtïÎZK¬¥¥dnnÎ3Qãçç§(PFF&??Ÿ·óñÙ³gð÷ýû÷Ç¿{÷.OÆ1 `çÎÐðôéÓØØØ)S¦ðRìΟ??}úth(++srròññA䇀Єøúõ+m¶°°ÈÊÊòõõåx­]»–6¿|ùRRRòÕ«W¼š‰||Õ[¢ïÝ»9{öl‹#d;›j…E3fÌøã1ÞV?¿êƒÊ÷ìكȡ Á`0¸lüýýãããÓÓÓÛzÔ¶oßÎyûúõkØ ¬ZµŠ÷2Êyœ=ˆÅ‹óù=yòDGG§>¾çæ;~¼Æ2©?ŸF䇀ИL&Ý€òóóCñ¨[7Q\GoÔ4%!ÎÄáÇӑýðáÏä ' 
DÅQQQ133“Çb·ƒÄ·oßÂÂÂæÏŸßúŒÈ§P‘ÜÜܤ¤$---^ŠÚ¥K—ÚO>nܸ‘šãIÄÆÆÞ¿Ÿ'£vãÆÞ½{· æCä‡Àkøøñ£¡¡!ì{JIIòL¼ôôôÚO&îÛ·WÉÉdÊÊÊÖ֙ŀÂ:ÙÛP€ù!ð ó­]»2ßÓ§OÏž=ëèèÈ‘ ÆÙZÚy{öìáÕ’ ;1µ§¨Û:ttt`Ek[̇È7±mÛ¶‘#GÞ¾}2_NNÎ?ÿüÓšWý DEEa»Ý²oéÒ¥+W®„¹ÆK‘âÉÍ‹}úôyùòe“Gä‡À›€Ì7lذ{÷îÉÉÉQÌ[&“Im›k‹àÉ5 õ¡ªªŠgârüøqUUU^b>//¯ŠŠŠ7Bæk»±@ä‡À³€Ì­­­ˆƒÑ©ÖÇqÈ‚?†¿m+:¡¡¡°¹ùC%gmÁÁÁ'NŒoÓ±X»v­±±±‘‘odÊäÉ“ W¬XáêêÊÑAä‡Àã€Ì OQQñÁƒ€Túþý{ê‘¿¿ÿÉ“'“’’P*µBÄÄÄÀ,:th[ <ìZ½}ûvÛ¶mm==zD·_¾|™— "?Þ$<Šùäåå$%YÇ>Ú Ì·nÝš2eÊ‘#GfÍšÕj#Å>QQÑ’’’öküüüL&fY§Nm¦™’‘‘‰8p`Ûä]ºt©ÎE¡[IpYâ8îëë»oß>øŠ………••U3 ÓAæ;tèÐÔ©SyoÓX€Ì—˜˜øôéÓeË–5ó§‹‹‹.\øâÅ‹“'ORL %oºóÔªðêÕ+ÿÐÐЮ]»Â¢é™sc¨- Tåù! ÔsçÎqÞšššæäälß¾ÝÀÀ ÎW`ûâD¢>?£¢¢Ž;vñâÅîݻϘ1ú£®®þçA]ºt)ü…b(äÝÖvÚŸC›Ì}}ý´´´&’|||ddd¼¼¼hõxbbb­a{åƒÂÃÃaYÊÈÈ=zôÌ™3aà®””ÜLUdD~))©ØØØÚöaaa°}|÷îDÖ®]Ûð¤ >‰~‹Édž={6..îÖ­[?~TQQÑÔÔ„²Ýرcx+$$«u6nÜxþüùö#óEFFúùùAqð—d8ØQ8uêìÍB1HQQ‘ÓAsÊF=ŠOJJ‚BíË—/•””ÆŒ3yòd˜ãuõ0”D;Ùè‚È¡•ÂD¾~ýºwï^ÈI°u›>}úòåË'L˜ð3~2Œ$~>ð[°õ‡ èíÛ·333z÷îýéÓ§ 6À–tĈ<œ³IPæqãÆõëןŸÿòåË•••“&Mš;w.”¸^2œ3‰Æ Cvvö7îܹ¥±¬¬¬²²²(++9vY†ÞÀ»ƒIXYY¡Ú„È'jT§NÖ$`,(>|Êvvv¿½• ~k"‰:¿Ûú¢¢¢úÔBáòÊ•+°í†ÄùðáÃ/_¾À¶XMM ²&ô°W¯^­$=¡„wòäI(‹ŠŠN›6ÍÈȆËMBBm†3”Ÿj3_€táÚµkééé0 (Ù a°ß¿òÃÑé$LMMQ±G䇀€ðcƲ"QçSJkÔ?ÿü[Þ­[·6ì·¿R\\ ÈóouuukŸë+""2“Ä/y 9õâÅ‹±±±PℲ&”r ÔÜñãÇÿ^8Ïœ9xýúu==½•+WÖ&rN ¯a}ú Ê|”Í6¯ÁƒCþã´Y¼xq^^Þ/mõƒ€‰#&&V›­ù! ð(æ³³³óòòâ»téòóþXXXkkkgeeÑ–‡òññÉÈÈèØ±c×®]©s3šGŽ¡Í0zw9™ ‘¨„ òC@@àYP#¢#FŒ¸sçmiffÖ§OŸžÄM Ñb“¬¬ì«W¯–’h%q„Â(-ïB²çããÛ¾}{îýýý+**ZP;9"?„æÅ|òòòô¹N”j´ú !!ñöí[ŠQž={¦«« Å>È…­ÀžþôööþöíÛ† j»éÝ»w}cˆüx ùôõõ£¢¢y¢…¡¡aXX—›C‡Í™32 uiæççËÈÈpx¶Ð{þê\ä™ïõë×­G— "?„&d¾€€J[nn.×Sú¤@ Ãp/,,ä(SÌGňÓ^EE…Éd¢"È¡]2Ÿ«««———¿¿?§½´´4d>j<‹'xTŒ ôäÉÊfáÂ…í¹œ8}þsi™Ú¤é­$<ï žßºtÚÆÚªk×®ˆüšù(…gaaaÔ ݺuËÌÌŒ‹‹ãí™0È|+V¬Ø»w/4ûøøôîÝ»]eýýì™YYƒGhÉkLjUëÑ[Zo©mN)¥ågömswwG䇀€Ðø€Ì·~ýúÔÔTH~“&M²²²bàùˆCæKOO—••uss;pà@ûÉtßÝ“æ-|Øžsß8²Œ6CŒþ¸XzqT%ž¥ÝÕ¢+ZÅsúC½Õuøºœ^ÝÕ¦ì.,kà+œù! 
4øùùûv€ŠŠ *íˆüÚpüeABþ‹¼EŸ¾«:¹³K‰^=e¥Çt•lÑ/`^ñâ釟Dºv€ÑîÒYB¢guQ±>¨tp£Cæ_^çרïù§»[þ­0_¡Ñ½>×Ý…T=ËkZ®•ÚÖ¼¢"?„ö…÷ï’÷_š?¿[ÿþYVèÓ‡ø@àõ/¶ÃwÔÿ$¿¢÷JÊ ÇK/,£Žv\.%ô¿Üw"¥ÀZ©Ç/¬5E䇀€PöìÙdh Þ³_÷`Ý:©ßðaútr < Žt8q²HC}Š´´j[‰þþ`/==‘Þ ~Q±ßŒ¾Þ4:ú‘§‹(¯É“E…¦® ã¯M[V]Üm¹qöá÷”%¿@ÙÇ"ð%QEý|úÝMúcOì]{ûÛ[æ]ùKU$% eò‰Œ»tlºŠÔêôÂÝÄ;ßÒ˜™ïÖ~³K½= ƒ×%UkD]rʪèï–cUóæõñ ºæo;þ!â-kžóÞ߸{/yÒÄ®«VõlDoçσMÓø·mû«µkÜZmô?–ÜKº~I_OÔ|yFôvö,1`ûüpëÖWÎÎn¼T`j-E¹×߬bwŒ °°–¥Ì¤V¬°n͈ïtZlGÕôÂPxe~pÑ ÜS/ÎÙ‘>gËèìz¢ÃñuD~Õ4~0¯cÚ[EüÅ‘¦þVÙ½ÝZfGJJÒ²³yM«!B{@Yé“´»Qšc…!ó5ÝWÖ®‘„ÂÐvŸ×kœþת¢_Yñ2!ñøÈ|M÷gg"ú¼G*RTVÝO?hÞ¤ùtw穯ff#Z~{åÏ‘_E&8¾¾‡n—ÞyNéþÛ!(¾ý·ø(¢Õ©H·òͽ´4ÀÞÂñÖõ âš›ëó³¿ã•\Ÿñ¿\y*KÒÈ! ´9l÷Ù¸ÆI2_ó|nS¯·o¿}›,))ߢ¿Óïo;û^ùšçsKJþ).VoCãÀ?FUÈ}e!´á™ÖûI„¦ï˜C=Q‘R›>;õ\ä²1ä.u€«(îI<ØA{ÆF(ÍùY<±Ê죻kÜCÛEuûÞµóþ=¦Ê ©}5tóSbÂò ûJ Q¾wëŒÊ¯UÔVwÊçôZöòk’Ÿ[Â;Oíjž3Ű6êôð$yKøÎG)Âq1Ò`ˆaáuø7ǯŠ)M†C3hÈ%7ÄÆnÂñM”Yèyjš>«žÃ0.¥j8Nc]€aÜ ºŽd•™Ês­E´¼§ÞÇpôˆ7d¾fþ¨„ çÎ&NŸaÞ²Ñß·w3d¾fþ¨¨hGQÑ;Ç_72²iU…áç²+I7žd¤Šõb PR“èÓŸ~4ÓrmCoòã ‹€~P™TÌK“@Ú#Z"ò냃Ös»ïÖðUÙœ{ŸÌÀ‰²ÊTÅŠU$ük€EUe6àjŸtÌ œ{ç«£˜yÜÝ_MõTEÒxi߈K¯ˆÁÏ6@~ÇõØXË2ê ÎA+püâú-ë4FÃ*q<Ò²Åg¥8.T“WŒ¦Ùå¤i_Ê%|‹¯¶ËŸ‡ŒaQÑ´ªJ~‰žBlORÝßCæÔc1_bV–|Ý:|MûY‡ä<‰ã¼¯å¡ãÐ!ï¥Kk ½œ>yc¯ëì7 ÊП˜ñn6Ó%ÕSæ—¾>}:i’qKE?pÏ&«FÝü%u>wvóÓná›7Ròeô0XNy„H7ÎÀäTÇ¿_öT`ðÖµ €•f¿(Ðwõ³´±dÛk¤_Ü·iàiÔúÝzp}ߦMº6›ÖMè÷}ä‚Dz^ÛIˆÜ´^xýЦ  ÇÐ¥+@éði=[Án"r®òßn¼hêØ/"ï~lcÉUù|‰–“,~üÖÆ¾Û:¹{ƒRÑmKKV’YXX€²ò¼*Pð²€²¡• ®UÛêRdSðþ%e ÕÍ­•Ûžýj¤Šøù»±^wó8 †þr«hÜâ]êdÚ œç˜Èáx6}C1„MУp‹Á¨yEhµØ¹óo;;n¡göü=oð}Õc–Ô­¾¬eta“ò&H]O ×6¿oeµ&ä$ t…ýþŽÅ7ωkœ§½Âñ}P–À0êHtI÷¬†Ñ£?W~)åï,ÜüÑÚçÕ‚ÌGAOÿû§Ooºvmü`¤gå¦Þºùúe^ÿ¡*”Gñ r¨‘ƺ*Žÿó{¸„޶”ÃæY¬á~$(c¹5ÒÆ×MÅr¡@zqÞ‚5  ªK»]Êv ¹PG›ûëÚ9q¥²ÔF…~EÁüœB‹fܯ‘Ÿ?]%pK "Y0ò%>«7¸¼vôämÉPÀi³\µ‡Ÿ¡´Cø bD"ähÿ‡¢—ù¥ì€gìˆÛ9k[=*ܬ«]ºàÞÑ[fÿ:óªì“”Áx÷­­6Ã1Œèsgï{j§h[eY(ž½ž]ØÐ_o395ÊÁèUðÅË!˜åb?ŒY„Vük™…EÝgÓôÄ,ióŽË[$H Ôs6ÚÑë;?'žEø>ŠùöŸpX>=oWñUL}:ŽOO Pµ¾O2_)d¾ Á—¯ü%Ï¡'¤e  uðööqqqoöø›m Úâ¹Ð±#vð@­Ý†ß‰Ž_»“q/ýNyé'YEU™¡ª;t¬&81©:³Z<‚™3®©âÚÃþksuX©Šö^NMf­?G~vì §ŽW¨Žÿ>wŒŠz\šwÒ/ižÍ¤­7ð­àfô‘ö:‘t¾´÷ :*|—àø’à-¶ÿœJÐ74 ÜzGæTÇw8\^§WpäßÈÌÁ—r«*ÊšÇ6 ð\‰ÑnøÀúibgjø4ÆÑ4[’MíL ^¾9x4ª'q[^àá⩦FÄh¨Öä¼U2âD:KhѬ¨ ¼.½½ý€·ïIÇy¨©Ehmø÷¸¿±qÝ›£ ·QÓír˜¥Ã?ì'išð 
\ÂÞ^ZŒfŠ•¿”í?<'w%¼õÓ²,ñÚÁ¦‘ [õ6·¢uyNvÖ„™Œï øq^÷@ˆ™Y ÌDDìœcÐG~‡x3qmè¸óe n«¨¬úwýöƒ{wøù;Ë*©õ0„ói×¾ƒ4ûj…¥ëÓÝ-Û2gým¬PòæÅvÉù̪J~m¹4í±h Oc(º°lŸèü¢%¥K“Ç÷ÀcíûIlç 2…xôõq+ŒàOJ~½ííëÐò0HÏb^ u½ÅQz‹ëôbùº]Ë×ýÔÇj¸œi=ôGîç[9Ô²Úy´†²†G9–ÑööØäQ›zeÆÙÛã´qq±­……_ê‘'ª…³¶ù(‡ejÔâÉ|öI䣤}öÕ¯ã5}ûN˜Ää÷yÖ’^½ðgÏÓdšwécæƒO? ¿’LLÌŸJq˜¥nÊnÝ;û¡D Ù\䦑Qƒ²,l;Ep×¾´´² ù—ׄÕ3r€á8¾’p$wÆ”¦&Õ÷.]:¤ß¿ž›_ö8#µ›dŸJ#zô–æt ¥0þ5¹$Œÿ^ùå[UÅתÊo••UÄï—*Âðåk%Ä—Î]Åeö‹®ŒÁç§múÛ8Ô:(ÓrÐàì½òyÃJëL²Ñ†”8j¦Á„•»f_Ùxí2X%ÅŸô"n¸®] ùaùLóà3ûÛù52ìqÄ*!á­'0Ÿ?5ó…~}qþû©zKHPïŒiÜ¿\4I/KV%Ä‘Ëc'[_ßµMÙ¡"øtZ’Ìqþk0/íV*5šJ¿ÝåbÝ„…„;ñwî Ðë,€•¿îßW^a@΂Bß„;óóñ5{>44óšx3õ|ê93½p& ·Ÿ×u,ç¾xÐYû!´ŒI'ùÛÍá@ëM}òÝ_e¾gÿn±œ§ÈáÏQôákO 1Ëp]3ü‚:Ûâ+†Y¥UíS©§Ò“K]rqÜ–”uƒTa)^ßÞCš9ú?\ îyèe?¿yæžÆ¬0YcsPž-!óÁß¹ª‘¡8˜¨§ÿæÏ߃¦&¤Œh`·jÌWyկĔBÖÜÆÿ€ãºö«Ii¸²Z gŽb(´ò£­®ö<Ÿ™ó¡¼[¯6°Œ=úÐ.W—uMçS“_.†ÉÕe_½Š„*Á?³—ŽÚØN¯8E@hç˜faEóG_yØÌ³g·Ï˜Ñò >ïݧ6¢-•œ~½æKQæ|faüÕ¤‚yU_*Z$0:tíÞ]QIy¢µ7QPÙÆª¾Û´ä—ó,“1`/†Uïá¸å¦‰n×3|Ú —XZò£Õ»P·C7L =æ‰gºAJFôºZâ2N¤zÐcôü†3¼ZbØb5±ÅRëp¦w f}œÀqbIgÙ½ÝÓ\ݽ·zº — Ý6º ,¥ë„—Ù“²Ýü.Ñ4zã 2o&>of.""*ç̱hÙègš‘qIQ1·C‡æ–ÿb.ÝiˆùùÕ†À8(öùYNs:‚a5$J{]¥Ò“ø´úºÀ¥KÔë噽¾¿Þæôc1b´¾žAðßCB¶ƒ^ë0°¡ôþ…à N ÐRÓ:¨zLušSÈVSB~7Ynåan­!\c1­eÇ ¡aˆûç§£Ó|'Å~°²Z×¢¯¬L yzyê^3ÁÏï½ýTöùÕ û}ìZ-%εΓ>«}[ýÔêÎ5 jµïšU=žç £Ú°ŽutœîÝ»$%ù´g¯¦Ý^æéYèîîneÕºR`ö¬µO_ºÛ¯/3DßmUF䇀€ÐJ0l”t ™Y)7ÏÏžÝÈRà÷ï}w¼Xãô?w÷VýAòšh¾Ÿ{ÊȨѥ@¾­[óÝZmôù! ´kH1fÏ&¶ee%Ý˸9Þ±`Ññý¯qpéб㚶°ª±{÷þFFÄÁuy¹·®ß¸jlüG,XòQlïž,''׎:9;£Â…È¡ÕCAA þQæ%¯ÃÂC:tü>N[¤ÿõ‹wü7oâW®¼ÐÔ”×Ö&´YŠwk×´Éè÷—ÿ(sYé‡SaG¾}¯ÔÖ W¯jït3»r5_]]nâDb1‹¨p^‡Š"?„¶ Ñ^ff?Ü:t£G<!án‹ÿxšÀÆhâ‘"?D~mŒüp´‰}ˆùó! æãy¼×JgFEÄ_'¬ ˜Uù€¨(w°?µº0wÇšá+­0âÍý¯_ñŸZ>Ž‚@PkžoUTàeåm&[C±ìˆQ±FËÄ|¿ŒÊ*¬uÌk³§{]z[m€)Öì/X;,Zß¾á­6â0l;6UØJK[EŽûï®»5¶nÝhaáÞV2±5ä_§Æd_Ä|í ˆùó! 
æC@@@@@@̇€€€€€€˜¯}#æ°ïóÊš‹ *+,­]QÊ4!¾¾ñߺùjê] ,<~ÒB›%Æ ;7``ÉðÒ݃y·€Ð¬µ1_Ó¡HNv²˜xñž¤u¡æþv¨«S|-KÄ|M‡ü«Nê }¡a¹•Û—7·¼]Mà_6¯7ç±7ö–ýóäï™ÁçKŒA®L攌ÍQûxŒùÆaX"ih%ʯsóÒ@¨¬ er`“yµëò7‡Oœäç¨o$)ÊÇ jîcYYù°cÁòS+õ ,“â#ó_}ž8ËD’ü×¢³§Ï®:³Lz¡êVvDÅ[Ìd¦n·î.eHu™µõÚ%gÍ:Óî++R¡klÙ„$åßgF]K‘ì£6QK…~+íZtV>³[%]-uV3˜õX¤Ÿü×ü”ø´Ì¾JÚZŠƒÚóuAEàGˆñÒ4 ¸›\e™''S È<¸ `Ešš ”«ˆYJSC§C³4Ðþ¦ôýfÃoêÌÞ -•eK ç¸Û‰(=k⬺pa&¯7Í-P5>rýK(Ιð)P×TxÄÄEQ*" 4^í£[6Øpe<,‚ÄŠRÝK%ÕŒûiÐ@¹×œÈr,#žÑŽ[?óÕÆ“ôË{þ9õò}¹œ¢Š­³=ƒCR){ûxWàžÔÌ‚î}äÌ–Û©+JQö§OGóó¹ÑzòâÄmUIfìõ•••³fÍ¢ßM?¾÷Pt94²rÔÓàì”y;»gâ|dÒÕ—æMXûȧÎ,¹MYo.¸ òóÿeø ¾ÿø)øRL´il¯Ô×Þ¤ ò þöÿep0bÛ`¾âÛS†‚RŠz7Cm4LòÎ-;€ "Šöö'<[®-]V˜,Ì€Œ•º#­ÂAõÞê³id; êùÜ‹v¦Æ¦½ÄÜb-ÉϘPŸ–.jÌç7ªØ7U ©:#;õ%Cvžós¼9bXÿš™C $‡ú;&±Å<TÝêÀôÅá!ûÄ©#=~ 3nJiŽ€€Ð„µïê:¢­®àvù8jùó°…´vÐgô­Ál…Ï‹X&¼KóTÕÆ–ùØkúœ»æ2_SÝØ7öçrr4$ráh;!©Ñ+bVZ^M^Œ='¯âi~STíc/¬1NÒ('3hõ‡I)Ztk£ø(·- ]?-ô†oD6Þy&õ ­>Ý­ïÍCá)–j„˜Rõþþc&¦8Œ8=ž­+…êW]øZàlµ`ÎÎDu¶,Ô{ÊDâŸdÁÎÒO¦9¢r_P€‰…C{[u3,àÑ÷aKæi¡ÊÑ¢µ¯n¸´ÇšÐ²\×m¯~&ô…e¨xJ+hâ‘­Ff>±±üˆ¤‘–«%§è—æ'k 8å&)Ø^8Øž-¨•iõÿa?S—Ð¥Ò¡K9ísèG^ë Yi€[ë*"•à‰òxjJÉxÁmµG,­û‚€Ãºe^Äš`#%ådöÀ·™Új¸üœû0ËýôòïA—eÔ²ü…Èe/“S?ÐRhÉ„íÔ;ä\RÈ9ìÚ£JYr†!óÒ¦xúýõ“i@BhbˆÝS9-4 Ü#ü=z+õôp4²p¨ßAÑÙ i3t'µžXÖ.™'R+µ¤ø~É“{ÿÙx|ðn»Ì7š=ç¸Ušx1pÕoT·´øý´æ÷àk¡Ú×NQ†ä`¢ýX®>š¨–/ͬÚL “z͘oµš~ÉY£ÏóõÄñJK½IA’XÏ¢„1¬'ÚV(ä{˜›{Ó®µe…‚î—›+rûRpûÇú½iÛò÷ïë À—,¸ï8xšÌÜ †¬2=ÿw-»š½*éÑM<€ `»‰'Ý0 óR}é½"˜BRRc2oT‘²¯'ªëLýUB=°zL¼öÍf>vúh®Qšæ}q‡áϤù€ SÀæXèàÄ#\«5/îä#br¿R[¯*/Ž>±iµ»'#ÜóOú@eé+Ìâf0[óq”Lþ2•T”æ«ñÿa?¯mÁ¤½kpY²4Æì˜jæcuÈréReÁ_­nEæ41ó5Xû +Øs>_YmòñǤÇ, ª¤Áöôð5*~B¿%:©Žk¥ÙUÕ*äYst››¢¦î‹NÜGšžÄÉO²$䕼¯óûßZî±þr‘§pOb1«Å†ÿÌO›QoÞ¾• È.§ö«í¯Ößîû6ŒäªÂ,ÚØuÉEƒHñâÛÿ¶`Ù©¯¢ (ÕQ‡qq.˺Üôi‚Êß~>²VôUœœæ¨`è[Fî~Ì _£cãC92Þ”´ÍŒÜ÷!•¡8‚ý®\63{Ï(lÇKRÔÛÈ0Hf(²f[A=æ­(‚¤X€¼xìÁЧ›Q–ÌÓ{'* ¤ÏÒ\dÈ&sM[i»tâƒYÆÇ×ÓÐ|·¡ÅZ¢?´ô ó— @¯/¨F­”ÿxgƒÂŒÍD¿ŠYL¼Ç}hÆXR%³{/ÅûÇÌ”|G1â·\F_9º>gfS!¼vÔ|Þ:V7ú\®Ö£†_Î ,€GL|0³:þv½6ñ8m‡Ò_>™° dÑqKö5ærpÝ{.óöÉ–Íâ/ý`¡õ–ewÂt.†Ž¼©1XЧfuË¿î¯>w5åFzüºäÿ¼¹ª…ÙrX“+*ª¿öqÚ‹ŒØÄdn"ˆAƈÉ4¢íý±Ü€¹*‡ãÙ,'Çç>8€*kéI¾¶‡‰ý‚ÄhgÁaH{nqþ+&¦·ùËAkøø²…²‚í ±d^Þ Êß0äz æÂ¼‡'3°O–ñQ®cVJ4¬ƒí˜q~ "‹!;˜1Á“yއ¬«Jsv첞¥, ÔyçHÙë„*ãKµ˜‡¡ÄÿíÇEx_\€Õƒ&¶$bÄ>binñ½Î”ßtÒéÊ<>æóKÿ5èCs¢zGТÙ‚ i\"On“f,a2—= 
‚´g mÍ4•ÕØtåÁ>lBÐ^ü ÖNM~Z,é,;xð*;b6ÉŽ!ƒ˜Ì»ù×}Ôç®1?ðpd‹æj¬2.sÖà¡^.B$·O$÷z;ݪQÝ íõ›æ—rÀ®äYÄà16¡&þÆC8«›‚‚"ÌSb ;ª+ÍÄ| åw0A(¤ÖØÛä&A®³Kö„¥xGàæ¦±ð÷¨ 0 !nU{°…qÐÎPÚMê×GX'b\¸'Ç»oHÚ+}¸WxÈ hP®©0-ä«?—ÜÜl)Slðn¢³¯½þø,] ».öˆ\0òíàd¶k!¹L ¼'§È{Ò ¼ÿ.¥þü‹ÄdÒsH{„˜¥j®d¼v´ õUkþ óÒÕ€yÅ—-QÌ _OÂÓ(xÀ Cv3Ö[w"{À—y¾ ?¨þQ èk€ŒáÏV×}é®û……1§š9:ÍŸØÓ`çÿy5´sl7S ˆ{Fß>ÍûÀUÝÚZdŒ¡˜o–÷²åöžð¯NÇZ3Íá_TfZçÌ´fßõ···çr`bïib_Ç‹|=Ô¢Òè[{{Ô26ܾ„G\e …1:ÿµèof<1U@î,a¯9æ@ßñÄ0}É«Ç[̶˜„nIe2ýè§U ”Óñ«¼Çµ¿Wõ¥´¥Ê7¯Âƒ˜7˜ ªˆ¾òŸR=õøÇ)ÿ#Z †®1¾˜ëÜmK2—AÚS2 ½¸‰$8l‚AyŽU\ëY@¿ÜT3ø¨ÙÙ&3úýR¬J[ÍHœ¸îGæG]n·Þ^u•|uÕ ÒÞî‹o •$Po²…™¡6¨E ´8 ŽY÷–ÿµ€ešÖ[Ârþ6”ë´à/|þýÌí¬ >}…‹ü·9®p •”wÝ™h0Tg‚;þæ++!:›ª:óÁæYP ãºØ²Ã UƒÅÀçÆý¯€j^sÏo.^¤¥$ó'Ao8˜þmnS‰ö.$ã$èBðÖ–£wÿ5N<øvÿpÔ›%3&ý\Ê“‹»Œo؇{@?­¡ ‚˜Ôøwklœ¢=²¸²•F~>Ï4=2‡ÌnU¿[ì b+4ø¦±WëÜHÈÄr­Ã¤\Y_5"ê {ò-9ÔIióÒ°‹p®VÌKló`-í¡ªóaGÑÌ,øƒÕ3RXTöˆ¶\óNÎȶæUÓˆùš(];ÿ˜vøв7CÃ-;'XÚúÀj->C&n¸ÿõj©^ð»^¾GÏ9š&0ÐaW_ÑžÞ¡ðÏÁ#¤ìØ ä=¨÷ýÃ3ßén º€íD)ÌÝwÿƒ0¯0–?ãÊ¥-°œÚ³ö¨¸s"ø|R¿%#RþŒù~€?Jÿk^ÊNU»RÜ'´•5{pÞý²éä&}~ à±AeNºó …2×Íþ¨-™qç‡)OÊ‚©«MWm>Ú°Í‹.!&~!lC¡A™1ÂÅUßÛk#åì¿°Ó ÷ç¯&7ü¸8®ööÝ mÕ±Ló31E²Ÿ‰ÃGÒŒLæÑ$ïÒrëèõ Â5¹æ÷&öÃ\|ÞÜm ÍÿsqγÅ2ø»±ji×$Æ.`hlQ^˜G,¼˜ìÍYÝÜ툭8£W )Ý@¬„HÙ5)f\-fjÇO %W~¶XÏú§]Žf`Ûš—&ó5&roE&æ¼WVÑSà²OKLyüDRVm¢ºJ¯Õ:‡(÷áqâ÷U^÷îý)%°IWNä¿ú¤¤9S©ïm¡3öòÑÂå8m’ósö8ó¨Xw0±€ÅÇÙàlôÙi[vR'%AËÃ>ΗcN¡ž»CÒ 'QÏÏ1^éQ.Ê œ;âvøTŽ«]{”K-˱Ðßezµ&9è&戛ߙ( ¡^'.h)1~O8|8ˆ¿KŸ…†³N@Ð`ú—¼Êó ¸/º€™››'+ÛŸ´y{% ÷6œ1­Õ•*î¾ôôà±ôñ.€Tñ:ãA„•ÃæËŽ¼v]^2—×paÏÍ|õ§¼ºÃu“¬iå@¯Ç‡fQ2«Ouî<|ôdu%rÕ7.Î|œa°ØúfÊÃÈkEê²b^k-³²|³ð³×š•7ï>µrYoIL?[äã^1ŠUäßÙw,á~ ‡¥z­Iò˜Ñæ§Ãõ„ö¼_],g˜¯r¦EžüO] µh>ÛDà+>æmßä~çi¾PawŸHË…¬rNW7¡!k®sÚ{éñ«ÞÄrÜŠ,g7¿§9oÆÖ¬ni§xý{¹—ÜÐækú2Ó²^u—Ò¤ÅI}Y·¢Ór˜Jê3•d«®sÇ>=U¢4æç?†ò}~Γ\©ž²RͤZ1_£ÁK ÈÒJãž;šsIoD§fÒ´ä8sPkÒ¥ŽsˆìT4'/€6šª².çŠmäï2CR'«8>ë1™Q¿HÝ%ŽÜVe-9”}8m ¯ýÖ§­Kj½t”¾#u÷Õá4´æÒƪ»x£îŸ(ïË$8{âh§Õ³JÀš9ÕNÿ«d‰¾ÿ1cÍcÄ­1#ÎVî'¬^NdàZ=ZSÁÂûX6 É¡sÂ/ÎáêmÛ˜»6œò¤°Þ€Í:J&®JáÕÚv\·íc§Œ°ë¶£œÕ «;]Õ,-ÔX¥z[mmi3…Óçý!Ñ­'«ùDúsŨvu“k1–ÝÂ(le¥†&gu“eº{”i³›ªbôÙC7 ñ¾Ô@EõÊ)sêØµª‚鑬ZìȪêꄤàô—ò­:Î!êPÓó^Z,]ˆ‘ó½§ni­ÕÞê×±ó€XÍHAü¯†pýVÃéoÏ&.9Ñ1÷'j£ArrjÍx"CÒR-Q:4Ƙ±DU [f`kez‰ª>îrgm;™ª’SžAÃsâô¢Mlu4ü9–²ÐY³ùCÝó¥X¦ºü ‡EήðoÎökáNcy 
ËÑ/Aâp~lÈü]Y{ƒôÉ/ÀzO'êVZZîùsމºŸ<‡èõNOÏs´BÛã.}b»–„\­òûƒüAú ÔvV’=׬f[È‘šÐx|Â’RÂ(üLæ7‹ÉÁ~ÖüˆmiÄdý®'h*Õ¨€"-ÓšÕË|U/ŽÑ´g²ÆÏyù”´¨#¦Ĺk4£ãz(Ë9PN¬ÄÞœ÷ •²îL´ÈjÉÉõ,–ªç¢šj·éѼöŒøÿ–^šWÙp~dHŒìøw!.ÉÉÙ¨# ü6ùm J$ÖÝ’ ðÿµ“Uï”þ¹ðµ–oÍêe> é…”!Ç©nôÐAÛLì·QjÉôÇoÅ8Cà c#Ïj%Ѳºv9XÛœ)—nÉåGÓÇÃãŠr–Ëî_³oV$`‚ãáµ-Œ±´¯hYŸLô‡±oûÏÜ“{z%eŽÛ³z²Uõ±Æ›.„¬×­éO¥0VÝ•¨>°0cè°­õq_BÂü +™iÿ¬È2$Ÿ²¶ \Än›î#6yÍ=ï:]‚½ÒN¦½J2´uŸC$HÁ ¬Ÿe|Ê%ü2Sf;PšØiBÕêØŒM@¥±fïîÝ7`æÀº}0J}é¯: „Åy0&ÓÎìR¹Œ¹ŒUò ³Ò6ƒ€ðsóŸ²õ1U©ê†=eó jžÞcÿ…#þ©³Øè‰ì<éË®kFbÍØé¬ùXgÃkÍhà˜ÁíÍ܃“ÒØÍ³4ìÏ]ôzs‘e†±FÕ©³ú( ¼°f/púˆ(³ÝÎÅñ“e÷v ·­~·"$ŽY y¤n9x¥Þö£lZ²t\¾Ë!ÓÃ9Χ ÅöþŸZA=‹|‰Ï"‚W¤FAßñ˜Ì ô­«k!õ’]w—ìâv\-—ÔyÒGÍŒbÞ/®D«‘Vu$ }0J}é¿$_Â6KŽ\Íd®æð¥4Â/ æG-˜Í%ܦÁZÜüê–~°¶SA±^ÍÏÅ×Q†ÇìAHu»CÀ¾AQÇnë9±OüX@¹@o0^A™ô ÷Ñpž@ ög‘J凙@¢‚ãCO [r Äó|BO)tÞLŠ þò@áˆ;ÅkèìðMͤ Ó-ØógM[Îb±²6‰¨Ø" ´3ÔÇ|]¨Ë;ÙÀh@.ßf±FÏj;qêòêUµîW-YÀIuCd|/ANdeÅk»IÚ¢£íÛp¬Fʲ–mŽÐž.°KÈôIùœÎL @b8*ˆù°öì¥ûN>5äPêìÙ€óé‹FB‘‰ •bØ gXc¤’’M¸\˜E{}VàùÿÀ+Çz™àí³|.›D{ˆùhdÎT´:ˆ©²¿pü®aÆ 𷨨¨ªªJT”7ÂÆNôÚÍÍÍ-((èÓ§¯umm-¯°5[÷ÇmyïÞ½û~¥¤ˆáó<¸|ùòN|§555ðWPP‡l&[‹©§ƒ3×ðóg³³¥¥%eS…eTV6{$MŸ>ýÁƒX6uÕª?ÉUTT€­ÿªeddDÞöôôìôÌœ‡nüïÿëÄÌ7xð`rÓQ\\Œò=ÆA9óŠ3$!!¼Í+òÈÛYYY·ÖÏïO°Î¼¼¼§j566RíILLÔ××ï¬÷+++KÞþñãG'~²b=Μ9óÎ;Ø·¹gÏžØl4pæãlmmÉÛùùùÙÙÙŠŠŠ·ÙÕÕ•nWŽWh›RvbôîMk×ÄÄk锸ôé•þëÚµ6kTjéþýûØ·9==ý/²ÁÒsÁ™; š“777ÿþ=ÙÿóçO—C‡aÓ¼#FPíÙ°aÃ?Â|´’úúúÂÂBö®nÁfÍšEµÇÍÍRîwP½kðL±o³ššåGÜ·§=jW4òDfacC¯êìÙ³˜e¾çÏŸSí©®®®¨¨ûwÆ(¡¨¨XUUÕùî7((ˆjÏñãÇ;ó………ÑîÄø@­°urr²––Î|ÿ(víÚÕ·o_`»ššš¦¦¦ºººüü|Ð|òòò˜µùÝ»w"""`*ð‡  àçÏŸy«Øoݺ5mÚ´Î]µ¬¬¬à6i÷9²SÞ/¼G%%%¨³˜°°p—.]Ø;3fŒ””Ô?àN»ví* @̼½téÒ¨¨(ÌÚlll ]ÀÀÀ7oÞ@[­Gccã¾}ûh;+8óý+ð&!MY«ªªò„ÍäÙi“ØØXŒ[Knþtuu‡ŽYmÊv,!6ºuëF ~ýúJ·ß/y’¯W¯^_¿~í¬·)**ŠÞ0}LLŒŠŠ O˜½‚¦ O±fÎ|\î±òœÍ OyÈZ99¹ððð‡ùÈÐ××ò䉲²òºuëvïÞݹon]âÝé!//Ï+´‡ÂÆÆ›CY8óqÐ1/--•””ä!› yÈZ--­gÏžýƒUKGGçéÓ§Ðèœ8q¢Ó3Ÿ———££c§¦ÐVðóóó–Íâââ!!!8óá #¡x‹ùx%Ü “oß¾ýk•ÊÓÓóýû÷]ºtÉÍÍ]¼xqguô'cöìÙþ±R-áÀ8zôèYŸ2œù¸ŒŒŒ MMM2ûëÉxò䉺ºúàÁƒëêêP§€ÇŽªæ{ýúuVVVxx8¯Ä­nÂÂÂÌÍÍGý/0OÄmAqèС7R.Æ™Ç`pî—9444xÅÔ„„===P?AAAŒR8uJìÙ³Çßß¿´´T___VVvàÀ[ðÁS¶²²êÜÏÔÇÇgíÚµrrrhköïß_PP0cÆ †{÷îiii%''w¾›mjj ¡]ÏÞù·Ê+“ÖfffEEEXŽˆ3—ñîÝ;Þ2xܸq¼b*ÚL@‹ðïÔ¨›7oîÛ·oÕªU'OžÚûüùóäÉ“wïÞ $Á[éëXÁèÑ£¡ãééÙ¹Ÿé©S§ 
S"Ç2ddd|}}MMM±l$Î|\oåxôèѨQ£xÅZ4vó›7o.^¼øöí[Þro }ëïï¿råÊ.]º ¤‘O???ggg ‰Î—ÇÎÞÞþÁƒžù._¾lnnŽ}SÅÄÄ@ííÚµkúôé8óá`ìÇ-£ÄÕ«WyˆùŒáoJJJ×®]¡‰DÓêvnÀ-———8p˜UxgΜ9xðàØ±c‹‹‹>¼lÙ²Ns³Ð•gª  ““Ó‰Ÿ©A|||ÿþý?~üˆqS PQQ¡¡¡}*œù¸ÿöòµ¼•AõtG£¬¡9u;7€ãÑÁsÔ!Å3ƒ¿ªªªüüüß¿¯®®î‹¾È=ŠV.vâgš••ª6æÍ›‡e;¡óýªÜÜ\iié/_¾`¿`qæã2x+ "-'ÊËËC™ÍçtîܹÎí $WPP€üΤˆŽv?~ØÎßßäàƒdeey+ #¨©©}ýú566öÒ¥Kø™:::¾|ùrêÔ©X¾MèYîÛ·ORR’Wò—áÌÇe`|˜¶måS;†Æ.!¯ôruuíÄÌçææ†ÒÞ®]»Ö¯_OÖ|ÎÎÎgÏž…~@CCÐ^=x}u¿ ÇrÒ¤I8RA÷îÝA¦#ËlG`»¢¢" //ÏCi;qæã>—TUUñJ`ªô˜XÆ©S§Pæ?~<º'===77wÀ€¯566’×8Ã]£ÌGöä|øð!üuqqA\€*xZù¥¤¤×¨£wÖ–AOO¥=kk눈l¾b÷îݶËÊÊBÃñPñâÌÇe@ótåÊè•cßÔððpFéß0%%%²ª1Ô§OŸÞ½{‹‹‹———w¾Z´G–>¾¾¾è9Æchh(ºñåË—½{÷zxxíA€·üŠQÔ××ûøø#¤¾/^tÊf´lLL BZ°ˆ:ja µµµè|Þ¼yóV¯^ ¯Øõë×y«„qæã>._¾ÌÌ·sçNb¾mÛ¶‘·Ož<¹iÓ&Øøðá´•Ì'BSS“L{nnndæCçùR†z++«Û·o 544@³@{}ûöå¹$‹jjjäàNNN²A€·ìÌ™3è8P=På‡ 2dÇŽè ™ž={¡ãê8óáhh“cPÅy¥HçÎKþxõêU”ù Úã­aæGYnܸAf>ÊÉ!rÂÂuëÖ‘Õй$(ýååå;Ós$cÔ¨Q÷ï߇® B¸ÆTj mmí´´4؈/))áÑrÆ™û€ÊÄv‚æã•"]³f %ó“·¡¹ttt¼xñb'¨9ÐúL:UBBýøæÍ›×¯_“¿¥ì‰GFFž={ZÚƒ®:Úfí‚&VVVÆøÍR®Ä³ÑÏN†~ýú‘ݧAšCߥ@®*ÉÈ‘#ïܹÛååårrr_¿~]¸p!ï5Î|ÜOT *…qPÊR¹tΘ1ºÒ222<]m€¼(GnA.PÎbR9nܸ‘<¨´Gꌋ‹KHH€¾f§j† F€FMM'±ŽÆÆÆþýûS>kjjr=öÔuuõíÛ·C%A÷¨¨¨=z´™ûprrJOOG—aîîî¼Â|©©©ÀÓ”{6mÚD™¨xJÊœwëÌ®]»€ç(}@É¡ÃPd R~üðáÃ7ìííÑÐÈtt4 333{ôè©;‰‰ ¥Œ 2~üøNF{{ö쩬¬¤¤=®¯<ñöööõõMJJB—Ê ²ÏÜܼÓB™X¿~=´JX¶ÐÅÅ…W s̘1èb/2DDDV¬XáïïOÞ´Ç»ËÚ¬¬¬NŸ>Ý»woÊ þ¨&]¨4ßÖ­[É̇&ÿ¤¤¤ÈCÁpzddäÞ½{ïß¿‘;µ±±&øøñãä=/^¼ñÚ™Þ}èpR&[‡=ܪ™ÙÙÙFFFÐ÷òò"GC9r$¼SdÙ‡3¶]q…YgPºJb«V­¢Ý *’ùиðbÈGòêfJìܹ“r.ecŠ"99ÙÎÎîæÍ›ä=p0(ÙwÉ’uuuÔ'ˆ[¨­­•••¥º)îÇŽ;yòdçxëçÍ›'//OÕ_áŠÏPï„ à§¡ÿM–ž°sÈ!?î|M.Î|˜Æ³Çíf≒\ºté‘#GX¤C =òˆö‘˜˜r‡®;4R´ˆè:GLŸ>½¼¼\\\œ¼hoÒ¤I+W®444D÷\¹r!¥ ^¸p!W"\[[[ûÒr9TÂÎA{çÏŸQEÖ™Ãã  åääà‰?yò„¼ÔÞÕ«WŸ={ÆC~¿8óñ$ ]¦œƒÁ eLMMå•’üðáÝý;vì¸wïÞØ±c©öí©ªªfdd`ü¾ Iw’QsI5χÂÑÑZ:ª”¡ÐÌeff>üùóçäh» ügkk ¥Ç™Û¼pá‚··7Ý$º¼ ™9"""@ꮢJâ4•“3´·k׮ݻwC·Æ××÷Í›7”¶Í˜1cݺuÞ$tî&g>¬`öìÙØô˜RVV數'®®®@Œ¾:u*Ý–hËs~~~~=b”ÁxäÈ‘Œ,gä´GË"ð”öÔÕÕ)ýEQþ›3gNAAÁ;w:.·-èKKK¯Ó¦M£úª±±qðàÁ¼N{‡öññÉÎΦ½‘Í›7+((¤¤¤tܯ‡„„lÛ¶­{÷îPÖ“@þ*&&++«ãÇw¾ Ž8óa´*''‡‡s††Î,£¯€<ÔÔÔÉkã° }m"'L˜Ôˆ&Á`@ÈB§$77—vxåi777FÄϰ··ïÚµë•+Wh‡Ž›šš€JKK;" J@@Àþýû¡lß¾} TL õgÑ¢E ï:Gúœùx6l€¦ ²ŒèëëóJ_»Å@”з€÷œÒQhêöèèh,Ì$=xð`Ê”)Ðè3IF 
xQRR’ÑÌà߾}{üøñaaaTûAB?ŠbË–-T.ìfff?~ü€ hÁqAªªª¶³·lúòåKºœ‡&öÌÍÍÑyGžÃµk×@K]¸p‘ÛöÌ™3‡ÊF™•žžîëë ÏèÈ…$P••½(x¬ÀˆËHøgÛ[œù0hŒ0Å|sæÌáÚƒ¢‹gEÂ2ùÖ‘„ž={fggsKüAƒ ý«W¯¨fPÁÕÕŽdn$sæƒo¡E†–—2ì UQ€hðòò½Eµ@â0 Ðvƒp<þ<9+øüùóˆ# QÞ»w/“Ãàî¾ÿÞqã«„§OŸB—bÚ´iÀ.'N¤{Ì™3gŽ= G¶ó·ÒÒÒŽ9T]¥K—‚ª;Eå1ïß¿‡^5¼ 7mÚxK‹3¶ívB¥ðJ|ꆆ†›7o²’;$eLgº())IHHpvvæä‚‡òòr===KKË}ûömݺ•ùÁ:::wïÞm‘›»uëÆü  ½^½z1bYT4@—ÂÆÆæÞ½{@W”ßòññ¡Û`3Д-ãkkkíììà/ˆE摲AêQÆ*ã €¶ åäääïïϤ¿xîܹíÛ·ƒö9ØÚŸ¨««Ê }üø1Háùóç[[[!Ö˜ƒN|Ä[Wœù0 è b„ù¦L™À…¦¬¬ÌzðbVÚSP0@{ w"##;:ž8ëgÏžÅÄİè_Jw=#ÍÄÊa@{JJJOž‘Ë#À‰²²²Ð\¶a¹!pLkO¶µ¢¢´4ÍÐ.3?XOO/!!aÛ¶m ¡e¿~ý ʆœ6– ÂÂÂ;I +'(–ÀÀÀÿþûoΜ9—/_Æ”' Ð̉'bccAHÁM¡ÝVN}#¸) ½ž={¾zõ ”÷СCG/µ¹¹9ŽÑé á@ûÞ½{711QFFfìØ±†††›HÀÛFœù:9€ö(£)rÐÙ‡öˆnø ¬aÒ¤IË–-kOŒotVæøñã-ºPÁèLž<š9èûC‹Fy@CCƒ&Hè•O™2¥mÃP €{àÛA (³ g‰‰‰}ÿþ~ø/88ØÚÚšòÛ’’’ýû÷ƒÈœ7oÈ5* `¾U«V\¼x1È#r~ 2 agg÷ë×/Ðy”'FDD€²Ž†YÃbߢýù ”óóçÏñãÇ/]ºt< LNÉËË{üøñ£G’’’àtsp®ººúºuë 7Ðâ k]]]dd$0ô0²²²àDs¶¶¶Ðï\AÞâÌ÷h“£Ž}ûöÍÈÈh- p } ÄhùW«páÂ'''P!mX†G2݆ÆÎßßz*`›³³3ôÍ×’ÐfÃNŸ>íîîž––Öΰ2¢¢¢p…¶e¥üÕOYY¹k×®………JJJsçÎ]´h¥t£Oö›/++[½zõÙ³gŒŒ ŸqøðaØN7>»5 ”{ž={vþüy¸Ð-Pt  ÑþÕ&OŸ>½xñ"\Øä׬Y³@Zù@IÏ@l/_¾LII÷âÇЀ[é?|øppPg €úÒzþü9£Á8=&&t(Akhhèëë!¡ErÅ3ß? 
hwȹ³;ðöž9s¦mƆžžÞöíÛÙB{(@ÓÇÿA‹ÆúYÐÁ?zô(”ÜÜ\GGGWWWt&4© D¡Qóöö¦ Ú"@,‚|„.P2HR¶Ü XØ*æDuëÖ-¸Ç &€ªCG/÷îÝëëëkffÖª1I`)à ¨`P§N –€^”——Ÿ8q"póÓGpàÀÊ`[XXØýû÷üø¡««knnnooß¿ºWn’ƒS@œW³ÃYBBBðë©©© ×@›~üøŠ¨]MM å6ذ òjÕÕÕ báìÙ³‡ì~ÏýÚµkð÷Õ«WðAÏ ®`hh5A“777¼5ÙG+´G_˜Ì ѧOŸØŒª#pèС»wïR†“g §ýúuttŽÉhP´nP\3fÌX¾|9Ý!)hR_¼xn;vlÛ¶mÚÚÚ`ù€˜Û|°cÇlO– 60ú´HHH¨¨ À  ~ oÇûùùQéABMM PèæëÃ***æÏŸÂzÎ$ û©¦¸€9%D¸råJÇ6é:1ÂÀC¼qãFNNŽ   €€€´´4ð.H7xw€w~TUUûõëWYYYWW§A+?:[ñññ&&&ð&·9ü§££:+º$àÎ|8Ø xuáí‚æ‰^õi±pïÞ½Yw‘ç¾}ûñ¯_¿v\j\7 ¦L™Bö·¼zõê‰'€‡@ºA#nkk{Ö¯¹˜´{±víÚÇÃQæÌƒn ´˜Ð ÔA•‡¼ýàÁЩ‘‘‘bbbpG3gÎÔ'õ« ?{ö !ù:Âí€:¤ôû€žT*¸£ÀÀÀK—.µx5ÈášÐ½HHH€Ç±zõjº„’’ªè͇€Ø€çP¸#PðpyfŠL‚ô€ÿà 1G;õˆ"99Ø þûï?`Gª;wîlÕœùp°÷ïß700€¿´¹FÛ€úúz™ŽDe  [ ºIÂ@o½ÿZƹs炤›D[®L ö€œV-Z-¬±±1´ÝÀ"&&##’õigg7gÎcØrq <ÔÓXêõë×Ð/»jÕªÖz®RbذaP8ðÄÓÒÒ¦M›œÍÇÇuuàÀ ÕàcQQ‘¡¡!š2 ȻŠÕÑ.€2‡b†+,,¬­­EYÓÉÉ HÞ­[·²BÛ8pæÃÁ ÄÇÇ[ZZ†‡‡SEPl-üüü*++±O{ÀCÐÆQ&c/îÝ»·{÷îçÏŸOŸ>}Íš5óH@Hž>tÛ²}úô©©©133µäïﯬ¬¼`Áæ¡ÔXèT°$ èTàïñãÇÃO@ûNöÄa/–-[†.÷²²ºxñâÒ¥K®z÷îÍèø?~@~úô)ê3L|¦ªª r „ÚðáÃû÷ï/++ eªxËÂÂžŽ©©iCCt€€±€b¥¥¥[¤=ø-xS 7?—••¿JÔÈÈhܸqèÜ!z\pË–- dG—f…S —`bbRw?Á™&)//Ïz˜.ZhhhÄÆÆ¶ÍÙ“€†{Ê”)l§=`ýýû÷KII­[·Ô ]'h‘;(†4îð£ÑÑÑèGòL!<Ö™$€m­º \êÀhÀ*777º:•yØë6ØÂÓÓ ZîDÃ[gff*((—Ù€n+..ƒ*‡Î‡iiiÑzo655:u žËË—/ç ó<êOÂ_T×®” ·ãÑè À[ð° €MAá‡à:з Mʃ¢´´ä/°/”öÐýÀŽT¶åççŸ}º TƒÃsH@9D(¿+W®£Ï >«õõõP˜=jçÓ„Ç~ÐUUU»wï>sæ èNïìZˆ‚g>tÀÇÇÇ(@#ÈÈÈðíµ“ž¡%‚~=´õ7nÜ8þ|{,Ù±cGû³ä€ÎƒÖ¶ ù.®_¿EMM ´oXXØRÚf‹Ìr ¸”0°0­­-TàÚÔÔTF«å˜`Ê”)Ož< zÞ!˜?;å!!!Àš‹-2%¡U}&(7”íà×_¾|ùõëW ÏŽKG'**JÎPÒÙC%::]ìËAœùp° H´´´X 36uêTžÈº0hР6ÓÞ‚ @]ÅÆÆ’gÑØ¢­uÚææPSS·Ã$:#dggƒàCムӜǎ[»v-y™`ä/íN ¹ .+·¡qÂP9E™Y^^$Wk¹J ÔÏСCOž<‰²Ý¹sç಴Iû÷îÝ{êÔ)`GèîîîëHhÿƒóðð€’ÏÉÉÕÕÕ…'È."æPWW¿víº åðÖ­[`Ìÿþ÷?¼É™{€:"²Ò’òD @//¯¤¤¤ÖžeäÈ‘ûö탮ÛMöôôl󥤤€=­¢½ºº:333øEh+©2¤£ëÚAÓ@)µÖ Ð^på^½zMž< ó5@WÏž=k•#Õ }ÞÚ°aU‰Íš5 t§¶¶6<_PEptP`'pa;c¼Ñª:¨dò4ž¤¤$ÔØX¿~=P2ü:mZÁ‚„„9{Qbb"Hv111ØÃÅ,8pæë pqq±°°@çQ˜ÀÆÆæíÛ·¿—†††_¿~1OÞFPB;wîLOOïдÝïÞ½»~ýz«Rçdee½zõŠìôØ"üüü¶oßþðáÃøøxæ*mÊóóó™øq ¸}û6°\sܸq]»vÕ#E{€³Y pþüyàÑ;v¬"î1iiiðÕ—/_Єº,z—´`Éþýûùí"ŠÅÙÙîzœ¬ÞúúúP%Ò:Ú©S§B¯"44”ÉD)œùp0C‹Táää„}ÚCHN§¬ØÞ½{/\¸‚I°1öjkÖ™/..‹"ÛÊʪ[·n/^lÑÝ”ŒÒÒRàÔ¼¼áR&&&îîîV$ _±ž¢È ÎbÅm¸ªª ¤¶ŽŽN``àôéÓé 
M¸©3gθºº‚aä!hYYY`nö> ˨·3]K¨ ¼¸¸'¼\‰RËÏÏ®‘G}dffBÉ´*‡"œùpgkàÍaâ×Þ†ôlœGDD‹ÓwïÞ]ºtiNNNkýZÛh(A’²=ZÕ„„V’…ª««Ïœ9³mžË–-^éÛ·ï¹sç¶mÛ•§§'£\©‚‚‚¬\öÚµk@¨-8_¹reþüùOž‚LìÙ³'‹ù•à`;;; ¢èèè—/_¶Ê6 =tÁ@k™ÏÔÔ”î(%íðrccã Aƒ@ç±2ßÉ" pZ{ʽ{÷€ö:(‹òÞ½{מDKlˆQàcè@• §Ä3ŽfÐÆµš={6v8ƒù»Mw¿¶¶vBBB‹«Ö8 Fë¾kkké5hYh³X¡=е˗/‡žJxxx›ÍÚÓÓÓ£›°ž‘ÇDïÞ½A_Ò0Õð2p^¿~ýà^ZœŽm-víÚ¥¢¢Âúª›ôôt`ºéØàÔÄÄDxØÉÉ€æ„nš§oîpæÃÑ OOÏúúzÊ®=;懬¬¬³gÏÒîoUkÈ10 5"''GË»wïò>yò$ókBó ìÝùY³fµßÂÀÀÀ§OŸ‚V£ÚOw¬,,,,##ƒVQ1”´7lØ0`åŽ }çææÆâ‘@É›6mMÖÑZ__?>>>88 Ž€«–-[ÆÞÚ8óáàºuëvàÀÊLf¤°ƒ™3gÒj”Áƒcs齪ª*ÝÖðèÑ£T{<Èdy2ÈD%%¥­[·²wDŠÎÈÈèáÇTûéŽv:tˆ*ÆØôéÓÁ$r¸Í›7—••µvܵµX°`‹~•½zõ*..æÌ³600øñãGJJЦ¦&vj X´Ç–L&8pæë$ !3ßíÛ·y"[JMM ÕžI“&AsƒYƒO:EÅ|¦¦¦ä5(ììì‚‚‚ÄÅÅݲ¬¬,è-¶Ç1A´GÇœv ¾……È;Ê=ÊÊÊoÞ¼A9V f§'ÛVØeäÈ‘£=ÖÖÖÀʇÆZ¾x:ðø §ËÅ5ˆ8óáÀ ÒÓÓÉÛÛ¶m#G®Â2(Ó $—œ={ö`9±ËÓ§O©öHHHP~tww dD{ÀyÐAéèÌÀ;vìMI9·GË|úúú”eddÈã™S§NñÇ1ÚCH®CþþþLâ½åå冿üãùÛž¥‡;wîgggã âqæû×abbBÞæ‰„D¡¡¡S¦L!„Æ:>>ãoòèÑ£)?ÆÆÆRƾºrå (BºþŠ  ;HçÑŠ*?OªÑN¸ Ê|”~.íÏGØ6lÚ´‰ óÁœ):Zíihh0r?æ" ¦%&&bm2g>œÆ¸qãÈÛlq—èhìß¿Ÿ’ùúöíËIÑ6ØÚÚR~œ6mYÔÔÔ¼~ýšÖÛ8ÆÅÅ…ÃC¸@ÉbbbèG*—aÆ‘·¡YGiÄòå˹%çôéÓŒ¾Ú¸q#·hÈô¬¬,%%%¬ÕFîÐ_ÚÙÙq>¸6”!N(ƒË\ºt Ím›™™éèè($$äêêºhÑ"®»KøúúÒ2Èe4¡w´=†6äæLLLvìØallÌhvÎ|ÑÑÑÀ|/^ä æƒÖ–L'´ëá0‹¨¨(”ùÀ~T$A³Hå±víÚ#FpwrhñâÅähäÑΪª*r8´yóæÅÇÇ/_¾üÿû<®ì²eËhw¾{÷#Ï=77³užàСCic«âÀ™ïŸ“™Ï:ÐæÀFEEo­Ì%/@$/tÁGép¯®® ‰fÈQÉÌçàà€–6çÓÇÇÇÉÉ ´‡}¿|ùB 944”jéZyéÒ¥GŽÁfµÚæ*Î|8:iii)¶2öM…F e¾ðÄ U!#¤8ø{òäIÊæ¦gÏž½huܺu«®®N@@€Ì|Ÿ>}‚¿ÉÉÉÐóxþü¹¦¦&mÀnŒ\¿~}`` yÏ‚ ¨&V¹‹ÌÌL,×L0ïܹs<á݆3v¡\Þ€YW‡„„ðV!£ÞåååhâõC‡‘Ç– BSSvL•——wtt¼xñb—.]Ð=h45{{{¨*;vìh1¸çu%óa­n a£1EÆ”èÖ­[xx8Î|8óýsèÑ£Br÷¾©¨§˜Ã[… "þ8p`óæÍW®\!ûm‚ÚÃíQö0Pæóõõuss ÈËËÃf,t…‡‡ÇÞ½{±fá¼yó0Ë|€Ë—/C‚ ÇCœùþ!(((@ãkaa}Sµ´´à/“È–˜Å Aƒàopp00Ÿ‹‹ º˜DMM ;ƒœ”@ÉáâååÌwâÄ èm`3ð#¥“Ë©S§0È|ðÜ1^?—,YÂ<…2œù:ú÷ïÿøñãQ£FaßTMMÍŒŒ žêDHCˆð÷Çõõõ¨¢Ú¹s'“äºÜ…³³39âš’’RlllbbbPP6­µµµ-**B=nÐÁd¬aüøñ𸱜ðyúôé:::/^¼ÀÛCœùþ 8W˜OOOÏÞÞ;>ë­Õ|cHxøðaUU•¸¸¸¨¨(f Þ¶mºVÏÕÕʜˑþÏž=ëéé™àîîŽM wïÞeæXZZâ!Î|ÿóQÆcÄ8ó¡f<„”W]Æ.''‡ÍqN2ÀÝ»wƒƒƒ¡eÁ¾§Bšõ‰ŠŠ’M™Ð›PUUEHnX6òúõëg>xÖ‹-‚ÎÞ*âÌ×ù!""‚å´®d@+L•1Ž·””daaA›™³ÈÊÊ‚¦¼žËÐÐÐHMMŸ—&öK2$$g>œùþtïÞûF~úôÉÙÙ™w 
˜ÛÝÝn„elâãǵµµ˜õ¡ÄðáÃ/]º¤®®Že#gΜ‰ý’ä¡ú‰36È>ìYTTäææÆ»…\RR²yófjYŠ‹‹KKKy™VIIéðáÃ7’'– ¬Y³¦¡¡ b€g¾NaaaìYPP ¡¡Á»…\YYéèèÈCÿþWLUQQ!ÇÇ,SSS1.L{ôè§kÇ™×|Âׯ_yº1›­†.„„„dddx£%êÚõçÏŸØ·3&&ã̸zõ*Î|8óýà ?æÚÚZž.dhšyËCG@@`òäɼb-Oäç‰ðC˜ ª‡36ƒ'ü÷êêêð'ÅIO/Y²g>6"++ ïã̇C½{\óá B}}=¯Œvò óñDÈEEE¼òãÌ÷Oƒ)â:¥„â9æã!kyB©ÔÔÔ`ßHiiiümř—‹à‰!eJüúõ /^ö‚'Æ-$%%ñ·g¾N‹Úºú7YŠŠ¾VVW•|/»q'j¼ŠÂ©˜²3·°øã§Ïeeå°}óÞƒîò²ýúJ÷ä1ñ!¿ðKQEE|Œˆ‰ƒBV•—ïöfm.(þžù!÷û·oWnÝí#Ý[Mi ×xAñ¦ç|„â­¬¬üVþ#â~|OIÉÁJÄ0櫜[ø57ÿsyyyuMÍÍ{»Kˆ+Èö냱:\Z^ùî}Þ·o¥…_Knܽ/öŸ˜¬L/ey9¼©Ä™ç‘÷¥4âÖͺÚyu¾Šƒù$eûHÊÂþ7î£|®CRS2sÓ_}ÉËebf¢7œóF665] ¿“õ:y šfå¡b=zv“ÔMI.¬F(Fœô¼üÌ7Þ¤¨j ›dƒ­¨’eUÕWoÜ*ýòYA}X?%5A)Iy)èE?Ém^÷¡)Éz—÷öÕ—ïG™šŽÁu›ïÄ?y_f€ò€Á=¤ûIôW‘è$d7 ‚¼úRó9;ý}ÚK!{ûžÜë}ùöãæÍðe¥ Cuú)©òÿ׫§b/àÓ·âÐr~ EoÞ䥿úVôÙÐtì(M®ÔáË7#sÞ¤ ¬%;hˆX÷žÄ"EG9Ír¿"HvzÞÇwið® Ö>Ñj,W 3:ñùóøû=ûÉ PÑ”ê''"#ÿv?~ ïÛ³¼ïŸÞ¥å¼Iê!Õ{Ê;1Q¼Å™—p6ôúÏêêaæãGÙ´°˜ZRZþ¡Û7ãž½yüÀÅÅ…35þIê»ØÈ°‘6S0†LŽìÖ£—šü3í¯?<޵µŸ8DE‰»…|52æÓ‡ì‘ÖS†™Ù2?²gßþðݾÿ"51féÒe‚À"GÕõÍú)iX+1‹À×µkuøGì?Õ6^>Ô]\Âq¢-'­½p#â[ÉWÝq“FXLd~doYyø‡n_yôîåã•+– qÀÈG¯ÒF†ëÛ9ªèšÀ?æuxÈHSøÛ±¯ßC¶Ÿ4y°’Œ,)¯XGàùË¢âÝ5 -Úp.H.ø÷º <öŠ¿›Ûj¡s}žžx?Êh’³¥s«xJHI[Î^QßôëÀ¡#––* 9_È¡÷ª++4,´Z4t!üË(®ˆ=rxÕÊ"‰§SVQ}ä°¿±Ã«Ù+Z{._¾‘–D*ºÕX]é4ebG[{.ôŸ€€šžiΨ¦ÿ^æ–Æ]òpwë8?£§¯3ŸÄÇNp²l}‘v—’±š½òç¯F_ÿCãíltÔcyuÍa??c‡ÙmxÑø ìgÇÕpQA¾I6V8óáÀ"^¿ÿô0úîh»ö¦n·ž³ò᫬©ÏÏ›Ãöq!//¯±ÓC+Üžë]à u?wíñZ¿v Ç ¹ ´<äÜé±Óµó:¢b@Bß}J»|ɵùPÀ)•aúÐÔ¶ó:ª:ð×ÿhÀ${»¾2☕_qóšñ¤Ùí¼Î=¬æ¬Œ~‘ñ93u¾³{lhlòÞëe>c©ÑÄv%$tá3›2ïÛÏڪÇO*kéA9´ó:êúDÍçëwp¶óLI l¹àÌ÷¯ãÄÙó*# ÚO{d #H»½¼=ÜV³ËÙáEÆûׯRÚßSvK?/\=BGY¡Ã'ç/ÝŠ’èÕ§ý´G†¤t?ƒ 3½|ö­\áÒ »ª¦îàA?‹Y.l¼¦á„™9¥EÑ.;OcsÀ—Ó¯PÓj?í‘!3P þ¯¬ñpg×*ˆgo²2Þ¼ac‚:|*$ÔxôH9YÌ>w³©ó“?|üö1qÊxkœùp`{¼}-f-íˆ+[:/?{éúd;«nb¢í¼TXt|¾µŒÙÀ^ÛĪè[I~üc3ƒ‘WÈ~‡NœZ“íWç´äêíhóÑ#¥z²3{TAIÙÝèûìmþPt“ì­abãí³ÏÓ}5»®éí»ß|ÆâŽxpľQÈ%ÇIö퟽¾~çÔ@% CöûX 3³Í/-Êô̸ÝPŸK¾ß‹yÐϽ§Lø·ßÿë ä_Î|XÄÖ­[Ç/òì¸ë3³¹}ßÒd´¤DÛ£f„FDÉ©j Št”O‡Xž5‚‚×nEN´±äÅBVÓ3‰K~1bˆ’,›F ¿ýHxžÜý 2Ì–@±lÞ¼ûÅ;Ü|ÂÕˆ({ ³öx¿¡¨©' ÜQÎ_’½«…ÂîÜoÑvÏÏ‚o?Ÿ§hYt\ašL™·mÛ¶M›6á̇ƒkðÚëÓ¡MŠ!#MoEÝ6ÁZ Mþw^ôSVï8ÚC!,*Ö{֣ɣt´Ø{eÿ#(d…¡:“Ÿˆÿ'Ö~y]VQó0u#ìP@±€VótsmŸÚ;ÀâÕ44¿~û¶Ó$»¶ù¼Ü~ðd úðŽ£="ÿ‰Kü,9u„V[2<|«¨ŽK@] ;v =<¼jù¿’Õg>l!ð•q39Tù4 Ç>àÚúº^PRŽ 
‹ˆq"Ê¢¨˜ø—ï%?**º‰‰±ëš7¢ãôí§s¦•µô‚C/.m·WÑÙsgØ7[Ö‚ò›±øâµ›ŽíÚhjè ó‹8cª–±•ÿ‘cn+[íåø±¨T@¬;t­8`äâ= J‹+ªª[;6ÛÔÔl8‘C‰àfß¼mgñO,xÀ™CÈüX$?d'Ñdò\Ÿý~î®­›Û¿z‰co#@º¿ÂésçV.cOcZò£ª«(_Î…ˆeí¸ÛË{Ýš¶k /Ÿ}ãœ8šr¡—‚ZQÉ÷Þ­Ÿ¤|_X,«¬ÆISͦÎ÷õów[ÙŠuÀ(7Ãn°Ñw¬Eô‘t*0°µŠÊÛÇ—cý`„ä\-ÐCš½½Lœùp´ŒÛ7¯¶sa@`0Á)/¿@N¶‹Ç¬f¥ð?ÐÎÕ m€ñä¹'N-˜Ãê»ó<=Û”ã% ½Ì£Ç¯ñpÇ™»ø® ?F¢{Ù‘øÝÎs'øêMÎÓ@PHäÚå‹,Žy~«¨VÕ5àJù(jª©­mgȸ'Î\ðaãëÚõÉ‹mc¾¸è»œo¦Q-u="r‚u+j.„Ýæ<íë°ÈWCO±è¢¨>wôú«iÕÕ׳8³þ$.ÖÀÞ‰óF‚ʼ7ÎÄg>Ĉ@ˆ£û…¦CSòåvKlb’Ï{%^cèÄÿ! ù€šŠÐ9 ¡ÉÎC¹ÿOú‚iÁMçgQ Þ7okjz‘'ñ‹{Y†L&Ï}ôNŒo‘ùªjêFZM梑F¬ÔἬ·ýÕt¸eäðqâ=3Ä@v.3”$·J›à—L܈/C ~‡¿¹tr_D\аP·›”¥(eZõ–U«Òòk-œ\æOqãÆmºáVöR¿¿£N|ջ׬Ë(¨3qÞ¬ ã뮺„¸ö€™k$ù4·ïÉšG ¯îÐÿ­ïP†ûC¹77’:ÿ9˜z …ynñ°úÏ¡#tf;¬>0ÖhhÀä ÛýÉ~@‰àÕÓT߸°Ù8b¡HϽպ,ø`LœU\Z&%É,ŽÑ™à Üm5@ö óÚ¹êñý»V³qÑþ–éY9¬G4Îþüeô„™Ü­™ï2ÒXd¾Ùoå5GrÑT£IÎ,Ôá`}®ö~¤û+œ»:g3Òs>?»ÏýÉ£œùèã@Rª¸B%úy©ÁXɲ”ãݵþÄk8á…Œ?ÒtƒäVû˜ Üøêµ ¿‰ûÞ$%X!Èí?Ã+ ÍG83ûýî†p`;_ħ™H*^2¡áÜ7sí$âRÐeãÃ_mmù¤¤÷yåUÕhøà‘‰h{oð Æ„QX|þ º…ŒHû8úáCÓø+¡9Û ™£7?¤éü”$ô»ñ§¬‰#ˆø÷gÛºø&GäO¤ðzò‘éI ýBd¾ù>¯n»kd7Ï{Ùü,JR ¤ãlØÈ…SžïEmîQ^gðù}–¦k‡ö0KþR||ãŠÕSÐa%­äÂÄc¯‘So½áƒÐ'Y˜SOü¢®n”+§žU3é0óæŠˆ‰UÖá¾»—€ ðݘ‡6­_l›ö, ̧¨£_]SÃJ£ú†U]#,ÔÏÌ7©HKÌ—Ÿ™Žæ4‚Y¼«ªgÊu#¡3õ ÑrM]ý`=L<÷Ô—Oq棧'ƒ¶Í#v¦ŠòÑ=©¨1÷‚T¢›»Ö6èI Ÿ‹ tùŒõ#›±YÙš‚DzLBܳÌ $î’¡Jª«D"H‚,õy¥|˜¸šØéôɺ/K±ó ä‡j·âè&ÑEÛNÁ?"ïɈ\Ϩ¡ú¾º¾fˆŠwgc);- Ìxý*© Ì׫ß@,Ï×…ïAâ«1-£Š{–,)? 
6+iY§T†SEÅ ¿¡Eß}7çÍ+,0±'¿dÄ|=“QÑÆ‚‘UÔ΋¶x¸Üqëk¹6ò~© ûùÑ™ Röf«éOËüdù[8rKôÍ­=í Jדæü_ì¦=ÌëÙ¶€›"=vF’>œ­øn†ž¬â`´“É·ôNü³n©—”ôÊöxpa³1B º¶*Š ¯(»õÐaurìMúFÌ'&ѳÅÓk?=}’ö•òñé[8°=‹¨˜x[²ôUla-]ý·äÄÄcÛ&\O›‹D™F_X;ªÍög½Í@X`¾¬woé3so;MB'‰“¿kIK¡£­-:„˜›W…òãÛ„›QŸ ˆqZ¦ ÓKq+R¶ZÞ£´ùÍ›tFÌ×£· £ê:pðH¹þôk8ô/O½­~5™ºÑb°ˆ¢2€2ý9™oQæ£4Ÿ_TÛÌ”ùŽ)ëR_U¤ó¢-𝅝<²ï=‚Äï]°£©‚4¯/^°ÿ¸u=oW\ï¾N¸´ úÙUFõ¡ùH¿¨uzĨìyH ¦H6¸¬]…nL¥73"1|#I;¢K‡a*caÉ÷ÿ$$Y<øæ§­~šZ2¿ã`™¸ë÷lÒ/J×ê=Xëq‡Í¦hx=꫟u¤Þº,â2Txú¥|C–~¢è3Ãu<=û´œf,;l§ëŽ÷37-µºÀ|R}úµöè±õìÛBª?75} €}I“Mdèvê´dL sŠ(qâoWS^Qö•ÃJ¿Ðyt{ùE‘ ‚æ›®0B(#Ò^auE2t ’ ïÒéjHJ39Yü­¼[^õÏùý!˲£´{Úx½}ªCÚ !ªµIE ë°äßu˜T]Kg,‰Ô–¹Î™Š ó“ ýiÏzô¶PXI§)d´ØÅ4×Çålhõã·é{Q!µyò1í¶‹Sî¦û¥T;–þ(ë’„´lCC»yvæ#vLš2b /A§©é…ˆ†‚›cCQâG¤á3ŸØ< \vçý¡ß£ÿœ’ÏÐê^-ªË(€áï2¿8ùÍ©&5‘j&“|dD¬bí•àåGü9G†Ëê'!Ê·ýOaê|*üÊ/ÕŸåÁ2e:ÍǯT;çD5\ñ¬ºUqºê•Šugž•çlðv£Ü:Sä‹ý‘Ó.Kï)6•–²A[=ÿ—üz=¥?ª‰‚0´Œ;þg÷¿7[l»u—lm!–– 2 þBø Æ\Ø?ušörró·BO$>¸ñ¬°z„ŒÊnzH¬QÁËP†Ô ÃÎ2"i+£/¬“Ó’FáÈ'¦1Ó¬,iÖ.ô"‹2rîmHÓ’!ŽÛìŒÞ>wØy¿°º;Qí1\Õ¸B`'j|\aumr ¹iÌ@mFrô -é>èW¿ r]òû\øUEq#S?ñIþiÊó‰%–Fìñ˜šèkÎï& J‚l@̦ñ[O}°mÖÞØ¾Ò¼6åÀÈUÈ ¢…ýŒ7„_Ø]øâ†4!3ú#.Z5ÊŸh!qËÞbüü+~Ÿ”ñn¨­°õ|´$R$RÔð88à¡@·!Lò©j…˜£[âtvÒsî%–ð’yýZð ¬xv xBÔ´¾ºÆdÒEd]ósm|͸–þjdV‡ÿªÃ' «s.ïöp·Hc›Âó'è4ãu/í5ѤsDEî=ÈL„FMX1¹w;]ƒx;Äcøûއ¿i5åÔeK Ö =ø{*rÓHËØ¿‹À¸zÓ”|áI¿=ù¨NE®‡"G[(À§Ä‚¤Ùˆêÿïo=ƇÁ,aÿûoOá‰H5ÇiV EX~›þú*ÉóÏÔ<ëÍÆ¤Ai$ã¢ÿ+A^¹u1bñL©O>sWˆ¾q[íž\¸¤âù°°t=›$oS•gÅSyF7Õâ‹ÓÉ™Ï~Ùjºû×¹þ¾’¿û–A[è÷ñ0‰<”Íl ù¯#i~‚–­[Gß¼u”çòS^jÀhg×јx""‚íæ«ª‚‚†>2lÈ–×PÇÐ¥¨¾®Ž¥K Û~“n`Ô•ÅÁÆúV‡üODˆI« ²2Múw_Ûk ²ªeé jk[pÁcÛôSÍñ€VB*¡””…0[Ó]_ÿ“µÛd0jG×¹·¶ŽåÂhîtò+Þ7VÏaªSE„©ë°ÂèÉ' áíÅ9Óle'Ðk‚Aö¤}(C(¦_Ao}+¦uõÌËVˆfľŽq¦©9ÊÛýƒÝ~ôŠ<-'5ÿ]×D~T᱊kð‹Ò>ÜŸŒŸ;…ýÚÞÉ.¨m“ŽýÓ|©)ÃQw]³à—Q0pK¶û||³‡½¶ X\äV,ü«<ùS"""ŒtRàYЏ ™^=5ÊIû†ÏÛû†üq_l¾‰ éŠäƒÖ–±t§g"n·^¾±Ì×ÐÀp ºœ%¿ ¤‰%"§õGeßËZ[ÈýzK}ûZ˨£½ØnÛ˜½I{TÈ-`ÔÇ¢…Q6›G-6wNHÎ=B^uƒÂóqÀˆ‘DQ’l%ƒ:ÏÖÉš½c*âw}¯ÏØyžE—Új:Mt{?Ð;=µ„ ß³éʶm ³É;ŴͯÑíÓ ·!ÇX,À^RÌFäûôîUJþð=Rkð$tb‰Ø§)…oþ,Š0œjë¿ÊYEÔ©¾Ou»ÔŸ ïÐ _âžÕÛ† O‘$¯#ȪQ¨:Qí&NS¶Ä9×· ˆJW$Às=ÕhgÝÏZfu˜Ê³„<œØ%—ØÐó#õä^DÒ\ã²D?Ú+ˆ)è“ ¬ÕÖeX&bŠ:Lެ(cØç¨úAQ½+› 2ùE€–ŽºÛøj±¡¶pÙ£‰ëñÓNÌÍºåø û!›ÂyŸ_äM«ºü»P“½­¦® 
MÔ幄áMIKIuÖ†—›ÌW‡à@äúHä|¥›Z@PÑvF;}9±ÇLdW‡™IŠCöýKÞ·²*šå5Å9yÅ}T“ºjuÿÛµÛtÅZá&~f§P6p2 g³K ó5[ˆãW÷³ðïñdú¥ünUÈןÔÚ›ÆayÍýׂ­-d>>¾Ò‚|úîoÒäŒÓ§üC ûºèÎN¦°ðPÒD¨ÚhJm¾~Ò8-‚4“PìÍ]&ÚÍäå×™E…íÒ…¥—®»”4íNºÎ½i4%¼Ù–èÓ ²$7 AÜÐviñÈ?þ,ÉÏf!Èïv¶¡ùFh«¾ò›”"³yY©Oä:ÜÝr…mSŠ• W~»‡  «Dš è3cŠªpEJóWè13‡v &þ­yϰíÂôÊÖkái²äŸø«‹ÐSša†:@™ù™ªºŽ^8TQòòñ7´"=÷?T fV¡…,f<˜ÂÚjØåö·ÍGþ7†|ä°Ô늊 òÊënÌûö[öur@Ž’ÑO.L\7E–l@Ô—@¤q¤ùÐæÂé7-áS£xÄúât*uyŠÉPßÚ÷ú^$&=g>6¢;¦&·¸ŽÂœwòê â²ÿîô)˜,¾à;ÍÖrfa"©ŽÊϘ¥r.êÊÇj²;yèL‘ÝÑÈ —¥!‡ŽØø%é¥gYÖ8Y{EúûÃ9ŠÊ*ŒŒ,þ”Ûâh¯~žL;.UÝœLrâÆ•u—÷¢OÛPÈ2é3ŸÕOëoÍJÞ þmRS_òÇEôl–¾*¹p+– PTdå0y%zÁµé9÷.¢)aû€jªœŒ¿VýWnûß§ˆélI~Mߒϲ†+¶0#^ðþÂÐæ:<'à-mžI²ô4õj í•ѧ(VÎÑ–­ùÖgæ[é[¢¤¬ÌÈÈ/sZ¬®BJK“ —ÒZ®û»C¨í7N.ÜÌö§„›¬¿~]iÄe…ŒÈ`£¿–f–|a˜µ£¿‚]ó6V£‹'¦ø½›B–£ÐŽê¯I&¹ý©?bÚÊ@[ž!ôÞ>xîˆ*Î|8: Þ¾bÈ|P™±Y3©Œ&½DÏCq3ûCq;›Ú#¹P"î+-´”´·VÿPí)¨Ž¤ë¬­Ë0G?ÅA).ùAmY`ûñÝkVó@u0t‡³îÄdÔðŒï|]¸28+5kÌü˜Üô2óµõ„vœÜР?‚¡}ú+p°ØvïÚ ÿÐþ#ÿrT¬Îè4£‘#>ac4,ëu2b…G/ÃÑaamš«”8BR[G9:j°yX†4)2Š^¼ z§ÐAvÊcM†¡ǘ•b ¬ U43iKTÃcáYç¤iê³>TDX8+枊÷±ºK¶¼€RXT´mÓ\›\øWÄÁ9תç´ÕÔ÷©O47Ö&&†?8Xn ‡1š~™›2¬Ã={H<¼Ï®nD{лŸ,ÒyÁfæ«.Is]½% ˆ´j\^{¥Ó¼[—"8˜ÂÖÚª¤éW‹ËækLC†mç#Šûk­tE )[“£—öºÈ„.cæ“•î•x‹û qNR‚F›ú¡­-Ò¾Ö¶°ž½ãñ<.f¬>«³³_=çz~5ÚÚØ´\‡­,Y©ÃŒ”çLdмlßÐÈXe-=î™ù⡦-³Ü,Ï0À|¿ÆÛXãÌÇÖh¼“);`I~Ûàß²¤ª&-’Þ(Kü_÷Ñ;=ï•xi¹‰®Ëùfee¤c.]Ó¤›Ï:3*ôÌu•ù¾;‰Ëð/4^¸|ûÅM³ÕÍGèŒÞñdWóà™À0¢°sŠJóöŸ®x+ubr!ÑW âé3k]'ªS(ÃIae7ž¹¹é)\oˆ³Ò_·m$Ôó{õm¹k¿¶n+¢k޵²åzå|wGñå䑲}dbB¯s=Ð8k»zN©Ï¹Î|y™™ÂŒùF›qý¹'ÅÞÒœ1 é¼`ó5¾n¦=¹ÉUY¡"üHLð¦13‰ÕÚ¢6MM·`c)ž+#ÿ)ÙWK/FF_ãùnüyuuuˆ@ïSQÚCåÈ£(>+¦ìÞúÒÚ@µþ[ÍŠ]Äùó“…Õ‡Üü½6õ”|—ÛéÔ¡µ×¯ž3ÕAu ío¥'Ü™îÐBâÖ™³œ ëë»òós«”ºüªŸå<«Í§wo}Ø3ö¢0㥥i+ò]èj =w5\] mf²Ä“ 5åÜ-Þ·c'´ O§Mw*ýÅÍÙSB}íÜy³™cª¯{1"Ze˜> “¯ —å;Ï0_YZóÚÿw¹¡è„’™Ó¶}7n~Q›jn=ÁïBsÒÈõ£„×7‹¹jª`.UMÄ1»-[óþ(¿n±ï}Œ¤^ @‘LÖeSÒÛùÀÿW.Lj÷ÕVyi æâ¹³n%¼ì÷wÒ†îjvN ò„¸ûŸ'oó÷Ðó{bÄÚÅ—r„B{Ò&íItN¡SÑùZ®è}¥{yûZÌâÚðõí ck×x¶ùôé“ì.ÝŽ¤ÍµåCö;Ĥ•¢¹›ÞÅÙ©sgÍ`ñà%ófG$$±’Ø¡ƒð«¾å² Û'Ôg¿¹Ób®Õá€uk×´xXEñnvÑÞ&9O›Š3KÔsðäígÝAÄU5®WR~Ogõ.™ö¬¬¬nß&&K%€»#HÅqM¤ƒ›cwzz®ôööv#š¥$™ö ­ ãnÃ8YË‹|njâ­H«ï’õSäŽÆû—§NÏÊ‘V¶¶õ\ê2wij´›8©)/*àÖóýž—±hîìÖž5kê¤Kу¸ÔýÏx•4v´n+T×ËDn1_^Ê£éS&²räsó¦_îÔáºI“Yo_0ÛézÌ£jZ\)Ìœ·éˆ17'/1"d¤ 
hØ1=•æÕ?Ú–³î„•"×ð¡Sw»”¯3ê†Ô6g®¸Wâ7F$v3d]r“és½uÄ€îè<_fpóˆó÷&b/¯Å¤LÐŒõùÍ©èW‚¢þ›Œ5}xJö¹­Xr=Bm$§3gÂ#a5†ŠºªòÎÝ{¬ç¬ä|ùÜ:shýºµí¼ÈÂ9³Nœ;?Ü|çíÏÿðžO·-G…»¸R!ß>Ž^±lIëêðÊåçÃ"ës¾ó±\HÚêjÜ«ÃGY¯Ã?pÇ™úM—ŠÎvz¸¼ljJŽ Ð¶ú³Œ2)ò\/sZ.7“þ™«¯%‚uV++x÷ !âäɺŸ•ÍþÆt'‘®\D7v¯Yõó'"ø;äÐñ¸Ò­†ÍôÖD«›|Ͻ¬Ë–àçɇ!).ÖÈqEyîÐZOV´nn«£Ÿ¦öSâh¾æÒ÷i«ÝV³åRêC8>[wõLkY„ »qf^{}ÆÍ\Æa…Ý»M‘«$D…8¯¨î]ãÞŠê±ÚuUlRFyNY”ùÊÓÃõã'Øúú4›:Ÿ£}ˆ_ ²ýú!ÿؼªAËraS1yRÿýäÎå ¶SË&²+;ØD›–ITu*äþöÿ sþAa>É´P·!S|Ñ=ÛÜfÁ?„7B-L ÷pv"-79Á£•Œ"$ BhhlhàãTÊJBC=?Ò¿Ë–«ÐTßïÈdÊ<ŽòüÌ9³Û5àºêÚ½xN:%Þ 9áéîÚ†­Æ˜ìñÞk1Ë…c¦~J{궪Ui(a!!¾ŸUœìe~ýüOX°µY^—.Zõ䕜Š:Ç 3êÂ)·UÈ?ö5^õÕOŸÄ^‹ªöÚN ¦Îß}þ–àé4D5‰. eÐüûð§{šs >ùÞ¤ ß}9Oa8—>àwÖeº|¦6Ù§©ÉäãQ?ßõÞ'šÇ4þ—˜´ƒ÷†ª×zºyûî7ŸÁ‰øúïEÊŠò|­Oúc:ZïÀÁÃÆ“çr¢É@7‚Wº°³7àºÂe·—·¥órNØÿ³ºñg “„æ,½¥]»’“)/-ê&Ù›6ßm¥„¢©Ã{÷û¶¦6T”ÊÊônCÞðq&£9¦¨ Ç…]Z¾dQkO†–¨ª(çÄs:¼¦5ªg>"¬Do“6lVÔH5_öôöCè¥gáç*j¯?4¯¹Ö%Qâ¢ß´G7l±Åıëã‰Ó~™Hs>ëY‹ÆèªÛ/êWõèp`ðV¯²¦¦Ðu^ðψ@ˆ¡y2ÙÁ““´žn®1v˜Ó¡¿ÒX]VYúeøè6ʈUË—íñò¶èxò¸qÜ{óæÍl¿ìº5žèa€ZM?m’}û/¥1Xånl|WAa‘ÿºu¨Íaì(pוþGŽNœÕ±•ãgEIþ{‹Ö,ùKI¯\¾{—åìX®Ã£†k]»Ùo¨^Ga똭ó3_DyAÜ6 {ÑΠXSΜ˜.÷ÒÙïÔ¼™jÌSÏÀùÌÅSHmåïou±3›¢¢Wâaƒ„™ Ó~ÞBš–-CJË·z@\¸pا—h ÑGgóîSYå²´C;¤uß k¿—˜Œn×èÙÚ5ž^>ûÆ9-é # $ñÑqo#ô0:tþŒPWù.é)[hï·L1ˆ‰K¬ëÞG‚^¶ úb» |ÅÒEû6é°Æò’o¹m¦½æÐÚ5:¿Ð…uøx;‹t¢åÛQR CDÅ»cÿ¹ÿṡtÛT–J ’6˜±'.¸yýÊ!Äå q#é%¢ú¢™ÌPœ}]=T$AÞ'œ»•±ÏkK²]ÿ÷I%ÆehjúD §^QÚ|EG>%m“ιkÏò#^¢_9íó›ÐŸ§ tHwíÞcÕhÙÏb‡ >¾M¶6QÑ?þW-7ºE~—RU~æðF;;-å6íž„”ÑT:óñA¥,¿ðËQ¯1†óz‰ömæ3—ÿ’¢®ˆ‰‰³ð…S½¬_g^SSsààaEiùÍýzþëgÏ’-ךQj‡‰Á@@")*>|ÈKsšIïæ—} ·ÿ«©þãØ§,düXúkÚ«×£ ¿ªLlvæþïëÇÛW.­6_»ÅÁ¡µƒÚ­kWH^Ðé“'ütŒ÷è-Ò¼bÃRîFôèÎo2C¿UÃim¹ê@p V-Ùük8/#%óyŠÅšÕZ#[±§ Á$ú`A¡×þWU¥¤3­ÙUoùQWΛ­ZÝùŽ”ÑRÄD„·8:€´\ºríÃûuí¾2=«ü^øâq—Ïy&³MV6yèÅ–{‘ÖÓŸgß¾÷ðAìpuméaʽB£ý÷ñerJb¬ú)ã´¦h·ó úÂB‚Dqpár؇œ·Š:b+Âh•e¯’²_¥ëO7šoÜ>ó*Ï„ßää†^¾,,.1XY³»Pcsy›‘üà®´ŒÌ$¯ˆTcÓÓ×”}ÏHŽÿðöñ,“¥M˜/‚Uu +zgÄ›1±OãÕ´¥½†)ÿÕ|xùôib¬¦ŽîÔqc§ŽÓl›pÎï·ç¯Ã®†J’—QÖìÆß½‘ã¿åf%?ˆéÛOjžñ µŽ­yHù8–ù?­[^Aaò³4PÁo_ ÿWQNûï?^¾ÎÝ{ Š÷ë7T~¨ÚÐ!jƒÛmbɉã´à¾—–=JN~—ý¶ðs^EY)/_MuUW~Qñ2²£GŽœ m0A›ÝÒyþ¬ÚÉn¨Ÿ?'?MË}ŸSô¥ òþþþæææü=ûˆ‰ËËÉ¡¨,mˆa†í`ÙþRv6µ Iž½Èx™‘™Oýð£¤èh@Àò+ºtíÖKXDªÿÀQ#†ÔTÖÓTnχ—×ô§Œå|¤>}–þñ}Nñ·/UU•(hM®añ~CäåTG W‘1n¯pN¯=oA×ðC¸†ß¼ýRÀp 
ôé#6PVFCeä=m½ö¹†ä+ÈÓß_@8)5=ãÕëÏyÔÒ’¢Ä„ MM¾Î…zõ‘ìß_ò]Uq¼ª"*B‘òq<â}Dô'Žgó@öà×Ó‹isêè·¢¢S~M–æââ²mÛ66óˆaòðG,ïqsÛ¶u+Ûµ? øcÿkx"Û_ÃP'VSÄê°aî_¿Ž I¤| ¨®®æ¬wâöYÖõRTT„)ÑQoo^tƒwDŠ‹‹Q" åC :( %''§ÿþ():eee(ò!”ÊÊÊýû÷{yy¡¤èPôù«¦ò!ˆzèҥ˛7odee9%ÀåååÁÁÁHù:ZugÁ‚(ò!¬‡‡gÞ¼yOž<á”óóóÿþe\‡bÕªU`ôQ: åC XCUUÕ§OŸ8(À=zôÈÏÏGסˆˆˆ D途`™…‚ÏÐÐPcccN °¢¢â–-[\]]Qöu ¢S^^ŽÒ)Ájjjúõë'))innÎÊWYYÙ¹sgp@@R¾QÑA途` oß¾8pàÝ»wutt""" Ø<À§OŸ.--…`¯X±âû÷ï=zô@™ÈÝ”••-Z´ÈÑÑ%R>‚5<{ö *Ô222ùùù`¡Ø_ù|||¾}û–——çççwôèÑÂÂB”‰Ü„„D—.]Pó¤|Ëxþü¹ªªª‘‘Qnn.h‰œœÜ«W¯Ø9ÀàóŠŠŠtuugΜ9vìXXBùÈ­¸»»ÇÇÇ÷ë×%R>‚e¤¦¦ZYYñððÌž={À€÷îÝ;yòä²eËØ6À›6mÚ¶mÈÞ?ÿüãääDX@”\IŽ––Êb¤|+IOOÏôàÁƒþýû‡„„¨©©™šš‚ÿ“’’bÃІ‡‡¯Y³æÀ«W¯®ªª ,((صkz Ä• <Ø××÷ãÇ()ò!¬$''>oܸ²w÷î]ww÷‚ ç'((Èn¡µ´´444”””äçç·—’’òâÅ ÂtíÚå&7Ñ»wï¯_¿‚á322B©”`%C† !”oÇŽPÄ+((<þœŸŸŸ»µœœ0a|ÊÈÈÀ'túÛ·oýúõC΀›€Z\Šƒ ÊÊÊB©”`1cÆŒÁðv.ðÅ ,‹ŠŠ~þüyùòåöööòòòlÔû÷ï?~ˆ ñððÀç›7oAöúôéSPP€2” ëðĉ³(5ò!¬‡x”4~üxø>|8Q×Ù;EÏùóçÃÃÃíììØ!¨†††%%%°0b}’n &¶jÕª´´4½Áƒ¿~ýå)§Ë^aaa^^^ii),£Aʇ@°(b¦N ¦¦¦Ä–ôôô/^ 6l÷îÝk×®=tè!;( F "–ÕÕÕásÖ¬Y Ì ,Ù300ˆˆˆ€@Љ‰¡!=9È>'''¸&«ªª Zãëë‹Ò)ÁzŽ9B 5wîÜ”””‘#GJII 2$33S@@àÀ'N¼}û6I ~°ÐŽ“ÁÊÈȼ}û=z¤¦¦Fl´°° f®Ù¾};h6xV=âQ-Ê\ÎòqåÊ•çΫ¬¬Ô×ׇ‹ ¥ R>¢U¸té9âÞ½{Ïž= {Ó¦M»~ý://oddäСC_¾| â÷àÁ°YIIIíbøœ‰e©|ä„mªªª:::÷ï߇e=qqñ¼¼<”¿Aii©´´ô—/_`>çÌ™’)ÑZ06 ¹zõ*¹ìêêJXÀÎ;ƒì 3dDªäëׯoËpŠˆˆ%#pïÞ½þù‡Xöóó ™={6†·!^Á2ÈÞ²eË/^ žå2;£§§·zõj"soÞ¼‰d)Ѻ,_¾œ\¶´´$—Ge``N¡P`dO[[ûСCÇ/LBBâòåËmH(Ÿ>}J®>~ü˜) „òaøkK¢,Ÿ¢uÙ°a#Æ- .|öì¹ ²×«W/rÈDUUU=99¹K—.)))ñóóÍðhmmM–b¬¥nu]]]ÆU==½aƽxñ‚ܲÇX’>zôˆÕ¢ƒÆyi_üüüöïߟ••5kÖ,b‹¿¿BB’=¤|Dn‰Iù$$$˜ŽÙ#-Á«W¯ ¶vðÓ§O°‹hr9þü‚‚òáKعsgff&ÓÆyóæ1mY½zu];+&&RGxAâ=epp0x\¢Ï>¢JÕ—/_nÞ¼iaaAl©¬¬„«ˆJ¥ÖÍ>R>¢µØµkÓ–k×®}7‚ì “ LYYY¨­ÆÇÇ/\¸Ÿ |P÷òòb|ƒøwŒN ضmÛíÛ·A¥ˆzzzÅÅŰ`ooºxþüybt˜æ’’’…#ÓvoooºÇƒŸûøñ#Ó,ngÏž…óhhh0‚µÂ!ÿʆ£rsUUUÆÆÆß¿‡kƒ±Î„ác 8ðÑ£G(•ò!m ˆÊš5kênohæëÇûúú~ûömëÖ­ŒÛwà€ yzz‚_ìÙ³'±},¬]»6$$䨱cÓ§OobØà< ¥ue8sæL½Ê7zôèz­9r$È8¨¨¨Áƒ“Û—âdffÂ.ww÷U«V¡K¢åÔÔÔ,Y²äáÇáááL{¡VYÀØy”hSN:UïvÐMMÍ„„„º»,--+++ë!Å'++ „dïÞ½+W®$wÂÁðɵwîÜ I6Ŭ ”Œ………ëÖ­«w¯œœ\C_Ù›0aB½½ÁÞ½{÷âÅ‹ºc° 2„h¿NJJud4µˆ&ÉkffVPPpᦧšÞNJUUÕÉɉµï€HùˆæµïFŠ¡… 6´«sçÎ { K²²²¶¶¶L{ 
D<)…¢q›ÌlÂÁð1bÀ¾yófûöíL“¿/X°`Û¶mÌÇÚÙH¼ÀjÖÝ5lØ0=°§©©©ÁÁÁL{Á’bx·E555ðŽçÏŸG­@ÿˆ——dÖâÅ‹}||êlÌÙÙùĉOž<ÉÍÍEÉ…”hgŸxhýúõþuðàAø‹µkè€ÒÒR0a`A„„„©»111[·nýðუ££½½=Ùƒ¢^À,Ö•[F@/_¾`ãÆsçÎõ÷÷gÔZ&Ž9B¨`hh(XÀº^ÁömvªªªV­Zú޵µu¼Z  j)¦®® +pê=2×ÊÊ ìþîÝ»¿ÿŽn4¤|»@£Ñš2š‰´´´©©éÝ»w? ¢¸¸øÝ»wÄL`¶ê=¬gÏž¤Öîܹìxooo>>>âÅ!•J%{2¾%:þüáÇŸ>} Jfcc£  ¦°)q¡š?¾¯¯oã³»ms@à§L™rèСzŸñBp`RÃÁÁáëׯð­º} ¹ ¨7@‚@¦@š»áÔ{dMMÍš5k.]º4oéÒ¥èCʇ@°½{÷nü‰" ôŒ¸4€ŠŠŠ@Ô””ôôôjJ°ÃËWß‚³S§Nu\€C®‚÷‚_éÞ½;üŠ)??C¿ráÂpÊÊÊÄ샔è†%ø­[·@¿eddê=rܸqdÃp¨...éééàr6mÚÔ¹sgν$ 8õ È PtˆÑ_¥‘¯xxx@òB‚ÀÉš)ÁŽ8::6kâ:=pZMôJÿÔÔT oøàìì %cCFaذa`ãHYýþýû–-[Ž=ª­­mmm=iÒ¤ºßŠˆˆkH®VWWŸ8qÜÆ½{÷  Ôf|@ þìåË—·åaü%†?Ü311IKK;uê1ÿm½LÀ!¿}ûö“'OJHH˜™™™››7ÒrµÝÉÏÏ?sæLHHȳgÏ qÀé‚?vÆiä[Gpu„‚ÝyóæŠŠ _³¾¶¦Þ~r@6|otìØ1²3{rr2Fppð«W¯¿Ò£GOb5&&äìH ˜}}}FFFÆow8/ïrƉ‰‰ …`ݲ³³UUU'NœTTT¾ f·)‡ô![~ÚÛÛCYJ ö®‘¯@àwá«_¾|ÀŸ;wÞØØ¼¿|ùrxxxll,,CjÌ™3ÇÐÐЧñïÒh4ˆûñãÇA)! À×îÆA÷R>‚c(..]!š\6 °/>|èß¿NNNs¿ ŽX€rÄ,Ñëׯ‹ŠŠþøÅñ8äjJJÊØ±cAc-©·A)†?º¬Û*çîÝ»ЀN:uéÒÔwòäÉ7pÅ:ã¿xñÂÊÊ >á<˜[XXx'¹%** $è1„¼é¢E‹È™uY8ðÄPÏHJJ‚„ÒÒÒ‚š={ö<œ¦œê àzAõÿûï¿ €¥³ÀA÷R>‚#ÑrrrbœºYâgê?OÓ¿X]]MŒèqþüywww Œ蟣£ãÚµk›xp-?~ü`|„xçÎ .ܼy“I###PĆž16ì(ÎúõëÁ¼‚’e=œüГ'O`yðàÁÊÊÊêê꺺ºL#—ÂÈþjÏŸ?ß¾}{dd$ÈÄ…q\˜†˜„øååË—àM¯_¿þôéÓ‘#G‚Ϙ1ã“~ýú´˜. ¤N\\TB *>Rµ‰I ¿ €¯ƒƒtÇA÷ R>‚ã9wî(Ö_ËIaa!Øðmõ*Æ x…ÿýï/^ÍcÜ%59ÆJ\\è1ÁË—/ß¶m[CãgÖÔÔ€MdR5=Æ-ß¿ AzðàÈ3xÄ)S¦@i.&&FsàÀ ïÌ—™™#ƒSï¸eyyy÷îÝKLL» *U^^ 7ìÄ‹ ϱcÇ222fÍšµnݺ†Z·21tèÐm8Œß½{ œ––•^^ÞÒÒÒ~ýú©¨¨€(Ž7Rt»Ya»råÊ7=z4dÈйùóç5 džp´¤|WñÕ«W™Æqþk¼¼¼@‡zõꟓÔE[[[(µ7lØàìì >ïgÓÒÒ"ÛžTUUíܹl™€€€©©©µµu·n݈] µ Ü<[=Ìp˜¶ƒ¾‚³O0IfPfp¼àft; åC ¸'(‰™óXØ©oß¾bs ‘ƒeWW×@œ¿;-ÙÛ¨¨¨pww?}úô«W¯V¬X&²‰^ª^}ênAòðð9¹víȼ¼<ˆ¢¦¦æøñã夡Ðêà0m1Ø»wo89è8ZsH±ÊÊʲ²2øìRPP€¸€ÈA*))5¥¨´´4)êPómÃB Õ[ˆÂ‡øùù!ð ¸mmm4)R>¢†ãäÉ“,—=ÀJsð÷ïßÿøñ#”¹,ÿ•®]»nÚ´ , h±ÊúãÇGDDHII^ª¡ñÉš£œ`¿¦M›ÆËË v"?  ܤœœXOð£žl°¡°EFF$R{äȑెãüñ§Á€A]¡   ™¬¬,(qZP/Ä‘xö[\\üéÓ'øQ8Œx9iÒ¤ºOzü(ñôÌb—.] 
´p*°¹`궈@ʇ@pÄ첬:arrò®]»®_¿>wî\0–8ä^+++77·°°0r~>V4ãlìL­=1| –sçÎEEEêÌŸ?4Œ|@Ú,RSSA ÅÀ-=~üä­ªªJXX˜huâè訬¬Ü¿òxˆ)H×ÇŸ={Ë ^?~ü€í UMì½@ØPðµ`—¡ö@<5ýï¿ÿ@ÿÀB,@™ µÉF1àóÒÒÒà8ò÷Í›7ÄÐ  š Êð»`™Ú)à4 8 ¨;ü:ñ.³´´tÈ!p°¤ ‹ªènBʇ@°;ÕÕÕ"""P·|ª¹Ã‡ïß¿Œ—­­­©©iÝ™ !Þ!:B¹|ãÆ –Ä }PÜÆ™‹C®‚1U§ ¡¯¯B'2|H8**• FmРA„$€¸‚QS©{~¢õæ¿ÿþ;sæLÆícpê ø³û8 '/^¼Õ”””é‚_úô)Èü¨ŽŽŽ¶¶68œ†"¡=þüåË—!)455A†™G~~¢2 VLh9x;40£ˆ/ˆ:q<ç|œzÏvt4,#xGPwð‹ðu ù€Ð‡”hgÀõêÕ«)]åÂËËk÷îÝ***ð¹§Y_wÄYºt)”³-žÔ”î àº@Û@ØÀ aø\ &&& . Û`ËŽ9réÒ%UUUˆ£L6ObT3œMZZº‘ãcccÃÃÃïÞ½ ÚŠ2qâÄ)S¦=è¡FrûöíèèèÄÄDWWWP‘±cÇNž<”—·þ Dš©Ë9F§{êÔ©˜˜ˆÔÂ… —/_NM)¦ŒÓx¼ ¡@ AÏ Ñ^½z•““#..ÆäŸhDJ<‰ÕÇiè$SB_¿~ ÉNâ˪æT¤|Dc@ÉE¼jîA9V¯^ _„’½¡gšÑ¬'”§à6þâ yyyƒ&Ç[Ò™h¬¾-33ÄT w555pN£p9›ººúñãÇÉSíÙ³,Ôœ9svìØÑ¸Œ1â€SRR_w»nݺ+8°ÒÒR°©S§‚!ÓÆiè$}úôa†” ªªŠèrpïÞ½÷ïßC¼@!„õvÕ§P(LN·¬¬ÌÏÏïèÑ£ Ò‹/¶³³#-]CÈâ4Ò5âãÇ iP¥ç ‡!ÞŽ7nèСäIç&YŠŠ‡3@ý²Ì7¤c÷R>¢EHJJ62‘^½€£Á[»v­››O,RaaáÙ³gÓÒÒàŸ ðCׯ_~(## Ù›7ojiig"ÚŒ0¾Yükà<§qˆUýû÷C9 ¼ÓE9qâDpp0Ô@äjjjôôôÆßòþ‘àØfâ0m/r?B¯nCP~~~Æ* Bqýúõ7­˜¶Fz æææBNAðàÊyùò%ÔEÀ}‚äƒ`ƒÂ„ï¬Ûé´<1|j0ššš†††`‹[ØL ”Ñ¢F£5ñ`(­ ¸177ß½{wkO²³hÑ¢ÊÊJPe0£äFjXXXdddll,˜B(¡L700ÐÐÐ æ^˜={6¦m“tŒc.;;;ïÝ»jûö틈ˆ8räH”Î࢖/_Îä†AQàSIIÉÕÕuúôé­¼ºoÁ_úûû_¼xª 3fffuEòðð¬Ç!V³²²ìíí!. Ïeßt¤¤¤ˆÌª» $ ò¬ð£GxyyÁ#‚Ûƒ<%äqŒoÐB¨‡Ýºu«gÏž3fÌ€Hž”¨Ÿ4QöæÎûýûwœ¯_¿¶Yð ø322êܹ3j`à@$ F8 }åi¶àE>|šñåË—S§NïÉÕøÓãÔÔT¢&ÎâÄlpÈ-)))¼|ù²œœœ……EÝù DÎ;²ú Ÿ` §M›Ö!2dH½s8@ ƒ"Âç›7o@Îá€*ÑÁ߇ñ`pÿPÿ€ŠÑÊ•+Ñ„Hùˆ_ôîÝû2V]] e"”D`Z;<111'Ož„«_¿~àE h#Úîûùù<\¹råg€*?”m“zîîîÞÞÞ p›7o†Ð2zÂÍdggëééõõõmDŠ@öÖ®] G6wD1–0räHbõùóçnnn „p±LÍ/A– êCF,àŽ;êŽÓÔu{Pcƒp‚*ß¼ySLLÌØØxÅŠDËši8䑹¹¹^^^pi)((€s‰î}¤|ˆ `Pþè¡È Z<²ÐÔC‡8qâÇóçχb«n;Ì»wï?g³ÆÂþ À븸¸¨ªª‚˜mÂiä`(…‰q>ìè覰¡‘©! 
t7nÜç¯om@_dîÞ½ÛÓÓSSS>§-Äð©3ˆèCš@ìêÛÐJP(”Y8Œ¡ÚtìØ1ÐB p{„…•’’bœ¾ 2ÂÉÉéñãÇPáØ¾}û_´êBʇ@p$P_–””ldÔ«™3gBy÷G¥i.§N‚‚’J¥®^½úŸþa|ŸôGˆÉÈìêrçÎÐÐP–§Õ§OŸ–-[Ô®Y&!:x‰‹‹Cù»hÑ¢ºÇ@é ²×ÜY [¢I*±loo)`aaÁ4™&K–,yõêU|||s§od!L½¡>áááqæÌyyyKKK¢Y¬ºº:Ùg´  ¶ƒR‚Š7}’ ¤|G"&&ÖмêFFFL“¾þ5ÿþûï‘#G&Ož í†Ú54(Oûöí[UUÕPÁ åZ³&‹oœ/^Ì™3|ñ<­å'"Ð4¬ÆâÅ‹ë²×¬Gm 9ÑàÓ§O!ð Õ.\`J›pŠ |Ó¦M‹1bD»‡éC¬>þ.B½¥K—îܹ³wïÞ}úô "½ ¹¹9??ÿñãÇÛåm1R>¢)..nhœ”)S¦ìر£…²7ð`•À¢yà°$äà-iÂ’F’¥¥¥“&M‚2ñÚµkPP¶FúgeeA CQ[·Ëì’““cUÍ£5PVV&R.$MMMAAÁ°°°Î;{ÇŽ[‚#"">> µ<00póæÍ[¶l-dWKbªÛ·ow¨±¹‘ò!¸ºšTSS3|øð¿“½={ö@ñå”LË[ƒ=z,Z´èÓ§OLÛ‰iþîœPjkhhìÛ·¯çŸ:t(ÈÞĉ!çRÿ4mÚ´ÜÜ\)))ޏ¢ú÷ïO¼žtuu=~üxzz:1ðw×®]‹ŠŠ233›Ò:©'æÁðî vC† 8|ø0l,//×ÕÕ•––&Ûþ åC 8‰U«V0n)--IÛ3wî\ð>>>ÄjVVVCSá0ÊmkkÛtƒÛö€ì1>ä„Ђdœ\—㵑¸}û6ÄåÙ³g>9‘“““‘‘QãSd°!ópàŽ1bÄ´iÓÜÜܾ|ùRXXÒ÷ws#åC ZÇ“Ê7iÒ¤Æe¯¦¦¦ÿþžžždSo¶ââÅ‹¤ò:ujÇŽŠÅβG²ÇØ«;ºWOœ8dOKK 4.€„pb~EGG›˜˜„‡‡'&&râƒÁ†øüùóŒ3|}}õõõÁÔ´ãPŸ,!** j–BBBpì5e¤|D[púôiBùbbbˆ*™>}:ìå ÙÃða8ˆeeåº{§N$((ȹöˆ˜-ÃÛVäääpÓÕvøðáÁƒŸ¢Ý Æfl òéÓ§… r¢ì+V¬€C)£¢¢Â¸ÝËËËÓÓ“ÓeàÂ… %%%~~~ðÉ1bÊAð|YYY}úôáŽxÁe©¥¥u¯ \ºt‰ã2)‚{011yñâERR¹%>>þîÝ»111#žƒ:88¨ªª2Z[)))®)C544±téÒÀÀ@î»,—-[u/p·Âœ*™ƒˆ‹‹Û¿¿±±1øZ¤|D;@£Ñ¦OŸ>tèPòuQnnnrrrÝ~œEXX˜¾¾>cã@ˆ&Ä‹›òîÖ­[•••`Ö¹Rù0ü¡.ÔTòóó###›2"§²¶o÷îÝ÷")‚K¸}ûö¤I“ÈÁ-³²²®_¿þÓ‹³©©©`dIåigÿþÍEMMmذa³fÍââë³  ÀÀÀª,l2’*«˜;w.ÈùéÓ§ZiºG¤|DƒÄÆÆ8Ðßßû9sì¥K—¸ ^ d_=(4Éùµ¹ ]]Ý… r÷%ºzõê1cƼ~ýzðàÁÜ/p±îîîgÏž}ôèTbò!mGRRR\\\tt4, stg#&I Ÿß‚ ¶df]væÐ¡C\Ö« ###!!!>>>vµlÚ´iĈÿý÷_ZZR>¢íÈÌÌ,//Çðv.\#{€ªªê™3g0|¸Kލ¥Y¬\¹l+ðòåËýû÷seÔž={&))ikkëé鉔h#rssi4ÚŒ3¸)^ÚÚÚDQ2zôh¦û¸ uuõº1¹Œ¾}ûúûûkjjs_ì’’’8%^HùœM@@À… ŠŠŠøùùAùúôéSXXÈQ Íÿ ‚·mÛ6(.¹;+}||  ¬¬œ““3mÚ´³gÏre4;îÖÎÎêjpÑrhOÓzËËËÛ¹s'Ü•; ㎔`=«W¯&—‰±¾(  oß¾åèx•——/[¶Œ\…Ò„ˆZ—.]***¸)¡¦ÂØ7‘›ûÊ•+Üw­2ÌM¼¯e[ahIì –Æx?"åC X¼¼|FFÓÆN:qz¼š”ËrPDD¤îFr¶)n^YYãYYY®‰]÷îÝüøÁ)±Cʇàl>|È4ds¯^½>ÌQ›={vHHãUUÕëׯs_&^¾|™±3¨¾¯¯/÷E³¨¨ˆi|ç 6pMì¾ÿÎ4ÛÔÒ¥K‘ò!­BÝÿûï¿I“&qAÔ‚ƒƒ™Š’ÌÌLaaaîËD¦f`Ù9t€Æáãã(--%VayñâÅÜA¦Îì+W®Dʇ@´Œ‘8bÊÙ¿cÀ€ÌOË阙™=z´¶TâåÚr Œù(¾²²’Ëb—žžNÖÕ JÊÎCË"åCp<§N"æxÃð×`bbb\µ°°0²“Fqq1çNÅ÷GHåóððàÖh‚0ïø©y ‰•••,TWW³s8‘ò!8žÙ³g“^ËEœ>}:¹Ìe·ÖERRòÇBBBàÿ¸8šäû°aÆq_ì¼½½ 
åcj˃”h-FÍ}‘",BîξÜÜ\nêßÖÄÛ¾={öpe좣£'L˜ÀæÏ'ò!¸%%¥ÔÔTî˜óš‰üü|°AðÙò\»¨¨(×Gª2ݺu›:u*WÆnüøñì_ Eʇ`#jjh..;þ⋳fÍúðჳ³óßý®““SëEê¯CERUUåîîÞ“´jYÓÍ›7¿yó¦åçamd««i®®;X›V |-&«"ÎòlÙ²%++‹å§ea¶"åC°«W;µñýýÙ3F,<ͳ…°‘ø{(_-QÞ"¢•”@ ˆ­|Ô8ÿ~ÚæŒ[Ji4~b©ú-…O†qWDv™þÀnä#ÊMQ…{&Òä-Šÿ·—–+Æ0ÀOì±õ:f¿†oL*2UÛSÒT†rúç\lïÞ¢LE måó‘#g7 S™¢­¢À1ûò6ÁÝË3+· ·¤¼•ÝEé¿y{½oìb°¶E~†+•Ê.¶±Zùj’HÙÓÑU¾ï),P(„€‘²§£¯s?ò>,Èð¤Ñ$~~»+ùŒ²óo“==8SÇâ*}i¤‰Î÷àûo°¥*Br¥4u~ì´é/Ù£ŸpÀ@t3"ˆ6¢â…óvÛß7ÑWs¨´Ö˜•UB‚Ò6â?D,(JJHôöÇ0]*õnsÏC—=ùÍÔèÝXùçèè%\ëùŠ’‰W~¤W+“5mú$2 ãǾ»¾ÑhDåA–2hì*×oË%þ¤V£ÙÓt£=°‡ÿÍ)ÈA4ênÓÓÄ!*4ÚøÏoÅ2ÝŽ¢í}IS'Çê¢|“èÛÛ`OÜ-{-újõ·°+á?*±É3—ˆàϾ²³_ñ‹Ëˆó×*caî«’ª22ôúvòýÄW™â2*Ô•‰½ppO)¹—ѧòyGõ}Eß’÷VXx `kÎv~Þœ.{‡ãÊfÈÔNœ{÷_õ…®«.¾˜;V“£ƒ2ò¿‹K*NÐV'(Î{õ¥²·Œoð¥L ŸÉŒiôoåÒM‰ôÐÁÙÙoeú ÔNQ]qÇ(Ÿ@÷Ú‘‡èC²Ô ôty“|ïçÎZ× ­u›ç©¤Ê,¡Úü«hô¬µ’‰•½øçŸàÿ'Äê'7 ÛT[³8\+€§ïZC·"hh½à#=ö f¯õÐo²±k¬* ÀììL5ìï\¶ž ¥%aK©Ô“ÄáJêòØ”3Ô‹›¥©7-áÎ*X ¼~0NŸÍX½ ¡Ö(‡kEëUZqTLÛk&·”=@jœKVŽ .Ö•]Ãù’Jƒ ­“ßžˆGGqTjZ²ÕzøÕÕéùÞ‡®Ô Å2"whéoƒ%é¾°@œvòË_¤³JIsTjBr0Ífàþ<`qk êÍbåãj¾Igû}|åí½¥³´é“ò*n¦¥î×—~Ñvø\Oâȶ¦ð‡50UGgZW†ç µ ÏNy?c:°VMe†þ믫*ºõD[Â'ðk9.€>Ù¬Í&ø¬Ù¾ìÆ­]S`ËC¿±Æ®z˜5-f¿Ñx›@ ÕïóyøˆXl-Mª†?lœ²'ú¦ýbµvûÿ¦H ßêO;)t§áàñÓf£ްÄΡËÏ‚lÿη$†Lɨ¦¯ÂêÖ±¡öZ%);Ïö»x×>G…ƒAöˆ“Ì– D…aœ¢|Àž{´=–ùô¶§ë>ÿëôMinwŠvë a sFUUõn8 áÔ%WM0,ø&”ä„òé×níA·Å¥Ö°/eô¬ö.ÄÄ6Ze7±e0,›¾v˜B9\÷´öà ö¤ã{ÏÖýrþâT{ç#S Û5sLµ{Ê QØ›dt"8œ“þû†N]£.Í’‚c’œ®|ÔÌ›C¦,:ôèìZ5ŒŸþ ôxHÖԾ̇ŒÜ!¯¿­ŒH_Æèu¡«£JBÂvˆ†ÅìTÇÖþöXD]zÁ5XJ &­X‡¿¼ö…ʗǰ¬ç–¶)m€{dNrlpòOšVD¯ëOO>e7ÊÔRÎß?‰ØµØåª·1ý÷Ú E’¡p“Òu ¹ªï–æûMËÒ?¾‚·˜™fw:ò_º§vxJs¤è²ø|•½3MƇ m¡®¨ìà2nm°Ì…qKò'š8~W†]»8cú\.‹¯ãv[%Ó÷í›2S%(©u6fQiüÍO ySNªtŸì íÞ©^¸–&òý5Í •ATj,\saJ¼Äê9r+†m4*ýK‰<Ö~[ŽÎp=™T¸LE„h*e÷Ðiìïæˆ>5qqcYÃç×$¦ƒÔE'•Ê÷å/ûš6h8¨!vÓsQõô§Á =o¼¯ž*Í‹åG%@~ð`U¾¤ß›¨ì‰x×ÐLÃÊKöÒ–ìm`g?¦¦.L«Gâ,Ž4`–?ÒŒFÃ\C§l=éiûá¯Ü«?KH‹ê‹÷j¢|[c>û”´wÊàÉT¨ÔÄÛ¢¼W÷Õ'OÔü¾hìÓñ¹Aª* ËG²þ2m·E ,õS®D_)²oÜ®ËTò° ’ؾØ®èCĪðh—]s§÷q¬Ý¯BÊÞ§ŠŸ¯ÜºÑ›Jïeè4¾#ÕËšþ@bø˜ *¿šîm8±q–šž+4jdHëîvŸÙþ÷>~0ÙàßÂ_ý¿Úuré +„_j÷Ö=!Û)Ñš% ýYÊÍc?[šñŠf=zÇ+KÔ‹‰‡0— ¶hˆìZ2Ð÷Î;â(Ÿ›ŸMû` 2cÂÃ| gXÛ[©qÓÐZúù®ž‡ o§¦91†9(©R»om躖?“˜XûÈ$•¦Ž+IzÈÆÉë=ˆÖž>³u1±¬)AÉùyþT*M¤NÊ´—ýÁ°Î|x„¤†O̶•7ñÄûõþ‘Å.±î+ð~o_“$†“³xËfQ³ø‰(\(Ø¢ž.1ð·‚¾6^_ 
t®Ý¤fN½r¸]L^]yþµ…&ÙxÛ=¢Ùý¾e™wÊ2ïFµŸÖ«íª½5ú-ÇšcëoÑÖ×Nr¡çh*Õ…q¡no0,ÿvB¤|ˆ¯QÏ ºùVïÙÊ/©I¯úÒ0êó[ µåÎÉ%ß;XxR騾ü– °š":4 ýqÈ^jM¤:[BV¶¿Ä\*õbûFdÏéx¢î0¹kÖÒ¾wrƒ~Ó–Ú6›2O¥3)Ì»M<ýyª|Ÿ©c%º{({áj±ÒûáÎ9jÙw]´.qÕºEG„ðÄ%H«Á2lƒ›ônmô§–OBDfo»s`Í„ä ë 7hW¶P¦]ö†™SoÆÊ?KÈŠ á@}FoæGïþÜe\jZ>^¤ò…Ûôv¼ƒÍP¡w,Ù“0¥> ,Ë¿7HyœÄrm²y=”ÁÉÐDsÝí¯6nÞ”Ú‘~&Ìv:s`;}‰ÿ—ƒÒPzæ=zmñô»Ý‡ã)+éÏaœn‰À|2-çú^jÿÉm7ŸJùbÂ¥\uëX=XÞB;*AñJ,Þ¨¿ÄQÙ ²‡ý|ãò°‹Xn Nd6ÊŒûW Ûê7Ý–6ȹ{Û¶YKQG}è®T‡þBÈcùr ÓÙƒåQóhnðµ[á½ð.Þbâ ÞJ¤›h]ͦ7 ,IÙ msªÈ}y²ƒå'ôF"übº¡vcŒ=ÿl^@ åCp8|’ºDQ˜û*Ñ×eÝég‰纅ãlÓ)Œ«…%µ/†I×65s 滩ݣ£>U\~è­-Áð€ëÁ½4LƒþèO]KëçC§Ú—=Ù¹6ÄŒ<Ò@KM,ÅÊßÃò8ÙŸw±]Nʪ0Œ‡-2îy:½Õ55ñ>ÈaÇa5%.Šä«/:™ôn[ÏmQ_è*!áo>ínµ˜ùt2äõ7I/8³Q‡ÞV2çá#ŒaxI)‚;‘’ÓØs:yÏ· zàN¿íeîô¿Zåëüsôò²bv‹‘ÁÎç+륢 9v;ýå?Ĥœ®¿úì|Äè­É«èåþûrL„gŠ’ýÞÚ¢]Ñ ¹|_ we¸h'}Øb""ØœºÏœ¥ÆÑ_ÿç½òs[åë¶äŒ[•ºŸñ ñ¡¦ŸèYëêªþGÌM`HùØ ?“‘Çßò$ §½D—{b/A9ý{÷åÍKz9ÈÏ<ÚkÌ΃ØZ5Üл†NÕW!Ž8òBï2un—+»Å.bëvle­lÝë·m£ñ˜ò¡»ö}{Écø*ˆ™èa w~=ª= PgÖƒÞ$äPÈ bxÃÜkôf#ôÁ1þÇV±ìç»RËÒ-øÍNYÞ†XBø¯ˆØh=ó_)oÜm×8ž—sôº?[aòx'0¿”O÷vTj¹eÔ쥘ǃ´jL/òÃ]?ö_¤Ýšc׋¦(*yò/[ÍàCrã¤gNå/WÝ¥»¤á̹"ÍïcRÛRIG„}¢öå3U¡œfê>À€ö6œ;JÕÊÏÏ’é±ë†mÙó)÷4Þ.¼ÿpýI#%£Îøãí—Òûóá ʧ,^ª3Ç>-x‹¢‰«„1Íc¦œÃØn®ØP¾ˆM V9»¤KîïKĤ—Ý`Ÿ¨eDºËëo’P3ð±™feKoȶs£ÅÏ—$Œ—zÎìoëj­Æ‚3¹¶ôtäs`³çzúHÁƒÖb4lÃplDðݾÁ’wûžÁúl¦—K )³ÅhX»ÇTfÖÑþ–Ǭ´ùL¨4“€|«¾b¡»MQÔSÁŠ­fð!9ãhýûG»yÒ3Ï$lÞûԸ贞Ò"lµÆ”B©ïú»ÊÏùöˆVoî{øø¥ìO¹ª‹ìý×o®$³ûdÛU÷ßWȉõÖpÉIžg»Í>äê; §‹[Ìç'È¢Ò¼ìg¿*¢8¾o>C»}cãäèÚWW®¶¼¹‘úfáªÕ oÞ\iãKÊž“ƒç0“õù!ÿ^‰a°hWÀ^‡ÚtøôÃcÛšógOŸmŸpÀ ÃoA»[4ø ÿƒ1yØáÐW3Ô‡0¥L»Dsñ.mš,ã–„Ü7GŽ]¹ñ²hêPzûÙa‘aÂ&ÖÉn^⸇€'=ìo߸ˆñ‹úœN6ÑS&’k˜¿Ä+§³ÏÖELŠøÊÀmû¯Fôá ºùQ[Q¢]"Û¬)ŠF‡~.ý!:@M]±öb žÉçe¿*ûÝ.ËÈÔ6ìŠ ÊÍû®¨e¤(ÕÆ£šý6-ŸÿÊö+‹±ŸÊWwZ% ìKFdÌc¬KIÓgÖ&Lí€Ì•ÙÙ¿Í.,%GL·Tœ÷**&–œÞ¨=•ï¢ùÏUÆ8Óâk'•ˆt36p ¾:Êîn²Ç¸Ú\Œ :~>¢œ‚©O\`aú+ÜW®\éܹsß“•Þ˜Zm)£ :~¤ijèÕ?ö.Êø=ýD‰”]‰¼Û«œ¤?ómÜG¯`a­#îô.\™±¡.g»õîë½±Eþ¨Žþ2³ùÈ4'ÏSú.Ñj›F¶ÿõ+tžÇ]ØãÙwàPg‡ ¤+‚–¡%üÕÝnbëoòs™O\ÑÇ?²î1UPQÝÂ&1·ü½+T·~§ï1³~ý?Ë=&Ì•;çÓuÏ)¯5ïŒÖ¼FR¦í™ºÌ–yŒùê äš]}9²Ìnϲz“Kzž¹r?´tÇÔ¥lUQûÃEX§ ü…%¦¤$“ššMèwÉ«#Ä >Zò ¿ŸþôÇ=‰!ôÂvÂDe;[°ûúŒO}Û˜ùkm·ßñ$Æ‘©wZ¥mêú£øþ£°œdÌ[|ìû´þZZò6— 
6*§kiýÞ/ºX‚Bw–ŠºXÚ=«5XP*M»õýaƒÊ7ÏÿcmVþ”=@sèÊPÿ+v«pÙ+O£ð+‘{OŸ>m¹óM(°ÀÓâ¨ï(³Ú2+äŒÿÇÂ[ýD&“«·¿Ñô„è,ÀßÍc,Ý;ý÷ÚÜwÒÔqN¨]õqþi4«(”ÎŒ?ºjVIÃ/ü¤'®‚±…˜vÛ=;dˆ囼 }š9½†¦UÙïõîì\z‡¢kNScã/cÓl~ZõqŒÇC¤@öªrüAöŽ'шÑMgKPæ)û‹ÉßY¥|µ³ÿhïŒcÚqŠá9'){éùU ½¾S:ÓGްÔìcÁpÌ3ËÅ×'x™î¹ « {Êf½u³u–Ð߯OœæMK°ÆºŠƒì…&¿ %GTÝAö<ßæ»+þíN,qÐèé§_+{¯*iCø²)úC¹õײL'ϲ·ëB¬V§›:séƒ8jš8Ðîb_®ÕÊžäZî¡‹›§ÎÄàÞ'¥½’“?¢t@´&Íš¢HQP”(÷wyG.š3íòzÛºæÒ7dÏÀåá i²¿ŠvíyºÓK¿Ã—i¹Ú‚H¦®#ÉŸh ;ßÀ´J x1ÿ ˜g½kã–éÎ7¦×wÆÙµí•èZž~’¾éMägô3©hb ÷Ú Vhá2^G¾¡]UïO žIåøãÄ^´ò»”nt/ètÿ‹31[9§m§bØTÓ=µÉ—°>G-ñ 7.I Å0kÈXb×´½)3•‡Ì$G¯6:¶Á`$f@sÇß8^OsÐkqµrQiiUiýÁ)&£ƒa ‹o“^`Øtò<ʶ1ó´0LËz€‹÷;Ø@OÊ‹6눽•¹ô±òæºÝ˜·uârÄÅû¡D@´*Íš¢ˆ¯ßüœ4…ù‹•­õéýš¹_~¢TBa:&·=`ƒ¶å{9;_#û÷—åÿÕé¥ ¨}ÏWòx§¼Ñ6ŸØ*b°ø†¦UJ Ò‚}­¬vyO¦÷Tõ¸Y´Pñ·ÊçùÕý~Ê'À/< ¾ä¼Ñêç~p2oÚ_ùbne8댭wWiþbaŨŸS"tÕ­û-퉅Q[øOÓ­¶HªïœëFàÿ×–V»6×÷ª‚o‘J¯Èìß7þÞSËûßqÄÂóIÞQÄò›wµ&ßnRÆÜŸ¢;@´€fNQÄ'¬r^ê—}Œ¤jx ©x)ÃÈ}A2e©1NŒ_!Ÿš¶#=U·jbÛˆºX£Ó*™XúÀ,¬’ ØMZÈÐ[#ï®mø{;eâ?· t§ËGBBVG§!å«mëêMNKðÐocwã- BXeÓ SEåoRWü§kˆŸñ«õÌ L¶8]b½I}øpËU¦MŒj׺¾¡»@°ˆ&LQd%A þùzŒ_˜þªl°„ –_{‚Uµ“zÇ•“S wÑDÀxeÊ[ô’¦ÝñC>åKô“ï Â\ÿ´JÎÂâC.S×kп¯"†¥©üúþ÷„Q = ¶>\8´¹Mjú6 óÓ°8žè·«ßJ–)ÚÔó­Æ0|VÙK‘1ý_OŒÊ4,=0øÃd+_ך¹³éÃqÛWQÛ.iüdÅV pm£_å-q§pI®U¾&\jjÕOÛçþݬâoiþE>Ÿ¾÷íýëÍ™„)˜Â’µCM6ß«¥Riv·h·ñs[ýÚLöèÓ1ND Ý2ÆØu{pö&“z§UúóY5Öj–ÄîŸßM­5…••ØÃ€Õô8îT—ØùswŸÍÔg»“¯z2Z)º’Ø–ü) kýNs *ß‘êT^zIŠŒ–éí¦ÏÂ:z]&öÞþœÅG¨»†®=ŠÝ–ÓhüX¥[íŒ!Î:=[;ÜOÏ€µ×ýy‰$\Ç0ûÆ¿²z× Ëàåt Tr ¥îŽt3Fw.h‰ÉköEõÍ4DNÜÓÐ0.7Úix—º¿«nOý9ÜB½Ó*™8Ç™8cõ§‰NuC=¿"®jE¥Z1$Q[D­á÷|<Š•ï®u@o›“wÊ`â)rÏ®kïôðΔ´O×(}é0ôy÷M(jì[Z[á¯}_øá0…BŸŽË÷àzËu0ìÞ¿þk#ßä¼ Ãèʇ¥¹Q(nŒ×/º…¢ãÐX ¾þ†4íi\¨ë®£oóó0ñf–Œ}Õ1qú§÷;y¡g6qöº=Ž+Éû\÷aÝ0 |25`¾§ëDJ7¼u/‹Ï‰ß)ÿR»ìÚ·¯+†Éþì:²k×¾®]±1²‚uO•D£Ýn}ú18báà«§úða"EØ¥$ªÂ£ºç‘ÒZ´oß´ŠŸ]Ö!´Gݬ]Šë.¦xöTé}ß[ÙUå=Ñu€@ Hù~¡¬eÙØƒÁ%6ÎðWw»£Ío‡mø­ç¸¾Ù}'ç`óÛÁ6œÊl»·Ãê<r ¦óHh-µÑú-Tf›½Í6ÿ\1²T@—@ åC )@ HùˆÖ¸"y¹mçž¡¦¦†‡§¥ó¬;99±d!¦ðÙòȲþ²äañeY]SÃËNÑdÿûŽÒ )‚Káá¡ôù«9X¢££õôôþnò¬Ö–„–Ÿ_TTôóçÏìŸ}-é„ ¾ÿþøñc¶Š¥(‹ÏÙµk÷ŠŠ 6I–ß‚‚‚>>>K—.eÛk)‚˜7ÞÈéùóç ÜÖhiݺuOŸ>UVVæú|Œ‰‰‘””äúh‚$üïÿ+//ïÖ­÷Å®¬¬¬¤¤$,, )ѺÒ‡›m¨¬¬ä²¨Àçĉ¿|ùÒ²òÇ‘‘‘úúú\ÇS§èÝ£===ÿý÷_î‹>øŒeç@"åCpUUU¡¡¡ÄÄ\Cuu5ýFååþ[õÌ™3ÄÂܹsüøÁ­Ñܱc±pâÄ 
îS¾””âŠe×µHù\ËâÅ‹KKK¹&:‰‰‰ÄÂçÏŸ=<<ììì¸8ïÖ­«GŒÍ ÍB¾Tcù{>v€|&ß«W/¤|D+bnnN.—••íÚµËÑÑ‘;¢6}ú¯©=¹[ù¾ÿN,”””XXXøùùq_/\¸@.wéÒ…ËbçããC.ûÆÖSá åCp<çÏŸg\ݳg×(ñþ’€xˆÄ­0½ ½téW*ß‚ Èå·oß‚Ø÷èуkbgmmM.çåå !åC Z×+ŽaÙ²e'OžäôxÛH***fÍšuùòe®ÌÄ1cÆ0®|ýúµwïÞÜǺ½5öîÝK¾öãtLLLêVI×®]‹”h#‚ƒƒ¹@ù´µµ™¶<|ø[³,))©nôŸ?ÎMqTSScÚÈ5ÊwãÆ.]ºüïÿ#·DDD åC Z…ââbâ~ãååíÑ£‡®®®ŽŽDMHHHII©gÏž¹¹¹ZZZ½p¸5û÷ï/&&&/..ÎÌ̬OŸ>#FŒà²8–––†……ݹsçôéÓ°Ú¯_?¸z¹&vDsÜ   //¯””IIÉèèh¶ -R>g#((H¶‘ƒ¢3&&&44”;¢I,ìÙ³ÇÞÞž»óñÝ»wÄB÷îÝ÷ïßÏ•qäççŸ3iÒ¤¹sçreáB%³’AʇàÆŽ»sçN*•*!!ÁMñ5jTÇÉD999îŽ \¢[·nåʨ•””/ åC ÚŽ³gÏ())16‰ä˜Zp7ªªªÜÁ#GŽp«ò 0àëׯHùˆ6¥[·nS§NÙ³¶¶öööæšx þL4uå«W¯FDDìÛ·ÏßߟC£ÃÇÇ·uëV":\ÔZ¬¬¬§¼áôè•0î€F£AŒ8¨R>D‚‡‡GKKëùóçÄ2œ^½zÁK¡P81F‡î ʇáó•s‡òÁ%Çæ3Õ5 —òòrN—=¤|nÆÐÐpÑ¢EüüüÄ(ƒöïßÿðáCÆ B9==½Ž“wPÂrA,¸¦‘‘¡««›ŸŸÏÑAʇàfΞ= ‚ѵkWb‹ |>ÜÁÁt‘ƒâÎÅ]¼™°¶¶†: ‘YœHUUÕ!C¸CöŠŠŠøôéS®‘=¤|îçÎ;FFFŒÓC§§§×ÔÔˆ‹‹Ÿ9sfâĉ‘Þ½{ƒ‹ ï ·cÇU¾¨¨¨ëׯ¿}û–Ó³ //O^^þÞ½{ÜôÀ)¢£põêU(FGŒHnäáỺºzРAà0Ö¯_ÏþùøñcÇɵøøøïß¿“T8…ÁƒƒòMš4‰£?44ÔÌÌ,-- W^]Hù‚mÛ¶eddhkkÇÆÆþvðòfeeax+íÂÂB0ˆì‹»wïrßL 1lذ~ýúq؃wuu}ýú5ç¦9F›0a‚(Ÿ±±1_]Hùyyy=QQÑÌÌLÆ'ŸAAAðyóæÍ¹s纸¸°§KÁ5C²ý‘ÀÀÀÊÊÊÎ;³y8‰7aïß¿744äФ¶±±9þüµk×bbb:Â¥…”ѱٸzõª··wtttݽS¦L!†ÒÇøùù-^¼˜­Âüøq¨˜shÇŒæ2qâD6ïPVV&##ƉoÂàBZ·nÔùöãtœr)¢Ãa„3}úteeå;vÔ{ÌXØ…p÷îÝìxpâââyyy$³>}útâĉåË—³[À^¾|9f̘[·nq\^âçÏŸÇð¶y .¢1 KJJ`aË–-GŽ9pà@SæRUU½té¹zýúuÐΨ¨(mmm°ƒ¦¦¦­Z=uuõ‡vÜÙk³¡ŸsrrÖ¯_uOOÏå8l˜ ééé Êàä***@’A›qÐŒ”ø\q` ¾;vÌš5ëàÁƒMœ!h¹ZXXèïï•ñ/^Œ? 
&PSrÑ/ PZZÚAòdoôèјRRR,?ù³gÏœœœÀ0­[·ÎÅÅ%,,Œ}"^]]}ùòåððð[·nÁu8eÊ0sp9yâ )ÁJlqÃî ÊsssÐBrÐ?"""B¶‘!¨©© ‚â;66–F£;V__±nÃ?ß½¼¼?~üà¬îÞ-äÉ“' 7nlù˜ã`šA6Î;'**jeeÆîÊ•+íÁÌÌ̈ˆˆÛ·oƒ›‘ÓÖÖ600K7Ý’Hùˆ¶£wïÞä#Íâââ 6œ>}ZGGÇÆÆ¦¹Ã~òððÔû&::úúõë÷ïßOMM•––ÖÐÐ7nÔî±…B¡€ìËìy1 ôþæÍ›Ã‡oúANNž<)++»jÕªÕ«WïÅiû(TTT€¹„OLLLKKëÑ£‡ŠŠ x8Ènyyù!C†pîPÝHùîDPPð Yžîß¿ ²É“'¯Y³fêÔ©wÚ 8u·§¤¤Ü¹s'>>ž,_NNnÔ¨QcÆŒÑÕÕ4hqLnn.øH°A`%;H.|Ä£ ŸõÎ_qãÆ H°é]ºt122233›ˆÓf!„ü‚\{ôèTJ²²²„……•”” B&prLOÅHùN‚©<}òäÉáÇAÀ·-X°À¢…ó Ä!º2‘}ïÞ=ð ÉÉÉ™™™ðC={ö?=”­"""Üò`ûŠŠŠ úúúúvvv`AfFŒahh¸dÉ’©8­÷ë••• l>„Äþüù›7oDEE‡JÔK@Þ”qÐ ‚”à~F}‡X­®®wxæÌ™ÔÔTƒU«V±ÐvÈà0µ<îçÏŸá¡h~üø1xÐEE(—UTTÛ„“ÒóÒ¥KW®\“ݽ{÷I“&Í™3çëׯqqqûöíûöí[Ó_ÁþH´$%%AJ¾zõª¬¬ LöðáÃAÛ455ÕÔÔÆã k)øýãå]‡Ãh A Á2DkÍš5,wB {6ltrrZ»vm½‡Þ¿,KJJJVV–¤¤¤’’”æ Š`YØ$õrssAç^¾|©¥¥ellljjº‡éHØ[XXeff–““ÓÄó§§§ß½{r´íõë×P9——‡zTÆŽ;gÙ²eè2Fʇ@ Zj ã«T*ÕÍÍíìÙ³PæÚÚÚΚ5‹%¿¨¦¦¦‘p#pê’¢¸¸$äÞ½{=m—£­­=eÊp­—2•••¸páBFFÆäÉ“/^ R÷/NÏdÏÕÕ‚MŒN@£Ñ "ÑÑÑ ñ°±´´¢Yêvm8º&‘ò!ˆ6EBB‡X½}û¶‹‹ ý I[¶liÉ™yxx@öÔÕÕ#""šõÎÌ¢ NÝ]™™™7nÜŸôàÁƒN:7‰‚#ÿîbII ¨Ô¹sçzôè±dÉkkk²Is©ªªºvíÚÍ›7ãããÁ‚ž ‘@;Çá + ”`SÛˤ¤¤€þåççƒ)ük/^ÇÞÞÞÊʪ_¿~-ñÎÆ´ä0,, 1//dB;þ|^ÞzŠ—«W¯îÝ»÷õë׫W¯Þºuëœæê%ˆexx8ˆøQ¢äŒ3ˆÞŒGúâlÚ´ ]W¤|g0räHrù€W´sçÎæž¤eÅŠðuYYÙV j݆”•••`¹.^¼Ò ¬  L˜³³óÌ™3ŒŒš~fÐÈ£GÿÏÞ™RÑÄ|^åzäV¨œ%>G9B¹ÊÑŠˆJåè‹H‰P¢t()¢"tS}IÉUŽB…BÑAº¨$ÏYûí¾}žçL‡{~ßû4»;;»³;;ÿùÏüç?¡W¯^EõW--­… ¢*æj2?=×ÎήªªJSS³Ãµ!PòA ~ÍZ2h•%–––¨0ëÌt¥CŽ?ŽêC‘‘‘½¶¤-===*´²³³Q…Ø€ì3•ß6l@uA GGÇí0O:…Þmff¦¾¾þ²eËtuuC/¤‚ŠÛ[·n©¨¨  ",E(ù  Õ‹*Nâãã>|ØM‡¢—/_îåV¬Xqûömôrm–ùæåå¥:DäÕÌQ5Õçл6l*ÍÌÌ\]]—“ù‹÷CGG—`eeuôèQX~ PòA @ TWWOOOÿçŸ~z *öPM±‡ÖàF“EåÖ¥K—Nœ8ÑuLA<<<#""ÔÔÔy‰(///UUUôÐ__àU.׬Y“‘‘¡¤¤KJ>d`3yòä¯_¿–——spp úßOGòz± >õ x]wÀúúúz{{kii¡Šàv2´GõÉPÕÓM›6ñóóŸ:uJPPð¯Ü¤¬¬¬žž^tt4,3(ù ÁWEEÅëׯ999Q)ØÅ`*odddrrrþü¢uuuBBB¨ëbíãÇ‹/þð᪡:‘éNÊT+ͦ¦¦•+W^»v-44ÔÀÀào{"""EEE°À@ äƒ@ ¨zôåË—}ûö¡‚ Õ–:‹†Ê’?¼‚ ¨BvæÌ™Î…ñâżyó&L˜pþüù?±«1buÊ?ª†„„\¸pAGGç·DµÉÏŸ?z¦(ù ¡>Uþ>}úÔጺuëÖ¡zîâä7ÐÖÖ644,--mˆD"¡GYYY£¢¢òóóÿn¾v“dToKIIé¦i-“'Ofgg¯¬¬„åJ>2Ø@•?MMÍ'Ïuß³%-wïÞµ³³ËÎÎnÈ×××ÇÇ癞ÎÚÅ‹Ù妊ŠÊ±cÇ:ô5Ó©©©ß¾}9r$,$PòA Á†¿¿ÿË—/W®\Ií0¤_RRÒõš·m˜4iÒõë×Û‹=±cÇž8q¢›cx ))©ªª*4 
¤¤4sæÌîOðGO-,,„%J>2?~ü¡C‡¦L™òàÁÚýD"qÆŒÝIäû÷ïïß¿o³•nnnqqq}›G<FFFbbb»víêÎ) .„eJ>2haddLOO~õêíþáÇwçô§OŸîܹ³Ø›0a–-[ú•Ú‰þ•““svv^´hQבwïÞ}ìØ±U«VÁâ%œ BîÙ³g&&&ø­w•Έ‰‰yûömxx8uÏÊ•+yxx^¼xÑ?sš••U[[ËÆÆöñãÇ®—¨utt„’J>2˜¡§§÷õõE%îù ‡Ç‚‚‚lmm;;åÖ­[UUUVVVøfQQ‘ŽŽNÿcbbBo•Öiiih;‹fmm K”|d# pêÔ)T%¢Nð÷÷ïLò=zô¨¬¬ÌÌÌ ßTPP8|øð² 1#ÓÅ´ðòåËñãÇÂ%ÌlÙ²…v•öÎ\½|ÿþÝÏÏïôéÓxxôèÑŸ?ˆùEŪ§=zTHH¨ýÑ7^¹r– (ù È {666ÁÁÁhØÂ¢3í7iyðàÁñãǨØÃ‰‹‹»páBrrr{/ØpÑ>(ù ÈP……XZZ¶?:mÚ4\ì…„„ ö퉩©iVVÖŽ;ÜÝÝi÷ù PòA ¡Â¾}û&Mšôøñcž+W®R•––ò2î:::rrrƒ#ËhFFŽéíí½yófêN{{{X äƒ@ Cj?çÿýG+ù¤¤¤ÊËËÃÃÃçÌ™###3˜²~üH ÛÛÛ/X°ÀÊÊ Õ„k~·mÛ6f̪Kš!kärLFm¦ÞŠuýê®äµæ¢…,[bÂÎÎ%ùË  _FF†„„¾™——‡j{œœœ_¾|ÜGŪçáÒ½¢¢b¾z//¯ù6ÎýööÔ˜ßÉ~¬*3‘›‹J>ò—111¡®òª®®~øðáön©%kÖ¬©««cddœ4iÒP{é»÷úög±‡#$1)ýéµIâl¬,PòA ¿‰²²²¬¬,vww 455 722âàà@>EEÅ!õÆïgÏZºz@ÜêØ ’ÇOs´·ƒ’üMPQ—““ƒΜ9óáÃZk—AšÙÚÚZyyù!õƳÓïj™ˆ”»a²êÉóBI1Q(ù È_ƒ‡‡çêÕ«ZZZÒÒÒC*ïRRR"""wîÜR¹–Õ˜Ój›P)Ë«—]z WÇËN½ŸëÖͤ‚¼m|7÷ô ÇÝŠ‘³…’üM._¾ìááÊ€Û·oµ¼›˜˜|ûömäÈ‘C!¿ MMœ£ùÛínH££‚ uÅ“«¥.®·ä›b±}‡uÝ»»•.ĉ*÷bÒÎyIÒ£r§SÑ\9ûŽœÙYÆ3y³‹>º;ë‹ØÅMk¦9ãÌò½øðËY²CTj|9¾¯ïwÛ:FzÁ_§6WyxÜÍ'TDmñöÍkÛÜ\UEy= (ù ¡ Waaáóçχ`ÞwïÞª¼|||JJJC!¿„a„ŸÆi¬,r™s-»ôž%±b‡µæ”YÙ¥¤Æâ‹[Ž?œ¥NçXxvŸ,Ý @".ö(gY¦fgÝC+$y\Zá_Ê$ÚÿC™±zV¬³Ò‘ôK[]Å`©_vi^Þ©5s¿šH³¶º!’üetuuãâ⨋ö 5wíÚ5D$_7±¹|ý;]“ >k§¢rkœ¤adüwwg*z§ý\›C[ñ€8|›41'æW:M|„YM Ia1´½Š±LzH^wCèrÖ^Ë&”|ÈÐE__ßÙÙyO]ïTá"9:‡bÖô&úœøSÒ¡‘zLjQX¶ÝópÅIjÄ~ ñ àÛçªîô×ÑÆ¯KOatÏ.uu{‡Ð‘5Ж«˜üìÎG,8¿bÕ‡ÇÖ)@É@zUUÕÜÜ\êZµC ‡¬¬¬¡ûúö`÷y¸*†’]zøÛÃ;õ X¸æ=h`£Êø§äCk¯¼])¦#ˋǜ]z™²|Ù¥Û°MuøYšê>Õ7D=¹ÕÿCx¦)Ë·CßzÉõ¼ £œi¯b Gq×y®ðÔù HoP]]=dónjj3”ß¾¢Ýùlš)s#<È›û$ôïl<ŠZ6Í&m˜z¾ÝŒRhi o÷ ÅÚ4WÉnu(ù ¤‡šÖ=(ù ! 
ÝPÎþ€^q%ù†>”³O"‘`øCšç9@É@úsu_ýºèÕý?«©>|Øõ¨NÖqc'ª …ì××¾Y˜†fÿku-@š¢ÈÙ;FTHH –ö8ó÷ü™`“•Ü—ýdC«]ÃòôwÖ]ߤÐW™‚’”}J»zí6aÚTf11F)i %j{ÌÕÕûÉÇxR]óðé^Zõ^™6ƒ)ûåYWþ»E ÊS™%Ä™$¥€¤Ô04û󽚳Ÿ_CÊË|X›–V-#Ã3{¦5a(hÃßRdÅfSžý6=&â éÃ^E»ƒtj“²SJ²cv›uÅáqÌÓZ{z;Ë #©Bqir٥᨞§³pIÜ¥³ç )Bq)q~ì|*zI¬£Þò8©?ÐÈ[M^€/¹{/êuSÓ_’rýìéÒnqÅ÷äÚ“–‰%ô—æ_?˜U«Â‡ôZ¾¡äƒ@5ríš_Cc“‘›•%w×qYX†OŸ1ýa•WiPØ©rs3Cþ±Û¥gtôþoßLMÙV®äê:&3q¸º: úCßʎœ:Ynb¢'(8¨½ZÓ#`¢ûµ+îyGçÆe–ÌÃû-ÉS0ÉÇ(d€õµlJzbžÝ—*Rœ ei'#«-{Bñ°¯QëB!Õˇ¼T #‹-ûöÐaÞA½Ûh"¾Ñ°¬äík3µïÞ|=rÓc]Œºô÷0Ú.x—3§þÔÅÞÙwÜ¡äƒ@ ÊåHßQ£ óæÿŽ_J~>:—M¼ß¿ß;øß’Åf\\B.û×£üˆ,ˆžš}æ_=wÝÆ¼?~<8t(f1/ÿÄA[JXÉJïO|›mAe¡ë1û%ºøfviåE_×Õ3ûdÕ¢›gÞý7UðáGåÖ?Ø•ã¨NÌ,ÉþŠíúþDV`ʹÛYékjc³Üýø8'ç! Ûôf¦¡äƒ@!eS®ßHY±‚ûÓ>œ°ÖnTUÕÍûË×9n(Ù¯¬xpñb¬µ Ϧ3lذ5kxjj}}/:9môÅF m.ä5:¿ÍþZBc)®–‹ËcÖ“ÅX–ìX¿ìÒp!¶ŠØì×dAHBÕD S’(ÙRØž¸ô~ihz™,/ù<ÝXûUïÄÅegœ2:²n#ék`Ð ENóVÉu·uyúQêPòA ß%"b¯šñÏÅ6¶áëG…†î45]5räè~žý¨¨}²“þ\ìQafîä4:,l·¾ÞNAUVe|œ±u˜8YW—’®„ì=\ÅHk«}ãÔù#gB?¼88YÛýꥩ±øïÙœðÝÎ>aÕf]Ù}XŸgö‡Ño‰.œ…†JIÑ'‚Ëêß<Üxn?ì26<ø,šrxÈAÆùëfrnÏ-™ýöuÀŽÍ¼bQ›¦Aü&l_·®G„“¥%wvö9nnÍqãdúmöƒyÛ®Õ)››sæ\-¯P/ªSýw¤°Þ'nh½±M~Ù…+d1?,6kñU„ bùV8QLaõ–,!'5FoáêYzöÔ°áZ²º,o¯HÞ4³ÆÒYAÑ¡yìݽ{?ßPòA ƒ//¯­[ùz.}YYâ‹—w kEE•†`ö%Äß¼}øäI¤¤,lš~.ù*DE´Ù9*¥*{üb¢„ñìrlÿX[; – È€ã`€wÖû8Æ3>y’^^>º¿Ù¼ïè…ì Œ£/*|ZZ2†_|•g>¢W)©§—÷“iïÝ’|Br‡&#Ù—þð̦È9î³v6ÇE¯²À+Ðбƒ³:Õàk=Ã"P²óœ¡ëÀ2p‰‹ó·[ËÝ;×’”dºv-bÞ<§þ“ý»w[ZöRöEDâãnðŽž0È&üíéaÔÁ|ö~.ùX:;ð(•@ÈßX6—‘Ö]ÈoRý5OTd0¬×®8o«Ïîm.›<úCöëk‹Ù؆オ–´uXöìÝá¼Ñc0•"K>âÞeš¼<ú&×/^½_JÂ=ºÖ==ŸVxÚô@ÍÆƒÿ*{†õÀvSÑ~*ù:Ô¥ÖÉü³±@J%Pc§ì¼êüˆ‰‘Ã1Z5ä¹n]ÞÛºÙfv–†ŠW¯Þ §o˜¢k@µ¾Ò–¡*|¤].®ù%uÚ V-3œðó;ùžI¹ÀÙ™Yë*ÇHPò<ÝÿÐñÒ/u“Õ7¯·¤uÊK*{æt(3ï=×XÑ•–딤:î!ÉN¸RZÏÐÐÐ```ëVH&$ôÊúõ½mréì2êáƒë Sôû<ûN»¸ðövö7ò&'ŸWW_ÔŸ Æ·ÚúÛw3 gYÙD¥ø„'t­¯G4£ ×€íþg©ûí´6$—’FPPðÔƒïØN:fŠð v]¢Øb·ßNçF&”\½]J@L,%q{8 Jðùìg€è² ©¨Èü—´Ø#!»T´OžÌïóÈÊñ'`&^ñÊÕt¸*sÈ®nùú€ù‡«ÿ’Û i&ŠÅjäå0«æ8É;Ðàõ´ª4;%fXxÈòîÜG#¥s²®ºP¦GQ¡ˆ!,lÛ«óE¦ÂXf=e ^hª ÿ=È#ÈÃ6©–ÜÞ,§½ X| å¤?“šz¤÷ÅÀF†•~èû%~²2O:;ê“K7`“Üú¨JpÿIAÖý‡•e¥"Òr¢ÒŠRSÐ_wÏ.™]ô"`«ý‰°«øŠê¸Dl"Ÿð­¹òll;Ü„Ùy¾ñŽîùç%Ș—Ç‘.ÌŠq ¢£ú¬Ãü÷%_1¥­§­‚©kÍbo9‚œ¬|t’CvøoMé_%"o{Ñ•ºÕâXÚt.UTzz:²~(ß,ö’‹*ÕxÛÆì&¤ü\ì9_~îc8\$¢lЉ· 
\ìM*¶T$•¦1ó£WÌô˪[O£/‚ò¸1š˜Ø“µ»²\Ö­þÌÛ·•pöÉ¥çÎe‰Ž>¦§·ª³Ÿý¨DNž»O.­­ÅdllÛ· ´¼*)õî˼G£ED¥§pŽšª+ô{IU?ö\rZ8jßY†—Ä¢JÀÆíœev¹³Óp¸â䓯™‡œ[­`ŒÛnèä0Ãýnöímö|Ä5…íF Ÿ¿nl¤í¯É ÿ˜[‹Í–èÿ’¯#» 5"h|Œo¼BN¢Ù'[肨&§ly9k’…›Lw"ú Ù*îoãPlé'²³SÕ7jL5ábv¢„}MyMMÖ™š¼uªºW*Þh㜻²dê:"ßÔöƒ”£ˆ%n,4Í+ëà\X±Bú3Y™§/n%öHÅ…q)!tô ÓçˆwÏ,º†@X Á¿z U}˜ýg—V­âîÃ`emêý‹~G;iYyÙ›¾7ŠJÉ JȈ+Í@˜¬YH(ߤ% ¯ûz‹9ç(±Qöëù“ØÏ?MÈ.ÍC7½³÷†‡¥Gœ9…ê Þû(~;=0ß.ìÓv]õ¹àím‘ú-vh‚øAï}§ÂŽÍgÏ)¥œÂ¢þQáFúêÅýѬ³-¡aÛ°æ^åÇ·øá6¢±øÚŒÀƒ;7QšìSV¢ªÃG·cþ„î‹íÜêèþOJ¨#s¨c³~IRnt7Á!Áΰb…ôsï¼–“o5Ä•{öš{ƒƒƒ~‘ýXO·ìh^ ¥äO—Ÿe®¨Øÿ7``Èòôéþ™Þ'Ù¿|å©«+_>ÿ™3™<¸>¥ç;ó‹ß§§¥½+,”˜4^f —È?"ÿüÝK(ÎÅ&¤‹ªÎCm©,¶§®_¥G3Õ2‡ÕöÈ®[PRæ«7'HކUéÍóÙ—4¾¿i¤`dsám¾¸ß±p)‰Ù0fŽxýCœÚ´ÄæÖÖ´öU¤Šôún_°þ7²¤K°»‰DT—Ù-R|}Ì·Ä¡f!ÔÓÊÊ+”ºG]„’[k%ÅØ&ki&+H¿i\²¤£~ÎIòh“C o88ê-·|4§,=yÔÔ3Øw1G¯ðZµ5‰lñq«ÏçOhààÀt>¡eq"uMI^¡VOÐM‡£î,;ðÝuõjRßH>1^ÈÑç/!>>³'$_MmCüÝôg2™ÙØDÈÆ)²3ôÐß (¶tcæ\ëë)}¿£óñÏÞ'üÐo&e‡j‰7Â==ElXíVkÛ·BÉ¡[@”«òRlû~d' Sħ›¬«2ÖF¨|pæ7n{h¦œBÿ±±³kÁÒó(úd#OæQ˜ð¶Þ|ÖêêJü¨ÚöûÉ›¹Ìâˆ`piÞô7RRŽ«©w4hd‹†·%€$ç<€*Tì¥W„(±#òãPe‡·Ç™¦P™À!f‡ ~ÍÒïðüB ¸z¬,K¸‚Š=òN¬;ÔÊ"X²]¡®ÎÓ'ÙÏÌ “—gìó·0kÖß±¯ÁŒSr ²>¨üüQXJNTzŠ öëó ÊòIáýœ-*ç)ó;ûîzΰb¥ëOS7'Î#ìOßÎoöv"ùfú1† € ‰“PýëTgÆ6AÓ{fœ"lSHÑ‚Á9³ì ¥²$f¥Îz›eæù…OŽ!’DÒo òaMZlN:Júö%§šCÙ¦Ê9¦£ aG¯ÇCYyDVŠf ’º4Óæc—…½ÿ­zÒ´RºxƒôSÉ÷QM½#kþÔ³Õ*]ñ™P™…†žˆ¿€¤;•㵜¥hÛQ=æ÷û=f"ˆ*ØZÕlWë ;´¸‡í÷Œ@qÿþ´¥Žî¿–V/©ÆÐ¶+LeØ·oŸFŽìmËä¤×òò}ï>[Nnø‡Ïxyg£’òÊäÔ{/óñ ŠˆÊLá%¤ü»Æ)=IQ›íÆ''Õ¶¿SUáTífýÏ;ÈoWêâÙ2­O¾ ±ÿê¿ Âó÷¡›´V0¸Ø«yz„ùÌòSŽó'K@QcJ1·Žù+óÜC\¤´}ò@³=Π›ÛlñÍO±­Ç#3¼UÚ$b~úí²0,ŽÝxy +YH?Dl"CÇT— )ä2oc}¡ÅðR\ft}aâ!ûEtl’ÚÚÈ’/žŽ—&ŽYÀdYF†*…`Î’`Ìr¤ëá™(ŽÉ×èö|ã:ø„ „a÷ïÇji™õrö¹¸úK›ôÁýä¹óº%ùšSîÿøþCXZ^P\ú¯§ô͉»>ªËMiÖüˆøŠêÉ¥¤¥Zn@æ¢ãü¬–r‰&6ÓÔ´USâS³KI¾JÄ3l³ÇåÆ(ÈÝ<ó¹ì?FÆó&E¤ƒùQòYîÓn ÔÕ²¶‘ ïd÷3¢ßF)Úœtšç‹4¸{n´Êú‚uöØaª=™¢ÊIØ4”)Y}¨&¯övµÒ :^(²]Ì9»öc—èª]Ç8ÞÏ먙.M™ ¯µ;7Kù¨å¶p4¼çt¤–w±§Óžˆ±¢Rìä.…”ÿŽú‡„½úPÃÂÌkb»ÁÖT?Ño‡`üSEðÍŠ¬Ë'îcí²FÀO«YH¿CRª“¾¾oãqä•9AØ}¿I0»¼,×µ´¤°vž_hË*úÙ{Ûx7hÓ"ùöëÚd¡‚ð†¾i´\ÁÍ*_73yX&»ÛÉ/˜ŸÿV«×}8KJ÷JWgSYè–Ú] (æ?+í üiÑ»ŒŒtŠqÊ$Åž0Né vŃìÒ[¸Ìûö`·Üê«^¶r_ èÖí¾wæ¦Ç™:+ Æ£Yu€È†øåÖÎàF–bþ\μ÷ž…èê§Š˜ˆigûeÕÎàCª®û[îº%ù 
l×w¸ßÕѱÕ6‡ç0ÏŽbÒqËG'e5oUtq­Ö1Û]¢Æ8¶‹#k`•i@1„ED´¸ZP›o…þÚ'äèÖ*vYCGYX»Bú+""ÜÎ×PÿT4›Ú ©Zƒ0f‚‚¸š³Q­WÞ#– Ax 5¾)bMýªÖcÃäšcê#ˆ•ˆ• õÄÓTØ*Û½Ž° C×Iõ€È€·ZI¤z"‘ÿ[\üÐsñã]IMÅÅåÌÌ#yš}N‘ÊÊ^}j””äÇ7=57Ym³ìòBL ­xbÆ)©i3YØ9D¤øÇxã:@h,~šó0Í·?Ô·räÌF‡Yb¶Zûf=ăì?áñ*ÄP<Ÿ]BºßeaA ’¯__±²uà¨SÍk;âÕ²Œã&+¶§÷Øž¦‰§…J5ýÖ’4´X¸´PØI~œ\½miÒÔPÍù³ÞNmf{ƒôG%P•ÇÌ~Í33¶hœ¹ÍÔ°à4¿,½ç'š¦Y;Ì ñG¾"‚¬%Ø^ksž°ÜtRG쯠;2"ÃØN‰ é<ûL9O³<¬*o6N‘VD½ÝjlüÞXרTÿ£±þ{CSCC]SC=úOcJ}cc½”êìî§6€»ojÇìAÃ#ŧg³ õλê0ñ‹ùƒ =xÌ|"›K^kLu¼ºBÈûMm]OEÖ ÈŦ½Fº½™¼e†(Sø|úRò5ÀÚ ù]jHŸYÙúËÍ™z»&!Õ~býÙð?ª˜D(azá“Ki`’Ø@Ì'$˜UsÓÓªª¿'G½jÁæ0 56•üw¦üƒ h´9Œœw%y¡¢âft´m{W#¥eI÷†¿ÌÍçž *­Í0Šoªž0m&žë~4646Öýhhjj¬Ã„Pc=*‡ÈÒ 44’%S}óþFô׈ ?FÐѧ£G¡Cÿ2ÐÀþ0ÐÑ34ƒ†™Ðÿ‰ŒXˆ‰‘ÈÄ0b*Tº²´¼÷âqdw ͵ÒÊ€Ûj´¬½}å£rÚ£°€›Å®(°6~™¥RÏÊÈ€ýMï<‚¬-¯8ÈÞ;OxoÜ’Ù¥—Ñ͵_·‰Ž¸„M{gWàþ5±Gdé©"Þ'’óä ?là÷§oˆ@èõ샟^“sø|‹K¬ôHÙ÷øßãõÄ˰h…9[„«@X)Zì›LÎ ¥kW΃ª:°n«oÇN5±³WÓ3 ›¬8Ž¡þ}~ ±q2.˜˜˜P9ÄLd»ûÙI7Uô»ïb›ž²Zº26‰“q´¬½;eø‡ŽS–ÜIþËGÑ#UŒWà›[ÖXŸi½…摜ý«Ó_Ô¦÷”íìí„@$Dƿ쵫¤ø3¿–fcå×ÊÆ<<¿`•PWÿ½×³ÿ“ù ¸œT9g6&Ö#DĺÅÿ¼:Úv`Ä{wå 6ËBõ™ÞYA®²-µbvP YYl=Xc…ÆiÚ<ž¤$?–³@%üþ Ö+- äƒ@ -°sŠ|ÿŽ Þ‰êÓôŽ@·½ ¦Z%‡Ù,»<Aþí,Á1Âîɤå¢;ôRgEÔf&uÿf¾|©ëåìÓ1«¾~gcítm؆T2=}RüÙYÓ—s¥–édôµ˜&GWû’@ÜŸµS‚¥* civÅeÂÚr¶‹ßû¶ôÙsó *ïD犌—*!~b×Ù¯eFOGsç®Ýº+úÿ­ÖW”Κ=«çÒïqÉGúœç¸Þ3$Œì·EDÎÁlÕ¯5°Ú‚@þœWÅõãE;1-1V¿Òèõ·–Ë.ƒ %¶ü(~QRˆ’(žâI¤ï5Ÿ>ƒQ£jè˜â7Q-'7f&nÙ§%ÜñÕ†z?ûÅÅõ“d:•HìJszTùxÄîrhÊîx´ »s yØTÓíÜg˜ÆÊsذõ2‰ÈŸ¹GKŽ(mŠïdyITm.r[7Æî”[¼Elþ'u~¡®Vý­¬0F n®›¼¼¼æÛôk§Äe…ylDzA ‘*ù\ä{h|f‚¢,ÿmèÏ6«‘%—ØÊ»›9Tw8Ç}öÑæúijøÌt8FàäåÕu*ùHHšË¡á Öc½y¤ìô¯ ß7a½ˆúñ·n¼ÂÌ8+3¢8”¯£‡v¦¸)ÛÇWp¶KE¼sfKNIÀA_Üa;×õ2bbü½Ÿý§Oêº|(–^følO/L{°umÖ!x]×cý~<’ÿœŽh5µNË\[ËœF³äŒŽ¶úéLœÈ3€ŠÍÖ­[÷*ë™01쇷÷0&ræL-þžuDÞ“’ï{.Eì .¬yq‘HÂ=´Í·£;ä˜õûÞ–ªbËà±33w#µ4XÓA ­$_.É`>{§Jº>Q U-6d¡ÜÝ}ž)[Õ%…I¨P'ØìϨÇmp©æ†ùJ"&œÕç°aJJÖ¶!$Ìvy‘é3a£Î9ÝÀ­]7J§(hô‰Î×O^„¼‚òÀ*9Žöv?—;ê?Mw!ûhþþpK¨N›~ü°´è g@=(ù*ónãgÅñ†™–Ù6¿«×>HšÎÔš @@q3ï6ɢ̑ڸw©Aô\O!‚×ëÍOxÃí"ßéhcT”@OãQN—¦M¯t­4åè"’®0€@ŠS~Ò.¾`Å¢=éR¬~€†õ|j¯@ê+ªKOáçÕ+ÄÕ ¤ Áï­ c|O°ÿkPcÛUå˜•É '/ÚûÙWUú½å\þ.ùùÌr®ðŒæærsÝ„SîæåäT})ÿñã{ŸÜ =ã¡ÓÕ—öÚE{Pò±O¤;M$Ò Þ+MÄ>LjæÕì;òäB{ººº7n _+`&P‰ÈÀÀkšx=92e:³³Ãž=þ¨t#(ª$Uì©ëª'ßÀ,•õDˆï„@ 
ƒMÍU_uaåaânJpMÙÿêÓ0F‚EDbeaH‹²ÊŒÎSç—Gÿ\¼Š˜®­È`úzéèÏûòïê.™8uÑößúgñ•ÐOÇdµ)‡_˜¹§x'¥šÕ,Üeúé;Ä•ç_:ýüH×·1c†L_=óç¿8:öñr r²"°(BÉ׊Lɾ"§Û²ÖeÖÍÓ£èOËÚ]Ë:Øâⵎ@žÃ8•j•%Ïî¤F‡†6ÔWÅvØ¢‹ 9v¹¬«¯ ÍvgÁÉå^êñ†/Èà±ïtfÃKvèr2™(6¦koHW_Ùžû¨Å†àý0H…—ñÒÈ8.޼O‡°å·DdvRæxî\ Ά0ó³¥|-Jn‹€»§ù„®ì6““èÔ5 û*ûÓ¦Jà3ÖûŠÌ‡LššKa9„’¯-²s¬„ì·±"tÇZ+/l±ÙìÀy•‘öCó„n»‚(n^ð‰ÜÏÙBi ªðqå]Ü e²ß³mÃ2ô Q(d0¢©iq㦯îœN}VÑ I:ºÒ,°ÀÎùâj†Gеy‹búÈ?Ñ‘lÌH"‘˜™=uýÜ»¾·ï¾öaö•”##}ŒŒúleöììò °BÉGK#)#ýöåX’Ïö…äOÃÒ3|‰á$æÉØT’Jôl=c÷TJK¢„ûp–À×icJˆ…èPžI.ôE_T}<ì¿ÏmÏQJ§Äæ»YÞ*ð•C u=Ò‡ÏÌìcdoá(ÐEœûÄ¥K­û6û¬#yhê“Kçå²­Z¹ –@(ùZ¡GÏ|ƒзoTã¡\èÄö@ÊwE«¨½¯AµÜW$J;Ž,mšÅ^‡ËŸÌ^ ã–‚ û=@ ¿Ü2m%™é6ckî÷ò©D‹®>!èOƒ@HF[g¡ J>È ÃÀÐöØñ«VþegfíŠPþ¥ïgpëÌ\ämk;ª÷/ýìy¹”ôpX¡äkEtU,m&PÕ~MvPþèÁÌ}`¿™¾Ù¦ÏSYmùÉóÇ@]uóQ cŽÄÆâ««O$Ì­­ÃBBÐð0€ØÚ‚ò*/Ÿ£xLkkùw™!øZRö‡-àû† JfêèW}½Û…‘gOàï_æààÞ²o²pqYY O¯æ>RñïjXö äkß ¡ƒTæØÛÚ}©-ÝN)1ÆÀ. de‰‡a†s*—/M  (õôõ|?Ïhà…ÏÿË9óÈmÒwÂX4€‹=€­?Bîùäš›uÚIn™/3$$?dæýŸ¿¡|ßAɸq2§NÅ-_Þ{.9ÜgYó¯y?É>Ï(шˆÆÆ½wÅ'yìËLaÁƒ’¯ؤ»¶+±½„ÐŒ¶·´[† §;? À˜Î—5ß‹˜ï…o2tX¾|ƒÏ6Þ^¸Öç2.–‘ÜtôýhޱñÚ={·9oììWUr6Ô33Ù`©ƒ’ô1..;wz¹¹õ¬ÃÃêolÏ~¨ªjô·ì;oôعc››{Ï ¿úº‘÷ttŒayƒ’ô Üܶö¨æ÷éÏ‹¨Ø3ê§Ùw÷ðÙ½ÍeSOe¿²‚'3«FGg,iPòA þ¥ùìø÷_n:º¿¼rP|ü!a>•~íÙe“Ç¡C»V­â``öwSNMeààdÓÒ2„e J>Òï°·wø0º†ôBCøWD¾¾%Në] Àÿš5®Ož$””äèè0ÿ¥$Gøî{ïè°iøè J>Ò_QPÐCÿØïmºˆã}:GE}Ã?fãÆ•(û’’Zè/0p×¼ù¬ãþ(û7o88Ø6¬„… J>2X縹©±ÁÿÀ^u vYÙ_Âpöì—qãÆÌk2@³ogçú½©)0h’"ë”)ô¿ªæ^¼XÍÃÃ5gtË %X_8½Ã:l¾yڽˉ·óÔÕx¦© >¼Ó!À7oXRR>ÖT7/\ºTx gøˆv¶nhàþýkqqTUx¦N#ÐÓw:Xòž%%µ¬¼œdlddj*Ë”|d3uÚô‡P-0'/éUñ«²OUߪë¿ÿ@èFpp0ñ%>q’€€äÒÁ¨ä(*ÎChÕóž$½*,úø±²ºº¾éBO7Œƒˆf_LLfÜ8iS8CJ>2ø´@™I:2“†höQ-PJR ýÁ’%@ PòA %@ PòA %@ PòA %@ ½.ùº^9@ @ [ëƒ@ @ Ö@ @ ¨õA @ j}¡†——|¿ÁÖ­[áî5œœ<˜™ ð9÷rÑíÏ|ÿŽx{oƒoóWÙ¼ÙcøpÂ@¼ó¯ßý~ð·àîî1baejèTã>X(¡Öém¬­·Â‡Ð}† GŽÞŸp¯âŸsï?LXwÁ·ß¯@~À7ÞêUV_CᣯïÇ0|8|°}Ý$èP?í?‚Z@  f Ö@ @ Pëƒ@ @ Ôú @ µ>ô"µï3’îV|Eƒ,c§ÎQ“¤ƒÏ@ ¨õA ý‚êØÙb³rºQÊ&1vë øÀ ‘‚Ø-+,¼_wAÚ0èÔ¡5¼¿ëM/ÆE`eØ[ê¦À¬cÚ¨ÖG¨ÈJˆ}×À=MK‹ª€\²Òµ¾¦Wû7ì* ™° R-‰ÓÄÓ[‹j¤8»Ô9ÝR¨›ñw´Ø‡@¾éè¹)!ùã™é³¹4v–ÈÀcX‘¯h(®ŒÍÙÖom­ø)&À|¥olî[¹+¶;bH+d˜~ù„š7/p•O÷ú›h¹æ:•”¨oæV&W ¡Ö@ Á%û ý[ë{pôÍ^³Zü\ðõà0¶Þz* q ÝiæEn´ZëÛ¾÷Hóß°[̸[ZÃïvò D=¯x±rÓ|·íù2wu,V㧸ÍÿÎ 
Kk›Îª½Û—*vçŠfÞ);VªÂ5¤Skêx€Ø®[ÇÖ±µ­›GÍ^ë¥IZú«aB„y¹˜~­œ*|§pú½£¹¡/@^®w”&ø”ؼ3ÉP”¼“Ë3'w+7|#é—²¯üž“´ñ>4àzå™äݵf¾±­O“ßc!O#Ǿt[î–Úîò¢«öžõXªHmžå…N›éµöÖ‡§¹e³!ìq«èc†EžÕ$Qp< 2H = ¯í@ÑÍü3<R>é¢;ÞªK¶$6—9lNUê`æV €gž+7é~»6èëùD¹¡æœ5ˆ(r¬é[o8KsASEbðZ³gŽmT:¶QãBþ5´¢z’Ê üŠF Þ«5±Ê‹P“un“þúÀðÍjá›o*i‰즂tXûŽËO–|Ï]%ø]•—ît\¶X^\ˆHÓU@;Usìo–s§ûˆá¯Î8¿ûh·r¬¼»™CuuSxÃí"ßém:1RŽ­·° ,êø’z§2Ï,“kÛµJã–©Ì {ÕAüèâ@z–!^C$ôÝõh4ÒÐѱú:‚ÚR'¥1ow¯ |á)Æ¢-[dúæ’G³'ç´hͼ—¥[ÇóeÕ[ú‡Þ^wÀU>ÿ¥ï˜×ü¡phÚ†çN]rû}ÃhvNöªJÀÞà»Wù4.ÞQ£"„YnÑÁý¹Fb³ÐZÄi¥¿ÞMl°–‡´µö2²ú]ÒÞí룥Ÿq3=ãÖ&ЦÑV[[5²ÂVô‹åœ™•R{°°é¨æDfÊÞtÝE‡@ ¤od ‹.¥øSÅ×?ËÏ\{!=k™'Æç®•WÅdÜû’’Õ$R#‘¹EÀ!³×GíLapË@7Š«j 7Κ@%‚ú¡üóÿRlãTœÒžˆÏ–œ‹JÕè-k2Œï( üÆ\¿×ú¾gPˆl½ON]…ðí]R6U³:ê¤yÔ 8G•úèóâ{²ÈÙþ×ê49uq–êï²²šÏK Ö |›Þ—ìâ'ï`€FëcloLIÏ@»5–±U{©ƒ‹N6vž9&=Ò?¹ߎ^.Ͼè¥×\W¢èaËè—…µ»¨((DωÖІUDâ1·ÄÎ2ÌX£Äù¦Ÿ¾$f9]´]©­©§ rÒÑP ©íì@ õI­LÛÊŠßॠ4ÚDå’Ó5–kÞ¨»ó(¢”šâfrò<&§j“Tû @7VÃ-8Û-˜ÚWUñ¶ðÙ“¬»·oœ KÈNŒôBèn£ÏšLµ¿]Î!˜²¾Ñ¼mz*éF0´O?/Ò½Ãy] $+ݶ±Æ©Ò,U™ÇcïïZ_eÖ Z•ÏãN­—í47R±„_‘¬°àØñ|ì%OI€—¬LUÜ»N«}9V ~´PH¤'Œ3YpìX>>ÖüWµüÂj†÷<|q+•oêîŠ{.”‹ú À† B9­<ÞW5‹/­n­òÉ_y÷Ð` M2ÛE ñÊAãJþϺXª µ”×õ¦p·y™„eïñPMÃÏ.Öjð@Σ÷@¾ó«66P®ÞzsQÚJCþN•Ì1n¢2ú›½xƒV“EΖĆï"×¹¯˜{Qîû_+ç Ù÷K-t»ŠãÉ=¤Ò+ÃÏx/¥éÚo8iÎà–Ðɉ ô4 Á ’ªý½­ÊÌ2ŠvsÛt¦mxHXÃ|îtíéÚ:mùÛ{´d`heÅ·Ÿƒ°©Ï1GOÒž>;³Ážýo6UD†œoµƒôt—˺úzê ‡´GKw';y©;wð8ín³#a4* Ñ6ìÎňéɰFèš‘ ÆZ û’/Ž|zÔ¬•ùõÛ(ßP|„DÝTþgÃnrúËÁŽ{˜j¾eOÁrŠÙ7Nê®±&±fµ”sFìºé‹õAÚu슻O?:³rr«T¾çÜ:Rmž–†ú8¶¾3 #|Š9öº¡Ó¡H–±¢%%$$¹‰° õ.à \x%ðe‡„Ë.šÆ*/̧,^Â#ʽ£¿VÎñÎ’ R`ƒ==ö¡ÑÓsqŠŠ‹ˆŠNdûóOŒPqP‡sWô¾Ó£ÔÄœÜ×aÉ0’‡w”Ø?Ò"|½âD¾î>§éSVJÌÝ{™9o^|)#w33srˆÉÈ)¨¨Ï’áO¨÷dß/­ßP÷ò)Å(fWk•<»°‚ªòµ×â2ö*°S¤mõ}½¼+š8CbPmõwyO'aƒ¦¹Ì–ÙÓFïy•€þ¼ZöHoz–¹KŒÒÀ&ZF S/9I™ìk“`òÍ0ô·fóåç>†ºy?ôc'…¬ ¸ºõžÇ§÷<î*©Ò’ øy„Æ”–Õ´D$FµÇ8ÅÜ$Gª¢KæÅáoß“WD;Kò;‹¯±U—Yú,él`8eˆŸgSÆ…‰?«;è­s#ÞHïà”¦À)0V×ÎDµîÝÍЋͫg¨¿ºsãi|ôãÃÑžéàöfYþÍ@ÙÈeÞ4IðåAÔŽƒߟgîÏ|8®kŠº§ánN‰Ý‹+`žrhiOk¨©;eLslÓ‡ø‹?Ä}Jߎ5‡J”t-~lŸ ”î”ÉcÑeiiA^B2iÊò[i»ÈËýrÞXhg®”W66ÿçËÛ1›Oï‡Ú_ÏhF ~«5û+cÀgir1üf Õ±SÅf½òòB³óN¾Ïáž{»ñ*u¯äG÷ô$Ÿ?}Ýßåí'¥%-­GeÍeRòSXëÊÞä$_ò¦š_„¿<´”Ø;ÒmÐ|¼¿)û~ÆIÚZ 1ó#Ãan«7– ¼¸‘Fë<#Étùj_³ÙÊ:4j!¹Õ§´ÌÕD›¯áÅÕ#Û)·"üì éyržIÚ' ñ¡ÔÈÏó23Òî§&¦ÆGÞlå1%w÷Dú4Úµì$ú"ˆ/®,y–ù ãþ½ÔÔøøY­¼¦ìY 
çžšå­‚¦^÷­uÉiçÙ.7ñr'…L\¨µÏ•}›ÖËÿYÞ*ÊŠß¶R‹*€zë%+ê„ M•q¼ÙN_µ íN‡ŒuŠEœ5)ׯÇ]ôüê¹,€T)#W³•ó”ÄÈqZ´D…%]g7t˜8×4ï’oÒÇÌ×.$'å=¼›BdaÑ3s™5×|¾š$ÍÌàQÖ—k><¹~úLÚ£¸k•…DPÃiì°Smîl=-Þ~4€Ö¹•,¡"ÄŒÓ3¼¹j¦'(ë¢ÚêhSEQNn^ÑëêÆz€0ðŠJÉHËv60H*/È}œÿöK5¨#Ç IˆKO¤^³êëò/ûsÑ0Û×’¢¯ p¦ -•v/=³¼ž‰ž¡g#"6IZœ¿ÍGXõáY9 ­º8Ex>äŧæ7 Ò3IÓ,<‡ÆÉÉôñ÷Ðt&Ê*Hëý²Èص—‘µh½óõUFrtròý§/_¿{ñö V“0OYêb!«ÎË-ˆ¼Á›^sQ‰¢Ö|ô·¬9f¦A:|˜šPæ­ÂFišê€GÔÔ€ÛT#ä°Xófñ5;e×ôNn•cö·”–s7(ØéÓœ‹’°ËÊ/˜¯×Hi­²c8æ-šï–Ò2ðè KSÚEØI¡fCÕ¼;ÛbïŸÅ,®jŠþ~^ÎØN®ËÄÑòÆVèïç×ä•\àä³`À>gŽEÿnðLÀ„\^ÊÃ*U¼“4ÓpGKyžª5§¦ðfNqsß§KÂe‡fÃŒaS$B©Ý‚r2l•99-}2fÇŠ÷è²^X#âÙüõäž3S=G®dÉ~–ß_ž;Å(‡šÀTuQð-1-›šÂŽk•+ØpõÂqÏv_¡íå9ÒÜÜ :ÉLlzK?½´†æè¯‰ñÔtt/äD«õ¿þ::VaU};Uý^-缊Ë|—Á^oõaI¯ +^õtïxmgtËIkÎ8|õ—_úÐ:n¡v«Ì“òB•t©âÏIŸÜ6’ÛŸ[,ëOïaÒá46.J·,+;6G¨8iÆI3YHTFš-'7‹ºÝÆ#EóÒ)Í‘… ©OÜxyRº3ÝkXÑNQ²r-6›qg+²Ž¯Ð'¿>ÇËe•[UpYÇÍÚ*¸è¨éz¶UšB¢B °¸›70´8™äå®%ؾúÅ” š·ß…tûút¯¸6õÓ‚2rl_³h^÷êÔÒÃ"èBº­öõ:â´µ³w@óK²n´†Æ/)½xCÈâŽZ‘"*Vn*-m»¯_š{çšjгŒ­öt§áµ¾ž`TCú^;Š«D°q2a#S3VàÄz¬ÊßÞm3ÜÇ·P}"ÖÊÃUuÀÊŽZã*372ÿ‡‹ =Jz—~#³Õeì[à].JKVˆ¸ÆÑ¤yd"áHËÖø®îUv]lôi=êÝRΕ7·–'=‹‹L¢ 0âëO¨m¿Ÿ<{ø’kBvýÏí5™$mônEŒ&¿ŠŒÌ‚µ.äïÀLס*BEjÈZ“­”EêÍÈR¤!Âa¼ý%L“²ˆ‰Ý9‹öŒŒ Ã÷Ò}´fÕ%Ü"[ä“òbq•oÆâ3&´²!jë¼…£ÁÝËEŽØ¸hý3áh UÉ!å uPñÓ9õ)KȽ˜a9w´h-uif"ÓÐøîóØR—´ñZ©¾?ÿ|‹Øk|"8Õ†”?žùp6_«–±‘Øô4pÃTfú…œ;jpŠ ¤/ J®Ù7×™¼0LRz~•š2Ó/}híù…2?ycIÉÆ¬ ™d€v«áïað’´˺”Ñ߀u_K‹§&æP並ÃÍ.-ÏêÙy;\å°ˆI§}†ÍS˜"×Y™Ì¢èù‰[eÌŽbzÛŒ-gþU¤Æ%Q:NÍRKOŠÐ¾h¼:u™b†wŽ­¾õò°ô¯Nv"Ã?I¼¥7-¡q…DK¥úìÂÒŽg±0™ŒjC÷Ce™yþÞIõi¨T 2 ¢ö¨h.ÓQSWRP“‘k5[áèTºÕen¦¨|¢Çs^Φ=o/[*ÙCŠ«w/mkbÓNºy,ñèèã…@¨Ö×Ðð¡õŽºæN6­Ý¹ˆk¦Ë£=7(ÖµY)Y)íR˜´ìJx€Tó§À57A²Ã6/ómVá2#Ã2Û_ÙÌû¿ƒh‹’º-°¸Qßo•WUaê+_RßH³6tª[¸iy«—BüÓÌ›‹ Ô ›ä\±4³=†·2.dF«ñý…Ü)9gЪ|X.o·ô;‹uáe]Í©Z7”¿T:¡åi%ËÑ@Õ»Ìä„ÿâ®GÄgç&žF´Ñ4×]=á<¿«Y—Œò%Håﲞ>¼xöã»òÏ¿‘‡«zó¼óÓÚJ7HÂ*¹>'keBÇÊ=ø»„û‡ÖǪƒ*ie“÷‰.öùõTeÍ÷šïýõóÆ8F¼vìô¨DW·J>Ý68Õ6ø×.)4g3‚lîôp2â¿KÈ¡q¥ðŽM£íëƒíâó=ÈAù€”tl½oj¹Á2^¤‘û=ÂnHiWÞE „ÂâbJßݘEi%‹Ësÿ;wô\ôÁ´ÿôšèò¾·–Hw*ÂèšW Ÿ¡*Ýþè8éÉ`Z‘µ­Kå6ÛD.´Fv£óñ€×ƨv)‰ ‚ׯ ‰¸f†@z…aE1§)†”üìÌ¿ú¡µç/”ù?¾‡A\g^ˆ¹Ó¡Û Ü>3è_ɨ[—Ra«E?‹¶œauŒZãé™Ë óðŽdeɈ¶­ïJ5Ì%É×M+Ê$W³$êÆ¨û/ÔæOøóì•’g÷µî•`iîNÅ «I¼ù9V~îrô·-€f'é}œ—Ñ̰7 ñ€à]ùw6u*Þêr=4d¨Ó¤•Œ'K‹òŒbÉÂJÿv¾³C+øø{WËgææe"y…^Û ‰ÆIgþˬ “g¯P 
öòé›h´>Ö)[Jž+‰ÍJ™öjtWhmˆÌ ”ÆŸüñÈÖfc]T›\ÒÆvèF4F;LÑ™N³ØËÉ3¨‡4cÕÌÜ”eTdúZùYm’ÍŠ¹ÚÍü17·XÚªÁ é{¾gÚó+àÎÿ¤WǬaÃ~çCûËežø§÷0ˆé¬å-7Ûgå½IyRŒ¹«“Ü(*_»µ[‡äÚFk94‚‹”G=,ÝÚú7d$WÑq²9Ä…h>ïåWòO*±×œ³fÙÂÿ ÷\—úh¿H‹Š^ÝiAñÖÛ᫯o§¢<ͤè'Ÿ†îŠŸM§fÞ ‹â½RxQ©£ñmâŸ{wßUÁæÓ>/xÓh_:­tË8´š2ýÁ;»ÍòNoyù];¿²_mÇdE?(mt"F+ à\ ¨õA9µ¥ÉnžØ•£Œéo´zÿðtL“y—Ÿý–c 5ñßùÍ23²ôãAÃÑ»ÒÁííJü=sn“}ÿwr·ƒìÙüΨt ùæÚ•På1Ýæ(6·dœMjÆV´YcÏ+·e]/—¬UniŹø¤À¬™ä'ð“U>d$ÞâI¿šPå@q3q‘ßÙÀ· ïäìŒW/x´̤Ílöy°n÷Å®'±Œ›ë±^*È/KG¹êØõ •T‘ðõù)“éä 1¢ò_ö¡ÛkX€;ÕÕ4Vÿ5_?>»wóäέ©¾È<9hB¶ ü¥ío–ù¤øÇUJêlå†È«D››¤Æò¹×‚W{ž¦,Êdä²°u»óÒ½üZiiŠ¢P»EÕ›úÌ“ŸÖ*MeÃ¥·žÝ¹ˆD¸B@BŠ1îÍ}Ú;çÚ$Ï Q;þòŠ–TÅÄÅÙÈ«#óÆk;€Tù˜~¾‡<è7^e>ä»Í–¹÷S|N*"œ§Û‡åt–½7'æ/`¸yÁc6]³‚ºsÞ |Í!eo´ÀäÑFñáaé6QؤCQ‚æ¿aÛìMEh§ñjòní·²Ø‚Ñ‹ô)¼#é(ÓBÁíØ4ÒÊÉ”*».××ZÆ/¾Y3¼™JrQívmNóñA5OØ{ao4&/0àþ»P‘•û®{š–÷à],j}È/T Çì§¡:’’sÆÀh476|nnÁtÜu‹ŒZ{Q$ûlÏÄÚÉYàÞ’å}£b–-Ž¿dîµR0YÒ.öû“üS(Æ0F Þ«5±Î/BMÖ¹MúëÃ7«…o¾©¤%"¸ŸëîéN÷§Š8#É™ihem“»uFcc=æPÛa¼ îÙ…+þjä=I“v¯™šl* f…¿>±6œa†@ u8w‘W=3Ú‘àm¦‰[Ô–?¿ìµÌ("1Ø\9xÕé’íÚ´—Vο¤¹îb–ÝB^"6óœ»Þ†°Ç¹çVÉ<ûJYÉPsÒŒ…²Š¦Äºëá;äpJu­fïJ¸a?k”ýDòÄï’ïæU45Ârã´X‘F:úA)ò Ö7d©2žá—5œ Wf•©¦œRêî!¾gîln1Ó®@EÇ)ïY/Ö¼´TseñÉwÞäÕ¸PxGÚ0F˜å,ÑŸKöVœVúëuèeëWO'ÒáQ˜±Ú€Übå·á-æV«ô`‹¼kl‹DôüT Ñzä–ÙtýÊÐc 0é·4Qœ¸4ÀGé7Œ Yëc¢g¡N¢ä[Žt|†è‘ô‚yÍ*̈1‹÷Ä~~9)À×Qcoo¹PVÑ4O9DÓkÉ(¾6¬X_6ó™§ëñùGÉŽvˆÍîÄYFÐì¿ÁÉi}•O/Û›…ýt)q!½£§‚,ÕaÉîW4|z\ô ÊDãP!д¾¦o_ð€˜ñÔ¶sIèÅååm£¹îé£<<”d*ÚùztÏcrª6u`ÿ‡§“yqdø«ñwn"u/)(«ÂR7U°C ~ +yX>H?á°8Yøy¡¨_:ߨNcÇ*=ÃÆÊf½±ºX»sÆŽfk£¿Ð1P-ùoÙ ÊŠˆFËfµWá”MÍÁQg4ðùëÐêùM­/ûÐ9ÛsÝ]m¥m€Õ‰!ãa ‡tE#¡T},'¶Ö3,¾µöCÝØ@‰ÜÞev·®õg§ãi4»ÆÖÛþäèªà „`@ºËp§Ëˆnª(z–™q;.âìž´ÛÁöè³CÅ{ ~mè¨PDÛ£ì" Üv‚Þ‡WÏ(¡z¨õýìI^´6 yßjß4¯¼8ɶ#E7v¯ÔsmYnëèŠ óngùNo›díûŒÛ©÷îg–Ö’;¬™8Dd䕦«M•fïàJ²¢ï¿¡§Çôù††Á)ú²cF€ÊgΞ‰NJ"¡çsMÓ65_¢ÉCUð+Ÿ]ˆ¼ó ÿ]y-zTRIËÄdä¨6Ù'e'Ü~]O ÷44Ћéiÿƒ&ð<õüوļwX>—ˆªÉ23-©–ô<åJäõ;ù¥ïÑër‘Ò4Zº@}B§CÃírÊÉ'"%«®¦©>qTûwÑñýT§Gœ»œœ_„^‘ÈÄ%¡¤5¯ƒ¼´$ò$õÚµ¨´üÒ €0ñ‰INÖšg¨ˆ>ÖºþY$GJL•iyغÃç’ÜÝÕi¾Õ÷üvPÖ8fćßY¦/Öi×ÑХݧµY|Ï=¸)t¤Ú<- õql½“ß=Tõ…–3ZޝݽÅ3wyÛ–wÑõ'òÙgÎÖS–‚Ó¢†°C Ò=ÊŸ%ÝN¿û åõ  Ù#ÀImô·ØÎ h6zJ"ák¬7ÜÏû+ÉÊÍ4;0Ñv{û¡‚5Šâ´=°ÃŠ‚)kl‚Ùºò­Ï«® 5¶A;ýí—3–}@¯Ê·ï^íú©ŒÅåÐÝtÙ˜»l„Ì“9ËŒ 
ÌVŸ&5‘Æ]xmî:ÿìŸ^S40-Ý–FSÏ:½ÉÐ5ŽzÔ#ôý:K»äÖçD†‡l°àŸÝï“åsÏltæ)I#¾âŽU¯¬{°Y[ÿ5Ò,ÏÀ„X»Ý÷Úœ²×ýggZ‰jŒ‘ºWZÛ£˜·&5÷Ôdo•Ÿ¨Êí¶»öìàÜ–¦U›û1Ü}ቻéºv‹V‡‡¸‘½>íL+s¥íϨJ3fŸ9àŠ$2Ê)ülää%¯1Å›t"ÏÙó ç $†9\8wñÉS7ÇÔ( 1ƒ¥»4FiënÍõº„ææõ;s …•$E*°û?ÓšÇE];¼çæËl±…Yõ{·úg0œÆJÌÊYñ®öÖ×Ý>t½ÝB +³ÕìåG‡iÕè!IÕúŸe`‚ ‚àa• …ùŸ¼]í½¼ü££œB3 EÅÛõ^±yÅðÑãzHpÚ;Ñ^“þ^§+PWZ¦È/¡%­Wû§k^â ×»WÝ|ï]=ÿφ…*‹6ï5šh(ýãú®ð V?wÈörVƒÙrªÝEkãì¾å¶±õþðuf× œ5ïÝi-ß ¬Êÿwu›Xv±pÁ<Ý9F+úÔèÍÓ“¶jŠÿ£©¤¤~<ž¡o†=;þ¤ì×ÁW¶Ñ9Ƚú苬EòÌÁ¥öé§"¡ÿŸò½”ºôÿûZumËå²tö=u^¥È|”½S8dr[sî'“z'soÎbÍí}XØo™ÂX‹œ•.·_¬Se÷‰,º•;///#7W ///W@´M‹ÿgaéunw]…‡s ctPxªÀ¨ÿ?},z==Wþ÷~¬”áö¨Â™Ë:ÊXÄügbý"¾Œh!þÏ£ÅåÆ?X>®KhÐfü×aúÛ›õÙT³¦HÛ‹Í›»fÀüÅJܾu÷q«öŽûÉo?­Ì7©øî̯Ò+m J\Õ öômÚÓ© '(Aïv¤¥úHõ‘?± Ê&Êå\U·uQÓñïç—кϴ½}¦Õò0—w‡ô?|}p/r«fÿò$ãã{î‡TýöÌR.öÀPxÀ¢¥=œ¹|TiçÒØ¾š{ÊI¥þʃ¹>YžëàMÅ‹¼\¯ÔËÔ]g¬ÈõµÍÍæ!ÿ]Ï®µÜ]$ZãÀ/Jku£Ý/7Yâüög¡I+c®…ÙÀ—HÍj±Î­Ø–ߢbÞs/r©ÿ„¡÷,Óž›ÌC¨¦!‚ ‚ ˆÚn®Ë¹}3­ ªO}9ßí´ÙÒ¿g™‡ñ³\}9MqÝ%ÿÿ+—·—s…y{¼šýã¦?.“™ºT_®CëÆš7þæ5m¹-¯×Ã3Â~ºIŽ@ å‚ ‚ ‚ ×WŒ¾kOÏ['góß‚«ÊuÞyï9¢M‰›gÚNë:÷ü?óSYlø×𸧼ÑlÕ⇭ž¬½ºt‘üû²ŸXî å^0hXáÌ‹›°|=7xs ôsÕó‡Íù3 rýz?ާºÞâÞ*Õ÷¥\4¡üOAAäúJ «uþó,!…óÿ-y¨ÛöŸgwÒêÓæL¢ØURà[ü3Ï[¶‡нÏ8Àý3ffÓù/Ï{v™jÇYaÒ£ï~땃·’æ¸M^D;.rݪٸ&†¸qó¶ÿ½orö‚ß’þ}[ ä%™é÷2/2ºè— Ô5ƒ›Vòœ"óv6¹6ó¿v†õÂÖ†úîff×ÈtÞ3™{F ‚ ‚ ‚ Èõý3,çÉØÛFR£¸'xë}n½÷¹ÒvÚå³näs,ÊN¹\8d²®Ä(Îx-Vi]XUâ®2GŸ-R­èS°&¿7Â"S;|žk8Í÷ÇU[çÚ@÷è1iã¿9zH³,´©[ÒÞ!Uø¬u;ÏÈÏí¶à¿E/öÈ‹î)}†etp$‚ ‚ âs}¬¯è0ò;%C^JˆŸ§§»§ÿ›è‰ÿ®mݽk¾êؽMƒ’ÑzäæŸ_ùºÞqqó ‹þø¯iÕ]U}ÀÈÑ#4ä%Šï'ÚBóà΃ìQ³³%IÿxOí'Ø9DðßÕõ$‰r¯VXtâÀ¯ÿv,ÌþÞFæ¿Õõ{ÍÝ¿kì_Â%¬ú‡vê“wíÒfVggÕ×úqàØ.£—ü®SÂU‰ *,ÌK>ksöƃGßJ|HhÝ]{âŒ9FÿÎ#¿hÆò+‡] JøÇ&еíØUe¼ž[öõ½¤lu®K‘›_X8?/)è¬í¹.¿ ²Õkdd #ÕL ìÖQÇ·yÿ<«q«ªÌg66[«aî·±±‘””9rd-(ÉÕ3Â,þþþÎÎÎfffBBÕwî›zõj^œcccOŸ>=vìXEEEjÌøÎ_UÓ’åää4dÈuuõj·šH‘j‘âû÷ïGÃ4iҤߟ”‚µ³hÿâ$¶µµMIIY½zu­/‰¿½ìܾ}ûéÓ§Ë–-kذ!5a|v}\þ¯YOõ±ø·¼¢h!«:ÿxÞ¿®Ô€åë”~O§®X_úÞÍFÌY1¢äU"SWj”¾§¤úôu¥7²M»\Þ­ôk–Tž³ÿJkm:NXwdB±;)ûz~zIÿœ·¹ÊœµøWªîz&ÝI&Û¼ys5Ìú«V­JHHÈÉÉÑÐÐÐÒҪџzF˜%--­iÓ¦mÚ´E9wîÅ™ þ#ÊnܸaggW¯=¬çx«aÉúòåË–-ÿ4$îîîgΜAÉ¢”âõêþþÍÌÌ|õêÕúõëëÖ­K‰RÓ«ñAƒ½ÿ¾.Ò”[uèëë '''ïÛ·²z•¹>‚¨iäææZXXàCzz:jŠÏŸ?WçÇP5šÎ;ão||üíÛ·Q#÷êÕ‹bÂÚ·oÏùܲeËÔÔTŠI­§   yóæœ¯mÛ¶ÍÈÈ¡ÈÔ °|øå:lذPLj4¦¦¦žžžÿÌ ^R»]ßïEWW×ÙÙ™ 
µ““„\Aü‡²²r~~>>äåå¡}3f < ……ïŒ9Žšýüýû÷éÓ§‡……QX*ϱcÇâââ8_ÓÒÒ† âîîN‘©Ý4n\tH3111¿:uêPpj:ƒŽg?gggÃ-$%%q›|¢fqãÆ Î'èÔÔÔãÇ/X°€"Ãw´´´8?‘äää@u }l×®E†\A\¹r…Û{$&&>}úÔÆÆfÞ¼y>âìì|çÎngÒ´iÓ¬\¹’‚S¾~ýºhÑ¢" ïß¿oaa±téRŠOmEFF†}Ä $N‹-’““)>5š 6xxxp/©_¿¾žžž§&òüùóqãÆq/©[·®™™¹>¾Ó¯_?___î% 4X²d \7‡\AÌž=»È’øøøåË—;ú‰âÃòóó‹´yÌè#›6m266F¥L!ª0;v,qùêÕ«555{öìI!ª}Œ3æíÛ·%®JIIA–ˆŽŽ¦(ÕPvíÚUÜÏ„‡‡wëÖBT³øüùsñ¶òòò¾|ùròäÉâ „¨0ÊÊÊ!!!ÅC}ÿþ} ¹>‚1bDFFFñåuëÖ2dȳgÏ(D|¡K—.P-Å—‹‰‰M›6íêÕ«¢ «H‡W¡©SWW‡à Ž^µŒ;wÞºu«Œ bbbTUU‹üàMÔ"""ôõõK[;uêÔ   ŠR ";;[RR²dÁ-$´fÍr}ü¢{÷îáááÅ—çççgee¹»»CÔQ”Èõ.QQQ...%®JKKKMM555577§@UccãÒžK¼ÿÞÓÓÓËËKSS“U^\]]ËVÿ™™™={ö|ùò%ŪÖíbfföÓÍüüüÆŽK/5Õ,ÒÓÓËx”———IâµQXX(..ÎŽP¢ÉÈÈ8vìšHŠU%‘‘‘)Mf€:uêlݺ• ¹>â¦ÿþe¬}÷î¡¡aïÞ½)V2ÅÊʪŒ ²³³gÍšõæÍŠU¹€b3fÌO7‹ŽŽ622º|ù2E¬ðáÇ¡C‡ò¸ñíÛ·¡&¡))n5Å!´nݺìm233/^A᪴hÑ¢xçÛ"ÍÛÍbUÚµk÷þýû26ÈÉÉñóó+(( ‘®Èõ(«V­úøñcÙÛäååéêêb3v&4¢¨ªª CДø†'øö훸¸øŽ;xy‚Ap(í¥ÙâRÒÅÅeÏž=k×®¥ Õt:uêT®ß¬¬¬¤¥¥Q×Qèª?Ý»wÏÊʪ[·.Ú26KHH8wîÜ´iÓ(bÕœqãÆ}ùò¥~ýúß¿/#Maü:´|ùrŠXÅðòòâLÈ6y(G%nicc³páBй>âO:XOO¯* üˆˆ011 æ,¡Yû*÷ÄqhùPöéÓgòäÉÙ X‚¿ÊÊÊ«ryàÀ>ˆˆˆˆŠŠ>þFïÞ½µ´´{¡ûUTTÔÕÕi~çÚÛ 944téÒ¥HâÌÌÌ‚‚VS 7jÔ¨Y³fíÚµÛ´i½5]³(Ò% É:tèP¤5Ì x||üÇ“““á" @áªþ888Y2a„èëë#AcccQu'%%¡ðÊÊÊR¸* *:8=ÎW??¿áÇKHHœ8qâîÝ»AAAh+?þœ’’B¯‘ë#þà¼.$¤¡¡Á½¤Aƒp 4ngÕÁþá+ÒµkW H%A Ѱq¾Nœ8QLLìÛ·o999ôx§#//ÏIçåå5mÚ´ÄÄĦM›Ö«WO€y¾wåÊ%%%ŠRM'22òùóç(ÎG¥ßkÁÁÁ°|ÐÇŽ£·‡ªˆ°°°aÆÁzxxÀø©««SLÈõDÉ´k×îË—/ôæwU ÙJqà;¯_¿VSS{ñâÅãdz³³………)&µ›-[¶Œ1ÂÇÇ'..®{÷î""")))†††È œšÎÒ¥K‡ òéÓ'²|µƒÙ³g÷ëׯiÓ¦dùª† ÚÚÚÂòQ@ÈõÄOhÑ¢ELLŒŒŒ …¢ŠÈÌÌüúõk¹º'¼Ûé+VÜ¿_]]ýäɓŧn'jÏž= WRRRUU…ëëÓ§ÏË—/±p̘1óæÍ³±±¡Õ\‚àÉõ½}û–\_Õáïïß¾}{ŠßùðáC~~~ÿþý?}ú´wïÞuëÖ‘ë«ÝLž<ÙÒÒrÛ¶m;vì¸víÚðáÃO:…tòäÉ‹/Ξ=;}útŠRM¤  ÀÐÐÐÑÑqèСNNNšN`` \ßÖ­[ííí{õêEá;ƒ êܹsaaáÊ•+iþCr}Á+0$/_¾ä}lt¢®ONNŽâÀw¼¼¼ºté"%%•••5pàÀœœœû÷ï<˜"S+ˆlӦ̀¢¢¢F%((رcÇFijjž8qÂÖÖvêÔ©222Ô­¥&2~üø¹sç:;;=šÆaªéäææBQøøø >üêÕ«þÀþ8bhhxæÌmmmŠ ¹>‚à(§èèhŠCÕB?vV~~~ì8¨­[·ŽŒŒ\±bÅÖ­[ÉõÕJÞ¾}»sçÎ>X[[4H€™ƒøû÷ï#FŒ¸råʹs猌ŒbccÛ·oöìYhMŠX bÇŽ)))»wïFA¾wï¤F“ŸŸ/%%uùòå7n(((¨ªªRLø[MMMÔr®®®¨úBCC%%%),äú¢tëÖíÎ;‡ªÃßßñâžãååÅÎÈ'##¸páB df]]] Nm¢°°P]]ÝÉÉIDDäôéÓ°ìò‚‚”¬þýû=ztâĉS§N}ÿþ½´´ô¡C‡ôõõ)n5{{{õׯ_ïÛ·¯gÏžð “]TQ---{õêehh˜@1á7n<þ|@@Àš5kð999™bB® 
Ê’’´2Å¡ŠHMMMLLTSS£Pð(þÐÐБ#G 0s=?þöïß¿jÕ*r}µŒ!C†ÀÒkjj"Å“’’F……‚‚‚È]»v•’’ºvíÚTTTΜ9óîÝ;(Îàà`Ž9$ª-&&&±±±ø ×çééI1©¹dddÀò:u 5°––ÖöíÛ5jDa©<ëׯ‡Hó÷÷Ÿ6mZ—.]®\¹B]rÈõDQUUE£K“7TW¯^íÛ·/Åï\¿~]^^žª¡wïÞGŘÀsçέY³fïÞ½¢ÚÁèÑ£7nÜ(ÀLÛ0þ|vù_ýõýûw|X¾|9’ÛÀÀ€6 &000ÐÔÔ5›¯¯/°Úâàà°téÒ„„!!!ccc==½nݺQXj(ááájjjOŸ>E\»vmãÆ—-[Fa©$Û¶m;vìØýû÷wîÜ9hР¦M›Ò#>r}Q¹ $©ôäÉ¡*¸}û6u¶®"ÉÈy¦7`ÀÎè—/_nÙ²åÌ™3»víJQªéL›6M\\üàÁƒøœ””äêêjggÇq}ùùùø0iÒ¤ 6|hР»»û¥K—Þ½{Ga©0™™™úúú ð{›6mZ·nÚµ€€€-ZPpÈõDeéÙ³çÇÉõU<°¶¶¦8ð({ös›6mDDDBCCååå!"ïÞ½‹Ìœ˜˜Hs=×h4víÚÅ~]¼x±‰‰ 'Máú ØÏ›7o^µj•ŸŸl^ll¬”””››öMNN>|xÆ ¯_¿Nñ¬>Œ;fý:tèÐ+W®ÐË&5”Aeeeö5ݰ°0ƒ¸¸8ŠLÅ€gž8qâœ9s\\\ðuïÞ½€ŠØ½{7‡\AðMMMgggŠß±··—••mݺ5…‚¿@#vêÔ‰{Ä^½z¡™„ëc?›™™ 0ÀÇLJbUC¥d·nÝV®\9oÞ}úó?bÄÔljjj_¾|±´´422zò䉴´4ù·p˜!,,Œã víÚ½{ñâE NÍ"99YIIiáÂ…œb"##UUUaùh.ÞA½„†, Õ×ãǘw;544êÖ­kooîÜ9 ¹>‚à?PW¢¢¢þþþ}úô¡hð‹ÔÔÔGݾ}›BÁ_Þ½{càááÁ½pâĉëÖ­ã^baa1cÆ ´©'Ož¤ Õˆ®®®)))0oœ…û÷ïoÒ¤ R³ˆëËËËã|íÙ³',’ûÔ©SìøFccã!C†¸»»ãë"†;wš™™! µk׎¢ýËøúõ+LR'&&†³ÐÁÁe3**ŠâS³7n\Ë–-‘p0'ì???]]ÝØØX”SŠÏOyóæÍܹs£££­­­oÞ¼É.trrš>}:ª,È ¹>‚¨Z´´´®^½J®lÙ²ÅÈȈº–ñõë×sräмysiié[·néééqž9s[Ž3†Ó²Õ–¸¸887›"=X .·oßž˜˜Xdû"®XZZvéÒ^ª”]rìØ±ààà¦M›>|øPQQK60¬\¹òÊ•+?æ~I˜¨"ÆŽûùóçW¯^q÷Ü»sçÎâÅ‹ÙÎ`DMaòäÉÏž=óññávw;wî´··GsÿRCçôéÓfff¨£Ž?þàÁva||üèÑ£¿ÿ~ãÆ Ÿ“\Aü"àO–,Y²oß> _(,,DÏ®€àIII°p)))ÅWAë_ºt‰Ûõ 0ï‚BåkhhШՙ)S¦4kÖìãÇŇô4hŒ\ƒ ~êú€———¬¬¬––œ»DYY¹ERRÒÊÊ þŸ]x€Õ¦­­-Ç%üÅÜÜüСCÞÞÞ222ÜËçÍ›ËG?ŠÕòóóÛ·oŸ‘‘‘ **ʽjðàÁ:u‚¤(•H``àš5k^¼xfÈÄÄdæÌ™œ†lÆŒˆÛÙ³gƒ‚‚(Päúâ—2tèÐÔÔÔçÏŸ+((P4*©©éèÑ£[µjE¡à/sçÎ]¹r%çÍ"nЦBšÀoùÉyçÎnnnÍ›7‡ £V+Ž9‚ò÷÷ïСCñµêêê‹/†²,¡å*îúPânܸ!''÷þý{ŽD~øðáCxx8RÿôéÓœßV3DFFvìØ±oß¾çÏŸ/1_iŠ”}øð!jÂ"«à´íííããã)JÕŸääd”Aø:Xô"¥ƒíËyŠNp@m¶yóf??¿Y³fmÚ´éþýûœU .D0mmm©÷¹>‚øèëë󯯡³]•!##ãøñã$køÚQ4¨¥ Ð"..®ªªjmm½`Á‚â?j@ÜwëÖíØ±cô`§špêÔ©Ù³gGGG/Y²¤Ä &MšÔ£GÎ=E¨W¯^q×'À¼¬É¥¢¢òôéSîåH}HX ‹–-[úøøtîÜ™]ÞµkW\CaaáÆQû™››s%Ê Âh``àââòñãÇ 6”ØÊÔ¯_?$$„bUͱ´´433Û¾}{xxxñµÃ‡¯S§Njj*ŠÍö¨Í>üåËccã+V°ó.pÖ¢ÚÙ±cš'DÕÓÓ“"F® ~?kÖ¬‘——'×WyŒŒŒ U7nL¡à/'Nâh­çöíÛ&&&rrrçÏŸÿøñcÙÃê§¥¥ÅÄÄüª¶9rdïÞ½(eÖÖÖeÿFéìì˃€†Ú®A}G}||6nÜøôéÓ9sæàCpp0ï?Ð@R+((?~›ÊÅo!>>~ß¾}.\’’Z²dÉÔ©SÇÏËŽ°ëÈíhÅPñÖ¬»NOOwvvvss{üøñ»wïÐjhhh >\KK«lÖ——çää„ ^WTTT[[{Ò¤Iššš}úô¡¼D® þ 
>|(##3`À€æÍ›S4ÊfÔ¨QmÛ¶%ËÇ_<== ¢¢¢*<¯ú“'OÚµk§¦¦†¿¼ïåúùóçÜÜ\h…ØÚÚŽ1‚’£^^^fffPŠË—/_½z5ʲG…¡òööæLW.DDD233Ë»< ÌD!´Ýþýûjì±ÁFÖÃÞlÞ¼¹Œ¹›6mÊý¶Xrr2L „ø§OŸ`á¦pöê“”¾ÈäH ˜Uvª1ráúõëóçχ¦ Q~=ph–––È]HGTY+W®<ÌÀãî¾¾¾ÈÌ»îÕ|2Ü êdTì¸TTÑ ªªª4hÐD†2v/((@›róæM¼¸¸8eeeì2Ž2¹>‚ø£ÙCý¨¤¤KñK ¬¬ì–-[h`wþbbbòüù󤤤ʤqãÆP ÈÃhãáʵo½zõØ·C?|øUñîÝ;kkë²'¨Ý@3;wjÖeñâÅð0~>š©©)¤*Rç§Ã”¨¨hÙÓ?”†°°p@@Àׯ_{öì‰û æñ1cÿþýYe\XXhcc³cÇŽ&Mš¬[·nÒ¤Ieì%&&V¤Ë››Û©S§îÞ½+..®«« ±þ‹_*¾uë–••Ôó€Ö¯_¿¡¼aŸ!t8Z%‹*Á;È<§OŸvuumݺ5rüöv†rÙ§½{÷ZXXhhh;v,44´úÜÝ›7o=zäççY¿~ýnݺÁ¡¡è <˜û‰z ºvrrrww÷÷÷ÏÊÊBáBÕ­¯¯?˜²A® J`РAhPç¾|ù’¢Q—iÓ¦ÁœHHHP4øÅçÏŸ{ôèqèС²çjãyyyggç:@üt€Aâzzz 0]DÆÿàÁƒU ÂO!pe°y숚k×®ÉP™æçç£È@Òyxx˜››WòòàꑬÞ½Q£FOŸ>Å%7.**ŠÍ'<î+((8ýjgg·gÏžÄÄÄéӧ÷jÕªì݇2p/A¾ºxñ"B‘‘¡®®>zôhˆT~+›––væÌ™K—.¡&×ÒÒ211Ñc¨ØÑà{gÏž™™‰ ¦)jªú§–{÷î]¹r‘åtttfÍšU<óðȹsçvîÜYXXˆ¼jÆðëï(==Å? E/<<<::ŰK—.¨óQÉÀ…vbफ़Ap|}}Q“ šÂÑrrrQv´µµa1P"ÈõD9˜f6¤$µ¤¤dDDDëÖ­+|qqq{{{öó©S§6n܈cB?Õšá®]»{6þ|CCC~ éäååedd4lذóçÏó+qáú¾~ýZÙæ_HÈÑÑ‘ÕÄË–-[¹re‰ãø• ÷Ûe Ë—/‡Åƒ \°`A‘y JdC}ŒKrrrzüø1-²1´>Ü ´´tÙ‡BLà”HGèñãÇÃ*,e¨L ,--·mÛ¦  pâĉÀÀ@ª¬ø,ôõë×ïÞ½ÚµkW¸Ô‡: ;`FF†………••UÆ ‘«Ù®ªîúóòòà¾BBB^¼xGõñãÇ-Z ÊÉÉÁ’¡VAþ)—kE)xôèr2Œ"‚;‚ETQQ»ÃAú3PÎ!Èõ߀VC­º;,,Œj5kÖ@Qñ"æÙ¹sçßÿííí]ƒÊËËGGGC.œ>}š/ôf10ƒ€C ;v¬mÛ¶ëׯ¯YýC //^¼èêê*"";1oÞ¼Ÿö)/ïÞ½CL²²²ØÉŽù{ý°U…ü:«‰q©“'OvssƒV®ØXV’’’Üã—ÂîÛ··Ÿ’’2räÈÙ³g—=‚(·§-.Ó QF\\\ØI&¡æaÿúë¯÷ïßúô©OŸ>HÇI“&Ía¨|L‡uëÖ}øðžÁØØ˜žð…ÌÌÌÛ·oß»w~Ù£GÔÓÓƒ±‡5B}R™ƒÃðß#o ³!íxyµì †‹{þü9þ¾|ùu);u**½Ž;¢^íÖ­îBII©7C¹Ëš:\sdd$;h3jleeeUUUÄD—² A® ~¨ÇQËËÊÊBšÿÉØÌÌÌlll}z~~þ©S§ªî¹Ò‘¿ÇlРÛŸ399‰µ}ûvž &p'g‰§§ç… `¶ tttÆ?tèП>ÿdÇ wwwøð!®}çv̘1œMë°öñãÇð«0ÛíÚµÃ6Í0™ã<ŽF‹$;{ö¬¹¹yNNβeËpãôX¯Â„……¡±@Šøùù¡ò5RSSc»–g¨ü)+Ο?æÌx'X£ L`øéŽyyy¯_¿†…{óæÍÛ·o‘aPEÄÇÇ#HHH ó îêÔ©r.5F/† \^pp0kçØ§:t`_éìÙ³gŸ>}:0”Ý3– ÈõÄ/ETTMŠ+PSÔâùˆ‹q6þü»wï¢ñÞ±ce¾ðõëWèˆ77·Ý»wÿš“Þºu "LLL š›ïcsCXs?+c_¯‚íAÁÑÖÖž9sæ/x4;;ÛÃÃãÞ½{ÞÞÞtRRRZZZ£G8p /ó2W2Aalmm‡ †þüyUß,ä)Ÿõ™ÞŒ5ópò»víRTT„BXÉ#‘²ÛÎÎîÎ;ÐǸ©Ö­[£ÎIHH€h–——‡mCH±KÙcÐË2”hP“’’à}}}ƒ‚‚ ¿‘RÐñÝ»wǽôîÝûÇçÎC¹À)LMMÙé ©‚â1Ïûøø tlhh(Ò UQQéß¿ÿ!Cº3˜˜˜ðëtqqqÈ'NNNOŸ>EÆ@U3mÚ´e Hbø7Ø*OOOTªØùV3--­I“&ÈQmÛ¶EmбcÇÎ;#Ÿào7†Š] ŽÌ¾Æ‰ì÷LjӉˆˆà,8²œœœ‚‚‚’’¬c…{!¹>‚øÍË{ÆæÍ›ë30s 
¢6Ã-8;;ãÖàOÆŽÛ®];ø\ÌÊ•+á²³³‘£PÏã\HYÈ÷Ÿv&¬•äççÃÛ°‘(hH;öýC4 ýúõCªi3ðëŒyyyïÞ½ƒmcýܾâ2êÕ«‡UuêÔG4hüJªP\O§Nš3¨ªªV¦,¿|ù÷÷ À©qjIIÉöíÛãDptÈ«=zôhÓ¦ .Ζš3‚\AÔf5j„ö $$ÍO%ß}ª¶ÀÙNŸ>²ìÊ•+h)Ñù‚››Û´iÓ KNŸ>ý ÄzÙ°/ݼ~ýúáÇ¿¦‹¦çkaa¡»»»½½ý½{÷¾}û%+8jÔ(ÎX£ñññšt'T‚‚´¦¦¦&ÂÈÑñ¿˜ÐÐÐ-[¶àÂ`B`ž×1ü–D„EF*((øeM5iÒä kÞvíÚeee%""o?oÞ<ÎËlW%¨öÈÈÈôôtXö•KX…! eŸ«E‹%>vCõ‹àÃí"‡ÈÊÊâ˜p¡HŽâ?a *»páò.nüøñS§N5fàÞL¡ì_‚ƒƒÙž]0pIIIÐýœn]È™ÊÊÊütô׃Ji†›Bª¡uƒ·aUávTTTz2”ë°iiiïß¿‡sCêàŒÓ‡>3¤¤¤ Ep¤ÒDFyOHH@Cƒ2Þ¸qcvJí¹sçVæîP4^3¼}ûwŠ»Ãõ|üøU Î.!!çoœ /‡DÄWàf ‚\AÔ~Ð6@ ¥Dûqøðá*ì—‘™™ éæêêŠ;rpp Tæ ¶¶¶k×®…¥>†ú©V×víÚ5üíÓ§$ì£G ƒ~åÙÛ·oÓòìÙ³û æææ…†Ðjƒ0x#GŽ„ÍÀÂß%èÅ}ûöÙÙÙA¡¢€À‡°q«@:ÃTÅ@¥‘ŸŸÇ…$ƒ[€ç„²?räÈÕ«WQ%"Õºwï¾bÅ #†ª¸† YŽ+9qâÄùóç_¼xGŠ%-[¶TUUE:tè’’ReN*..ÎË«z°:¡¡¡p†0°1ø Ï#,,ܶmÛ:°OŠà©zôèQ¥þ9qˆˆˆÀeÀó$''·nÝš}¿‘3ôˆwÿºÂÂBÔQpG°a¸ww÷OŸ>Á)¡~@Ⲟíû÷ï¸x4|Í›7g›JG;†& ÅãÃxß»wv=((Ι5ÏðáÃa/˾ììlöíMÖ¼ÅÅÅá X\U^^ûô팇úq†CÁåU`l‚ ×GDÉÈÊÊ¢5}úô)ÔY³fíÚµ«&ÞÑ9sæ@%=z”íÉCThš•+W˜˜˜lÙ²…/à Vþþþø «¿mÛ¶‹/Vxœôâ@)>yò„}æ {™á«  Ð«W/555¶/ÖO'§bÇâwuuõöö†ÚëÙ³çàÁƒuuu«hjoø”ØàéÓ§ÃéýÍP ÓŽ¿®òõ‹{èy¨ghh¤š¼¼<¬‚ŠŠ /bÚ××VÐÙÙ6uãŒ3*6]di£|ÁE _ÁÆÀ<Îe(q{ÜÈ£GpUØ·[ˆ STTÄôïß¿o–;2/#.ÂÂ3s&»•““Ãö@ƒ=CÌQ4`ùzõ걥邽ؾdHw˜ŸÜÜ\äRœž™v«aÆ™™™¸_öñ|"…0r°m8‚Àñl0ưL8©äT¦5ÿG¨QêaÒpa(¤0Þ0ÌÈ<Íš5ëÛ·/çÉ ½½=.ͨü®ƒ;BX _Š[“g –… ÈõÄo”(š®îÝ»C~]¿~½FŒõõ)VPPpúôé»wïR:VÈ)ˆNè°cÇŽÙ2Ô ‹gûnÁB"cð2L+¤³ŸŸû’”4Ä«¨¨(Ät=”••ûõë‡1’¡’×Ö¨Q£‡æÃIÝÝÝ¡w!ß¡þõôôÊûÂ*Ž`eeåææõ‹>iÒ¤ Õ?Õ ‘¡¡yü%‚N044f‰©-!!Á¾ÑÇŽ(ˆÛïÃPù Seà|…Ù€m†ÄGaß±„åàñPÈ]ì4n0<¸Tø}}}x ä1ÞhC®6`(q-jo8”ߨÈÔÔTÔäˆ ‚+,,Äí°SSKAäú¢fÃNè———§­­žžîííÍþ@[Ýððð˜={6ã .ü‚aÿ,--7mÚ4hÐ kkë/^Ôè{âÿøñ#20T5¼ÖáÇ9¯óAáAÕqLššš’’’ÃïºZv¾µk×r/„&¾yóæ;w>|˜˜˜ˆK…O3f ÷+g®®®G…Ї0511ÑÕÕ­¡3  ÃõeffÂÑq¦‡†£ƒoiÓ¦¬¬,â/ßKP|&ô_´¾û†sÛ¶mgÏž……˜Œ‹‹C)ÀeHJJJIIq†$Áyá!‘UbØ×&?|ø€»À-ÀÂá:7nÌÚ<ÜŠîåEOO¹¨Š³A® j uëÖuwwgׄXñôôäï{MÏÐÐP\\ÜÁÁR€Rª’ ‰wíÚµoß¾E‹mݺµæNß ‡\úäÉ(ZˆEä[999˜SSS(<ü …äeõ_R{ ÷Â7oÞ@­¢l åçç£lÂéYYYAWÿ;úúõ«¿¿?çEÙ¨¨(Ö~'%%¥¤¤ Úùéx$Õ 8ŠM›6éèè\¼x5Ò‰'`_aœš6mŠ¢äìì wTugg§…`Çaû°Áê°ïk°†íÛ·o 6ÄŰoB²¯A—Μ9SBBÛ¼zõ E…#ÔaÈÉÉ…£?~}îܹünU¼7;Ð>Ã{ÀHÀö¨««Ã„W¬$œÌ—/_ªgZDFFîÛ·CNNÎÈÈŽz+÷6H,vxFÎC*dc˜·ÌÌÌ›7o>xð€}ÃÃÐÐ;tè¿G§ÈPÞ«B?þÜÇÇ•3|lÒÆI^^žõØ=zôÐa¨|à9¹ËÝçÏŸÙ¹V¬X{<ïš——×°aCd6vhÜ 
ÌL¦CeÎŽž?þêÕ«¡¡¡¸;Ü\t‘ÙSxˆBBì|ë?ÍÞ¬UfçÀW”/Üؾ}û"·×ˆïA® ˆ ‚Æ~Þ¼y»wï†Ôø :"rÿþýÕmž€ dúرc!ÔªÏUùúúÞ¸qÃÃÃ"RZZ~LWWoßOW¯^=Vggg111???ˆ¹ê èæM›6YYY!,ÛéýÔâQüñññ(¹ìãzÈ÷åË—s¦/§OŸÂ:®CæöêÕ ©3dȈø9 |¼÷-Z¤¤¤T“„HKKƒ©»páä>´¾¸¸8ª –-["‹®]»öÀðºRRR¬sC¾Å_vþ^ŽúN.×ÜÜœ{¾³Š-’yù6++«[·nðTšššð<¬]äãT«0¥Hköý[ØWÔùìˆ&²²²;^ޤ¤diÏlYpµ×¯_ß²e êp\¡¶¶6Ü2e ×pêÔ)ì‡;E|Øì ~YKÆWFö@­¿‹LLLlß¾=‚‚DA›Ež ÈõDmÀÆÆ†ØÊM]¶dذa7n¬>B°FóîÝ;蘣G&%%ýÆËÈÈȸ|ùòµk× z¡Y-XdŒÄ_È# ¹¡Òà‹vìØñ{S';;ÛÄÄäæÍ›p»ªâ,mÚ´ÙÃÀ~MHHX¶lŒM§NÖ­[W¿~}xo¸ Øeeez===ö)ͯ™ºôÛ·oUý ÑÙ Þ¼yÃŽ¹¦Y³fL¾b9œì g÷ª ÿþAAAlÒÃ<;88 ®Ã5Ü»wV?66‰‚B1hР¡C‡òkœ¡ÂÂÂ/^À¨°C¹À¥§§Ã»²ß!¡‘î]ºtáeÞùŸÒ—ž–³$??9ÜÎÎÎÍÍ ;vì¼yóXˆûE¨±ªiÓ¦0x ,XÅPm«Ó&Mš g(q-*”#ooo„:<<\XX¾)ލ"&¿¦{$Aë#‚?ôìÙ3..NZZí4ßGÒƒ ‚‡¨¶/Ö,bbb 5ÌÌÌ ù~ñ© nݺuêÔ©GÉÉÉéëëO:•ïψ*Œ   äïêÕ«‘‡¡Ò~Ë5°S nذáWN‘}þüyHpè~¶C”¼¼üýû÷=<<êÖ­{îÜ9(ò_ qqqvFò ðùóg$%§¿ò<¼\óæÍÛ·oÏ™,÷ˆÏ¬á΢ûà¸öîÝûÓîd|a¿zõêÝ»wƒƒƒq…ð'NœÐÔÔ\¹r%ÑÅ‹+ìñ233Ÿ‚¨¦ˆŠŠ¢Ñ’’’:zô(wo¢Jõ³uëÖ°°0~u@úÃ5jTýúõ!þ~Íé²²²Nž< ÷òþý{ˆžyóæc¨Î!‚âÇC~Á0ü²“ÆÇÇkkkÃiØÛÛ/[¶¬JÏG„¼rå ¼ÐСC ¡¼Í¸7ƒ#Bz 0Ïd8°{÷n555”îª{ž_11±ôôô¯ ¦.222::º^½zìÈH5x9×:h3ðx.Ø Ü¬¿¿?ns-CÝîÏÁÁÁÛÛ >ŽZCCc=C‘á·ñ7""6µG—/_.qÊÎÀ* 6†¹UVVVQQAzÁ;•ýâåoäË—/Û¶mÃ=âö7lØPâ›^^^–––...ØfÕªU¿krŽªCRRrC‘åïÞ½srrrwwG²6hÐVp8Jäú¢ZP§N(0H™Æó¥yîÛ·/{ŠmåyöìÙ€>|XÕ9:;;Ã8…„„Lœ8Æ„¡fÅjΜ9uëÖíÒ¥Ë/ººò·ê¦CDZ9räÖ­[°FÓ¦M›>}ú:^Q!¡• ¬éß¿?<|cIðŒŒ _ø:|iÙ²e÷îÝÙw{÷î 4‚¡òç‚_222Š={ö¬££#ßïåëׯ.\°³³Ã½àÊ P(f3ð~99¹W¯^,X°‰ˆ8„††¢²íÓ§|RG®ð€F¿˜¼¼tèÐÓ§Oó}¤ÊwïÞ™››_ºt †aþüùð8 _ŽŒúøø°~IWWÅgáe n ß \aíž={ ;ª  Ð«W¯¾}ûâ/çñrÌp‰ÓT\À²eË`Æ ¡=<<øxäçÏŸ=zÔÁÁ¡U«V¬Á^ÈP®ƒ ãÁݹ¸¸ Ô(\¨aíP¦N0 A>|… Õ”Ò×:gθkX¾ŸjòŒîtö+‚³qãFdU«V­X±¢fͼR1š7o^dD«ÂÂBT#ÈÉh+5j¤¯¯?wîÜj5Aë#ˆÚ „š““är\\\ÅŽ¡¡Ð¢E ŠgåÑÒÒ‚’ö÷÷çïaÏœ9Õ%))¹eË–¥ µ,n+W®ŒŒŒ„Zå{ÿºððpMMÍ#GŽð±kå;w¶mÛƒ †>ÊPuÁ«Á˜>iŠŠŠ7¾y󦸸8÷6ÁÁÁ÷îÝ{ôè>äääÈËË#¢h0€Ç{„……+ܵ¯D`2aŸFý7CåÁNá ¤¤4kÖ,###wOOO¿~ýú7%hz¶O—¶¶vÏ­LMMp˜çêü껟Ÿß”)S`Ú˜|þ¼ÿþÇ“¼nݺ¿þúËÊʪ¼“7Ôh‹Œµ“››{‘9 !‚E¬}# ‚\ATS h'L˜0yòd´= #GŽŒ¡HVž.]º@ –ë²2ˆ7oÞÓ§Oׯ_obb2cƌڽ'N@Í_ºt‰_¯Ïååå±Ã‡ðeèÔÛ·o#q³³³W¯^taøõ!’––¶°°pvvîÝ»wtt´„„¬ je†Êô‘«_¿>¿†ñ„u_³fÍÙ³gQ·T2a-<($$„˜/Z´è/ûFEE>}/!!AMMm̘1†††3Ê{æææ{÷î…FÀ¯\¹R­JÍ›7oÆŽ‹ çõë×¿àŒœ©)“““Q)9::¢‚ªÎc~VõêÕ›ÌÀYâééiiiéææ¦¢¢‚˜ðe¶F‚ 
×GÄìß¿_FFÆÝݽ\C~‡‡‡£M‚v$ËÇ:wî¼eËnP1îß¿+..t÷îÝ?*†...pÎP±•GŠbôùóç•yˆ ç§°lÙ2ؘ Ì~^a¯_¿Ž àçç'Àt»6lØèÑ£2°ƒòGDD`¡°°0_fV„„ÍÌ̬äA.^¼¸téR ¿Iûõë×íÛ·Ã7öèÑcóæÍK~ºŒ=Ê ,*4ØàI“&éëëï`àKr zyy…„„°¯W”÷=[¾SPP0wîÜ;wî\¸p¡ê:©– BqæÌæµóµk×?~I¿uëÖ?¹`‹'û¹°°ðòåˇzûö-2äÆé…‚\A|-îüùó!RyÜ>--MMM-00†ëä ½zõ‚6­Œåóññ™2eŠ””t3ïéXË€pD*|ˆ­>}ú¨««øð¡bG€Ý‚ÍCZØØØ¸ººVõ]Ãçà~oÞ¼ùðáÃfÍš 0înĈÅG˜(‚œœ\\\\zzº‚‚BçÎqÙ•¹ŒºuëVæ Ï“'Ošššâ*ðdIvôèÑ]»v!æû÷ï7g({—ÐÐP¶S¼ j¿ršS1zö왜œ¬££Ó´iSúßRF`õuuuQáàªÉ|BBBì\”°Hä8ðß2IµBPPЈýúæÍ›Å‹»¸¸Œ3fûöí¿lx^‚ ×Gµ¡C‡vìØÑ‚Ç_ªªªÖÖÖ:u¢ÐUMMM´èØ÷óçÏG)))wîÜ‰ŽŽ¦`²zÈÛÛ»bc ÅÆÆ***>xð½¼ûž>}zÕªUúúú(Gø[E7Ÿvîܹ7n¼|ùîÔ{äŒòÒ¸qãçÏŸÃ@e.X°ÀÊʪbÇ©W¯^vvvv„Cþ?{ölyŸïÅÄÄà‚ƒƒƒ÷íÛ·˜¡Œãããa/\¸Ð¶mÛ%K–ÀégøõYÂÝÞÞ^RRòõë×¢¢¢¿ì¼nnn&L€×}ûöm5•€BBlOÈÄÄD…fÍš9::Ò‹,hmíììØÏˆÏ¤I“œçλuëVz݆ ×GDù8r䈖–/®oÖ¬YØ‚‚VyvïÞžž~ðàÁòîø÷ßoÙ²B‡ÂÈÍÞ½{—/_PÞ¡ÅW®\ùéÓ§gZ+ øí™3gjkkÃ/U ÓWÙdddÀÁ«„……õíÛRånO$&&VXXÓÛªU«;wV`*ðúõëçåå•kœ7¥¤¤ÏÉû^ðäˆ3ÌÛO¥^ºt ™áóçÏ3fÌX±bÅ!†êEǯ¡¡Ñ®]»û÷ïWà÷…òW¯¨¨xóæÍrÅù7Òºuk\3>ìØ±ãðáÃW¯^­}³ÿU2>ÈÛìgd!äíÌÌÌãÇWÏÉ ‚\AT;º1;vÌØØ¸ŒÍÜÝÝ=<~ü·²²âã´¿3x{111ssó üQëÓCQ`¦Pš8q"ê½-[¶PÓ@ë#â'¬ZµjõêÕeKL¯ßÕ#¥ö¡§§ ËãìUÉÉÉššš=zôpqq¡Ñ½Ëfùòå»wïæÝõíÚµëÎ;¬xú)öööóçÏ?xðà“'O*©ð™0œ—.]jÙ²%<Þ¬Y³3üÆè-Z´hÞ¼yJJJ¼®l ÓÅãÆ†††ß¿çåÑSbbâ˜1c²²²nß¾]šÙƒI^¿~}ƒ 7Î$õÕaaá>Àô~ûö z¿OKKSVVž2eJMy¾WÚÚÚ¸ 8XÔ{666sçÎ¥*®8âââì+ ùùù¦¦¦'Nœ@È—š‚\AÔBFŒabb‚ÆUMM­Ä 6lØ€U4;_ؼy³¼¼ü¨Q£~ºeAAAÇŽJqã…… š™™Á$HIIýtc˜.'''^,ÜÌ™3ÝÝÝïÞ½›’’Rák+,,<{öì¡C‡àgp@è³ßnóJ´paaa–––`DD/c¢Ö«Wró§›eddÈÊÊ9rä§]a8-,,Ž=êëë[|-Ö488xõêÕ°y5ô…sؘ·oߢt#zãÆã×açÏŸY›æCGÓƒ²ããã#&&vòäIî¥T%-$Äh„²6iÒ$3gÎÀ9Sdr}Aü€M‰®/77Z­ÚŽP³ÈÌÌ„Ùx÷îÝOýž––all,=ß+oN†³‚m({³Û·o#W#¼eû4íÕ«WÞÞÞ["++ " &JZZzÆ 3ª -Z±(!!ñðáCEEÅŸjÍœœœ²·‰ŠŠRQQ èܹsiÛ¤§§ëèè ¶qssÛ¼ys‘µžžž°4 6‚ þÅØØ¸wïÞ%®Z³f‘‘MÄ/=½`Á11±2¶AÀoݺeCítÅ" óP¶ë‹‰‰™2eJÙSÞ=yòF¥qãÆåº€üüü rrr»víÚÌP㱘ššŠ[X²dÉÂ… ËØ²nݺe?ëóòò‚yþðáCiOß¼y3`ÀT2>>>EV988 ¼`­••ÕË—/kY^E@ž>} ·רQ£ gçÎvvvŸ?®õ¿-gÐÒÒ‚ë£?å—ÎÇÌÌÌÚÚÍJi¯ó¹>‚øƒ’’‚¹ÿ~‘ÑÀ mmm_½zE!ª•ÇÌÌlúôéð$%®]¸p!Dpyç.#Š£««{åÊ•Ò\žž^‰Ý]55µ«W¯&&&òr¢ÂÂBh)”‘ &À¨³ƒÎ×2àŸG½|ùòÒ&?*íYœŒÁû÷ï‹çù¤¤¤-Z,Z´ˆ;Ô™™™°ˆH‡²_¾­MÌ™3ÇÝÝ}ݺu»wï.׎0Û;v´¶¶®º±@«-h§’““UTTPØ·mÛF•/Œ5*%%%88XPPðرce?Ã'r}Q›t(>kßéÓ§÷íÛGÁá 
çÏŸ/ÑdddtíÚuëÖ­ž)›à¦v¢ÄU^^^PØ%ÎÓ ¯¯ŸžžÎËȇPÛË–-³³³Û²eËF†ÚOGGG--­M›6•(¯ëÔ©S¢ëûøñ#ö‚…+bù`•‡ú×_}ÿþóFâƒà÷Œoß¾ýæØ .4oÞ|Ö¬Yeô{,—/_:uêäææÖ«W¯?¶¤­ZµJMMíñãÇTïñˆ²²2Ê ŸŸŸ˜˜˜¥¥¥‘‘Å„ ×Gb±±±¾œŽL!!!iii#FŒ àT[[[EEÅöíÛYîëë«««ûêÕ+qqqŠ_½ÈÂãÇ8pàÓ§OuêÔ¡ñ%%¥;wî øáÇ;vì8dÈ"ªñĉ?~,c°{öPÀÜÜ.åÏ ©¯¯/´uŸ>}Š 8‰¬[PPPdã±cÇN:uàÀœ%¹¹¹ØQ__ŸóJçâÅ‹===á¥)ÇN™2åСC—.]úéüòéééÐèPêdùXP¢ïÞ½+))Ic{– x¿qãÆ¥¤¤xxxÐxѹ>‚øƒèׯ;àGì^¼x‘ÂRy¾~ý„Ør/dz¼~ýšâÃw444>|XÄõíØ±£Èì|Æ ëÞ½{iƒCfeeÁ¥ÄÄÄ8::&%%ýá!­W¯ŒŸªªê§OŸ„„þkµá–¿ÿν¥­­-\ô7¸móìÙ³!Ê›6mНË–-C`ÿþûoÊ«I elSXX(++‹x"ßRÄ8(**†‡‡wéÒ%,,¬M›6ráàà€Ûºukkkë1cÆP@r}ñG ¢¢ÅÆ~ŽŠŠJKKƒÈ£°TžóçÏÃQs/Y¿~}pp°»»;§Š~¿(ÒÝîðáÃÊÊÊÜý¦äååçÌ™ûQ|÷ˆˆˆ!C†èééA^Óïßà7Œ n޼ɽœûY_nnîŠ+ ¾9K/^üâÅ ößåË——,YòèÑ#¸ŠgÔÔÔ ¼Ïž=[ü¥ƒFŽíß¿?…«>>>(Ô±±±åm…hÕªŒŸ¡¡áéÓ§oݺE!ÈõDíGCCÃÔÔ”ý|íÚµPLø‚££ãرc9_7mÚH–¯êÐÒÒš8qb×gggÇ~ÎËË“’’:qâDñ>«qqq222‡*{6¿?–;v¨¨¨\¼xqòäÉÿ¶ßBBÜnêÔ©ðuíÚµc¿***bË¿ÿþûÇpÝÛ·oÿ“ûïýÔ ¨Ks}XÕ¢E z!¶4ºwïîää$++‹ÂKïÌW€«W¯>xð yóæaaaô®,A® j9ÒÒÒ™™™ÉÉÉbbb^^^Æ £˜ð…€€€ãdzŸoÞ¼ ÑÌNžKT¢¢¢Mš4va_„sqqÁ΃k99¹ýû÷±|ìæææeÌ>G++«1cÆp\Ÿ   g4—7oÞ¸¹¹±îš}qýúõ3gÎ\ºt)Âûþý{Òâeƒ<¹`Áÿ>}úYnkkKž¹lÔÕÕ/^¬­­|HѨƒ zþüy—.] ( ¹>‚¨Í´k×.$$dðàÁAAAPÆÊ,""ÂNùýîÝ»éÓ§ÿ9s‘ýFdddY×£2mÚ4vy¯^½–,YÂ=^yBBBïÞ½MLLx™¶€!QSS333c§ngç``W!°k×®e}`ÇŽ-,,P“´iÓf×®]øL¡ãöÑèùóç‹,;vìÙ³gÉ6ÿ” 6x{{oݺuóæÍ ))™˜˜ˆúùpܸq‚\AÔZ Õºvíš““#''G©<îîî***ìç¡C‡^¸pÓ‚¨R:wî!Àg¹8r䪈íÛ·ÃàÁõ±Ïúbbbž‚¨ ™ŒŠŠòöö–••¥hð…€€€Þ½{ 0Ýr”””Hºý2ׄW®\‘——oܸñ™3gž={öôéSv___mmíÇwëÖÂU.$%% ´{÷îõë×sžõÁæ±Òtuuõôôâââ`iìÓòÒ¡C WWWdNv ”÷Ž;ØÌLðBݺu<8aÂ???ŠFÅ|õêU›6mZ¶l©¡¡A!ÈõD-fïþýûõëׇ?¡hð…/^,Y²äõë×¶¶¶ôá/CNNÎÁÁ®_¿>vìXؤg²¸I“&edd¤¦¦R *ƶmÛàîX×Çsjggcjj***Š|ÉÈãœãDFeooÏq}fffµÌÿØ»°&².:”P¡‡Þ{¯Ò‹HWADŠ( ¨ˆ]ֲëŠú[׎XQ°+ˆ"¢Ò‹‘Þ;¡CÈ?!”€tB s¾÷i2̼™œ¹sçžW?ÜÝÝoݺº\ˆI‹gð‰æççÅ3qöc Õ™tñà('©PRRJh(ùÞLBHH¨ªª ü†,¾¾¾ëÖ­ù‡Ãáh4ZRRòàÁƒžžžK“†’’xûö-a(ù@™ ÆÙ ©×ÕÕ=~übir055ݼysÿ×€€Â¸Y‘#GÀgR}Sè0CBBôõõ¡´¨ ÕBPP°¦¦¦½½}ëÖ­SGNNÿøñ#== Ýf¢¢¢ ö(..nii©­­oAhh(hÛ„ÞlUUUˆ¢)bÍš5þþþÂÂÂ8îÁƒvvv666VVV Õ‘‘‘?“†‘‘QIIISSS`` ???4òbr4"‘ÈÑ×?„0&ttt\\\Ö®]±R} ›êkhhh²I‘‘! ðçŸîܹbc&ÁÌÌLIIùòåK___??¿²²2ЪÁ;9ÄÏÔáææ¦¢¢âéé ª¾èèh666ðCaa!DÎ!''¾jÕ*PW¯_¿"drسgÏñãÇ!Õ7EœÈ`  ©R¡©©IOOâa¶ÐÝÝMOOoccQ1M€Ãá êƒ–÷$9@! 
ôŒ·‡¨˜"@Õ—’’©¾©t¤»wï†x€©>È Ðz¬¤ËÈÈ@<Ì °X,ƒñññ¨˜>ÎíÛ·¢‚´ ¥¥¥§§‡x˜:$%%ÓÒÒ ¦sss[[[h¸,HõA€©>ÃT***³‚ÆÆF g1qˆŠé‡z’yBT80¢¦fff†x˜:ddd …sHAAÁ„„(ÉHõA€©>è>qqqˆ‡YA}}=:sqqATL«…C$L ’€ŸŸ¿¶¶âTª/;;R} ÕÀ "aVÐÞÞµ_Ì€ê¥5ÄÉÃá ë% ˜™™;:: H>>¾ÒÒRˆêƒ|¢ JJJˆR‰DB$Ì– ¡  €x˜nw‘ ©¾¹ &&&Hõ‘ p8¼¥¥â¤ú @˜hë@/(*).«©*ol¨kon¤¡¥£gd¢ƒ3Âhéhèè((¨ü.ùƒGà‹°îîntG;¶³³­µ©½¥¹ ‹adfeaGróðˆ‹‰ðó.𠤾©%;¯¨¼¬¬¶¦ª©¾®£­™–ŽŽ@Àhéièè©ih@ñ|ÿùëQ “Xt'º£ÓÙÞÖÜŒîh£c¹äààäà—f€Ci&†..¿¤"¿¨¤²¼¬±¾®U‡ëî¦gD€£¥Óƒ–|æÚ- €âΓW =c:ÚÀ;ÐÙÚ 3®‡`agaçàæåæã†ÂëaÆ`•WWT4Ö×´6¢¨`08žd&j:ZÐ]PR^ |ª¾Àoq¸.Ð]`:;;Z[ÚZš1èv8‚$™‹›WTXPBˆƒA”ö›[\YP\ÛÔPÛŒj™‰¥…#hhi`tpÒ¿Ï\ÂáºñÖÛÕC,è;ZšÛ[š(©¨˜@ëåààåá“àá^àÚ¸¶©ùg~qiYY}MuS}-øº¢¡‡Ã™`t ‰ÒSQÃ@Æ.Ý~pïY(‘½5 ÝÙAÇ€`am”KPPPZDpûa [P^_X\YQÖT‡wª ÁtŒLô ᮩuŸSíÂtt€¶·¶t´4ƒ{ö8U$7¯„¸ 7äT!@ª„EMcKRjznÖ·–¦aq.AQN+¿0X¦Vs $ÔVçWåÕ–sðð+**ª+É‘k`Æye5ŸS>ÿüAAEÁ+,2ÉÁ+çËÔ*ÏFuÕ~ûV[ZX–Ÿ~“’ÕÖPåå„zÐŽÆ$ÍÊÊÈ›Ÿ[H„K@Œ™•K,ŠS©¹¹Ï’«Kò«K‹Ø¹x•)ËÑ-¼Œù­íè„´Œì¬¯ UœüB\‚bœ‚¢ôŒœü²`™šÈ€´òæê’‚*Ðcæ2±²Ë*(j©*- »±­=)-ãGVªºŠK@˜S@O,;Ÿ X¦T3•e—V•àý0x׸„•••”e¥ÈuÌ-è‡sK+?§¤ÿüNI ã‘Dò‹pð ÀyÄ$yĦX9Á×””çÿU·¸Œ¬¶º*7’ƒ\Ͳ­ø5+;ók]E);èTA³d@°rK‚Ei*5£ <¿|ØÁÒ,R’£Ú} @ªÒ¾3óË’b*‹ …¤$T´ødÔÀ2ç‚32 Ë*ƒ¥Kb^Õ”øÊâ\ YÅ%º:HÖyM&ƒMËüšF£%”4¤¤5—€…ä'¢¢¤â‹œvïŠÌØ®ÄȸÜ/É´4tZºÚj r °Ý´¤—Ÿõ•S@XZU—‰_R‹_rf,9¥¨6'%¾qi™ÉñX,VRY“ORNFË,Óç‡åuŒ [Ê0]ñïc~¦'Óôuu)ÊÍwË,ª®‰M(ÌÎà“TÕa”Ò”š§Š`‘SKÿ–¤¼ê©q•E¹bÒ Æúºó=<€©>f mèw1 ß’ãø%åd5«¯˜•˃5ckÂçÜÆæg¡÷jª–˜˜h(+Î#2ËjQoކהÉëAêâéy)ŽˆÈª€…ð5,.-#.’WHt…¥9 #[r›ú-áÃ;&v¤’©Œæ™iÙc‚Áªbh ¼ølkø¨º¤@ÏÐXg‘2Èo •œ‘ÉÎÍ«h`*¯»,3ôpFE]°€Ÿk0˜ÁÏËò³5tj«ÏÓ®*4û>þsz|·è=”[€eæ/ƒ…KÃÌ–ð9»º1ýñóÖ¦zS3s%YéùÔèSÕðöí›Úª2ÐBxE¥ …fáâñ~XN ,„¯/>&f%~_n±” ˜GÏ{tJfrT+'¯’þR9mc°ÌFxÀ¦fÔä·´=ó°®¼ÌÀÄXSI ©> Œ+Dûÿ5!JQÏDH^,sçÚèf+ñÙÝõàexQöëe+äe$ç,™µM­Ïž¿hj¨Ó0µU3^6§®_B,à‡ŒÒÚÄwìH‡•+àd´‚3‡KÊøùáÍ !iEIUSg¯¹sm4´ô„`‡ë~û#%ÎÄÜr~5dô“›ž*ª &©¬i.2‡45 ¦d`ðsHô笄úFÆúšêó‚XÐGÆ¥&G¿“Q×–U±˜Úè8ÒŽ`Ö±Z ~Àb±÷Ÿ…–æ~_nc'#!2gɬjlyöôy{K£úR›E¦6sêÚ@ïðCjqUrøM.¾Õ6Öôsuu ¢ç]U\YËÌEjî\-\Ýß6ëîz•÷%ÉÜÒZeþw¥B€TӂŠÚÇAì<ü º&ü2szp*J* }°4·6_»u¯» ëæ²vîLš_ïÒ“?…+虨›ÚÎñûŽ`ã0qð?D~þ–ûÞÈÌ\SEi^[2ª¹-0(¨‡••¹«÷\¾T J1u° ;;®ÝºßÑÖâæâÌ„`˜û$×5·=xð€’ ¦fle±nÛ¿Za%°`1ÿÀGMµUë\]ØXæèbåeµ ÁYÙ• Ì-Ýdç2«TÔÔrÚF`ihiºt#€†æêä0wæY~ømlrZìG%}S- 
»9n¢,ì\K7‚¾|KøhjaµHI~î\^mSëÇ)))ÕŒ–Ïñç‚’JRE ,èö¶Kþwº1ë\œ p(ƃTðøœ]þ§´_F~iÈ£@)5%«7Ì/báŒLà‡Ãu> ©))Úá³uvSp~8èEhEq¾è‡ç|“ÄzÿšjŸðÓ3\2ë½Ó…µÁw$äæÝóNC'tJ‡~ˆ)ø–¶Ýg9 f©>&ŒïEåÏÜQP³œoeõ–‚åÙ›•…9îf¾a‡Ã=ÿ”žd´Ú]FÓpþ2ÉÈÌfîêÝÙÖròÌ9EeU³%úóâ²k[nß  ‡3ÚËç/­Ô‘Éé?Rb7lð@²³Ìk«¬oº{;ÁÊnêä5¯I&L¬ŠËø–¹Îm=×,ç¶Í-­~x›WLzÞIbPPP*˜ƒ‚CßÕ”nt_?óA6臟¼~Ÿû-ÍÈÁ]NÇxþ’‰`塽µÙïô™EšÚF³ÑW^×tÿÎM Ç|Þ¥é%,*9/3ÅÃÃ}¾ç"‚©>&Œº¦Ö[7…¥-æ¹Þ#aØgг”ÝÝëœfl>wlzVÔÛýN"JšäÁ$-œ”O„öæe+lä¥çîäI4{óÞ-aœ*y€ÿ3<úCCeÙ&w·Y_¼$ùÆ{ô Ló]TƒÐ¹•[™Ÿ³ÑcìL¦jlk¸À-,1LJ"OrZKp¸îûÁÏ`0J·µ3×ë•’ñÚ`¥³¸š.y0IÏ€WS]ÕñOØ®²—Ÿ¡É“íhÌÍ;÷,K7‘YŠ)©ƒåÕ»·m¨zO7Wr]‰R} @Š·±É_“b WÑ$-ùýºE&+ZPu'üN®qvà›Ösu`°þ·î° ¹çu#ýH ´7‰Œ‹‹s_ç2³¢¥ÿ,zýä­ #3ùñFÏm§Ïœµ°²V’“™­ËHù^ðöEðb;&F—RÕ‘S»pñ’áŒgþô5éSÄ{7ZrxFAA©nfÛ\_ûÏñ]Ö òóL·¾îÀÁ/D–~˜‰ËÂÍ')æmLl¬Ç:çé>]rVnÄË'‹W­ƒ32“™Jzfí­Í'Oÿo¹íJ9Iq¤ú @ c 1ØË×ý¥äÈ© ïW0²°[®ßõñu"ŒÊÑnº’¶}Í+ }tßÐ~=œœW>Ó6_“'Nžrrq™;  áp¸[AOÁàÒÒm;“OK7wõÎN‰IJJö\ï:ó$$&%mp^ @€Td‰Ú¦Öë—ÿÓ4³eãæ_¿WÙÐ2ÿëç‹W®{{‘>KMD\Ê—Ïñæ®[)(É] ð5iæºõñ£}9áßxqÍ_HF‘xt2†”š^eQ®ßé3;}¼gl´g{åê59U!¥…@²˜’שÿñÙæ=­£=Q­mW.]R3²Fò -bU¬ó¾$_ºê¿e“;É+ó)1;#ï‡)(ÉžI8‚ÙÌyËýþÆKMUåeIþ¼ÿwùŠ„’– Ô‚Xí@VkIYÞ÷Óç.lߺí ©>"` Îì>žMs-3—„¬²šÆ"ä\ •~Ë÷t<=;þÂÛkGOYŠŒ“–†ãl¢û¿.Žhøh@6ø§E[ÿöÔâéRïúî‹i` 8W[[·ìÓ;„aÐS0…•µ·n˜8n$¿ÑD£@Tq3'—ß©3»vl#¡gü:ÕPolï¾p˜¤¢¤2vðH‰x^W²0^<‹WÒÚŽ¾xñ¼†ÙJVNž…Ã?·8;òä©Ó3“Þ$ùÂ… š¶¬ÈD2(à ìÖ9svËVo¦iéÀ/¯E\¿bâàNKÖ~QÔêÌ\'OŸÝ¹Ý›„~8èeX[[›áªõ ÈSS/uÜ”ôæI}m½‰¡©ªmlk¿tá‚î2&6Î…C&Ÿ˜ 3;§ß©Ó;¶û@é=!ÕׯúŠ#Î_=ñã,ŽE¾>¸d) A×\»wm hØø»¥û8ÒÎ9f.|¤ÆRú¯s…[béäò/Œ8¢#úßUÞçˆ6xý¸‹-ª¾„ß½÷nÈÎ×î5(¶k§!¿ùouõ{ðËæÅ.ífž‚T56ݸ¾dõ†9*ù(Z*±8:ffœÄz[@Q×øâå+;¶‘&_B臘êŠrBJè…5›¸‡bé–èÎNÞ4{éêeeC‹%ù€32ë­püïÒ;wî˜ÖJ:@’¯\V3²XP’z8£ÁJ—Kÿ]ܱc;ÉA|­ëW¯tYP’>AyË—¯îض•$>÷©±¾ž°ÞÉBƒ†¹]ÌËûôp¸®†êÔkkGc®üwYÃÌvAI>Yص-W_¸pi×N ê+€Tß°XùÛEc$º“¢÷+ކ†¢églع'ÑÄ{…2¢¸´³ ì³Dš¢ƒè›˜×´Ê ѳŸ_ ­Íïû^pqùÿœP»´†™ ŒùyË`?±ä»}qÔ·½Ê÷ÿECÖ½ÛâÑ#ùÄ ²ùÑ|úÍ+W l\àŒ$KX\{Ö}Õ’áþ¤·3ØoŸõxÃ%ŠÖçžÈ#¯z> ‹Øm}rÈYº&éþ“Êå›yI䄹E$º°—olö˜j«ð‡¤ôüìïú¶Î Öœt¬×D=½ƒ`d˜ùE„q8ÜÕrê\¢ “|0,Ó4·»ðߥ½;wLÉ×A’5 ü" “dFf6k‡ ÿÛ³k' çø¡1ØëW/ë,_C–™‡Æ©n,úŠ€—ûTýpDlJYažî2Çë‡õ–;}|ÀÈW’•žªS½v]i±)û˜÷ñ+X9yÔŒ,þ»r•TíÂÈMõÞ¾g«Á/f;÷Ÿÿ-ÚЯ|€ò3¡©lUP>˜ŠO{׸‹*¶f‹=Á'W#Î*Þ¹êšÞ-øõj3Ë7KíЛj Ÿ £1i‰›møE¸¨ÆW§UháEšI4D 
:¾Ž±¿Ù¿a·ö‘•¸_Eoõßöƒ<¾ó•W®Rc¼Y n.?îÙ¸_‹X´´ÝØdEøI¾Qq70XF]ÁÊNª ³¯/s<üô/¦†ëòàþБö»ˆnÈ{3‡"+åûC×äçÖµ0@ÏÁÇËÑÿD LMUÏÓàv7{ûbv ŒÓœvm›çãb}ûH&ŒD]| ue¡ï"­–Mº’ʺƸˆ03×­ Ü¢t–9¾½{QRTd†W~õ†™“›WLz!“ÏÆÅ+&«|?艓ƒÝ4’,º IfArK©hÞ ~¼ÎÑTuÞ z$©¢ÅÂε‰%øá×,M&?ª¤ª!1êùº…£ë.w ¹wYBdçT:¥< áá[ÈLrð‹ðŠJ=|úrÍÊåPÄ©¾‰€fH×–‚¡O?u¤¬¢_ôdðŸí<}ù;cÏ݉!| ;eÏz °8™ôzz¿N vu¸[:ôDbb@^¤—‹ÌÎ.Æþq?p>ièÕE@ zà`Ì]W×»Àu†Z ‡Nî× ¯¾äNtµg\ÙE´rÎ7ÿGÒ‰¾¯ð¿¹i¤Œä«ÿ÷/Í®ßî¾ÐöµÃ]îï¬ùðg?›®ÿ[Û¼ëÄ3ÈîÅǤt MÚ¤X4¡£;úsRºÆ ¥YF§r´¢í(Ñž¨¸#KìNô|T±ö0bFgÜ¿ÞóUÿJF¸RÕÝ};ý£3ðßßûyæ]jæ5ZY~7 º_ù›šáöWo˜Š‘fœ•‚¾é›;ÿi¨¨ 9&™€þéógÊfÔÔ }5 ¦j`ôø©çz—;iNqyáof.[¡‡ZTI3êé¯Ù9Ф^G"yàu&¿(æù½´Œï* $X3#&%££­MT^ "ï‡o_TWVž´~ñ⩪¡%ÕH£5:`4tÊzFŸ<›t"ʌܢ²¢|rZ„sÒTÓû쟓W()& ±©¾AHNNÎáhmí“Q8€¢¥"3öæÙÝORí)²á_ÇÞ^¬ ×Á’ïð§Æ#½=Wgo…÷ÜE\z{ÉÂöjøJWœ°æ?>ò,ùÄæç: Œ»iûÏ’Á;l„Î.¦ÅŸp¸_7ƒu–|jÏJ?ÛðÕ¹šÁûñ$½ýwïïÊôÏ&Œþ}ÝÕ?ºòê^¹zOÃ[úxÃÈt›Ž×‰?.ß?RDøzÅãsÔ± Ù•±ÛèïþýöÝí>ÿÎ.À–÷|<`ùW —VÏ>ë4}$Í¢ýí·ži’t¹ JEíůބMî™øõ;æ—ì ¸Üϯ)ßrrglÉ£÷ï"äµ æ{5%‘ïÞ‘\õA$rt:Æ‘‘¡SW} ŽŽT7†ºúü°Žá¤ý0¨Ÿ €GDbŸ¤RΗ”Ik•O‘ò:ÐóÞïTÂß½“󄨀Tß „í±Û3æ^j×RÞ{öíDžÒµVðôïO;ûÞ ´*…ø¯~¾·÷Xû"†no_yî•à©ð­O?ÓFMàò‡Öé|å® ßà:ï~ ~<¡:‰!}ñç•kýR oÝß™‡‘=鲜XŸN¹£2ÚjAÃD>?wD¥w=®è¿õ®¯i÷”§{`ÓŸK/3áNýLý¯ø˜œÎÈÂŽ`ã yÍ9§óNý_;êÊr¿ÇE>:pÄåÖP;?¯¼ÆVô% ÿG}}¥AƒKÕÖ×9EÕAÀ†4MÐô~ŸŽ5>IůñŸªj¸&ÞÌü9)A\I2ª~H*«ÇÆÄÌŒêË)*oFÕA’»¬ÜT0š/YÙSœÕCŒŸÅåÍ É`áä¡£‡§ef©L-K~LÊW8# ’¢t°®ãâ˜ð¼ƒ´ä UmˆÃ~H)«GÇÄNBõ}Í-êloã‚V*ï¿HWÂÇìÜiqˆ Hõ ‚¨¨h~>^fˆ‰åáEƒwgYÙÿ†¬dü#z¨†º{Îo´d6 E Ñ)3ä•ɰD§îbD=ÿµ­STfêuL|SæOòý}zßþð:¿ú‰LùË}ønÉ>XœLߥ:ÖšHh¦ìá{ÝäD&aÃÆVþô&~ñý "ô¾”wi$üÈÌ’&qÊ LUtÐIó­ýƒM{4:v>y½Õ`ññ‹ò6OâRóÚ-à„%LÊj±óÀ#YÛsû$…8gãΉHË'~NYnn2¡£*kQ5U¼¢RQõƒ[T&å㛦æf&bºÏ•’–&(%?§~~G]Y}Á.„˜¥¥¡E¥Ò¿|!¡êKIM&ñR]MeõXzv!ÖYaé—äÀ˜ÆZT;ššš‘ƒy<îGDF!=ýËUßwÐK),,ï@ÑZS\‹¡fáåaÙ§MÔWÖ¶6¡fhÚàŸ0žç}Ð>c1@*ðˆË¥| okoŸè쾯éi Í,ÇòÞÂÒ ©ééêƒTß þT{dP6—¾a–”Ÿá£8s-ß%5Ðc¬D hEÇoÇ+§³¦pP–ÄŠü`H"™Žä»“gÓQçPȹ߸ö ÞÝÓí{£íÿº·â?b÷Œwå_ã¿]N(övîE¸ã¶í›Ç¸òê]K$dí#È3,¶º¬HÝÔ–´Õ¸ôE(vŸÎÌŒÖdòå7÷Ù¹VIZÓR[]ô9Øï~DÏxÜÅ^&¢ôt€ûÕ=›NÅûj2%ÿñ|ýjUXS^ÈÙí烿‚»l;í9¼V@S´áÿ‹8ÿÆJ”MLƒ—¤/M.!±Ìø0±hãË÷l.AÒäLýŸºûÉo£î²'²â/Öù`cM”<"ßhªNlö~yQ¶åꉼ›ô÷?»e¯5ÿWs¡ñn"…IJÓ¡èR}£¾õ5nÑYS­ºn£ý›óéO¶õ KàðÀ¢ ;¶>´º¶†¨c½2ÄuÃ-zI1n.nN$ÂJ+eÀº|ÍŠÑ/ú÷º¾ÞÃÁ†xÁtÌ g— 
ųζΓÞ9À‰GÐç7Ô}SÅC xˆ3ÄXE…mŸÀ:T çcÞÕ˜È×;Qöß+¡ÁH#"·¨œ™•Ššôς@siZTÄ»ô„§ifð­†cã×ó½²o‰¦‚Pÿž»ß§íiÍ|ù>øìo·:ÚÛ™EV]‰ ×ëKÇŠƒ+;ýÏÇ#ËÛ÷bDè_N{óôíÒšˆÂ&> ¼Ì¤\5‹‰“¯¾¦j¢GU–—“jí2Z°žo€ª_\¨÷? ƒiÇ`¨áp¶¦(¯ çà~cŸb!WjOemmXF‡5”äT¡°\b²Sï~açâ-+uÔôª¾ötkjŠ䊟®ï•| ¢¶{ŸUA­¡*+êQ­ÀÀ)&„èkœû%µ,‹sp£]ÏèãÀß”!7ð×íí5å5íhÐXxEy§bÙ´pFjmEU ’4$£êǹ ℳøÒ᳌uÔ•UÕ4‚FÈÆ#ÌJÔÓÖÞÖôXæ ë؈Î{yâÐ?·¥½4ÍóhýÀŽãMÜc¨ùxÿ]ê`hÄ>V4@çÓJOC/«¬æãžäRf?ŠË虘©‡[ lÜ4ŽÈ¦©°¸…¡¦Ñ0p ˜ëPÛPå• Õaè™ùu=aZÛ1ûl®,Í;\=ã²[ŠÖ¢ìŸíh¯¨Íy˜‘¼ µU]]]Z£ª²‚mÔ†÷®­±§½Où{kúެ³•¡òÛ‡%¡ü[RžFloCÑQ—SXÑ£¦¡çàEÚoèáý~»‡êj¼‡!x„9érÆ´×ÔT 0૊[˜Ü}Øëê‡yJJÊ&¤újQ-tÇÌÞ£¼›j’/»/ß}ÜVì®Í½§—W†q˜‚C˜“þ«aÔ—·azâBv>$3|œ.wžaö9ʹ"X»»»ëQl,ÓÛC a^«><4w„7¨cÕû½ËSeŠÀ³ ñ=ò†N;´ö¥ÇÀ¬¶ëŽ×U ×E‚-WŸö.öÖ'Š\V)·åÚ?×_HzùÉ„•Ä,í”+Ÿ厙Ȃu#Õig§\ðdH>š)€JíNÖ¹'²ÛDZ ÔŸàÚ,ºm¼åдá!ö0d7ÔÏ7š[oh`@Lã"~+7°ŒµŽAÞl=XFzM˯ò2tÆ¥¿ÉOš.›Š’ŠÎXSÛ0¡ r-M(n’fΠèÄŽÕà€Ïs3U¿¨×ÎÝêÝ5j®TM\sú¿ø†çl{U¹aÑT×oD0³UçO·%—U×™¦dÉYgWl Â;Ÿ˜v[¶çN²Ž©' Ω$AD‘е‹LEt`Jl09–pTÿë>—ãƒRË^;]½[´·µx¿R莞n¥ž{ :©¶ñðî·u.JÁμÇ{:4m ÔyŠ_]Šîé·>—zÔ~’£4,,Õuu$Q}5uã'yüY| ÙV9ÿáš‚¸ÄGÏ ][‡cëlEé{iW;²1ä^€C½¢~_{è:¾E5ûÊßÜS8 /¿Ã+;'–…£fª/‰ía»}šÄ÷Œ¯¯`äÕµµ“V}uµ#úáqÑHÑú 'ÿÐ`+Dð2FNZ²Ì¥Q—Þá‡PÒ;^?ð5lÏ Dõ­µ€’Àè/=ß}݃=Œ€–¹òê-áëþ·uöŠô Ûn;Òö‰èÚ¼õ]¶åQwŸ—Œéÿ(©èሚún䦷6¢Ç3(qˆwÅ1H*ˆø3Tx‘øJðwݳçŸßMÏJz³ˆõ¸\â}ê_É~«ùË Ì¾ãàèÛÓ•¬`î´X´:êõ»¯…Ä; >|0Õàƒ …K¼ÓËÒј&k1êAu ê¯Z,u70«ßÍÚ[Ž`f-Ê)ŸYVÕÖ2 F6-ü›ê›½íwû¥Y-ÀŠ÷o—µw¬k©³9'enß3èñ¸ø¼ (ßiµî*áçX/–è¬HyÑcœ{¿þ…ÄærOº+ŽáÇaŸcÞ©!`db­ª©‡TßV}htå=’29„«ÕTã0N ;X)~¶äöe)8\yÌ…eú>ý;¤†Ý¬²¬nÇßrÕ"v‚œûÓpŽaÇD,'Ú˜÷ú a6¡Øé¨Ç´çU&˜rsø:ŸôÕy=áeó^9¢5?e—vè³ÐM”9¸ŒOê™U¢.PüÃ}0¦?yé0dw{.âÕ&`öçݾ¦Óß_ªòƒõ8\¸èìì„ÑÐB< ã`°öŽŽ Ò…ÁRÃhHy)¿ðü>ì_Üž•lï DzóÜHŒüx°O¡¢†ÙØæ4V®ÔʃI©2IC£1Ó}¿ÚÑhØTº¬["¼{Ô—õÙ [1¾ÿ=Ù¾ÄîÜ-‡Å2‰ßL{­~áÕlzZé¥MpP7:r‚ÿö‹b.ÿTÆ¿í|„РԲ`€HSÚœŒ|eºÏ/Èâ¨CϤ£®o÷œTïX‚‹"}ê)+|hB܈~ðxaNËC¶«JŠVºLJÃ`°N4i¼]M=n’ǛŷÄCj/´Þ³A‚±uHpšm Û<ÜišñÜZýõBˆk¹Ë±Õí'Ïöoó˜xràÞxQrýÉ{»'4|FCžÉ4ø˜LžÆáˆý[/!ˆhN /–Ë:fŸµ¼bÐoÃxl *ñêeéÐòÚIbUVÔöÛñ^:ÃxPj&ý£±éBæñm—,>íþyjåXvKþ‡þOïñRÿ3r?–0±°ÎމÛÕ…¡Þ»š?È{* ÇMø)ûe ¦,x_h;iiŽö4Káð^ª‰?Ô=¼È¼ÿ(Éú7LὡuúÝÀùkhm»)“†˜îèì¤)<ȉ}ü€¥³_#Ž hªüЫè€ÃW¶°ö:ÕÝ=W[·a}ÿ3˜ôñ¦ 7VÙHF…z×ÿìÑx†GŸûïêkai®,ÇÒ³0âÆp¹ymŠœ£{B=–}®áy3þ;Õ÷RŸ÷ÂÂU}LKSpãvìcìÌ«·-·m¢— 
lq‡;4ÂΜÌϵNˆÂí$y*;Âq;&°¿þ™¸?FøvèÈÜŽvàB †ÁtB< 'á00Ú‰e’¡¤¦ÂbI*o$‚±éx² ¨š/aucó8r¥öõS ÝgŠÀb°TÔÓ¾j $;Ƀ) O/'¼àSÎü‹ÓÑ÷Lò}5=ÅŠ¯‰Á€T-!Æڽ畴?zþ/x5µ,Ößa‡AQ²ÃÕÈ n î㛞 ÏÏ]”ˆY1=pê>ž}¿ý5¿˜”êÃ`10 ᦥMˆäñdñíê­ª"G4æjªÏQóä’O68À ±°),Ñ £¢êÂ`§DãB*HÎÆ3°é˜%ÀÕàh/m«þw£1KuM « NôÒQÖÃ(Ú¬.ôЈmÛnÕÙ«sz—&öfº¦0–÷ýðÄî%%(üÆÞOíh\Ènâa~ 9Á,vsƒnD›lIM>¡›ŽŸoÀ]L †Z@ʨ?zðƒÐ ‚ ê@•öÔ©¯,1à `ÂkþXæwäÕ]§Ý]jʉ1ICCƒÅŒ$¹[ÑM­ø¿ÒP¿8|¦çqóx^y¾ß;Ê‹ª m8ËØ/üZAêWð”÷~u‘f¯÷áßmĸ€†»ï?]æ±]nó˜¿a~Õ†zÂwª§…J¸Uó¬,L­Í=ª£41!µ ô~`G p Jp2OØ6§ÿi`áÊÿìXÕ93b¢£­…c‚ã7à ˆö–æ¹y£a³—+µ£¥ `šîˆdeoÙ¤E‡n3½_ <ol`¢;WÖ÷3î­rV òY#(TÀÓ˜aËR?ýX¸%ÔdÇÔçåßü…»Kصýöã};ï”ÀÊ;=íú88§$?PÊ¿æÑ«3V$d¦­¥…™4£¸9ÙY:ZÇEòø³ø ?ÕbNQ=(Õú µÕ8œ´žƒooiba™<Él,Ì­MS¢Q”~00F¤h°ÕÕƒÇ!µçåöñ&{âáñ-íŠx[]Ó³•­ê€uºïe_{ºh¤¸Ø`ã±[¦g˜*ÿàÁ@ ùEc úaÖ‰é/zFDÇxüp30Dg³JÚû½/Ǹþñ:«önߘèÚ"Ð?Âh­/›ÐåˆÌ@´IQZ”0uskil£˜ûï׸WcŽ–Z››™tª#?ï’¦k7mê¯Î~ãÞž>É6Ü?ó|ï"¡ŸPôDê7SžÚWp ºž—Òïø»>ÆU!T¼ö›üu<"ßWS9ÿüÕåZü•_ˆŸÜ¨âµ[ë¯Ó i»TåÜNŸ¶VãʹpèÔƒqùášj~¾ Ž›ƒµ« ÛÙÑFK7ÖôV‹ó³6åÿðcŒ½è×úg﵄ ;ÿsè¦Ë±_UúW[nîp3`Pi/Où{ô k¿àFõ;¿:~¼ƒ¬û±?=T$@{IFJzo ¾€%Çp¹ãð ãð«¼S8Lº£$3¥!@ªošÀhõwˆH MÃñ(°@wBO³(3g]y)ßÄ“}K®Üõ×/³_ª_x(9>Ù«õÄOèÇ¿x{¢è†´›löŽ&jØå_²çŸó)Œ4EŸ¢ìùï[Ž\'ží)ºödÀ^gu|þ’¥¼ç{r0d_5Q¹ '£¢¶"&q–‘Q]VÌ/(4Ñ£d%Å^<N’ûÂgèáÓ½6ÊŒKZBCæ"¯ÿù5óiK·n³q¹R‚fûPµ2ʈ´³²¬XO{&V®ç¬))`HjrlaÎÀ瀩¤¥Ç¯’¯GphN~ô,+*ÂèhHJ;rx÷MhØÕÃ÷zvý‚O:X÷&—’Zv»”òü^L|w›žÄ²ÞŠ@Ó ¢¶¶ŠDƒ¹ú7*Ú€¦œÐ;W?]8ÀËtÐñÙÞ}|¹Ç${¦Ë ‘|J8F È/TSZ0¦êÆŸÅwxféݨƒ_ &äžV¹<60àMdêõÃiÌ<¢úŽÁiç©BÏV³õ¦y’÷|âÏöï³7Å Oî”H¯WšDr`:ÝÝެZ +мüÔSFKGCÃÂÁ5¬2ä_0¹_ÑÖQšøæùË7ç÷¿hoï g79zeõ’_ïʫɴ½~xþ·ÿÛ)˜=þº¸v½® ;¾ó¢w½Ãû j_9ü°¼ çô9õÌÆÉŒuÜvK'»#­bKöÛË7ï~|v³¢³Xdq5é4GøÍÀ*j%‰NMIá$ü°´„XøÛ7ãò®s1éØDÖ„J-ÒÈ0Icu®MgKXðýÐìвCØùÔ,N·Ñ§ÝþT.ߣñé— øLÐ’‰Ÿw‹ýÝ6ûú´{g/&…ÔE·HÃß2¹óƒŸ‚‡[ËsøXA\9·—Å>’’õ.4ŸE@Û'­âr° Ëñ€—´§²ª´x©‘Á„*ŸPUQ ”Ü`öôa\Z¿fqÀÏA]úâñÛb ïEb±‡¦ ½üÖÈ´­@QÌýGÁ/ÏýÁz;fÑcÁ)¦ú2Ë8ZÑv°*íý‹ôG>µ·œYHíij£¦Z½éëÇp¹ãð cÛç8î1êË‹¸ùçÇâ(¬êƒIê[IB7Âp—’.Íý6Õ7ì#ĹâFÖ'Y»¬ìS/ã·{iw,‹4;ÙçzÞîÍßî{™xº*œ29™zÒyhfBT_æ=ë“ G †¨ÐÃf‡ö.Üëü¸øÚúð6í‹ËÿŽX¢f¢g…?¾™šYL˜I!×TWÅÄ>Õ®V¹åëÇ·¶´™—´Ùm4Ãm$Ä£æJeS±ß¦BZëêh¨¤¤ âãK–—•KLJžê£¶Ú¸w ïÉoì¼Íx0‡^òf#ì<$µ¬€×0ËÆ#Få™IÒÊû´•7i8)üù]N†”‹ª+*ÈÇDG‰ÈwŽqdñÞ\‡nÄ!u÷é:o¬¶ß;U»Ãªv1ø_’ƒ·ÏyÛ„3ådIO™d 
i™’‘ýðø’!ÿ¦·¦Í'¯¡FHnüÝpãˆ{´5¼rË}ü–OÁnÁ½ÝÏl;ñ¦ÑŸ¾‚œïFÆFn} yÞÑ‚ªcdaŸ¢wÅï,e±é¯Áï §M}­X06Mûmš?p™—ô/NÕù/gâ-#>æƒ@ÑzÏy?aSÿXÈu#Elý€Šçñ=¯*FÆ2Ãt£µ¦”ŽžŽ—sÂÝS2²r_¿~%V}£³‡\±þ—XSHÏižÓˆM\*VUFÔ>ªË‡gŸ}Žz§‡Y²òòPè©>æ%–êkŸð;)§cƒ‘(™'ýÀú>Íiþ„~9ý•ßžÿBo:¦¡¡²Š€ˆ‡¡ÍCô*ä$!ÙúJ)lü½kŸzœ8 èdÁŠî}ú|JL› KhîËí6ž³Œ@WkU @A)-.<‰Ÿ® ªþóKòÔ× 'd§')«©Ï̹ԕd?}ˆ@ÕTNqÕ>òºµ¬ ×Å~% «T‘“Š|ÿ®¡ºœ•“"˜@riÁOç)“l¢§uÂÏO^Ûh&“*Ãh˜ñ“ÿx¤F^è Æ(¨$dðò3Î0¯m5eÝ]X9IñÉ4©ªýü’¤²Ø‚ÜŒ Çàü®mUiôƒk·ží_ÈÈØ_Æi½ôXÈ^+5Á‘$Þ§%«©kN℺‹”b>¼oAÕ3²°AÏ:ˆ®6TuE©»ëZˆ HõA€0/ƒÁ”5u¾ÅE*“è™éÿOÏ´}v  w£˜”¤0®?ß?OÍñçæÂ!Sê óþuQY ¢õ5„.Þ_ þÇÎ7Úc;ÊYÆ”ØÆ&Æ“ûífÚ~§Î4ÕW3±qB¦…FU×TV¸9:ÌØõ S?êZ¯Èñ5¾ ‡w°ØÐ8>ÄpÉ‹§NrÖýÿAy±ùŒ]<ìæ—›G—6çmÎͱ©± —,™Ü±–Fÿúlml``f%?“£ã×_ÿ—þúqïßQWÖ„ª×ÕPÜé´ 3ã?hYØA;ˆ/qu/x€T©-¸±g³ç¹·cî(jö[àãšœÃÔtõu3-mg­Ž½£ '5©.lZª…0«°61<}îB]E ;ÀcšQ»á[ð&n„Ew½}Ž_²La™5ðW$¤a·¯Ä‘y}Ã?VoÛK?$‰ŽÅØÞá||ü-ptIâ¯æèßW¥[ê´^?œŸš?ª?­bg'J¿%³°±O®™ ëeá!Kì7,p£Å{lø+kë3yR-eůi_‹¾’QZàü7W êj\׬"yÍ‹eÒÓÓ ¾¥Œœ'Dò4úarDù÷T£’œÌ¤k0µ²ŽŽxµØÎòÃÑoBV®š¼f3ÐTÍÌH/ÍÍâ—]àd6”ä´·µh© ÕGB$þ«­µ˜”±¢"ª,¨Ôü|âùoÿÕâúWdÃË|ÿeD›1Á[õ×\ëM0|XÑR…“$ pMSµf®ÎÎ×®^1sÝJ=ŽU§Ðøÿ†_G÷ÝÿœMúT•ÜÑÊŸ >FÇV« Þq±Ÿž¶ ~L –°Úi a‰%Á5Ä”\—Ø¥:T¬m;z›·çêØ%DBZ¹+àÿ{"+þû,ã±oTufJâÞÝ»¦Â¤‚´øœŸ_c#uM²EeŽ—‘™áóº9;œyúH>‘…<î $ùSèsïm>$¬ÓÙiíë×Í\¶PS/ÜÕÀ°-õ_cöîÙ=•JTådrrò¾%|ÓZÐ=3Qa2ò bBSjGpsr.1)-¥0¯¡®½ ‡Ó³ñH‰«éêkÊó@ö:­@²³Ú¬ZtséZÏ1WqPÝ•œ6~e„ã³?÷Ã~Ø1B=b ±èÏ´Š?‰7³¨ïxY1šoEO«8>¡³ŒÎ–ð§Û¶yO}°ÖªåþwsÓÅ•5¦9g$v´·:®\>ó§¦Á<<7‚¡³©Ó& ÝBd¿»ë̓ökؘ§k™D ¶ÉËëÊåK&ŽÇÎêN–’‡'y“3 )ã`.ö¶vaLÖxLh5ò!Óú6øÎæ-[¦’•€5¶ÖWîäg~•_´0ýpAZLww·µ™ñëÓÓ»º­¿{ç¦©Óæ…¹:9E7öõƒ.®n8R}¤D[óàE1S²ÒŠÞ¡y½XtápÇ~9¸áÙ^ë]ÑC·†u ;Šÿ 0&ê¾Ën“±~"Þ/\X›@µø=o¸*yÞ~ÁP¿£½.Æ£¶j7×ý©Â$}LjÔngG»JÑC†;M“o36ùøøÖ’Õ nt"¦#4Ðßc£‚4ëx¹»®½t#€š†vb)%É•9é•%…7¸ÍÖ€¡³“³ëƒÀëK¼\Ÿ ®ûý£s+k a¡i=; ³Ûz÷Û·ð$“,Ôü!9"覅õrqaÒ'p——–hm7úôä¶áªõ ÍuP`;CïßXïáÁÆBšŽz¯õ®®\ý° ¤ÂB#³ì[r}uµû:ÒÌ¿àåYm¿æÉÃkàóNEIµ°Ì²»ë탶v«… Vê#9¸Í7ÛžO6üe-ôWïG1ƒ•ÆæKŒõt”¥¥EÃdÝbÝô¤Ìbþ1w8›Ñ¿ÕóVúß–ÈÖV43(+ÿ³dð#:Hçȇ%ñ•å7;Чî ..—^æ_X6¾j ã“½ák¢ŠWžŒy²G— ïî´v=GØî­Íz2ýõÂÄ›†ãÄ’oñõO·= zã•¶ŠøV:çÒzð:ixmJû.U:Èv§ êÊòp8Ý«;—L=N“^WKÝÛG÷¼¶l!mßÈõ7nßooi’Ñ0X8&”—ÕÔP?‹’aÞuë×ø_Z²z=œ‘iO‰í|}ÿÚÊUö’bÂ3p:>nNOM×®_1\éJ–i3†1¯¯¯vX3’M:Úл—×x,?ŒkC…ÝÚ¸q3’” 
oóò¼róN{K³”ªÎÂñÃ9‰‘ím¤’|Hˆ ­utºwû¢±ƒ'-¾`ž÷ö×÷¯;¬uä @ªo÷xŒ[›õÀuÙÚ'ùCª¨§`¹6x£˜ÏùÛ§¶éÞ p$~íJ!.îž4÷½àáG"Ù‘½+µÀ¶¾@oÄ`ZÑhƒÁ >dßã+ºÿÝÛ7tfQ}¤d¡€e,㪶íîFgbÉçPt×­ÿ¥Èêr&V^ÈTugïºÛa{íî,ûá*…o1<@æÓ»Ï¤¸ìÕDyAU çÑ>›Š; ™ê BNRœg£ÇåË— lœ¬ä?o³¡8;5>fßÞ=SPô+<Ö9½|û>þõcmËUdÏ$Ä¿²svœ ×Ãˉܽ{×¹sçU—X#ù…Èžlsmøãû[¶z“vÌáè@r°îÛ³ûÌù‹Šz&ÜBâäOrcmøÓûÞÞ[I5(`$ÈËHrqn¸víÒâ•.ŒÌä?y²¹,/ácľ={`Ó r½6¸> }“ôö™†™-Ù3IIļ ”ZºÂŠä• òóìØ±ýÜù šf¶lÜüdO&¦¡*âE÷¶mÐÀNHõM³ò“u|œ×9aPe™™Iq?|ˆ~•ö˾yç}ôÎû‡?Õ1_ŒƒD\Ùéíó:¬=ÇÉ-y¥Äßï­º7Úø”¼²ÊV@Š „Nz‡ýÞÿ‡è; nì²·ÅÚãûm׆wÎØX˜îÿíZÀ:&V2NIBëŽ yÈÇ/¸{û¶é;Ër3㢲òûçômœ¬ä-9œ×­äCSpéhh|÷î |ò¼à[ІÙJ2~f³ãÞµ67ØÿÛÌŸ Ó÷íÞùøåë¸ÌmËÕd<-+æMGGûŒ‘Œdgùmß^Ð3qp‘qJP¥Ä…!98÷îžÆ<¶VæyE%A·ÎØ’³ŠÆ¢ªÃŸ=psßÀË9]KÁéé÷ûî»óðqÁ·45ãed@Æ§×Ø.Ìþß|¡˜R}3ûNeáSѳËÖ?‰õXüŽ%ýCñøkó ïo¾È±jC%ŸdÕ’wQÌe»Œ7‚ƒ©%ÊuçÉ]&óЦO«Gìo¯«ê«?òŠE÷wS›rûâ•»÷nDå ³Xà~°€—z-#ÓSá9ظÞ53ûç«ÛÛ­#¿1rèšÒ÷¯Ÿyxl$íh¢a!ÄÇë»oï­ûApvòí m_?…ÁéèfErŒkílJ**ïÝ:¯½Ì…‹Ü¢“Φ×oÛ¬\=óéR‰±j¹eEUÍ­€‹šæ+ɰ öðŽÝj‡™:Û**ªÍë¿de‡Ý¹úaz™ñŠAUD<ä¶~rºÏ%&$à»w¯ÿûÌœ¼Rjºäç‡Ó"C˜™3ã‡]׬Ê/. ºu^oÅZòkͤèhxýðîê5ŽÓ7Фú†—d3Ú·+r˜µÿ ð;§¼øå?Rµt¸þ!m÷ÿT¿ÊÁ˜Ôcî¯ðÑÇÁgíj‰ªÅƒfð¨"ý£\=4UM`j^ ñƶšo'Z·0oã¡@§ç ¾ö™¼´„¼ô¾ç¯Ã ò~ê­p‚ÑCÎ4ªúý‹ sËý¾ûf2zsw][ߨtãÆ eu²Yè¬ -&7ë›§§ÇLŽ*œx¸÷ÿæû!6ñmØS[gòˆž)0žŠJHͤ%0pIŽKN{s碾­‚fúQ [#Ÿ?’•ŸÅ&%Yi°< y]ZT¤»Ü‰HõÍXÔ÷>ÛxΖ(»¦·½7¸ì=³ÒL[‚Ÿ‹hm(Nýrc×ɨÁǪØêÔVvàÌ}Å› eâà€¡˜i·ï%úèjr˜Ú”Cv‹ü†T–‚ò5f»Z`Ýz÷åMŽå©}þ]CµÐÿí „ÊcNñéï%:Z,´:× ¤ÕVÝ9°R…Šû½ÈNý'„#¥LÌD«Õ«ÈKA’o†aciÚÕe™žgáæCC;_ûZq­õáOè.6œ­(™™ißî5u¨KýÄ•Ôçõ¨­ªœôϱ·y{ÛZ,/×¼DW,‘1 áïß.]»iþ.èG‰éøø*˜…}Ƕ­síÚtÔUÀxÝÈÞ}þÎ ¦À´~zùˆ“‡o§Ï¶¹p=vÖ–]]]ÏÃÂ3S’,×oŸ¿mp¸¶¦wÏUÔÔ}÷íž• `gañݳ«ª¶îÊ~ ö“^¤7_Mʲ>IŠÃO<³¶˜•kXj¨–7‘Q‰ÑL7Ïß5JLë‡ÁÜ||»·û Õ7söÕÒÖí·ôåÖ§m¼{rçÝ“£$v1>…x-QMx7ð÷[8)¶à?Èþ[uYõî²ÊK¯hq]!ªÊêâ%Qï-ú¾~2aÅ/ä·ï~1ÀѪýæ‹d_–‚ýî«.ã×7î4ÍßÓß}˜ëÕþ·:®w0ªÊŽøTÄZU}‡8³ú;ô;Eï%Ó… wæAEEµÂb)XBßE~ûúEËb%ç<ºþ¶Ê‚a/Í­–ùîÝ3ëƒdgùã?êPwîø#ù…å´ )æO"l \WvâÇŠ’BWWW3íùhÌFzZ`‰JH {úQÓÜŽk>%ãîj©|,§¤ì³eÓ\¾N]°$¦|yý➺‰ÍüʦÓÕXñꉲšºÏÖÍsÍÛY[€åùëðŸ?¾k[¬bd™OSÔ:ëJ#_=]lbºo÷ÎY¿.vÐ×Ô6ܾwODRZÃ`MIýpV\DuEù:WWK#ýY¿s#°¼Žÿ£iaÇŠœO ,c›ªß¿x¤ºHs»÷(؃Tß,.ë–‚s?`j b£câSRòÖäUµô,¤à¹¹‘ò‹Tu4´tµ$‘Ãt}!—œÆáŽ&Ÿ9y1¼ 
µ…þmÓ•,ì)8¦<õöµÛÏ>D·0ò•·œù7G#dÏÈ‘­n;ƒÎžy”RŽ¿6~i5{Eæ1ªí})IŸHÅTN«аw±Éßò[¹Å*ó«¸EäZÛØ.Sáš‘EÅ=ç´—E‡¿~ÿ!9ógvAe+áOŒ ÜBªòšÚfV¦¬ÔÉÎ:¬–¥¸¼âQÐ fv¤Š¡õܽ/è–¨§´t´ö«Vé̱Ygì,Ì;}¼±Xlø‡¨ô”d•Åf<"RsùÖ7•äľ£¬®egmNM=ïF- °TÕ6ß¡¦¥U_ºb./éN…æ|««®\µzÕÞ91šjJ`©olzØÕÕ­ajC Ÿ»#©º1I¯šQö«WûîÙ5—‰µ±4,MóŠKž>¸ÊÁͯ¸Ø|./¡F‰éˆyó×Õ «9ÇÎ!9X÷ìØúá׿}IUYbÉ-(6wßhP_œùV]S×ÞfÕܺéÆúÚ`)¯®  c`R3±žË+y‚Ï{rDHSc½ýjû¹Ð a᪾~À"ú6`qœxÔ´?øØþàð5óªzü –¥qØÞaâÕöƒERË, NèùôWx‚²Ë¹A^BÞˤô¯ï#"X88åuMæÎx9tCeBä JJ K‹ms»K”O–KÀÒÔÜþ¦àgެ†¾°¬Ê\y/ReßÓÓ¢D$e¬ÌL Ô•ÈÌ’¹8X f_¿ç¼{ö†Ψ g:w%â:›Ó¢ß×TV.1v[k?OIfcfÚâ‰ô‘“Wöø%•¢¾) ’{®Ü†úü)U_glfêîê4ˆØ»Ÿ÷269-úC’O@VËhî ±Ã6×&~x‹éìXjfN0€¹ì‡—››€¥©¹9$,¬¨ WVÃ@hÎÌú£Àu—g§¥%ƈIÉ-37Y¢¡2—ÉäåDîèé4KËÌz÷.ÁÌ&¯k LÜHŽõN½á)ª©åSlÌo4´´übRÜ¢2$Ÿ|B_ü£¬èçÂÜtttrJJzšê‹ÄW““TTTZê`!|MÿöýsrJeY1Ÿ¨°RHœäC¹(p]õ¥yŹ9¥?¹xÕ5.RË´d6fg‡U„Ï 8‰Š‹ûžù•ã“â•™}‚ëh.ÉÍ*ÌùÑÖÒ$%'¿XOWÙÊ ù‚ Áà`» ð‹}µwtÄ$|þ–‘ŽFcÄ$AwÁÊÉCrwi®+ùùtX FFNÁ@W[É֬ɌX>nN·Þˆ µ£b?ä|Ïddbá•â“&yH,º±¶$/”y8'+¯¸XGKÅÎlÈÀ/ÑÕ ákjfÖç¤Ï5•¥|¢R<"’œ"$ŸŒMÑ­ëñÃe…¹<üBêFÊ`!³D²³¬stèsªÍbzÂ:Z1i.QééèìjC•þü^”û£½­UJNÑPOGy™%êƒiÁÂĸÂb)@”Ú1¯¸$çg~iqQMe955;+;'=‚‰‘™ÎHEGGM §¤¤ÿ„Ãuwuu]]Ýè6 º³½¥±µ¹©µ±±¡¦ºUKEMÃÉÍ#$,,+%Á-'®%'VäM¦²œ Xú¿‚:$óGvA~~eEY{K +;33;=Ž`¡îarHr¿.tGWg;ÝÙÖ„jkmBÕÕ‚¥©¡–ž‘‰›‡WDTL^JBI] PW‚L÷Wqbmf bKþ‘“[Z\RSYFCKŠ@Vv&Vz‚Ž™ÜD'A{Æv¶uw‚–ÜÔÞÚÔÚZq U×ÑÚÊÎÍÃÇ' -!.ööή¦÷ã7) If"$ʪŒö!!úý¬ì-óoŒì=ö !d”d*ɨP¤(‰*…††ûÿt®«”"Üò}¿zõ:÷ÜsÏø>çœû}ßç9ÏSOµU=3ž¥Ùßdi))Ö  `΋W¯DDÆ>}ú:þnòJÊUªV“‘WÈ»]T–—¬$Áˆ‰UÈÍÉáa¡é9Ù™™ïß§¿O}ÿ6õMâë·o’rssTj Æu›j4RkTG¯Q¯ÃßX¤Ú½ºuáuû”éæää<ŽyñèÑ‹çÏ“^ÅKW–SP¬ª¨\MFNwJ²râ’2’ÒRÂ=šàfœ“™žw¿{›ñ6MÆ­#91';«ZZuêªå¶±jëÆª¼.å<°:ÚMu¸j+FÊÛ÷÷óîÃQ ñ/2ÓÞ+TQ–¯šw+––•G<+JÉàVœï‘¶¼ûpVF·LÜp7HIz’œ„û°¬œ|õZuÔWoªÙ¨E­–¼r¡yEÞTå„Ó|ÝÇ<ñ0òÑ³ØØ¤—ñ’•¤”ªâ­²œ"2„J•å+HJåÝT…OK“™ù!ã]Fjjú{œ’¯RÞ$}HOW®Q³NºšÕW«­«^“geAßbYAünÔUÅ_ ?T…âöM1Ðk…¿¢ËÊÎF’W±bEɼ¡½¤)˜¿óLFä«T©òá’t®ÛYŠ[‰¨¥R <ãït×,''‡ gee±ßš(nßIw*VÔlXß={qï«PA"O«Åy¼Êº‚(ÊW´ª¥ûðÏ€s¬š*þ¾{Z¶jÕJBBâæÍ›tS%Èú‚ ¾€$C²\ å\FÓk¤2|>¿ôt*ÊT¨PA¦8”úÙKç-݇Eð´ÌÊʪZµ*œYA!BHKK§¦¦R~)P뜜ŠAü ¼zõªcÇŽ‚¬ ‚!”””âããkÖ¬IÑøuÖÇË{ȇ/&&FÑ ˆrLvvvJJJÿþý)YA!BÈËçu–¸téÒM›6Q4~yÝ>ñx666 ‚(ÇìܹSBB¢]»v ‚¬ ‚!XíÓ¾}ûÈú~>|Àÿ5kÖõDùfùòåÍ›7§8d}A„h‘-##ƒÿNNNóæÍ£€”: ¬®/ 
--MV–úô#ˆòÉñãÇß¼y³aà AÖGAˆ™™™ø_¹rå+V >¼V­Z“ÒÅÛÛ[^^>##CLLlÊ”)Û¶m£˜DùƒÏç1»­­-Eƒ ë#‚ D‹´´´ììlI¥J•ÌÌÌ"##)&¥‹¿¿¿œœ¼:==ÝÅÅeöìÙuëÖ¥°D9ÃÎÎÊG-&²>‚ ByýúµŠŠ lØ0ŠAÖGAˆIIIø¯¦¦Ùóôôüðá&ÚµkHÁ)nݺ%&&–““ÓªU«ààà—/_ZXXôíÛ÷ðáÂ(ÌŸ?ßßßÿñãÇÔV‚ ë#‚ D‘ððð5jhhhˆ‹‹CNÇïää|ô葤¤$…è'Ù²eË¿ÿþ{èСÎ;ïÙ³gùòåûöíSPP˜9s&¦)>QÖ3fLlllJJ .vÜN) YA!r„††Ö¯__GGÇÓÓsÆ #GŽ 0119þ<Ò—Ë—/·hÑ‚¢ôÃdgg»ººÆÅÅmݺµM›6ŠŠŠÚÚÚ+V´¶¶>vìØüùó.\HQ"ˆ²K»víÌÌÌÄÄĺuëfooO!Èú‚ Q$<<\SS³mÛ¶ëÖ­kß¾}ƒ à{—.]²´´ŒŽŽîÒ¥ ¬ÆBú1V®\iee…Œ°B… rrrÈÏœ9lܸñ³gÏúõë7vìØ-[¶P ¢ÌñèÑ#Ü9===qóTUU]¶lÅ„ ë#‚ D”áÇ·iÓ&11ñõë×p’êÕ«ÛÚÚÞ¹s§^½z7oÞ|þü¹’’Ò•+WhÜá’’••µ|ùr¤†‘‘‘U«VÅœ!C† <3]\\Ϙ˜˜¹sç¶nÝúÖ­[.‚(C 8ðÙ³gÉÉÉ;vlѢŪU«(&YA!ºDDDbrîÜ9¤2ÎÎÎ:u‚¨$%%ijjN™2™ÍäÉ“/\¸ ¤Ƌτ ìííkÔ¨qõêUö´±±1TðöíÛÖÖÖAAAˆ³——×?ÿüS¥J•7nhhhPÐBÄñððÀ}Ò××·qãÆõêÕ[°`ÁàÁƒ),YA!º@íÄÄÄ6lˆé:xzz"›éÞ½;´ÄÊÊêüùó=‚º°.=ù|>\føþý{r¿ïC>yòäË—/y\K05556 âêÕ«92oÞ<¸t×®]‘D¾yóÆÔÔË@¹)t!šà6¨¯¯¿wïÞääd\à***AAAêêꂬ ‚iàu:::lúŸþ±°°`Ó‹-²··5jÔöíÛ7nÜøøñc%%%h!ܯgÏž˜såÊÖj‘ø&½zõ:zô(›†õ1µ³gÏVVVNOO—‘‘Y·nÝ”)Sºté‚À^½z²]¥J•'N ‚ Q ,,ÌÌÌlΜ9¸âe¿~ý^¿~ýæÍŠ AÖGA” ëëØ±#›nÚ´©œœ\É ^}úæÍ›ñrÍš5ëׯoÒ¤ ÒJ===ä‘+V¬0`ÀÍ›7k×®M‘$ˆ? 
nqÆ ÃÕš˜˜ÈãêðqÏôôô400 àd}ADÙÀßßûöí‚—½zõÚ»w¯ÀU`wÐ{{{ ^.Y²dáÂ…xWAAÁÃÃѨ¦¦¦¯¯¿oß>))) )˜7o^VVäM0'::ZWWWðrݺu***K—.•——ÇËI“&!ƒTVV~ðàAÍš5gpŒ5jÇŽÏŸ?'÷#ˆß.á¾}û†„„œ={–Õé½{÷®uëÖVVV)))‚¬ ‚(3xzzÖ©SGX* 5‚¿‰‰‰±9€ÃÀX‚‚‚ò¾„*V„(¦¦¦6oÞ<,,ìãÇ:::OŸ>Å[Ë9,X€•üÍQݺuë©S§îÝ»'˜óòåËÌÌLV0GVVvòäÉC† 9qâ›Ó¦M›/^hii?oaÎvŽ)S¦9räÊ•+Â'â×áêê:zôè1cÆ.O\¿fffŠŠŠ¸éáH!"Èú‚ ˆ²„³³s¯^½„çÔ¨Q£]»v›7o†{f:99YZZ*))EDDT«V sBCCÓÓÓ ¤¥¥=<å¾¾¾p¶‚Ë»¹¹Aù^¾|)...˜éèèm†ø!…²™ý8ž={Ö²eKDdúMÄOrêÔ©1cÆ´jÕj÷îݸÄó=z„ËÓÔÔôãÇ‚¶AÖGA”1¶lÙbddT°N33³êÕ«*00°F=zô€òá]˜ÌB,séÒ%hd… V¬Xѵk×rO‡§OŸÞ¹s§à[7nܨ²0P8X´­­í™3g„ç+((DEEyyy)))]¸pAOOÍWUUeë‡[²‘W¯^MíÍ¢¤¤¥¥9::Bó „›R||¼ð»˜?mÚ´uëÖ±¶ëAÖGA”aÖ¯_¿k×®ÂÞêÙ³g>ëcÌž=ûÿûŸŽŽäuA) ]»v <®oϵk×Î;wÊ”)ì-KKËððp×AÂâÅ‹7lØÐ¬Y3$>R"™““£¥¥5räÈ‚§ì]¨Za®ëää„·UD#ß[:uJNN†œcâôéÓ†††‚·XÕë_¾=bÄ|\¸Â ˆ‚8;;㊓––Æ-n-‡ð»¸GõèÑ£V­ZÇ6l…‹ ë#‚ ʼ°aÚµk—””töìY11±ãÇt6iiidQ<®² -6lÒ/ÿÈÈÈÌçÀ4ŸÏ‡ˆ®[·.22ò3mÚ4¨ è‡±C‡JJJ‚޾ɱcǺtéRôzÂÃÃÕÔÔdee{ôèñÍþåpwwÇæ6nÜX°v<Kº_¿~Ož}èT'þ6RSSW®\¹sçÎ:uêàNÒŸ£àb/^¼À|\)»wïöòò¢¸d}ADy)QÆ Û·o_Ä2p3haÇŽïß¿_ÄbÖÖÖ6¸ldÍš5C† ùÆ÷VÅŠ‹8xÜXS¦Lùøñ#œÄÆÆF° Ô±7‡ oƒîÙ³‡ ™åàà)©}Ú­[7vJƒÂ– ›9sf@@ÀĉgÏžýÍ'– ‚¬ ‚(W@º`YÅì÷ß¾}­[·þï¿ÿ¾»pÓ¦Ma,ŒáÇ_¸páôéÓ‚Î' ")))¨ÌÌÌ\¶l¦N:HÎ 6…«Lä̹yó&öÍÝÝ›CÚ×»wossó_Ý»zdddãÆ!Z¯^½*æG6oÞ\¢n!îܹƒèÁiÏž=ûÝ…'pdgg?þСCk×®ßËË ŽçééY«V-œ´#FŒ>Ÿ¿IxxøÂ… ñA8&<<<(’YAñ·€ìGZZzÒ¤IÅÿÜ@MM >Ö½{÷b}KU¬¸wï^LÜ¿_SS³jÕª§N*Z2¥¤¤fq°—ž5kÖÙÚÚB±’‚iÃ!\ _B^xîܹçÏŸ·k×ÎÆÆ¦OŸ>ÊÊÊ¥·Û·owêÔiÚ´iH@‹ÿ©èèèŸmk×®]8 ìyhhhÍš5¿»¼„„Ä&È3òàÝ»w=zÉ’%EtéiÎÁ¦ñ©­[·b£Ož<éÙ³çäÉ“µ´´èJ!þ¸c?~üòå˸otíÚuРA9¾+‡û÷ï_»vmbbâ¨Q£p&9r„âIõAp $÷±±±%ú”˜˜ØãÇëÕ«+Q»JÈÃÇ™ÅÁC5j´oß¾âôÑbÍÁ¦ÓÓÓ7n܈ ºuëæààжmÛo~ª%ÇêÕ«sà3®®®W®\yÿþ½¡¡¡¥¥e=JôˆàâÅ‹‘GîÙ³©dI>gÎÖÀ²¤ Á…»êêêÂ]¿ûL °<³1ž7o\}Æ …ELð©1ìevv¶³³óuttìííû÷ï/##C—Qêäää\¸pŽçããƒë '*.üþù§/GqÖ€{ËæÍ›ýýýMMM'Nœ8˜ƒKõA/‘‘‘ýúõ‹‰‰ùf’’’Ïž=ÓÐÐ@f6hР’~™>Î㪠»té’••ùî/÷ økÇÈ^æææBäöîÝ{íÚ5MMÍÿýI^azðÖõ¥ðL¬ˆd™bXX˜ŠŠ rM ìX­Zµ‹…‡‡ÛÚÚZYY999±q&JJ\\4;))éÇÊ ;ÿþý“'Oâ谫߬ð,ÁcNÉÉÉ#GŽ‚ âo'**ªM›6?œ¬‹‹‹c%–––ÁÁÁp¶[ ö¢Åãjð&Ož¼oß>$|Ë–-+þ Ø|"—ššºyóf¨ vL[[»k×®[uuõ"Ö`Æ‘o>lråæævèС7oÞT®\¹eË–²²²×¯_755•––.éÁ2dþüù?9rº-Lj# ÞP/99¹’®AIIi¦_¾|9vìXäÜ3gÎ,ºWªªªN‚9lìx)2øÚµkc==zô€9Ó0ñ| jççç‡ëç .I\>>>***ÿüóÏèÑ£…ë¥ ‚¬ ‚ 
¾ÀjØbbbrU—.]Z³fMãÆïß¿ÿ3@ÊÈȬãÀôÓ§OáižžžÝ»wwttüî@ù€ÇŽãÌÉÉÉñðð8~ü8²ÏÌÌL$p§êÕ«¶’G!•„ïÚµkíÚµùÞMKK;wîÜÕ«WIXë‡444à„Xä²ßì åðáï_¿†Ù–J îܹÿœ={ö„††­¡˜ [8˜åîÞ½ÿêÕ«Þ½{cWëÕ«W|“,˜Áã;vì„vKKKHx1û "Ê 8sBBBüýýoܸq÷îÝØØX˜ëÖ­MLLPâÍ9~àÌŠŠ‚àáüÁÊq}õèÑcàÀëê ‚¬ ‚ ¾ÒúmÛ¶Á@J«Ë)S¦ôïßyÖ\*ƒ€«©©¹¸¸°é¤¤¤éÓ§ïÝ»3 ðn‰tåž™••tww‡¼ÅÅÅiiiaæ‰' ±Û·o‡Ð¶BYYÙ":“xñâ…¯¯/2`d«‘‘‘©©©uëÖ}òäÉøñãÏž=Û¶mÛÒêQ†uy ;õäøÉ>WÄÅŇq°—ïÞ½[½z5"S¡>|8´­D+„4Nãž™íååuþüy??¿ðððš5kBŒ;tèðݶ¦ÄŸ×Epp0Niü¿ÿþ£G*UªÔ´iS]]]œ°;=Ž^ÿ­[·p1^¾|ÒX£F ¸¢­­­¹¹¹p—NAõAÅbÓªU+dÛ¥»Z$j/_¾\µjôyaiY ¨ZµêJöy'¶rñâÅæÍ›:¶)))ùckÆ™ =ztñâÅ °¦!C†8:: `S7oÞ|öìYýúõ™– -f»ÓZµjõáÌÑÖÖÞ³gO¿~ýx\¿‚ˆ?V'|ðàhlZIII]]¶‰%[´h¡££Sü–·[·nÅÿÓ§O#ùf}u–Jäåää¦r°—Øm„e÷îÝ>>>8Ànݺ 4¨¤Ïò¸§;q| :’õ÷÷GdHcË–-!É8.ÄäWÂñ7“’’rûöm\¹aaa>ŒŽŽÎÌÌÄ Ù¤IÜ.Ú´iƒRhÇñ3[Á)»C³Öž?~„(¶oßÞÆÆ¦5ÇÂ… ©,‚¬ ‚ø)lõîÝ; à2õbÂ*vñÇo~àÉ·ï‚ìóøñ゗Ч-[¶`[È ±ÝÁƒ'1}ûöíš5kvíÚ¥¨¨ˆ†½ 2‚w«U«6ˆ£àÙ  ¬ÛóçÏkÖ¬ ùD¶ —Ö××/¬ÒÊÊÊÖÖ–)ëµ G½à03ÄV…GFF" }÷îì.Ô°aCøaÓ¦MáBµk×féÞ½{RR’õœ?~Û¶m={ö,ÅÈc·óu³‘››ëîîîêêêííissó=z`7~¸Üµ8¾9ˆ$ðÊ•+p´$55NC†£ÄqVü°ü—o²³³}Šÿÿü󼍸]›Ìð»K¦¤¤À ‘Ð39Äv_¼x­3f̨Q£dÛ]»vmР††FýúõK±Æ ‡cÃ!<311ÑÅÅÅËËëÚµk(Y§N:wîŒø™mAwóUŸ ƒ Á1 ÷îÝCðá60äºuë"€ð™fÍšµlÙ‡_nº–Aù†‡‡£Ð?~üèÑ£'OžÀåàüJJJuêÔA)³ß`Åì§PŠ[ÏÌÌ  ÂÙŽ€cpª³ZAè·±±ñO¶ö$‚¬ ‚(±ï!C®Ì†Iøœ>}šÇ m·~ýzèVéÖ;………¹¹ù… ˜ûݺu úgxþü¹ŒŒLBB¹sç BÅ瘝8ÈÊʶ瞉lê5}út–©³Ft¡¡¡HÙ¡gEäèÐ!˜‰––VóæÍ±|Ië¬pq¶@NNÎÊ•+û÷ïohhÑ­P¡<^U€‘Bq’$''ËËË«¨¨ PØ%UUU‡É¦K$KÊÊÊ À³þo`Å0O©J {%!!ñ“ÁGоÛþñ¿sçÎÝ»w¡…0d>ÎV5‚ ¡ðÿç÷ç®S¸ %†W+ÊåÕ«Wïß¿‡ÈÕªU ‹â`û C¡´åø;2b©‹s›Fp ÏÌè 8èKd}AÄ^”zРA¿§~¯0ærÄÅűßþOœ8Q¢ñЋëNÓÅÅåòåËð›Zqñ)>Ÿèéé  ÀØ}¦¦¦Å©aû.Èàuuu±K‚ºAh•GŸÊÎÎFÂÍ´PP‡ƒÔ¿zõê¬a'{ö–¨¡¡QÌ^m°ØlWçèèˆXuéÒeÞ¼yÅ9Ò¬¬,¶'PDì Š2>>¾„U%%%UªT©jÕªpÅjÕªÕ¬Y~R»vmx,;,ÜÔS‡cΜ9ùÖóóÊ•+~~~Pb5ÊÞÕºukD÷“5„ þ&ß=^ÖG Ã?þV†cÁâ p3Fà¿°¥çææba ¦ÄÌ ü ±Â¡!V8E±¦ÖX ë©ÎþuW"¶cíáÐðûùúõkX:Ž2‰“ *ŽëT£W¯^t%²>‚ BDAn7qâÄ#GŽ:t©¹ˆìL9=1¢S§N222‡F²[ÒõÀÜÜÜ jþþþâââÆÆÆ°—=zØq”hUbbb…ÕAØ`k×®] Â4Òñ–-[bIX4®8®5vìØ[·n½yó¦¤Í)%$$ŠhDZPN»GFF²:"øì'¼‚y¤A†¨À`<®"n+ë¢sÉ’%Û·o—’’?~üˆ#0QpC°MŽ(wÈóìäƈ=„i@RRRÒÓÓªT©¢Ì…cçk}ûŒàä™GDDà€¢@ YcBü/õöÃØ+œZ¬† ÁĹŠM`·a¹¸ íÊ•+cŸ±Ÿ]è.‚V¡BGZZŽ+°Dv,]çÎÕ§ØÿŸà$ØÔ”‰¤"‡ cë,.:¦s7Fl›5kÖ€£{÷îtŸ$²>‚ ¢¬‚T~Μ9C† 
Y·nÝÆEs'Û´iú…¨X[[CçÍ›7nܸ|v„ ÛÝÝÝÇÇ'00¶ ¥¥Åzö‡ çø¥;É’ãonåíÛ·ì6„ðÁƒȰa&LB Z·n 3ÁqŸ¿jÕª¥K—öïßõêÕcÆŒ)+{Ž´õرc¾¾¾ÞÞÞMš4";oÖ¬Y·nÝÌÍÍÛµk7„CÔvFTب(8X=Ö®]ëæææääôèÑ#øI:u7nŒCÓÓÓÓÕÕ-݇ K´ç¬e1—Çž¯X±*øìÙ36ÅÚv2?‡@œ`M¬U§ššþ³ê¬ µƒó°Çؘˆ¾æ`-H±!8Ï»wï°u|¬J•*Ø.œ[©þÖ¸´°±.`Yþþþ·o߆¨Cw±rlEVVÈtr{„±¬G‰£‡²²² ü Û*Ψ$¸0q,ÑÑÑO8°QÖ2Û…yb+¬A,óL}}}ÖÔ³èjÀôôt&Š(‘‡^¾|™­‚ºÃxY­)ÛO¬aíH«é%‚¬ ‚(K¤¦¦Nœ8ñÌ™338Xg!"»«HOýüünÞ¼*)) ÿa#°™šš2ƒ‚¸²…‘¹®[·®W¯^¨lÑÀ‰Èã pPÏŸ?gsŠðUdêÐöð^DDòxˆ’~˜¤‘õ–·ùã˜Ã!˜¼gψ“˜˜˜½½}çÎá/9àQ0«ÀÀ@æiÄ$--M‚C\\ùøñ#Êr…s@NNŽ#«¶bËáð±Î_1(_CŽÁƒ½„ …rÿþ}Ö7&Š ‡•Â~boa‰8–œœœÒ(Dòû÷222°Gö²Çކ‡Ä[8RVyˆ5 dñ¾ÇêY5#¦‹ÙYÖÖ”ã"€]Šå`O1¢ÈàÕ¬Ù*k³*0yÁó™Ø4Ýu ‚¬ ‚øM áFæŠDmË–-ûöí)µóöö¾zõê7°“Èt›7oÞ®];¨¡¡aŽâ¬Yõt–§BüÖ¯_¯¡¡±råJÑì0;iff†=ÄQó#0¥‚£s€x@9(&$$ÀÁ5j¤­­ÍžgÓÔÔü;ÊÍÍŪXÒ/°5üGÞÏ< åøîÝ;))©|õ]Ø:ó”ã˜1c0Áž¦óññ9uê”§§'–·³³ûßÿþ÷“¯ žOž<ÔhÅÅÅ jÿ°‡Ø1ìkÓ(uuul´8#ÚCذ樨(6¾›`+‡¤A̘†1ÛÁyËêß vaŠ19d=¾`mØO>ÅúÚA1Á`µ´´ŠÓ( … ІµóLü +€-b¡^¬¶ÁÇV.µ °8 t?V.kÌÚ£ ª|™äãŒT3² L„B1„-Ó­› Èú‚ ˆïèß¿?TêÀ7oÞü³Úéáááëë‹]úðáR[}}} cc㞥¸-ä©‚ž'ß¾} £Ø³gO§NàHÍE¡\úõ뇤ÿÚµk¥;>8ü VžÏXà?ȹñÿÂ… [·n}øð!²p¤ÝéÌ™!ðÖ´UÚ0“a2ƒ¤œ=½VZ»š¯KR8ÛÞ½{q–Þºu Î?zôhœ%²Sz1G)„iÀÙ"##oܸqøðaf_°&œ*x«R¥J¬v»„°À…ú¬vÑÒÒÓ?öÔ"®ÄâŒ[‘‘ÌÔ……ÝCÙA˜6oÞ\GGGOOkcÂV*%‚3Õã šË ºÏa=è >ÙÙÙ{du‚‡U|ùNiÖÒõdžàÃ>†£`'° SSì Š†µž¸¢ ú·t/+‚ ë#‚ D¤GÝ»wGwìØ±ˆˆˆß¹id‡nnngÏž Ä Û377·¶¶†æ5mÚô÷7(E’º–ƒ9° Ù;wvèÐá ÔwðàÁׯ_oÔ¨Qñ?…d÷éÓ§¬–Ið”ËÈß¼y9~.KÐÅS86b(æ¶ÂÂÂüüü`_(G¨dƒ ‡€r455P¡tE=ßÓ˜¾¾¾7n„¦Âp¦NÚµk×¢E.**Š9Rëùó¡%¬̆ ²ŽL`†, ð·Û[ÈÛ`t{hIBBBrr²’’‚Ï:AŶ455=A‰Ö wH=Èü;w„ëuq2`Ó8:Öm¬‘‘Q‰šûBûYsÖŸ)GœÕ ã,Å ;WY $l g{À’= (æ‘UÀ€ýôPÒ¡sssY¡Ds°jF¦‹>|`ƒ…`s°DX4’…õ+šYAñ«X·nÝÂ… ñYû¯ÞVVVÖ‰'NŸ>}õêUd{&&&ݺuƒmåÁà@AYX†BD‘4CÿºtéòÛvÀÀÀiîÞ½{½¼¼¶oß.èƒ9qFF† gHAS7–#%eU:E$^Zhs|³wŸ´´4Xë•+W ô¡¡¡Hß!8¨Ž;¶iÓ¦twƒ‡-ÞáèÔ©ÓÍ›7YÍ$¶‹È0‹CÊΞXcÆò{†€f³)pgyØ{äïÇÇ"x:QCCƒµí,¾V¬X±%GZˆˆ±±+îÝ»G…"P°h!b[œ¦­%îÄ~w(~ß?ù€» -dêÈþãIIIš²_7XÕ"³wÖRW ‹¬ïVPü‡{qõ¡tàð?ÆÖñŸù*kV Ed~£ÖÒÒÂ4}Ñd}AÄcĈ/^D’7yòä_±~$‘îîîôöö†ØÚÚ6¬/G™‹²í³gÏb"&&ypµjÕŽ?^¢Þ#©©©ááá,›t·ˆô¹/«ÇT)())¡€Nž ÚPÁ:´mÛö†”µýiŒÇ)„óA{ÆÁ:wÅEʺÿaU¾¸5j„xbBÐLWZZºh…ÎwÕã¼ c>î!(5>Ÿ+ÕîâÔŪ~ìZ‚ ë#‚ eøðáÈ€¯]»¶sçÎR\mrrò–-[œ‘å@íFeËQžB‡,u¦²qãÆ… ºººZXXDGG#¥ bþôéSÖY?ëÈžU, óFV‡—… 
×.Lpp°¥¥%V…쿼ž„ˆÉDŽ|3kÖ,6Ž=;;s 0 6&J4 „##£ëׯ³éC‡M™2¥E‹8ù‹ßŠUdaOÝœ5))éÖ­[wïÞ ‚-Ã:Ùó~,ÎÆâ´Ò”””Ì÷h¥ÈÒ¹sç¼¼¼äôôtx#TõŽ+Ê¡cõáÅQk÷2scC ²+=!!þ†õ°kà‡¸Ìµ´´„]AA¡8ÿöí[\øØÄ;wp“ÁúLœ¢l,–ÖpQúþ"Èú‚ ˆïK;vl``àO>#àèÑ£«V­BÔ¯_¿iÓ¦Íå(7Ⴜ±Önááá0:&23dxõë×Gn·hÑ¢#GŽ@Ïòvç(•b‹:u‚ÿü¢'âþ8±±±0???ä¸)¼Ž=Wfjj µ[Æ!¼<’ì}ûöM˜01iß¾ý°aÃ:wîüÃ[ïω€€¬ iô±cÇÊ÷sU«V-làGÃ××·…‡âă·À1ÌÌÌP(ßJ%8ŒCxfnnîùóçO:åííëÈÀÀøï¿ÿ ?†W¶€÷²ÆÌÅY7 È«ƒÅ­ãÉ“'pcÇ~b]ì߇3ŽÂÖ ½„ B­ÙÙXmNN ˆ·¦¦fë·âž™””´yóf77·   fÍšõë×a—““+!…±M‹^ìõë×P8ø!«E„fgg«©©!¶l8MèœàqJœœì‘Ô|‚-çóÕ«WÙ= NX­Z5A+ÜbÊ*AõA”mX‹Aä?ܤ yö¸qãÒÓÓœœÆrˆþQóù|2Ò –°ÆÄÄ())± Û¶m‹¤ŸuÜÿóÉØqôíÛΆüøgVÕ¦M›'N°êÊÐ —.]:yò$/11QGGâÚµk×-Z ל5kV©oшCðòîÝ»6l€K z³gÏ.铬ž633sÆŒçÏŸWUU¥›CVV¶°>pq;w'ü›BxáÂÖ×.TÊË奯¯Od}Aå„7nÛ¶-))é¾Ýׯ_Íëի׊+ä‰àÑ!‚ÔÁ:à¥ÈŠ222š6mª««khhˆ´¦ÇïٓÇGFF***"»ú±* Ô£Gà÷}àÀS§NaqFŠd½è„¿èå–%ïØ±cÍš5?~ttt8p`1WoYù™µkוâ`ƒåÖI>«÷î‹‹ËÙ³gq=âbÄÉlgg,lùš5k 7 ÇG¶nݺsçÎçÏŸEa'544RRRl™˜˜À“KôY???ŸgÏž‰Ôù‰ruu}ýúµ¹¹yŸ>}ÎçÙ,Y¸ù¨³³óÂ… åää6mÚTÌ~;¦sÀ7`/7oÞ$÷+u?á™±±±æ™3gBCCuuuq 0@VV¶àÇ¥¥¥ÇpN?ÜöïßߺuëE‹•t ¾¿"Zç&&&Bçà„7n܈ˆˆ¨Q£ìÒ¥‹`Àuuõ ÂLNN>tè»»;n¶òòòÖÖÖýû÷ÿ=ÃÀd}AÄ0gΜ{÷î]ºt©˜Ë#ǵµµEZ°}ûö|yÀïäÑ£G§NB¾‚ýQPP€ÑYYYÙØØ”b)¿”ëׯ#UÅ>Ÿ?¾øŸ;vì† þøÎ_¾|ÎÿlÚ´)R½Aƒ-ä(£—À@×»†ƒƒÜFáää$¨)ŒU½zõJJJBL¨ÜÏP·nÝ™‚9¬a§««kLLLÇŽqò³].”{=¦ÓÓÓqnÙ²E[[Ÿ¥Õ¾‹²²ò7GÊÉÍÍ… ^¸pÁÛÛ®ªªjjjÊš²çcÙ°1@øSþþþPAOOÏŒŒŒ®]»>ÜÀÀ€‚LÖGAüyÁ÷nܸQLå8}úô±cÇâââ~çN&''#óæaJ5jÔ°´´ìÙ³§¹¹ù4޲|–ªêééݾ}»8Ë>|XZZÇþûw{½ñòòÒÑÑÁsÿòw9ÈËËÿÇÁ.yóæY[[oÛ¶­è^sNœ8ñòåËš5k¢4ûôéCw•Ò!ÍÁ^òù|ww÷­[·B-ôõõÇgcc“ï#222‚Jæ„„„Aƒ9sfäÈ‘‹-ú¾|þfÄÅÅYO3Â=$ñ¸.‹ÝÜÜàu¸W®\™Ýáäì'’|ӢȰä®]» Ž5²··‡%~³þ– ë#‚ ~!ÁÁÁ ,xñâEÑ‹åää4oÞ<+++$$©ð¯Þ«øøø½{÷"“ŽŒŒDn×»wïþùg4Gù+‚I“&!1Âa FŠ+‚eË–-]ºô÷ìXvv6r5hLff&4oüøñGý«®AwD7n„EÀ–/_^Xm^õêÕá(MD,00î-¿߆Cط׬YÞ¯_¿3fä놪Fû÷ïgÓ¸¥`¶mÛîØ±£|Àñ«Á©.ܼ–ñôéSggçÓ§OGDDèééÙÙÙAðPd]8K>~üEæêêŠÂ5jÔСCÉÆÉú‚ ˆ_ dÃÊÊêܹsE4cKKK311©]»vhh¨¸¸ø¯ØÜÜ\¤nÈÉ®]»¦««ûï¿ÿ"]þ¿Ü3yòdäL½zõBŠXÌÏÏïýû÷E´ýóe±uëV¸ ÒåU«VÌíþNØMNß¾}q–"µ-lx(PRRÂõBOúýzq°é¤¤¤ñãÇ8pÀÈÈhÉ’%-Z´^r&X}u»vípÏQTT¤– jjjŽ‚9(ŽuëÖážvïÞ=cc㢤ð]Ó°aíl±°°°ùóç_ºtÉÚÚzÞ¼yEóAõA?ÂŽ"º}ëÒ¥KFFÆÍ›7K×÷²²²öìÙ³k×®¨¨¨îÝ»Oš4é_Ž¿¹,` –––K—.-Âu7oÞŒòú[¿r劃ƒŸÏ_°`A¾ó ¸ Ž9Âô»jÕª³fÍš:ujÁÅ 
cbbÔÕÕ?^ÒÁ!ˆŸ…²‰Ó©©©Ó¦MÃ}ÆÔÔ—p`§Nâããy\{iœöS¦L™3gEïWÇdÁœû÷ï£tàÕªUƒŽ3FAAA[[[ðkJmîܹøj€®^½š:†!ë#‚ Jk×®ùûû#=-LBÖ¬Y¢¬¬\*›óõõ…ÒvîÜÙXym®ù3\ºtIEEñ)l ‰³gϲçÍJ…ìììùóçoÙ²¥gÏžk×®EBFEPLŒ“’’"##ÅÄÄFŒ±cÇŽ| ÈËË'&&6jÔ¨DcB¥t‚u´ƒé‡vìØñÎ;õI“& Úè².LÞ½{gmm °^½zº_‡––Ö6ö266· #GŽà¾7~üøÁƒ£Ôsð¸±=¦Nº{÷nssó•+W 7HõA%`ܸqË—//8?%%¥Y³f3fÌøÉ±ø|þÖ­[¡ŽÈ´†îååEa/[[ÛoªøñãÇ›6múóÙC.åììŒ|k)…ýÇÐÐÐÀI»Ã„™™YA÷ƒ"Uïß¿?…뢩©yáÂ6íää´víÚ!C†¬X±‚=N&''wöìYW—>oÞ‚ ˆ¿|_²ÔG|¿Ö¯_?66¶¤k»ÿ>ì .Ö‰‰|Ã7%bÿþýÕªU›9s¦‚‚‚ð|???áqÌŠÏíÛ·{õꥣ£sèÐ!h EøWP»víøøxWWWH JªI“&Ÿ2¡Š>|¨¦¦†ÿÔo¤HÁ™óù|öøßæÍ›Y%R×®]“““Ï;§¨¨xøðáÎ;S¬~?¸ Ú‚ÆÅÅõíÛ%2lذE‹­ä@Á-[¶lõêÕS§Ný{úý"ë#‚ JÆ­[·^½zegg'˜“““£¡¡áèèÈú¸+&...ÿûßÿZµjµuëÖ«W¯R`Kiiihó¤I“öîÝ+˜“žžÞ¼yó­jÅŠ«V­:sæÌÓ§O)°¿Ö)‘‘‘QãÆwïÞÍfBœ?~L!5ÄÄÄX3ÂÄÄD===YYY999È^JJ .Ÿ‘#G–âãÍÄP»vmè7›>{öìĉp}±~ž³²²`ƒžžž®®®¸Ê(\d}AÄÖ¯_/ÜÃÄÛ·oëׯþüù":ó62mÚ´=zlذ±ø,[¶¬jÕª7nDöÉæ\¸p¡eË–Å_ƒ­­mhh¨¯¯ïŒ3(ž¿ÿK—.U«V EP£F ÌéÞ½;²ÕÑ£G ú± D xÝíÛ·11eÊ”ƒž>}º]»v¸ÑM:ÕÜܼV­Z...¥?Ž5û!¬C‡ûöíc¿°Ü½{·fÍš666[·nýEƒ d}Ae www|Y²é´´4(ß¹s羫|×®]cõ»ví*Q• QR¤¤¤ìì윜œX„àúõë­[·.¦ï={öìêÕ«²²²É?…¥¥%.1MMMgggö4æŽ;TTTFŽYØ@„ˆ°†w9}}ýÈÈÈF]¹rwȪU«Þ»wÆ`êÕ«wñâEL¼xñÂÂÂâùóçnnnñññÙÙÙVVVoÞ¼Á»JJJ(²>‚ ˆ¿|56lØ=bÄçó4h lÓ¦MaËãK²‡tçôéÓqqqÀßÃäÉ“­­­Ö‡ø÷‘<‡ .À©5š(€ŒóÕ«WÆÆÆxÌÙ¸qã°aÃBBB(8¢ÏpŽÃ‡·mÛÖÃãsçΉ‰‰Íš50`ÕŸ‹µjÕb½Ü¼y³nݺFFFPt ¨à­[·’““1MQ"ë#‚ø9~ü8k!Ã㆓ž9s¦Á7—|øð¡¥¥e= Š‚®ˆßCË–-‘¾°Î$¢¢¢}¶°——W¿~ý¼½½Kq4?¢TðóóëÔ©Óÿþ÷¿µk×öéÓïââBí¢Ë l@¿¹sçÚÛÛC!ÂÂÂð²wïÞG¥àˆmÚ´‰åóù“&M:vìnž-Z´°µµ}þü9.C))) YAÄ_—†N:'NÔÐÐÀdÁe|}}ñe¹zõj|_RÄþðíC‡ÁúâããyÜOÚ—ÉÍÍE®£¯¯ŸœœLMàä½zõš|xãÆ­Zµ¢:[ÑDLLŒLãææfnn¾k×.++«–-[véÒeíÚµ²>‚ ˆ¿Ö¤ŽŽN@@€««+Ó a‚ƒƒÍÌÌå$%%Q¸þ,}ûöíСkÞY§N‚ ܼy³cÇŽø{§p‰2'Nœ€3ü÷ßS¦LþäЋÄoFRR—áÑ£G«W¯–e½¿¢IOww÷jÕª:t¨råÊJJJt·$ë#‚ø+¸xñb‹-˜Q?~\ø­ääd]]Ýaƽ}û–% hkkW¨PáîÝ»‘‘‘uëÖÍ÷®“““››[JJ ªLC¨]»vóæÍGŒ±iÓ&²¾²HïÞ½»témX¿~½½½½¾¾þõë×),¢ŒMzzú©S§úôéãããÓ¯_?SSÓ5kÖPdÈú‚ Ê97nÜ€Ú­X±BKKËÐÐP0„ þþþ?¦¯EŠ6mÚœ;wîÕ«Wù¬¯ÿþ|>ÿÎ;¢2ÄéÓ§á 111sçÎÍÊÊ’””¤˜”9äääâãã-,,Z´hÑ Az˜VôéÁ1bĈ *())¡ìpó¤‡ÕÉú‚ Ê3÷îÝ›:u*¾ü>|Èæ$&&jkkoÞ¼yãÆQfîëë+--ݬY3ÁL[[ÛÚµk£È(>e ==½aÆ 2ÄÀÀ`Ïž=£G¦˜”Q|||mð÷÷?xð ½½=ÅDôÙ¹sg\\”_‚Õ«Wòä oCÖGQn‰‰‰ 122b#7œ={9( Æ5Môõõ·nݪªª*h8tèPEEER¾2ʲeËTTT lÐv 
HÙÅÙÙyРAššš½{÷¦áʵk×NLLÄíÔÀÀ÷U|!ÊËËSXÈú‚ Ê©©©®®®»wïÆË•+Wº¸¸¼zõŠ"#²ÆÅÅÉÈȰÞ\Ö¬Yóäɓ˗/SdÊ. .Ü·o_TT…¢¬³ÿþŽ;¶hÑbðàÁ‡¢€”¼¼¼œœœ=zÔ Aƒ/^{vvvVVÕ’õA”%ìíí¿™ˆ¼ÿÞÜÜ\ðRYYyÕªUC† ¡ˆýq®]»fddTp~XX’KÁKö8JŸ>}æÏŸ¯©©IqeÒÓÓ;uêäççWð­‚íØ±cĈ4Q&!!¡wïÞ>Äz““Ãæ»¸¸/¦  °xñâqãÆQÄDŠøøø7ž9sæùóç¸6Q|¯•‘‘9{ö¬™™…‹¬ ¢lpàÀWW×ììì¢KJJ:tè¦M›BBB>|ø@=LüA utt‚ƒƒ‹³ð‘#GÁ¨««ïÙ³‡B'² ƒˆAHHHÔ«W”OôY¾|¹¯¯oÑËHKKãÆKÊ'jŒ?þ»mãqÁâb$å#ë#‚(Kˆ‰‰9::Λ7ï»K²~ù|>í$''÷þýûï.ibb¢¥¥EýŠ> 6,ºI§¼¼üªU«(V¢Ïúõë!Ë–-+b™?:”b%jlÚ´©E‹Eÿ¶‚²#å#ë#‚({Àú`/_¾,b++«ÌÌÌû÷ïS¸D„»wï6hРèeŒŒŒ¾P·«e…Ç+++'%%}ó] ha÷îÝ)Pe‚¥K—JKKöƒZÅŠÅÄÄfÍšEA†®©©ill\Ø|>ŸºY"ë#‚(“øøøhiiö®ŠŠŠ³³3JtPWW_ÀQD©µnÝzíÚµ«2DBB‚¼¼|FFFAI€õmÛ¶BT†ptt”‘‘™:ujÁ·¤¤¤à𪪪%ÑÄÈÈ(&&¦^½zßB™â¿ Ãd‚¬ ¢,Ñ´iSCCÃk×®|«qãÆ¦¦¦K—.¥(‰óçÏwss»wï^Á·ôôô`}Ô°ì¥D+ÆÇÇ+**æ›åÓ××oÙ²%…¨l1eÊIIÉ &䛟íääDñeÔÔÔÞ¾}«¤¤”ï™[\¤Mš4¡øõA”Uüüü*T¨o¦ŠŠÊ¤I“¨)‹Èr÷î]))©>ÏTWWoß¾ýòåË)>e…¨¨(áæ»•*UBÞyøða NYdüøñö1cÆæÈËË‹‰‰ÙØØPpD99¹ÌÌLü®~OOO§>xÈú‚ Ê0ì!áîdddœ­¬¬(8¢Ìƒ„  eŸ>}–,YB‘)»ÀÛ½½½¡îì%œÁÔÔ´Zµj™2ÊèÑ£QˆÃ‡g/áðýû÷§°” ÄÅÅ¡yŠŠŠ©©©<®innn®½½=E†¬ ¢ ³téÒµk×~øð ŸÏ¿yófû¢c#GŽÜ±c{9nÜ8R¾r€………““Ó¼yódddpIž‚ ‚ ‚ ˆò YAAAYñ'yóŸùÚ`qÅ*˜N ~møp[ AAAeÈúÞ^5U0óš1ýbâ ˪T<Œ¬Ww£ŸðxOØ+Õ¬,¬ ‚ ‚ ˆ²d}’¼Ê_Ï’¤¢!‚ ‚ ‚(7ÖW,Ò#Cn\¿tçirrR:_FFFº¦º¦NCcó6µ ¯ûJq=àÆÍ ˜¨7IéySªÙ¸¡®¡™q[íšù~réæÓ,IÎ9³²xML»4®òÕÙ©a/?æqKdeeUmbnÜXá[[}èsÅ7äaT|†Ç—®Y¿…iK³vEìé}ÿ3gܯ‡Ç¿É[^½žVËöÝlÛ(òx™tžAA„ a¾·B#ßaJB^Ç̶AnÀ¡ÓAx•ZvÒ¶.µ‹+ƒÖ÷âÜÜÚ]¾?Rý¡ÇÂvÛ •pÜ‚VuÞùîçt÷?ôØXš›~stb‡ÿù}yoÞÕÄ…&_54M‹=0¸îçWU¬»¦­ÖQgòEöúÜ´^Î6t<ö+åë¾;ëÔÐO—‘tí—{u쵨J›yt¦AQN©\¹b^î#!ñ•ªj[Î>ðd\ÀTM»5˜á{Îg²eA N؉i#Ư~Z`]ö‹ý– 5bk ÞÒ¬ëâ°¼©Á^/–Zå_ô‡¾– ·Ý“á·Û*wµ áÊ8~ÿabÜŽ£©#{`s«Í#µ|ëç&_7V¶­µ’Ó$·×sÚ)—ѲɎÙob0˜ÄáÈë9&ùDFNÓfŠ›ÍžûŒº£HZëñn߬ZÑÅd1æÀzGû/ë{µºuõµÏy*ô¹ðëƒCÔ)bù¨¸„4^ãܨ˜¯>µt¦]¾zqÅÖC§·š·2„¾‚ ‚(ŸdþVÀùO¿˜óå?7öŠÛW«õ§«×<ïÅ£-ò|M,-øðÌ®ÿÛ|p®ñÁ¹¼ÕþéýÔ¥u¼ÿwªúZxß¾N›z¥Œ×z¾FìͦaLùx“xå)_±W›§:ŠÕÙ’›GöæñLçïYÑ¡±l–xͺËß\ì¥Õñ:§=ÊçÂ|óìì, ‰²ÛKa–÷Öµ,Jö[# *Ÿ06+bmV|y™þ`cCˉlz؆óþiÃRÜè+‹ú9úlÐ|ë!”¯Ãã!)ŽœÕVŸg1éÔ½ÉÝ•ñœWÇçt˜pà^èÉaÍc_…»ÏTÐË«ÐK ˜ÛÌ.Oðšöò˜gÅã‚üöN%¶9ÉÏñ.¬˜òìQæÓɰö_žÞLPGª2¼Ì‡«û5YÈóÙоÖ¼Ùe›Ï«nšÕp¶x-^㛬cÍ ÷˜¤@Ö÷ P™ìËŸÌË9·wÛ¦ƒ;Îù}c‘˜³NƒñÇãõ=˜æòéG Eù|þÜìÄ ý›·8¸Ë7êŸ;ç2 
<^ƒ¡a#´‹ÕÂ3îVx!ïH(Èå›c=rdíÂÖ“‘”ÄK~Éã)KÉ}w›xoèë€ ‚ ˆòÊÕÅSG·­YIxVÊÛÈ[®^_êˆL¶›oÎÙÚ«ÕC˜›™ºb,øéž/«ÓgÓ‹®6½4¬`\S‡n°¾2S¯2õ¨û%-›{<Þ2›‰b÷i~Î|Ãö]˜7Ñl¢÷4ˆG‰V+¼£4ÊH°OËJ± ̲Ü1½XZì£O^V«^¥¬½|S¾ýÚ5øbêfs_ÜiÒ©¥Öû_Ÿ±Ý»6ûê¼o<´Sý¼|E»•ãCª/ƒºyÝKiÌE_BæÓÙ"SI–‹ò÷‚\x»Óº}N®íöé…”æTç+×5Ìòž^¬>éÆuª¬Z_¥Ó·ÿ]á~AˆÊ˜¤#]ö ³¬ôá)ѪóÈíøûjfz¤ÏÁÉíG}iWyx¾û’Þ½ë9( eÝá vâ﫽ŽpY2qÄÁórQ#çºô?5T†W¥±Ï/@°ä­›Ñ¼¯zs‰;¶ópa»(ùõèÆ‹æoŸÛú{Ç•.ùõ³¦³7\˜Úî«ê¾”[‡VÆÐ÷AAå–а[•“>'Rbï^\ùì{£=‚ÖèÔÊ–2Ü û䊽ˆºÆÈφP¥ë¾}:ƒÿãñöžÞ“å÷Ù1û;Î<ÅdàÐ ‹YígÚvj¯\žK&ûÃûÏB$Q’nZ2b£YKWáf èFu ëv¼{yÖ–žï½:ªòù–—¨ôùÔøa.¢˜jVWúzküOB§P]þ«–œŸ{•Åã‘õ•2™×M¥ „Fo×Ýä5PGPj2¦&­»ð<¿h_å*yG²^_gr `n«a}võW|¬ZcK+=ÞëãµÒnÌn'UkÈ oÿÜ´6b—˜ÞJ1ÓÏyžÉðÍ…ïk‡göõËâçØF'f÷ù]CY»æþ«kOþ%ãì«Ç]ªÉŒ\ºwÜq¡– Gþ‘¼?3,p™VÞ¥{.ïo=ë}AQ~1=êuÅø«´,÷‰ Gƒ7lëÚ_1øò²/ORege|šúç|¬k³bd²5:nÞ3àÌÐÏbt_Õ#ešé»E½³í8ö¹þ§ä«ý+à«vïÀ Ëkd›WY¿O³ˆGÚr£·/ÛßÐn\{Íj¼l±OñŒ ŽÍà)çs$±—¯ãØTZ™¬ûUAÿjÆíÒzŸ¾ä Ò­6¨Ð¥ìx®=çv­&]–ë§3üS½\Ènû*»í ûXý¾o|}¡í¤³ÎÁ’½}~ŽÊÁ&FNÒß±þà·WTÕ&('|Fë&‚gðBvSÙ=ìKê/0ƒÙ D£Ái>È6ýåÝÐåÚ²Ë Je aO"HΫ`?¯kA÷b%¿NË›#+«TE£¹Žž¡‰•Žz5ŠPEÐsÀ7»1 ~b¯¼öxšUh L¥Êu4ÖjÒDKY†ÎŠ2IÔFÒfC¬–ép£q»y—ëÔ<ö¥•`e³¾]y×=0ul¹óCC[~-¡›fî’3îÖÞÔDUáKÝT§Uh‚ünÓ ….¯wqÕPGŽ©ú¡ÕMzjr9(í‘ÌÀ÷[ÔÜ?Ù9fš¥ZÁÅ¢/L3¼:oj‹“ýúÈ•½õìÚó®{çÅsë‰;í› /üÌ}5+žIo]:ýÿ*ë“j8bóËÏwóÌ ùNšŸÛUJé:óùÎ<Þëû¾§/xß ¹ÿ06úý;nI~åêjÚºí:Zu1Ñ©—ïl5Ì…?Ì…—çwÁÓûò­°GŸ$¤}º®ek¨éh·Õ·²îhR¯JþJäÎüÎÙ!§·¯[»çÚóTEEEžl ]³žcFõkU[š—óÄHµI¢ûÉ"S®‘ÊWŸ×\Ì_Áã¥DºŸ=wñÚ­ûÑi5È&D¿¬Q_KϬk[›¼•|L“Q|þ¨¼Gw9Ÿ<ç—ð>ïè*W×êdÛ·¯]çzUx÷Oo>-õ98Õé´%QâÙ•©mû­¼lg1P[·µ¼TæëØ{¾Ç6œ=þùoùÒûœÿÒæÿn­;Ê'=©Bˆî÷dþîþõ9“ùààì©>Å[¶nƒ~[úÿÒ!Ãäµ&øûôÉâI(×%åû…¨÷ÜõTǘë@2j‚±ÄùÅ!;ódLÒnçËdÛê y—綪5—׮׌nZ¼ä[îK6}Lîе=A·U…uâ£æâëÛèâ=ؾàAÞ íQ>_û­ö›|®6 Ý×Ý*v`K™WòÖkætoZv•ÞÞaO³Á÷ Íë­c³Õt,VÏxqý^ô—e5gúŸ_¦ŽËÏë{ðY\Õµa¼³ÓµjMo`7ÖÁ¤‰\|ÄU—Í?5ß­6óÆÑ12"Û¦\y» D¤®¯v‡ÿ½D5-“áø+銥kw¿~L¢U÷qÎÝÇ}+`õüoöw?¯¨Ñnþ&—d“ʺÃgâïoiu§E÷~‚MÞ_˜ôIùùõð‰ yÂﳯ<Û›Ç;eßCMí £Ô„'IÉ7ÖmÅû o_D'<á¥óªªÖþù8éÉõ€À ¤Ò’•ø’²µÕ5Z4Ó¬•/‹M|ñ67%%õºÕxqÁA7£Þgeñ*+ÕÒÖ3QW–øÛ‹&çMtì+^O¾Acåœ8?_ïgÉo%U »·o%MjBĽð;/Þe!¯‘­Ý¸•^3ÕÂkeù²¬hn=NÎþõöƒRSKãVß®[Êx|ýò½Ø×’•¤xüJ5T5š6שQXÁ÷.Î9ó×`z2üJÛo¦ÝbovØ+-ðæÅž²·æ®µüçýаè§ï³?ä¹vóf­ «LOzz7üYòûJ•xxrªõšh6kœ›’ÂÝGd%D?I/2o.X^%;ýÊ%R í—®6ÎãI¨7(D¥$ê 
ºþÌøøî#I<)^º§Wx½NMy|•‘nü‘<^Â}·ƒÎ‡®ß¹x&%J†—¦d7q©±M'ëö5¾U²j#o¸J I”¬Ä˪Øtàпž”dµÒ5MæÏΫï­iÚ8ÿz*w<ñ,êøúå§"2•¤ù’-Z—¨‘DFc׋¼övIσ¼½üƒƒãbÓ¥¥exÒíúÿ£cÜáµ ü:S/ð§Š¥=ôóð¸èq'òÔá`ÿ=O»×ŒYöC»µÕà–ùrƒÕë·iV§¬oœ ßzëKüÛ}‰¿´rÞÌ,¾TËϽ½QL‚·Ôò½%Þ¨÷‚%&¸*U­¾µ’…Ÿ®d}AûO\]8§½ZAU|€?X(1=:V}ÁççŽCÛqMÑg¹çu)žçfӺ׽Ï˪é›4à½ó¹þeä–%gR†èqß6¹ANmõ޽)µæêO…~sµ^²³Ë¿¶<Þ†m6êR`°Óª ÌB[)¿¿j¯aö¥â¨™©Eõ·>—qîrôÞYãU8ëzÉ|ú…»^ƒæ¼¨{1‚w¼ïstY^K-òz÷þDýöiQç¾,Üé¿ð=c?' Å[¸Ès†.<¡k­JŸ1Sxçý æw;u†‹Îÿ:Ú.¹(XJ¿}gá ·›áí6ÑâÓ‹ÿ³wðT~~¯=2²IvF"+$ѰZÒ’¦$‘‘DZTª¿†HKiÑ ¢"¤RF¤22¢¡HddÓý÷J×LýÆ÷ùÜï~Ïýž÷½÷<÷œ÷Šœ“¤ÎtÞQ ²,Õéd÷”©_ž‡^û=þùÕ)ÒÕe\æ¨Ê5%X©OMì7ußÛ‚þÒå7L³Œ_w¥Ã7£1^Ûûî<ÒF›þg4ðŽU_¾AýÇ› ä°ÔÜšÖšý¥ÙÁ×x8f;¿Â¬è5ð\f”T_„^?ÚŽFÁØZaÀ«z?5÷…ýdSŸ«¨ÆÌêýzë'…`}ÞQ3®&œ'`{l™Ð±Î‚œöòéx %¼¬¬x—_? £ÛKnzd*<ûTlãB:k$J¯hL2!¶{ÑôOÔ¥»±ÆDS5´ýÖÙ¬´q-&¢T•ÈUì:ÉfúåŸ[ýý$6<²Hï8Ê•1è.H-ù¾|“-ˆ“Šg“Ÿëò’m÷%f~{‡Ýá‹dµÓõ(y‹|c"õý{óÝÕ¥ê/#៹PŽ4|S¤“ØjÿviYywG—!¡3Ï̘±ãi%µŠ%ÿ\{ûÃoÜç53a¤fìm1®*Þw×Hsö›M‰ùÝ|ÝV̦ýy$lÂÊȨ®ƒt'›2Ï=áÉÿtf6>¸Kìű>3Ф|Sä]ZHþ NsèÎÙçr¹±ÇÁï4íEzÖ#ÑjÝ(êÞLí]0ñ™4â•vòÉ“ÙT¤ËOð/?°>€G{$b»—šÉ±nÝ{vý~jFôEô"ßLÛîæ¹Ísúi‘—àCzÔAÕiO÷¦é&o÷4‰¶G‚]»þĤK¡_?äøê.‡ý*ãxÞêz{7åXôýŒ Šê#<ƒ&‘«oFØùŽ)n<–ùâ{r4£h'c‰íU61'¯=Å[*“GÕé<¹ò!DL¶¯ñ¼Üî )7Ókìð”1§ýIõDš»lgvKÆ„UûŒwÜk¯ž½{úIÍÒŸÛÌý;ᆸ~VËÌ;váøúŽþ¿$^¹ÖQs''Óy“¬·@ZÞ)Ö>VS†×¡mzŒš… ©º Ú ñÙaªÆ¶³ñ3UU•DÇrÎr‹œ5ð4V…Í—žEªú3Ø“tzµò¿üÀú,üгV ×.o²…õ…÷ÜæÏðÿ€E™+¾/»Ûð¾äöHÛQ…1U]¦çÚ±2rÖn} ÌÝ:yUU×}à vþŽjŠûx #ÈKޏ9†öX/*(ˆåçç2Ðv]QI]÷m ltS\Œ4íÅö1¤9E]ÉžÙL5V€Ã>þÒÆÀw4Cr©]ðŸŸí–œCjÊ«è÷¤½›ÇÎZ7ªQbXâ‡öÉ€£q=:Åáróò:ž¦£³8±hqKEÆ­+§¯Ü9šxÝëÉu/²ÍÞ}`"Ó¯‚ãꮘw#žRÂ5åÑNÂï¹üÀúþ­…¡gOÅ'?òå É RémÌV†1Óÿ—ð¸Jûc<¯s>´bäÃ@áè¾O3rt=Co<٠ؽò'%²¯¡;Y±îCÜb9C:œó3Œ„ÔÆQAïÙäïàšº/©ÍNî¨I*msª³—ÏäëÏË»WØÖ¦%~$M‘ìñ§6îýš¡t™yÒö¢×*óÅg&bÉ6xêíI—:kÉi;¤Kñì®Í)û†š]ÆØÚ½È~»¹c;Éð–¼i&kEßS?Ú¤ÐÑ·“èÁ&Rôá÷]~ÀŸ¤¥$æÒ­ä&ŒVNw•в¬¯wвbãÂbï?Ix—›Û¹XT/Ÿ:m†žÁtUqjˆô £jŸ›K\ç¬æýªG:¬ø©OÐ1ÂO,BÛo¤y¢8mKÿ]6‹DÈãÃÕeÞõ4_¹ÔtS`±aG'Ÿ&6âÿ'7ÔØvô9!±øÖ‡/û`™çuènîøÞ4þØŒ…ˆ“vû×+w-J&¯VÔ:ûä¾®@ÇçyA”£¡;©q™¦ó:uÈ¥nŒµcã„c‡3Ûã¬Zãvlu§p}~}a!±krô˜ý¶[»ÊçæÑFîÐíÈŠœ½óµHVÙìAÜXfçe‡ëíåþäe²]2+¹g*?“´±éñã ô?»qï× ÐÁ¨7>•ǽï öp· _kúCâÐÛ_%mm=Ó>°ºMÄö†>Y©ø-·qU¡;´,N·çöX‹ø¤SP†ÚðH‘zHÚ\´Aõ»#2° ðÊ`X{ªŠãøz‹}m¢¹„Úâ¤Áö¤Ó}7ÎüåËo¸ñ%JW|fú64ÝáºN÷Oä 
`”\ÚÙÖ×T?3…5gû^Ÿ›‡^á^»:†d0:cÓˆøà¡ÛïÅК~ k¢ŠÌ?^w·±ñ ‰>± ½zÝLFoŸ×açï ùèEt°ë0ì•«Ÿk»asI"…9j¯=øää\Á“ÝŽ!jïá¨7®{í²…µPÒjÕ¿­)¹&…îKÁyÓÚQ5÷©±ke(kSÁô™÷Ê9¶é!f²!fÝ3‹üÁ0 ËŽ"M;l.<¤×#_°ÁEß ]ë`Ñ»{—Êú$ÆôÌÍ}—ýœ;ûù©ûºfFJn¶4—+oö^MàÚLP&vЂB$Ëçê_o,BÏ®´§èÓºó6†.×^¸ÌâìÑ1ŸâÞàЕ¤ìû*éý©RcÇ,›Ó÷ñíëy ¾å!‘T8º\]ÍÄÔ$^{çÛVwvîî-…œÎé/öqüäå7l¡¦ù¦»Šþ‰:ì„òÛ´¥®ø]²ÿÿÌ}äìÒ Ø…Y],Ú:÷Ï%‡]'E{à ²¬¯Çgï‡+’‚&ïº/]æhm4sò¸±£š«*3BÏ;û½ê vTÇ9*Þ)}®Ï a`Aà7v 6vù©]FwÎ IÃEE~âPÕBkœt9ÒëYFt¶ô_à™dYdÓw}ŸTÙ˜QÔ9 ¬Ãÿ~|b:—„^߸ïkfD0jFdÑ/h«ÇEV½Ü¡+½ÒVz (ÈÆnñÆn?}uu¹HÀÀ/¿‘µ£¨Úò ¦î"c'Lsño°IÒ¸½ÑllD´ý´¥¿dÞp4ßp0¿Ç±L÷Ĺ¯Vïò JeòÁ-k‡¦õØVÔt‡Og5ÔççÛ$g+‹Hl4=ÒžÚ]–UpÙVÕoîS™¸×ÖIN÷”-OÅóRCöAö Më«Uí¦|2ÎïÓ÷ uÝJ^¿ÌÞMÄퟡ±¥sœœd­ÿUe9‘5$, tݸØíR_gѵó=~@G¸K:ãÜ&h¸fuÎîHl°oõwì±·æÕŒˆEèËžz«ªØöÔTŸÄ'VªdÏR4Æ(Òk¥tÎrý¯®då)ãIo|è¶'Þ:(òè¬ôØr%kÿ‚nk…çOºiÙÃmëwžfu´¯wjº'âÔÖ®•àÝÒÃíVW¼%ÊiÖ<»=wß|ùÍÿ–ˆu_úéÞò)3üß÷ÜÜàNžÍ((­ÀpšÐ0téçáã„»Hó·Q· ÏóMZEšœ¿ãÁžuÚíM(pu)Wœ 7úlÃlÃÆ×›ˆÐ“Ë6Þ"ðÜ~üXbñ³¡4úìFS÷KíÕP'œSÒ÷µ×¬RÑ’ŽÙñp.­VpNI=yýš±%h-·ËÒüŠÙ²¤ds¨­˜©«X¾å!^¤³Ô—Äì_¯u&ñÔ"ÅSØÌÒ`-}}CÌú’NMéª|…éûøúÞïEpîuM©«<·[{QPx—òý ï"L9‚aŠ!ŸÏóm)-ù.»&Óïêýä1‹e÷™´\ëÉœw¤…ošØyåvñ R'FœS¯{Æù,dôéó¸ïo­çÂ=|R¤òÍáÊ"¸ôwsN}}‰âððη°M]뇫7ª²ôžž’Œ¸}ÔÃdœ‡÷‘ªDÛoFÝ⿜f¹ÏGD±ö‡/ïÝoÒ4}!þ«hjX0T‰Ù³i /-ù¢êϯŸE~¯ Ò8~Ô˜WzpÉ4sá;;õ!0*,>Zd8‹Ø©¶iµ—±·d*†oª zFúoã<Òsi[Y|Û·÷d10s‘ÿÿ9퀘þfÒ´ýÕ"GŽöŠÏw‘œ¡Û@š Üš»n O™w0»kz䉡£* ddßв¾ªèð{]¤n¶!߯œ·ÐI߃\ù毺iIÞ™HÒ‘¹ªö·¾Í%Ïã׺óñ‘>Qüuô1ì ùáô< ß4‰8YLŸÑ:¢ëÙæ¯»iÉÐqتößßB„ãÉ,»Ò¤00ËÎÅc±qä{.è”·úÞŒr¶ÝÞ†Orƒ•ñù¸ÆDzµðïk®ùøg«XH‘VYvU>›{å^ÓØ;îÇÛF«»w®rXãµ,sGG=a/é½ój¹e‡L>ÜÆ¥ý}_ì‰g¬©›Fû‘ó®­ëª|]µEi£õuø²€! aô†(ˆCœŒÌg£*¾ýÊ«-JLý& ëÂ’)ð’•Þ_¦evÈÆ"Ѿ‡m|™^ãŒgÁÆ;&]®Q1qÇJŽÊ’·ƒW4u¶[·ÂTd %yŠœ“¤Îkƒ–ÄêR퓟ô´£Ôê…çë»qBL¦ªÙÙ7´¬cæÀ°ï-%'pÿÂY«ŸóH%_°îUWåk×n»s{ƒÉ;™Œñ¹òLŸ¨vÝk“—ÜêP¾v?_ê¶Ë:bùÁ3¯[v^t*ëÜôíï‘éYvå—öžÏ{CôtÎ…Îú:†‰ËöâmÉÒƒ™ž{Ó¡|XûÊ;öMÿÞ–Ë/¨ì\uÐñ[DuÑ«W¹?}ºãïßÜÔ„aMXSËg ûÞú”@ÛO%ò¡äÌNåCpNuñÑs﮸D9¿r´KG;¦'ýÉ”¯ýÝXù? 
º® ß6ü34#uí°´³ÑÝIÃ¥¬)÷}ïÛ¦¥¹¡cjÁÝA2(ÃŽÕúþfMñ«Œq±}‚âö/C/ ³KúäÙO¯WIÇfÌs'íôC²ï¨ôÐŒ–¦Ž±U vg6ÙÙ7œ¬o'{ùüóço±Åb?{ÖWϺêÆx!®^¶={ñ—¸Îê>¬¸¸®×£áE„ÈM‰UFCÃÂÉ>¦Ë»øèUüÜäÃoÖÖŸŽÅ}÷:‘ÑM¾÷šréa§öWXx$ÔÑkæ—#Ø«Ó3¦îDŧ#Gïmöè­{É–¼ j+IÕVf'^ïž3®×Ä+Ì_‡Õ¹³Ý5cEw“yæ~.›u†®ªŒ5ddßP³>jõy‹0ßïíSé­UÌ÷]"ÐçŸc4Y:ê”Dô,6Z­[e '1IÃÈ\¥º¤º—ú¶ªÛWo‘Ïóð0Å'ퟬ±…¬=*ÿº''T:ßmñeQÞ¥ï~ó9›Êòºô1óé]¦ÁÞe“Ægþ | >DŒÎä+à‰÷\<õÝ=©§Û%ÆøtIå¶ý±0.Æ’}>71,à†&¨²|¾ž2ÖT^˜zÿt@G®§óö|6,åÐÃC 3:Æï¯Ûšv{(¦®“ÚàënÌ ¸6'¼F²ý™±k3p¿¾õ}q&Búà3aÍÃîU‰=‘‚ìƒìrÖ‡ñéÊ<–;Áê»6“Ãi¸Ç’ê˜å‹Š±†ÂÈ S7îÇÄDÞyω5Ôs›ÙûÌЇ—áƒìƒìªÖ‡^³yúUq²Î<ï¬Pä\ñ£÷F•o™Nªq’ž%´Ñ¦ÿ äTìÓW¢W¿ÛPókZXk~?¸ü¢ ò?ùŽèÇè®Ø¥»²²oxY_{|Ç-I&,ÁZ ÷8õ3Ú ù¥ûOx:¨pv;?©š ×5†n×;»‡IïV «nqÿ¢O·ñú°æ&ò¹/]«Çš±bòÙnÝš77wY‹}!ί±¶ÏUÄÓ‘Ï66÷·öã·TÉÛE½—Ø&¬ïN¾ö›òi^ȈX>áírœlg›ïâ/zÄcØ süŠþÓÓo’¸¶¤–DìÖßN¶>÷Æ ’O‹ž~r»ÖQz#Yg99ñ¬tðM`}ä{Yä€^¿~×kù®?¹ë”=ž>W3k&úîЇyzßkG#SÚÒ÷yñ;3 ;e­^¿ Æd.{[Ñz~˜$!½m¶>WÇìá°>¬ëÀúí}}ÝþæI]]]ÙÙÙ7lØñ ..; ÿ6 sfÏxøèþÛ·Å\\\ÿ6%;vìÙºmÇŽúºœ­¡¡7—š8èmaaýoS²uë`ÏV*ªðiù³Ô××{xxLž€ß ‡Û¹sçß<#©,RSSоá 55Žšú_& °°)ߨQ£ôõ5^½z9ò[ Äá˜Fýã4¬·\…þº¹ºÌ£'&&™2¨>-AùØØØ˜˜˜ÊÊÊyRÿÁíF9سÏÎÎ)Ÿ„„'''dXÀçáÇèokkkrròĉ_¼x1ü¨©©¡¿_¾|AÅJssóÓ§OCL†ÖÖÖ555¤ûÇúô b2tiiiáààh"2jÔ¨èèhmmmËPÁÈÈ($$MÔÖÖFEEíß¿bÖ0„ñòòBE“ÎÙôôôñãÇ¿|ù"3˜9qâćCÎ~üøñöíÛ3gÎ466†È iÚÚÚ|}}IÓaΜ9·nÝ‚È EPö±±±544féèèЇ-XßPAMM-11‘4]TTTYY‰Ü‰‰ "Ö0Tqwwï¶$;;[XXøÝ»w8â38Ù´iùliiéŠ+¦M›ÆÊÊ ÁºhhhÿSRR’œœìééioÃÜ=ò}ùò¥söýû÷Ÿ>}ª¯¯g``€à rÄÅÅß¼yC¾„ŸŸ?00pÍš5°>€!IRRRYYYÏåyyy<<<?~¤þ·Ï®½1wî\TvìþµJE¥©© ­s‡.¨”™ÐmaaaáŽ;fΜ9~üxÑ‚‹‹«ºººÛBÀãaƒôÝWRRÒsyhh(XXÀPeõêÕ}­*--åää,((€6-ƒŠŠŠŠ^›ü}þü¹²²ÒÆÆÆÛÛ¢4é«í_KK òyt?BÝûPŸŸ¿×_ÓŠ‹‹ÏŸ?Ö7hikkcccCŸ¥=W¡¯BôÙ !ë’ÔÖÖöÿü^MM ßÛ·o¹¹¹!\ƒ•¾V}üøñòåËúúúººº¨¡Å‰'Pöõºª©©‰……ÅÀÀ <<5ø-,,ìuÒ &&¦ˆˆ===Ô`£¾¾žƒƒ£ó9Ìž·!;;{ll¬††Ä ¬`ˆ1þünóåËTˆIKKƒä—.]ÊÍÍígƒŠŠŠ ”””À³CC GGÇ~Ö–––fddìß¿ßÙÙb5˜‘••}÷î]?PSSûøø€õ 6Š‹‹ÈŸªí Kpp0XXÀÐãÑ£Gä³´´´MMMôôô_¿~EE4M Z[[‘õA¬ 6làåå5j%%%ÕÇ‘àqqq‘šÿ¡,C…NIIÉÎ>Áµµ5Ê8qqq”¡¤œmhh(,,DùHÊStK¶µµmÙ²eîܹ¤…À $!!É9777###ú8EKš››Ñˆò´±±‘´M^^Œšš¤±Á''çâÅ‹_¾|™››‹¾õØÙÙQ–Õ××£[²ªªŠ´MEEŽ{÷ V`}C¨¨¨ mÀãñjjj®®®¨ÔBCCƒ,„TVVv[âèèX[[{òäIÎÐŇù’ÏŸ?óññ=þ‚3„@Ÿ¢HÑÉ—¸¸¸¼zõêÆHã_¿~ý–º‹IN ™_¼x±söìÙ³žžžèÛ#>X›ššŠnÆ´´´¦¦&ä„H!b`}C 
-"äKÜÜܸ¹¹ïÞ½»}ûv‡C‡A”9&L8sæ Äa˜ÁÌÌÌÆÆ–’’¢  Ñ¢äåå;vŒÔ­ ==ýD"–ÁÏñãÇOžŒØ(W™D¬`X±ÿþµk×>}útÅŠ . ‚˜ f”””¶lÙq~ÈÊÊÆÇǃõ ]¦M›vöìYÅÂÒÒ²   ==BÖ0ÌYµjUHHÈöíÛ/\¸àãã#&&–““ Z-ÒÒÒuuuÅÅÅ<<<ᄆ†Æƒlll CCCÃ… ¤Ç,`ÐÚÚŠ>Nׯ_âÄ ˆXÀˆ 88˜——×ÈÈÈÚÚzöìÙ\\\×®]ÓÑÑÈ Zñ{ôèÑâÅ‹!à tëyzzB†"––– {÷î…P ®_¿nee•žžÃõŒ¤h*ª´´´ñãÇ¿}ûV@@ ²²ríÚµ»víBjãDBðxüÝ»wÁú†’’’ÈâããÕÕÕ!CˆU«V555AÛø¡BMMœœœ……EII D¬`Ä1fÌ$ÒÒÒïß¿gddôõõEî',,¼fÍšmÛ¶A|³fÍ233ƒ8 ?ôõõ¯^½ Ö7„˜>}ú¸qãÎ;¡L›6MDDäÝ»wðƒ&XÀÈEUU‰????VVV66¶¼¼¼ÔÔTzzú­[·‚û TUUåääÀHnà +++ nƒ:ƒ“††11±Ã‡/Z´¢1Èiii‘’’B_j111è Ö0Ò‘——ONNŠ•••%-A…›ÌÌLssóÝ»wC”ºººþþþîîîŠáÒxt÷‚H r®_¿¾~ýúôôtèTiSRR¢­­=nܸììljjjXЈˆHyy¹„„„££ãºuëH 'L˜P\\\TT$%%5f̘&&&ˆÕ?ÄÂÂÂÔÔ¬oøáàààááÖ7˜™„ëŸ0eÊzzúððpÈ‚aò½;vDDDèééA4»wïöóóKNNfgg‡h NÞ¾}k``0f̘àààõë×C@Àú€àééùúõkNNÎ . /QòU;ˆhjj~üøñÊ•+ÊÊʱ¿Œµµõ¡C‡Àú†{÷îuvvëT„††®X±"((hûöíAHvvöÒ¥K›ššüýý_½zë~qqñòòòýû÷[ZZ¦¥¥±±±‘¯;vlLL šÈÉÉ‘““knn>uꇸýÖ¯_ô ))IEE¢1œ˜?þ±cÇŽ9bggÑøç\ºtÉÊÊêòåË•••ÁFxx¸­­-ʦ””Xðë8;;;99Íž=»´´i]· $%%‘¢ ¤ˆ‹/ŽŠŠÚ¹s§ tý§qssCb˜˜¡f\»vMHHhùòåÝ~jþ&[·n=}útddduu5Dcð€²cÛ¶m+W®Ü»wï›7o &`}Àïù[hhh[[›¥¥%*}þü¹×Þ\888®^½Jš¾xñ¢««+##£‡‡´UûC˜™™?~üòåË&&&á;;»§§§®®îÓ§O!™üüüY³f)((:t:L$ÔÔÔìÚµëìÙ³“'OÞ·oŸ XðG ¤¤ô%²zõꘘäãÇïkãåDÐDss3*9¡ohnnn''§%K–@$#!!!²²² ,€NɇkÖ¬ILL´°°8uêDã/€>©ìííÑG\zz:äŸ“ššŠ¾;îÝ»§­­íêêzˆ„¬ø{œ={ý SWW߸qcÿ¸ÓÐÐl%Bš½~ýú¾}û þ¡}!žÿ;wêèèÄÆÆB4†~~~¨¼»{÷nèAäÏÑÒÒ²cÇŽ'N Ï±cD &ÿ ô½àããsùòeZZZ++«uëÖ¡ï  Xð144¬¬¬lll433 ½páÂ@Zr!M×ÔÔ¸»»#‡lkk[¼x1úŽ‚Àþ,ööö‰‰‰¶¶¶^^^aFtt´ŠŠ ““Dã7RXXhnnžœœ¼ÿþ}D &Ÿ'Ož Ïÿ[·nqpp¬X±ÂÂÂâD 2`}À ƒŽŽÎÏÏM”••=~üøØ±c^×?,,,äÕ€õõõ'Ožô÷÷ÏÈȘ:uª‰‰É¼yóPy‚üC‚‚‚&Ožìáá±yófˆÆ0#))IGG§¤¤äðáÃÿBKKËž={Ž=:eÊOOÏððpˆÉßäåË—W®\¹~ý:º˜uuuW­Z5}útUUU___X0dàää &É›ƒƒRÁ… ¢¢##ãÀÀÀ°ŽHç’¼¼¼‹/†„„äææ¢‚’IäЫa¯$&&*((PPPlÚ´ ¢1Ìxðদ&i¬`àÔÖÖ"Ó;}ú´ŒŒÌîݻ݈@XþBØCCCoß¾.]ôÁ>cÆŒE‹éèèì&ñë†è;¾ó±û¤¤$GGÇœœ¤"vvv?[q'$$D¾sI]]ÝåË—oݺ…Š¿Èp´µµQyÂÐÐT‘’’‚¢‘ŸŸôèQˆÆ0ÃÛÛ;66]çÏž=…€ôCXXØÒÓÓçÏŸïääÿ(oÞ¼ Gv÷øñcôáÇã fÍšeBâÖŒTTT:{‰wssCeVSSS¤‚¿Ö ##cÏÂDCCòÀÈÈHt.ä<ÒÒÒ“'OÖÑÑAN8ðšÆáATTÔöíÛååå‘Âx‰Ã ŠŠŠ)S¦ˆ‰‰]¼xÒyûúùù!Í›>}úúõë ‰@d~#mmmIII>LLLLMME1WTTÔÒÒÒÓÓSPP°%QÀú€vÔÕÕïÝ»Gš®®®Þ½{÷™3g¨¨¨–,Y‚ j|||¿|dzzú9Dº-¯¯¯ }ðàABBBNN 
‹¬¬¬²²2WSSë9ÜüðY1ÇíÛ·‘!À…7œ@&.欬,”¿>>>‹/ihnnFvPPPtt4''çüùóÍÍÍWËã?‚>–ãããÑöüùst}þüY\\}`jhhL›6MD ë~VVÖíDH³555 xÿþ½¶¶ö²eËÂQRRþdz000Ì"ÒsUmm-*5¢"NJJÊË—/+**„„„&L˜   €LIQQ™äÐ ¯””zGëÖ­³´´|òä Š\rà iiéòòrRu®³³ó0îòÙ³g7nÜˆŠŠzõꕬ¬,º—MMMÉûN[[r9ôœœœ‘‘ñöíÛ††QQQt9)))Mž<ý…jR¬øƒ°°°l"Ò¹$;;ûÒ¥K¡¡¡ïÞ½C6sæL##£ß8¸“>‘^×677£²*q¾xñ9!*ÕÕա⑘˜*TúDr8nܸAÕ“'O¶¶¶N›6 IuRRô†:Ì@!@HMMåãã›7ož··÷ÿ¡ä_QPP_]]ÞšŽŽÒIDöïßÙÝ?è }Xeff"¯{ýúõû÷ïQH™™™EDDÐG–ŒŒŒ<Y".¬, ’Ê"KÚÚÚ¢££oß¾þæææ¢r ò™¹sç***þöØ ©ég›ÊÊÊ””TÒÊÉÉyóæM^^^II ÒW䇒’’è-Hù‡ºEEEõèÑ#:¨üü|:¸º†¨(_TT„²ØÎÎîÊ•+þþþ6óŸPQQ‡.ȧOŸ"9¡§§—““SWWŸ>}º²²²9ÈPrêëësˆ¼}ûöÕ«WÈå>|øP^^>zôh~~~dtãÆ“––F"‡>g &Ö ())µ‰t[^PPp÷îݘ˜T”üøñ#*Mžzþüyg³çÚÚZt)"9A—·¼¼ü¤I“öq®¨¨@·'Ò6R[JtŸ~úô ݧåååõõõ¬¬¬è®äáá!ݪ(,ÈßËñññÉ ë€ß%%¥ ‘^×ÖÕÕÅÆÆ>}ú4%%%++ àØØØ$%%Qá•ÉÐ^ÿjH´1D¦N:ð]Pá)b^^*ƒv+€~þüyÔ¨Q$]D%NTEùùù¼½½‘‘š˜˜Ü»wÏÕÕÕÊÊ ®™á==}ç8uÈÖPF£Kå2RÁn½û´¶¶¾zõ m“tîÍ›7è*jiiél«Œ¤nâĉÒÒÒýÜPC‘²²2ôNÑ]Cª~GÓ¤]ee%‡=ztçO-èÆA7¦!!!v"P `}À †‘‘Q‡H_ ò_bbbrr2* çää •Bå?qqqTÔSVVF¥áAò^˜˜˜&ùÙëëë·nݺlÙ2dŒÚÚÚOž<áââbee­ªªB¥Þ¶¶6Têå$ÂÃÃC*õ"oD_x>pðS[[›‘‘‘™™‰tކ†ÙËáÇ===‘Ï ÓC—ñÒ¥K‘ÔIÒÚVTTDª///GÑ5Lªs#]ÃèÂæ%Bú탟Ÿ]ƤkÌ Àú€‘ * é§Ä‰4éùóçH _½zEª-”““#uƒþ{ýC000t÷---I ÃÃÃ7lØ ++{áÂ11±~Œ"Ÿ©v‘¼Ì]]]¤‚TÚFºˆ"Iª$A¥mR“Ôá:Râ_£¦¦&55ÝË—/I| à£ËEXTT]„(OeddÆ×ñññ›6mÊÉÉÙ¼y³Ýßï”t‘o#ÕQwVS#mklldaaAm¤k ´ Àúà} HâÝ»wÃ?dee544 â8*¡ª¨¨hhhô#Tÿ–Î-ÍNœ8‘šš:00°g{W&&¦Ÿz^±PXXHêÊ‚TÜGÆXZZŠŠû¨¬t‘½³=*II}þѾyÈyž?ž––†¤Ždth¡°°0©{p999 -"ÿñ\êêê111¤éàà`¤XÇÿÙfœ(ïP:IþVPP@j0‰òehSSÊP’ÿwþ€œô´ZõkW`}0(!bbbÒëÚÜÜ\Òƒ… oß¾EEa%%%dƒ3fÌ@{ †ôËÊÊ"_E?ž2eŠÁ‰'þû(8ŽôìâÏÔÉM^^©‚‘ô ©ÿCd蘤n3:;D¥¢teÏŸ?£'=PúòåKô.PÊIUs¤¾.‘`ùË #Õl766¢´©©©eee©ªª¢ì&ÕâVVVÒÓÓ#y#u&„œœ¤m$''5ž„aë€.ˆYµjUÏUééé<@Nˆt 9*LÏœ9sΜ9¨`ýO’Š”•ûÛÚÚ\]]=zôO¯rx5iÌŒW¯^½~ýšÔ!*ÒEd‰”””È‘®úÇ?~<òÛßû˜":ott4Ê>¤vÈšMš4I^^yú«Kä¯Å­¢¢)©×MÒSpUUU£G&µ¼íTe  䥱ù¥¹¹yiiiHHˆ²²2ܳ€õÀoC–ˆ½½}7‡‰ŒŒ¼sç’.TpGò0kÖ¬%K–ðññýT!YÚMdÛ¶mîîî¹¹¹ƒ¤N²/~jÌ $„¤†¸¤±‘"VWWsqq!3$i¡œœ©½kç.HŠ¢ˆ »C» +))©««ëèèÎkccóGß Ê”挌ŒììlRšëëëyyy‘Å‘¥%cìØ±D~öøLLLW¯^EÈ]‘ N:õÌ™3ÿÙ?Àú`¤€|£gQccãõë×>|ˆ ÷óæÍ[»v-šø£)ÙCÄÙÙùÆÏŸ?gaaá%u33gΜ^×"Í tttDï—@ ZCZˆlpÅŠ***ÊÊÊ¿«£tpÒC¡¤êJdtèÈH³¥¤¤ÊÈÈ í'U÷ÓáÐïBAAimKK‹––ºØà~¬þÈŒ‰t.©««;v옿¿vv6RD{{{UUÕ?qêýû÷#÷C> 
§§GnØðàÁ$Ò”””:::óçÏ×××w$ÒÿŽõõõ÷ïßOLLDrˆlíÓ§OHÏÔÔÔ¦NÚ­ÖñíÛ·±±±HíRRRÞ¼yCCC3nÜ8$ŠŠŠ(ˤ¥¥ ‰ ªßâââHJJúøøàp8¸ë€¿ ##£Òl[[Û¥K—Nž<™••ejjêììü[„RQQ¥§§Ÿ9sFDDäåË—Ct<†ÜÜÜ'N\»víëׯsæÌY±bEÿ9öÃ4"Kû!|ôèÑáÇKKKyxxP”™—•• !»ÓÖÖÞ»wïü™²âòòr~~~—Î+ ¬þ ”””K‰f‹‹‹mmm/\¸€ÇãÝÝÝeeeÿû)Ö¬Y3}útä3ñññC¢ þœœœC‡ݸqɪ¥¥¥‰‰ÉA"¿|@ðôéÓðð𘘘ÔÔTVVV555MMÍ3fð³³³ëg÷––äQ÷‰ yF©ÒÒÒ200@æIþá ijaaá‘#G„……³²²î½X ›yAÓÕÕÕ6l¸xñâÂ… ÷ïßÏÎÎþˇD>)$$tîÜ9==½Áö®‘Vùúú"EAHz-,,Nù…C577GDD„††>|ø°¼¼\EEéÙ¬Y³TˆüZòÚ‘j÷íÛG¾…4$$äÖ­[‰‰‰(ãCΟ?ÿ¿Ñ÷A6»dÉ’±cÇýZí(`}üAXYYAÓÏŸ?G:ÑÚÚŠ\H]]ýŽFGGWXX( €üêï5׫ž>|ØÓÓSTTÔÃüÕëÀyûöí¥K—nß¾ýúõëI“&-X°`‘¿£è–DÈfggŸ?©`ii)-äœÿpXBnn %%%“íÛ·ÃmX RP©=##M””” gKOO¿xñâÔ©Sê ”””¹¹¹cÆŒ¹ÿ¾¼¼ü?y#‘‘‘vvvÈf‰ |ßââb??¿   üüüéÓ§¯X±BWWw'‘Á“SRRRÿ#Ò¹$''çܹsW®\! .\¿~=Ý¿œªçÏŸÏ;úøñãp7Öƒnnî;wî ‰‚‚55µúúúÐÐÐAGG—œœFFÆÍDH³qqqnnnÉÉÉË–-Û¹sçé§W†ªû!ñ«««›0a‚®®®§§g¯›íÙ³‡ƒƒ#77÷¿t)ùâÅ 55µsçΕ••õº··÷®]»ðx¼Oll,äίAjÇ‹{UWW/((8{öì/Á‡üüêÕ«Hø.\±°>†*ŒŒŒÙÙÙ‘‘‘lll ’’’=·±°°Ø¶mÛ•+W~áøñññ³gÏ>pàÒËn«s¢å{÷î]µj•‡‡‡ dÇïå#Š<ö­×ŠŠŠÀÀ@99¹Ÿ=ÎÑ£GMMMÁú¬€!®®neeåÔ©SÇçëëÛmíöíÛ‘Ö××300 ü˜åååòòò7nDGî¶*88xíÚµfffîîî=”‚ÎN\333QæJII!ýø£zx<^\\ÜËËËÖÖ‚ `} y>|xöìYä 999ä½¼ÐÑÑééé:téßµjÕ*¤ïß¿'ï\äÓ§O³gÏnkk»uërBøßd„ oÞ¼AŽŽŽçλxñ¢¾¾þ@vܽ{÷‚ Àú¬€aÂêÕ«µµµùøø’’’È[{š™™mÞ¼y ÖWPP ''wõêUòNÿƒƒƒÑöíÛ÷§Gÿ~È"(ÙÙÙwíÚeeeÕÿöªªªÜÜÜ‹-‚èXÃ!!¡²²2aaá“'OΚ5‹´P__åÊ•?ìÓeïÞ½—.]*))é¬â Z»vmdddUUÄvð ¢¢RQQ‘ÍÉɹsçNkkë~6^¿~ý‰'Àú¬€á M~~>Ò?ghhHZˆÇãû±}þüùŒŒŒYYY¤ÙƒDBCC«««!¤ƒ)))dø/^¼àááÙ¾}{_õ~+V¬Ø°aCMMÍo¶°>þõ—=? 
.L›6 -™3gŽŸŸ__Ö7a„uëÖ‘ªŒîÝ»·hÑ¢°°°ââbˆäà‡4;òsvvöøøx¤‚Ý6 ¤¤œ>}º¯¯¯££#„kdR[[ûøYjvfzScǘ±¼c™F³ba§¤¦¦¢¦Q¡hmiikiùRSQ[UQ^”_^THOO+3QVEQ‘üqh°>†¨¬Ÿœœ,..ž››ËÉÉ9sæÌ 6ô¥ –––VVV rrrHùzöÛ rfÍšUQQahhˆ„ÿæÍ›ÝÖ¢å—.]ëQ´µµÝü$9!ŽOX\jžgü$ô‚°PE—–w¬Ðøï£¡d–|yùìnñû7êx¼ºÚdÖÀЀ‡‡'$$DII)??Ÿ›››ýéÓ§ÊÊÊäÛ µæææHùüüü\]]³²²˜™™!tC”°°°¸¸866¶´´4ÎåK–,ni‰‰‰‰µµµššZTT”’’„hXrçÎ<¿ÿ~ggg$öG…˜ K®ß+!Ó¯ò5e¦UF‹ÉŒ£'_Œ«ËONýÌ "#Éaì ^a‰†ÏÕ!·MŒçõ0¨±±±¹zõjIII[[[vvvBBÂË—/}}}Aù†7qqqcÆŒQVVžæ¾š¾Ô¢¿põÑÛµ=ÁûåÜU µ±1ùnœâîØ0¦ß°–4We½{~†¾|ðKÊÞ»t …zhµóë€Ǻu뜜œ899sss·oßÎÏϯ®®nll ‘ö<|øPEE¥¬¬Œ™™徨¨(Äd8ÑJøÚÚÒLCKÿSûl¿Æì]¤iü¬™¡w_"ÛY|-ÔÓ+X:q>š•Ò™'ÊPúm³20×Vƒ·ñå‰É:hVÊp©4EæõÛ/ÐÞ>)Q<1ëŒí/ å‡ ÔèE-<¯`êý\"øiLq÷_|[ƒfùâîÇ£©Ñó¤èû:þ^Bu–©ó½ÚSkhBW‘p/1ÃŒ.æȰô÷^Š¢ì VœB'š¾@ûرוÜI†þ‚ƒÂ‹‚Üö£Rm<Áú`ÄAIIi``Ÿ••IêÁÂ2733[³f //ï›7oÀú€Þ!ŽÖ®àëgG¬ÿÇUÌå;”’S‹P—¿BÖ„1š»Êvª’ônßo†Tg”ŒÛúf¢+[]}+Æà°ãž™Òë%{¢ð¯xµlµCP×ÚEòsÙÞ÷sVCiG&ë¹HÚÞ¿B6t+u[ßÇß:Zm_ê‡mUŸ«jj0'ãø½Ni>ËÈ0÷ó^Š3Û•RjþÊùæ ÆK z ë1êÁú`$2cÆŒÓÒÒÂ#^#ŠŒ=z„ 08Lj¥¥½e"#Ö«äÔb­ß a’ªpÏõtãíR?ÙÕ~LMŒ»çmmF¬š›v åÀB»ÿ•¼HIL⚈ÛÒ®¿ŒQ Ž@’&iIt¤ kD55'϶™¦ïã·em矆a†[ü–ÎPbçàj¡¡C«ëêZ¾¿·÷¢°ñYêÆæü”褻ާמHÎCËDþ—’5ƒ—ÖÀp`Þ¼y–––ÑÑÑÌÌ̺ºº‘‡[¹reDDÄÇ!#«¡­w¢—™èÌ-Ó•„°Öª71¾VGÐÊ->æ£ÛÕ¯ORöŠ™-Zy6yý’Í3–lvH;±\Ïáþé;µËl òÇÅEg8xØêÒc͹a®ÆæGømï‡j`Ííî÷øƒ‚>¸\¦-ݗЪöy|•ÖOhnë•k–ˆ24ç;/!6(-/©Â0ö>Oñ5ÕfÌ”8ÌèäÓc 7é.tÜõÔwƒÅŽ«OSógð ¿l뀑--mYY™““Dc¤±~ýz??¿êêjÅÈ„Sk_ê§}Äúº˜›c1¬‘‘Góf†› GGõÀÌ=‡¿=H`˜¼áž&!: Spy›j÷8æa€ÏõæF#»”GJ¹$/FÀfx½žq¨ìéÍ+§Ýc1Z:AÍ;¹{ùˆ-@6Ÿ¼˜šÔ¢5oÎ$òZÆnç¢;ë‡x¿Í2‰¶Ï²¨L@®ØÏñÏç?½|å¾ßÑD¦q[S?ο÷2"*Ùxªpï…BÞûS}Õû¸„Ε×4è¸ÄÍ¢?œ=LÛy‚õÀ…‘‘ñË—/¶¶¶Š‘†¸¸8rþÜÜ\ÅH†‰_~Æù^WQ³É˜¯¢Õ^ðýPf¡)³VMéÝ-8•m”{.'°ö¾¼Ç¹¨Ù¥g˜H“¥²ëlŸÇçT^Òe¹ úAuÒ$o?ïe´0Þ@?²¬F(”””ÔÔÔŠÈØ±cá¹>9€õÀ¥µµ•‚‚â02acc{ÿþ=ÄÀúÎ ¹Q†ß33sSSÄøOàêØ\ ç´lÚ/vR›¶_Co—EHÁ:Uö_;BÙã#§âÅœ éÿpRÁút4Ôçøò¡ °°ðKYys[+……Š……’…™’Žž‚GA‰‹ˆØ‚¤ïéSϯ_ õõ„ÆÆ¯Ÿ?ý\ÓZý¹Ð†qpÑðñ2+(,8‰‘‰ B:„hn,ÌËŽ2¿¨ðsIiss••˜û,”ttŒŒí¹¿m›…&Ê}$ÿuõ„††¯õu_««ÛjjZ[Zœœ4¼¼Ìüüü‚*̬<ÒaHk^€Ë–/Sõ˜ΜyWö…^@qéF÷©R̤‹('ôÄÙs9uØ(nù© —Z.áëÚÍIKa„‡Ûáëí£´¿Í˜¯±Úc«$–ÿølÀùЗJÐcdŒ–9ØÊðvì–}6àJû*·¨¢ö¼åf ÝGC¯M½ä}îN Óìíî‹™º®êewABÔA3Ÿƒ· 0ùÏ…7%uWë‹õš€^“Z›qù¸ÿ³´¼zŒ‰_eúâõv ¼ÃÙŒÀú`ÈSWû*-=&íEiCýW©ñôââ´"´’8 I cûoÇ.üúµ /¿å͛旙õ '+;FAn+ÛXû 
1üwééÑ/^”TU·IIÑKHÐ ÓˆKPˆKPýçÜ/!n~,h}ý¶éeVCca¢ œœ'×8û§íÃSÿ[qþ·šüÒwXb¸ê s¾Ú~[îV,䉚=qIïš+ñ ÈŽ0\ÝMs3¿GýïeÔcô6Û½B*¥àäîa§„6»c#¿-èã†àâ­“ÛÕ1j³Êr…¤z¼”cÚf{Jgdmåh /óÂúe*Ó·DEèECÅ€}¹o3nv†m -^¦ÄÜ-±}í¾p“_i<ç¡J#7oúöˆ÷‘€nI%ÔþÛ˜{ƃmYåb À,¹.üÊN-°> „¶ÂÂèø„Ô¼÷ŠŠŒêS§L¡˜2åÔÈQPP Ò¢×ôi¤ß[ÚÚÂã×?I¬å䤛2/&62äïæ>¡´4>þqÒë×uå5ðŒ*ª”*ªœâT8ÿXôÒžJÊ}Âׯ’’Bꘘ©ÕÔ”¥¤´!C†0Š»Wë‹3–uüÄiXèýÊúúñ‘¶Áw$©*ŽÀ¨³~•[讜ô˜~—æ‘ÄaøÐ¿ŽÍ¬öÜMZyÔˆçhó(,q3¾=s£LG3LE}Ÿèˆ)R̵ií³Gg±·ï¶;!ˡ׆š}íŽa£´þ(]’Ú“þ„xÞE¢ä[æ\O¨Ú©5¬€Kkså³ç×bcKdå¦é0-ZÈúO’AI‰Cž‰^Dy›˜P«¤( ®¾Žž²é™þ—”Ô ?JJÐÏÔe22BE^濟 •QèEœ{ûäIZLÌ™ ˆ>’‘Ñ`ºt '—(dâ/ÓPÿîò•« óæ²¢ÀòÔ Ðnrà&šãã>Iª[ºÄˆoŒ4dâàeÔ´KŸêÉ(l|–º±cZÀÐ3õ“çB«q†ü *õ^]Öïv;ómò\êŠs½AÉ5õ“kçÜ—§©.½l%¹¢Ý Œ¦!õ¦ßçûIÖ%©~÷§úÝ#)ÃÁú`0R^öèÜ…XM &KKΡ•r ŠiÓ˜Ð+++ìø‰jËuK¸¸Å!CŠÏ5Ïüü"ÍÌ8†VÊq8 <~z½Ëvs»¾rå,AAÈPë  ÕUÏÏž‹–¦wÜ4´{ÌGo½ÞæÞ=wþê2ÓùPó3êê²ýÎÜÃOc¿‘{H¿QÚ;y?~LøßÿÂ.ÐQ‚Ì€aa}­ï=öåàè´!- ÷¸ñrŠÊJòƒâÇËê´óN‡éÙÛÞPŽÍÙ}P_x€a©Ú§Áæ×9«y¿ê‘N¿ÏÕ·ä;n»‹±Óÿl"*$fíÙh(ø—bòl÷hå³ø­ñ±{¦ÀÝð§im®ùˆíÿ}üw°¾¾¾©>Ü÷>þóûéí‰ß:õ†€¦¹Ì7À·s–gív}á^tÝõ£íëJ³¼ÈÎõSà9×ý5ëþ>oß^¹ùr½%'#ã0ìEAAF†Þ×÷„²²ì¤I³ »»QXpùÙZsŽÑlð–”$½„8íÙ³çlj ij-ì¼P3Š,˜K7žØ¥IkᣳÇÃ#žÖ¡çVÒ^1Ù ÎÞºé©}qÎÏ?=-¯ž‘[~¦Ùšu†£‘?¾rw»‡ßæ=ƒÔ!'®.jMµ¾Ëæù¸¼ w—8Õõ¯º/äwºåÚÖ«£õ§ ?ìýQÌþÔ1s¦Ö¼;'ÞNÌ+ÄqÉ*,³§Aa¢¯ÁåÇSv±}_GòR.l¼G¹xÿAŽŽþ:[J#¼¶úSOw²]8¬ïw`äì£ÃÙÜô­;U îó›Ç^7âÈ·ŠØ¦;nÿ¾ð°Ð? 
A3®‘lNT”ûö‚ÍÈ?ÍçÐ(Œ–\i±/¬/uÙnžëi}ަ&²D67 «JÀgÀð„Ðv5ð''åPoÒùƒò$5ÎÊŠ3%å½ï©ýæk¡“Ïo¹O¸ê‰Ãµ þþZþ kÖ°¿zUæåånµÞŠšrþwÑF ”V~~›ÿ¡°àcyiÑ:Vkqêµ›qŠŠ6¦ô–ãf&cFQÅQícà Ï-Õ™qkÖõ+DȶÇÕ™rî»m -¶ñ`F›š>NÛ ÛÿyZý»°èKc6ìÇ0úÕz~%¬rüæÍVý.ì¡_ØÃBŸ” ¢`f~ɸwéþ½J¯è»/‘1~^¥a(µ:àBð¡vͬ|`#-» [s³Ø[°ŸÁåÉGlo¯ëëH£‚ѼãÎ3Ížãî¼ðà#¾—†ó/è^ôš8¢®“?h};l7Yiô¨4³ßrýýpNTpõ»Î…EžwRvZ)|"£åSŒãâ•^±y½YoSÐå z‘³O÷–O™áÿ¾ç Å«™—‹,%6~÷MÅûUÏI­1iF‘·£àæ¦Ø1 îäùÐŒúÉ/<ÞÉV{ eÛ8.Üùy5é;ë5½Ô7ÖçßsY>Ã+¶£]~çÞI}‰ò­ïÏl²4÷ºÛg²Ô-î\ôÔhM~ªŸ‰Âš+äKlÂò½ àcàøÚZyìäqí©LÒÒô#áý*(ÐñðRxt·Ù°†wçϖ“£Ÿ4‰e$¼] :Nªƒ‡<,×™CkÏŸU»üâò÷ï?|úTXQòése3+ïh.>6>¦ÑL|b’|b¿í|£ðû®ïœaì6ƒ‡$yòÆö;ïX/ácèºYCFü}ôÏzê$f aŒET½qM§tõÅü#ȆIø‚ax?óöO\u¬û ÙgM•Ï’ïq&(f§£ © ßËàòÖQ†oív&&¼Ïƒ½“u\,ìUCeúXž+À6Þ½$Ã0².¡ÔŠ€¦ÛG›(ã7jL6¦WºÑí²0wâozìu1ž4qpá胘ާá›&uzbÐr‘Eþ»ŸHTËÍŰd— í’‰.·×rguÃI ûfR_š¿ïÜ⿜f¹?ÖÇ1ïÝù‹!+t•çwëzÿˆhèKbŸÂcS;æ .H^Ħ©Š²íTâÔãs¬nuÝMACrÔ—â))ß¼;þ”È)Œ×¹°h_¿‰¨y^FzÕ;r?îüü"-MEžÞgV­bçäAã›ññÒØÚpyy]gaÆÂ:fä_[­×Ñ#FF£hFλfc§rtäöôÈili}_X’÷áCqQQeIQ}m 3çhn>dwl<ü L,ì"ãÑëo$ׂ ÝÍuâ"©®0È\Û@ôüÉœ»*= àÆZ¾Mæßý_Àý/jë\:zïé!½öyÄdkßwâìQƒC: U|ÒìIæ–膅R߯P¯)o`áhÿa¬©ßOT\ïËéÆÛ]÷|aloº™Êäžÿýi{ž,“¥iW×´¾gÏž½æ[W÷M£4î˧ÌÇg8ÜHé²¥ðêýK$Hvž±¼«ò툩qÓ`&M¹€å˜ /ë°þGe'ÉOÿ3lo‘wm]Wå½úîí"áï¶rLŸÑ:¢SùºÂ¬C ô\ŒŽÙUùC>>Ÿ;†ì˜ ­¯ÿ¥|J=nÕUù¶”ö~ë §ÔUžûûÚ{vž±¦nU…U aäÊg_E8L~‡Õ¿ð–‘óæ×“äççåeÎ~ßÀ×kú6zg',ê•G¶pÎñª›–¬ð‹|m­:zÌÏd1ÛàQ¾ú²ªÒº6V.Ö?ü+8=ÅzKÎcÇý6X[1Ž‘}-šŽŸòÒÓcQÊG‚’·a§§gÅÚ•£ÙF\[¡º†¦7ó?–V–776°rñkíÆ°óŒ¡¡gãG/¹“¸Î¡Ûq¥a¶½»¬”Z½O¶›òÑ©z¿¸a3q¾ÙpçêváSEé±F­éØ®SF<§È·' OÕÚþtUYMKsS-†5tª£òÖ¨=53·iÓï#ÛÕädºã±~—ooEÞ9bûºðË]Î"ºøœgv–½ïeL÷ä^3Ùø‘ó­/b“^Ħn¥è›üÀü[ÛÎêÇ·ºU­½v· þ&õZ¬:™|­‡Ó…M†NœXÕ•£]*€¼BÉ”Á`ü(ˆ^+ö'’ßý˜¦'ýçŽézLÿGA×꘿Žüú›„õ¤/äÿ·w&ðP­o?ƒ±ÌXf,c+k!”%…2£¢²¤ÍÜmh!¢E´¯7uK¡ÛEÝ[të¶è¶kÑbèªn¨•Ê–5Â(ë ÎÿœË-ÿäùv>9ç}ßóž3ï;sÎù÷yŸ§òYNf~^I\t4›Íû"7Ô)©"òºÚOD¤Ãm;„Jh ¸É°uc=ÞzŒM Û‡ò¡‡Äµ# kùf¤rNAÔ€OzèoŠ:øë¤IR¾ï¡?qýZƦÒîóuÙs½è’Ÿëq4bú*ÿ$„±mMBàÀ/Ý’‚îßïï (Dìg½Æ =J\[»ŸNoÀ„_XØ¡¥¾KEÅ$¿»îE+«ëžçä––±J‹JS¢ÊcênU^IHRIU[F|s§Þº½™{ºvîÇìB³ íí½Mîè2é™é žÛ‚ûà»öÁï=Ãw÷E‘ŽÛÇt8JáÙ#‘éâ}ñí?º/û䮡¡‘µjjfáF‘ü(ùîîdO˜™ØYCÅìÝñ¾<έD9DDNm ’˜ß–¬oøŽËcÑ®Ž3¶—£Nuj ùô:?7 öRczî"•ä~ 5?¹\ßiW§ f\ ¶lâKYyúYð´ÁWíIcÃM…éë”à¡  ·\¾ú«¦¦¨–Öú…E¤¤±ø±&ѲÓð[Qâ 
:ã¸7cÅîY‹ŸÆðɦÆ2ì9³3H¥É*ɽ«*ë‹r«ª¹ïÕÅå(J‘Öt²ß} 8MDRKJmm×ÖTX*­öMna5nÜ#LR܃aBY!++é'÷»¸øô«Þg&FHI ’úóO€Lœ2UîÏc‘óç-ï‹çI»’ò7/rs _¿*f•½%IË+`¢ŽJS–’“¡©ib \ìþß¼}¼ÿ'«eˆ¾ÍÔßRL‚ ý´¾¤7—„×;¸$i5³l¹Y…(B"Õyè·ßð´­èr­}³=ìdßxû×P–›Ï¿]œ]‰tò†RÿoLÏäÙ—¨³·Ô'XJuTœñ ƳÝïЉœ# »Û[oÆ/(ú oU”™òïÝ{ÿ$%ÅÇ_Jíà£fÇt­k«“R»ŽË7|×µc£óÖ›µ9qÉX¯L¸q±ô–\LzLqQBæS–¯ïGå{ËÁ$U'‰ D÷ó¬ãÎGìGe,ü(R›vl|‚›©à¸`è¤xoD:ws\:ÇYOÉ={T}jž 1Äu<­6?7ö×TDuRa®ƒR2Îë[·*a£zmF]ÿ:bÜjù/.Êb>åMí6^˜²Q AjöÙú{_æÖÇá`.õüDü%î½Å5zS´ëG}:cc¡Gj3ßÐÓ×Oz¿ª*=9¹tåJü†èf¤ Þ»wnäÈÉßæâ>TŠËrró‹‹óËKŠßT–‹KR)4\ÚÉȧJK ÐÒ ]ù-#¡»è\ñ"h‡¯i¥Gòº„šì™`æ×®ë<‡Š]}»¤EÀP†ZÛ!ëÚG´î,õúË>r&Ÿ?¢’ nó‰ii*È+Ðäde‡N·7$"ÔÉ3§%¶O`‹šëî<•?`:çÀ,ךb~°ÎÚ³¾’y'+åZÇ ŸÃ]äÙ‘ùÝI¾²gÌËÌGY/¦?z{IáNÍ S%m«)ØâÖZ$Å’`ÒVyÚëe[F¿«ãè«÷ú[F£•ÆðEoO°§Ö]}½q<ľèW®%Oß«¶‡Ï÷“«olàÝ» õ/>Þ™Þr‡úÓ¿tW¥Ûr%ß¶;ûM[îï{~C®l·ÞqC¬Be¢å« ®Æ³‘k­ÕªæXE•éÝyÚ$=ËÔ²u«ñ„g€3÷†žº×Ëc›¸> —|üãÛg`§1œã¶ÎH+ÔÏTäc>œ­ ùرäþ£ú®^½l=^‚@€†hxøƒÃíÿ‘<؜Ƭü’œ—ù¯Š +J‹kßVIIÓ(òŠÒòJÒ Ê$ñ¯èC¥¯‘¾o²ë–øFB¯¼ÇÜ*A4Æÿ4g®‹Ð“‚üÏ—¸š|&Ë^ëiJ¡å›™ Ë$ K¾ Õ‡cºôjåˆ-T‹öYb§} G÷T&sÝNŠš_|}n¸ìä6o/Q.ƒ£\c[7•êøˆÓ-Þ#[ 확®¼‘@½Å‘ۢεû;I°¦DÓÎѰ$–ÏwÌ nºtEwu::æÄvòGóe¡ ··C¶ò™w¦àgbll\•ššÕEùM–¦9¾ŽCŒ—Oz³ÇûíTÍÈØùwtÕ•Ãnõµ)G.u˜&‰øìŸóþ¡;ʈµèkCþÚ4AöæšÌÍcá·ð‘'½a5Ñí‘9vÛ¦·\‡äÂHùïþW¸×áq™¨sÛ(CmA9Ï­Y™Wл$½,lDLƒü.Šž°_vR›t²-Gƒ>6,ÚÑ®{÷\eñhãÏc+Œ•~ ÁmNkîßàŽë%%޾»×ígÕ©úhò‚ÒÒ"ýd¸ïMÕ“¬¬š3 hA â₃IÞO½4b„Ã=PMûY^ÁË‚üÒ¢ÂÊÒWv=>d'§@U CS&É(é`‹!ôȇ$Vá™­ËO&䡨„ÑtOÝ~§™ì§Wöÿs1»¬%É›NœïÆ ×žw%xWÄa,{È8s‘úz”¨=iÜOòdü>ýØêèêLO³Ãaf–Š‘äM\‚æ;h1ôë.P;&#sãÂׄ0_Š+ÎðqGÈR}óÕT›]ÒIÖ³»Q£× ¯M‡ËNhNw–R 1 îÛ)#ˆŒC Š%…9Ð}Ú ¤ÆEwTYö‡“¹™Éòß2ÓP—¸-êvü^G².ÅòT‘æ.æ)‘P£ºÜìºÎØÖ:£îœ{»B/`ÂÓ‚z:Eôó7&†Ñ¬Ÿí5ƒø'ö¥¦òÚ„¾$ær謂ƒ?j·ÇÐËÂçCÊ“Ýg¯Ã3-fÅn¿´†[H‰Iy÷fm9¶z2…ïé‚?·ºžÓ¾wГ„!‘­ ‰[Æ~8êÀãáÃ4ƒNè2Ð[êÇh»J{ùM ¡ûû'ÝÐ&”Ä—.µÂƒ(#DaÞ=]áxÑf'Åæn*µówCýÛ,>ê3Î^Ÿ<õœ½úÍõñ!¬Þ)Þ˜ëFü™ëh­ÅF”å!âHRµ†‡OV¤Þ'¶‰¡¡ÔÓûƒê{ð€9Ì€ ¿‚޽OJHÈütÕ‡¢hÅÛÚ¹yE¥%E•¥E‚9E*>j§L¥) QU±Eš¼—prŒ4÷Dt–_½u ¿ê”Æù´Xðq O8šÌÉWt?–tU‡„¿ñ1§è„éM Ø5Pd¤Uµ_è<±·¶·Go¯Àƒ³?iþýDè™xpöES¼,²ã­õòôÜ)Ýj¯;ììpÞbóÜ<<ÄË ~SâÌl¿žê“Ÿ‚~ôìH™V²X’‚.éé)¨Ù®AÑ5ÝdV†¬îÍÇzoÂDý¾RÓi^D»ÏÖš´Ë,#×Y®;{tF”Ñ[PtK÷ù:(¿€^ñ"«Êqú'F''û%FŒÚb¶ú±5ÍÓ÷Ìö=S¨ÄÁ£neš­ÎuVò8à¾cá`*R“tì²sÐ=l»ðõ4»7ãÎhlôN :¦Ùj*’“ó[^ v 
n|}ývëM¼Yz®L™¼kíÞ¢‰Òã­vxî‹ÚÍŒ %DŒfÚYéˆV>}ì{ #;2•3ÿã½=ké4;Ï꽟]Á`|?ª¯èìŸÊS™+Wd÷>>¸¦fó_UsØuDá€ã>T*ª^dç¼.-a••ˆÄ©ròTyeLÚIÊÒDhêšØš/@qÆmìÿ%;—Ëñž[i¶žkÌ·ÜÁ_ •eãþ0ЏhàßåÄÅTW“1õl<ˆ»±WzÀƒ³o\?“—ªbï2Üëp òÞ@íLÙ3籫ØÌ#®c‡õËÌ"vU@ÿ|5ÕÀ‡hd¿­¬hTTüØ@6–Ûº¢4Þ ¦A~•SR÷Ÿc!Ó®ЭԈ¦Ahò,>ù×è´•Ë !U!:~ãL+Þñ¢ÑˆßŠòbÿ—tøúÙZÜç¦Ì ãLG':ÏÀ_p”÷Ìmvl]|SÁhô¶Ý#Eñ窆û÷‹øŽÌA$ˆcE"E;@X%1ûÎn?}m ©Ö˜µ ýŠž\Ïæh‰ HJ–¾zF“ÿÎߨ—³øDÍ_ºTmíÞ{kHú‡xžö fuŒ¤×ÀF$«.k°w±6â7³Ý{ãwü%Xv¾õNÁÙ»Aª¹yï Ô>5ôvÊùÑk-$;“nÛžºmÄü°Œç¯`|)wƒvÞ }ÍSÿQ}âö[/¨—„ñ¨El6ª8”ÏÀ׆ͮ& Þ…Hds¾÷Þ¯þÄÞo¼pðöGÃÃÚLªmzHãÕCø‹†§%žÈÊÚ…Írçj¶Ê1ñ–G>üÙO„a;9}¹M&\u 3ÛyÃhxVËìš:Wòa5xÈÞú#þw]XXÈÚq`jl~u)¯›J£v>Âþx} ì ‡ƒœ»L*)®.U†67‹S¨24i¬6!‚Öb"HH¨IRJJR’‚iœÚªÊꊲœô®Lâ*¡&®dâ)¥æ&¬‚ž.ÂêÁÀ¹›B\Q„%㉸j—CX"‘»&ÈÛ&bÿxJHˆÿ!bß@|Ë bIXi|[cßúšåäæ¾x¬¢5ô Ô-ßÚU±»pÞv>&jHÕ¥}qÁ½é¨G’¦;Á–h¤jae$Rr!¾Ìɪo€Ä>DÊ++•Ö긮õx9ìxðtî¤ÍÆ[Gñ WZÓÅ>ÃW¢êM… á?b¢¢ËýýÂ÷Gh¡¢3 ä3Röüá㇩+–/ës#À úà«",&!N({Í‘“íŸY8û†ó²LL€],[e'ûÎÓvUº­2.ÛVMÚ+Qj:¹¦Ò†-,B!¡\x+ùt/–-åîKI¾¦FbE[A~ðwÿW)*b«ªŠônwŠ©‰/ra/ò˜Aòì”e<{¨>ë¹ñ!.Ñç9+óÍúKâ&z¹aEPUÉzfÒM}âÕN50|–üæŽ ï Ék‹u7Gúo%øw(à{|‚“ϯ„(þò"uÇ/„ˆcô¶S®Ÿ¤Ù^•Ô*)hÃõásA$ý|¼ï¦>8±cì³%eä¡M>‘šÒüĸ3Ví|½÷ÑÐÇU_cÎå‹<ö^ù`A‰«ŽFÿlJ뢆ã—ÞŠˆ4Ô‹Œrr1¢ }®û"Õ}5 ÉÏz¥úSŸèÛ^n§³íMbsr§«uȬ0àŽùØÌ‹ TÇ6øñÍà+ à5»J«¸N6>ЧFÛæº=ÀRVƇŠ_Ç}?"†wjgß0÷ãJŒÇör˜ÆP7BrÒÄ5z[t‡Güï‘ÆëS6ªõ®Mž=RQ‘$ôÁ×ç=ECMúù‹ê^«>Q܃Fìé>›dꀢ¼Àwíddƒ¢6m‰z®sP×9ï;ˆØ€ˆœˆ÷‡h² Þ„ž6yù’L¡ˆ’ÈT¸8|^L ±%í¿Œ¸C¡CFÒÕt MzŠ)z’šv'ÉùÇY«VöéÏÒ‡ÕÈÝíæfwº¸žªS¬ÔìlþÄì+ÛÍä·«Ï;—}?(ç„}fd!ocÝ0;#Úg±.øBÕßúï]O07ïÝÛ5vs î¸ÿŠº« ;ƤM;žñÜÊõâ8.3δæúßdëËØ†kÔúhwž¡_MŒïN7­{=,Ò9®zB¸_<9ñŒ[¢³0Œ®„›Œ²îò*c³ÙäÀËÓÎ _@,fÔ$âNËÿ¦1.Ǹ1Ì~sÜb£˜øs~\ÕIç{+ù0ÒUÕ×é½?l˜å±¿NZ[IÀ¡Gª†Õ€vøB ÓÇ+ýaŠúƒAFfDaQh™÷@l®’zçYúÃa†&­,mÆŒþ>TŸU}o®­ì ù†ÿUxßY©s)Öí5T‹­m›9¿Opyl-“{÷âƒrQö¿;·j3Œãã®s¤ß¾eË ± kóÍ­+¼{3éŸ{©O³Šë¸÷8iE }#}C»ã ^ϪŨ-M»sã3­¥fTLQÝÀÒÎzŒ™Ö{ÜaÕ–e&ÿs÷^ZJnVey]-‰D“VÔ4|ôº©¾"üP¾qÔ4F7_KÈËkèí€mÏÃ%i”0æ‘(K•Sî¸Kî‰CÓp'Ëfj!Õ!.sKŽ4b? 
yÂ{Œ¦ª H1‚$KvÒ£·Z7žåFïf¶^ ÏÓú[û‹Ý˜äèÓîìÊ1[–é¡uÄœ+k3âA¹øqïOVê­ýgõ[‰ìì’Nvý¡÷iò:ÒT‘ÇOêt‡ˆÁoÁ½øˆ=|³råd·@_¿u‰º÷¹Ì0bªÅKp3ðˆÉï²Â­žhÓY^fR|9…Œl|ðÁ¦~øi¢›6ÜK¾]¬­W¯2=W¯V­%D„¡§¯‡Œ4e’5¶@S ÷g8÷|>òûþ¨}Võ)Ø,rDη ‘˜ÿ›Ÿ·Ï¥ì•¬úøS®ÎzTÀ¿}d®ê‘¹ï)ŸUXRƒhKb‚ð§Õ›½ãÖ¶e$ðcðëTÚöÇŸ·øÒÕÁhà[‡ (¸x¡WhXØ¢Å4)É^ú®´Û‚îí:KŽ11 øž})ôi(:»Šv—È·ÙŒK©¸äCF¦Þ´¦ô¶8±ð}ÏÝ终‘¤úáÀÓÝ?dïÎ9n24yb?üø(*ž?ÓÙžBWÕ׈e#‹iØâµ_%/Ûfúˆ³iÑÕÆúw'ud'÷¬š®¾ŽCT$%d%«™n~zwšR­9ì==•»+]W^ŽT¼BIüÖ;z Š®á¼N9þ[Ì‘̬.ÊÇ ÄìT#¥{胅'À7 ‰,½h¡çþ_#}|äI$oÿ„ïîÞm†G ä¢:)'×A­·U57‹„‡åÍtžB“×ꟽO[âå»7tÏÂ…rJ‹ð$´ñD 5õápP}=dá꼺9RæÛþرׯ0¡¡»jEѶ—mµ®ïP¿Ñê¤Ô-í>[sOÞêx”÷-_µ8ÂgpÐ7¯X3¢7 Ùá…?±¶,óèV_¾¸…YžkŽþtf ¾àß6RÅ%ÞÞaaûæÌ–‘WøÖÇ|LýýQÿÏPOM5%<üéܹ3û­äã!F’ò÷[¾û‡(jªýÅÔ³¡A",ìÅŒöªªÆpP}ïƒ2bÅßž{§ñy×ô*æ ®+B¦O4<@ž,‚ÔT¾L½uá€ÿNfÇ}¦Ñ»è ùÓÝr/WRv‚´›b¦>r×g´) á¼NYãh²£S¥Éq)¬+ʇ«EªW̹ße'§¶æ&®iœ{ðÊy¼EI¿(ÓWðí­y±ô…’¶ÇÜØ¯=R…Ñü#7üÔv@’œ¶õD„/Z½‘¾6H>€>Y\fåòÕû#‚G[ †}ÿï—y2ÇO>öõ]&*F†Þ!/_¶ö?vêèpzíÚ§QZ*ûÇï鋽JHР÷T߇™QPã{ˆ®77•/1f§_ÌÎ÷줞œÂ AÃt(‚\kÏ?»˜FXŒ¯èn/eî7Žqh¯¼à73ùßøª²ÿUÃ{qXëf‚5wd¶ò ð}ÕfÈÉ8¤4> 1dG«ÝiÚÁù´ƒó»8_óí¥ÿ´£-MN•øÑØýXë.³¨gu÷9Õ]ŽÜÝ2¾Ü}|Žßâ &óXÔœùó¤¾Ó*xüx¨hùŠåk¡Óù™;wÅýûçÃÂ.\ G$¾×¯ù…óo« V­ƒP}=€¤;'ƒ­pÊrîÜNLJNIÉ~^–õªšwµDÅäë›i6ÚLK®‹·§rcw¡èæ»'Bv†_Í©©Ç 㻘O˜N‘œ‚¢œ¢Ôчÿ¾™X-®ˆ— z63縻Œ“ãÚàxÍñ;¾'ädJ~2ÒÔu†; “ú@µ-w=àT4AXÏwíö¿Ù5 šä’ìW êz&c&Mæ`¤ÜÙ#‹Ñü£èü£H]aâÕK×oþ›þüiNI /Kœ¬ j¬oj>Ñ~C*_k€¾ƒábhðjwHäd)-­ïmnvq±BtôƒÙ³t ¯ßÅÄÄAO—ñë¾0Ëqâ†ßÝoe9-ê÷ÿœ&U'€êë=D9uúTlqëx4uZ}Êiu×5+»oÀ–nvSw uîyµmP´Ì\±Å¯'ç+¦LŸâ-ðÝøþ”’_¾líóÉ;v^su•QTø¼;VTPzFg(À Ï{ïð$)ߥkòóÿ >3s&Uõ»˜éWS-ñû¡\CÊÊÐûª€V2_¹ÂüéfdÄ͹seTT„ûèaUÊFÇ<14¤øù¯nýH0ìåˇ7žqq¡öÝQßšjjtLöÀK¼¡[T] 3„±~=#'û^pðe[)ƒa})kÖ ™Ø¿ŸLœ î㳺²¨¨¬_oPT˜±sçß––#Gö¥Þ/x)û×É ºmÑ¢èJÕÀP×0²¾®æÔ©?ŠKª~r¡JË|»÷îšj™¿Žg üðÔ•+¡û>%e½+ô8ìºó玼È*™9SZQñÛ5úm¨§ž8‘_W×8c†íòeÓ¡ûT=@TŒüÃÞØÊë²ì#ž+]mg'5xð·2ï«0_æÂ¥çA[³ùóAì}fˆÂb“ñùüU¬Â“ÇOç²&LÒ×ÿV,?KK°ÞÏ©¯çØÚ»ºÍ€þP}|²r³~ZŠ­4rê™Ì3wï=W ‰XÐIêê_y˜š”X”•Í26R¡Ó­,±÷Å‘¢(Ïp^‚­45r’ÿ9Ÿ|ç1…B¤[k}홟¯Ë¨‰I¯23+†Uc9fÞ<è}ÕÀg¿EŒ™ ¾Ž65¥?º•–ö_aÑÛÁƒÅuõÄi |Þ°oÍ͹9"Ò+3ŸUHSD ‡N˜á$ ñAPˆh>j:¶ðzÿÉÓÛi©iyy, M =]Q--aáÏòE_¾$§c½ŸYN" µŒ&L›F†ŽP}|%‚‚úC­°¥-¥¾îͳç÷ssr ÊË^×KHÒh$ U˜"I”¢ЉÈ$L9 
‚‚¡±il¬­ÅöB«ªš«ªX,viY5‹ÕH¥•ʨTTªÕÐùÉ_»ŽDø¶¿IEND®B`‚nova-13.0.0/doc/source/images/filteringWorkflow2.png0000664000567000056710000022303012701407773023527 0ustar jenkinsjenkins00000000000000‰PNG  IHDRôwSò¹üsRGB®ÎébKGDÿÿÿ ½§“ pHYs  šœtIMEÜ  Ü tEXtCommentCreated with GIMPW IDATxÚì½×w\רùX9£ªPÈDÁœÄ Q”D¥–,ÉrXîà¾öê¾so÷û0O³Öü3³ffÝÕwÄp<Ž?ƽŠF°¦i2Ò?@6—ÃærYϧ,,îWæ½½Äç„««q¸\V¡|Í "›N3:4ŒPÔ?1@º.ôG›nÊ<J‹É¬–(„àÚµùü ~Y.?Ðû­ ¡¦IßðÃñ‰U+óÑáaT» ŠòS\Šé9F˜šJ`Ê:¡X µkêqÚÕû\ ŒB޲¤cÓ­¹¡O‹Ì'æDª«,™C™÷u÷°\ÀéóB©ü®dõË|``§3,Ûø=W|$3L“þá!Fâq|«Yæ;ª¦aKOepÉLöóéïþ@|¡„7èG¥L×ÉãxêÖòÊ[¯âwi÷$ua–é:ø9O=[vtZÙö§Bæ³D¬‘ù}ÈÜw}ÆÖ²Ë|vöV™K«Væv{™|þ)ºišô3ã WàYu2ïgtxä†ÌŸÖT»(ç8ðSÆ26öïû4Ô„1ˆ÷œæØñkÌÌÌã³û½ÚÅÈè&*¡ÚšZêщÌì׺¯‘JÐÜ>ÚÛQç9}ô(ß$ž°Ÿ¦ºJdYÁãO¦”Ÿ‚U O’̯ô^%žH©±dþÍeÞÍB2‰Ó뽑N2¿UæÒª)ã/Ëüa}µ+tS˜ô 3ŸÀWaÉ|5SLNÑß3Jý®ïÐXF‘$@¡jí3¼ZÓ‰ÍådøÌ§||à‘5M8ÈräÜYûÞbǺ Žÿþ·Œä=ÔW™î:C|:Ãæ6År™R©ÌÔÐŽR†ÜÂg/^¸cïÃD<'Ȳµ¯Ó#—ùÕ«Äç,™s™gnÊü±ÅÔ›2W§Ìûû§q¹®ÌW¬ÐM!e>1¾82÷ûnÖj¸á×e®9ì(+Læâ”²y²Eh‰T ß’—$·Ï‹QHqõÜEluyåÑE–Ï~ùz/w³vÍVfgq®Y˺q(;˜O›„ƒ áp|¨žêh€ŠX%TV>ò™îår™‰éiÚÍ6Kè\æ=L$Djk°;Öµ¯!›ydn’H\f|ü Á`‚ª*iÕÄv\»Vd`àÑÈ|Å =•Épþ  P.15_5ªT,2=9‰ÝëY‘2Ÿ[X “Ï?ð–†iR(q²ª H‚B¾€@ ݰ®À4LŒRžùD_k%š*!c'T¤»/pذ{'GžáßúψTÓ¾í"! 
!aw:qy½Ë#šr™ÂL²Ç#f|b‚‹W®`w») [ò d373K®XÄå|ÙÎr9Íùóg)&0 ‰ééÕSÂ¥’`bbPh›Íx$óV¤Ð a¢:Äjk—i±å#½0œ˜ý“KDžd¡_îºÂ\>¢iô^²,“L§ 6_€hØÎp×eÒëjñØSâåÜ<NœÇWSƒ®+³9„I¡GVȲLÕº]¼Ó¶™Ää]'óÅ{àû‹×—˜¸kp‚¥‰@ÒâÖ3ïÇšñ¹×ò/ c1|á°UˆßPèÉ…$6Uy¬uÝ4KØí³lØ@UWWFenNef&Ýn<²ÏX¡ÏÐ%EÁáv-ߦ#ËÕY)—WôŒkDåšôÜ>UA±¸¸”â°nûvþð‡#|ôžÌ† mؤ×Î¥w¤À¾†vjk9uõ mU¸ŒY†g¨n}93͉C'ñÔ·S_¡º6Fÿè(eÓDU$’‰LÑvËçš,L q­ë*“S ÐDkhîìÀç¶Ý×ò¸üü,¦Ý‡Ã¦aqoÄç¦Éy*jïºíèW—ºÀîrâô¸­Bü†íMÓuJ…üc¿EŸÏ@]e«IMÓxäŸa-Àµxò»o’Bý¶½¼¢Ú9{ò~sI³ãW±÷íWi©Rðí']úŒÓ½‡@ÁS»‘»6âv™øÝ ]Ç?ãʪÙè|v±h”tk s—G(fÒ7‚ÚtÏ)>øÝ§î bU¤r–Ë?¤§gˆ×Þ} [!¦€[¶¼¼þ÷BˆÅ‘ýÒ뢔ãÌG£Ôobë¶6ëfÞ#W¯]峩3¼¾öy¶6¬G±æXXXB·xL½Òr‘d"A:“YÅíàõ¸å{ëÊŠ¦-ÏR·v …B $ÝáD×T$ *ž{û»ä²y„$csºÐ”ÅM(6¼ð:-Ïd)&²ªáp:QdXûükÆndyŒü<'¿ø‚rE o¼û:>’0™ê»À¹ £ä29Êr–®Ó§›˜EH:á†ÖoîÀ¦Àtÿ®\¹F&[Âî вi3Òt§ŽCŸÈâö»hi¨´*Å=PËSz‰÷&ÉrìnÝŠ®X™ KèËJ1=éO?¡«gˆRÙDRT›‡ö{ؾcºroR—$ ÝáBwÜõ§(š ·ïÎÍ$EÅé¹sÒ›¬Úpù¼äÒ@Ÿ`dxußÞFØçXyK ‘æì«é@Õ$.üŽ^Nбe#z~–sŸ¾OQ²±­ÅÅÑ> l£½!ÊD/]]¬kp`s» D* †ü÷”6~YîSÊ‚ŒËà¼2Jnð÷d 9öv<ƒÛæ´˜……%t‹e Äå>ý€3—§Ù¾ÿMZ›«=Ç?çâ‰ÄêjXSå#37C"‘DH ¾pŸÇ$QÌa”ŠËxÁPÌç) ÀíSP%IÆæ°SLMÓ{¥Hç«ì~~+Š‘!9ÖGOuk)fsÈQþªzÖ´¶RBÇ£gðºDë‰=˜FyåÞS!JNÐ?5„±LR¿6; (Ú]i~:ñÉR†ý{¹üÖdE ‹§IèB˜ÌOŒQÐb»À ü‰ bz¶ÈдAS³»úäÄ÷Õ/t#Ç™ßcÖÝÎ[oíîB0;p™÷w„—~ôiŽºï¹± Ÿ;IR ²vC+·Þw³œ§çØAN=Æt¹‚¿üŸÿ†¨Gçi#¿0Ï|Ffm}ýÒÎn‹(º“š5õù.>BÞÛÂÛï¼€^œáÿøsΟí"²³†®s—ptîGȦS” PD–ÓáiÜÄÚöZd 2sóT¸ÜD"‘ûºN[®@R˜ =TI,ê ÷Â9:š#x‹ÏjÓSœ8z…êÖFì•ìÂbC˜e2™,º­ Y2ñÕvòzË&2sÓ\<ü §|Auô…¥:³*Z%a÷¥™óÏ(0Êðë¹Ã¤.ey§åE+ë¾RꢘåÒ¡¥‰ï|?vUba¬›ßü÷_Ûõ6o½ù 0uõïýû^ú«¿¦¥&p—8RfôZ#©áÈlÚ]$XJsê“'ØÍŸ½¾Uº¹_B>1Îų}4íØAÐsû ³”ãòáÏ9uäiÇþâ?ý%!çýÏ0Ëyº~Ω£Ç™•üåù·ŽYLsòƒ÷¹Ø3†¢È õ[ŸãÅ·£­D§›&_ü>Á5ÅÎùënm±¡MögøÉOx÷ïbl«Wï1¶ zO¥ˆ »vºÐîR­Š©"ÿúÓI>‚ÿõ«fmÄú²R.(–/wÃÈçr‹‡n A:1ÅÔä,2¾p%>ä¥ÔïÔØ8éLÕî"ZS”çÜÑc$õ*¼Ñ ê¢Å€"™ÉQ&&æðWÖ07˜~zs?â«FÏ‹k¼K…,Ó“óD÷5-¦ÙE%ÕU!.MÅÁ¹Žš5µ\ºx”#óuÔ56Ñ´¶¯š"—Nâuú‰Ô. }NÕ¨ †h¨¯¿¯ËŸ˜àòÈ0 ¡:ClÙý ï¿÷¿ÿUŽöö5(å ×.œ#)GèØSIcsg/ærÄŽšgh:OÓÞ&ʉ1Ž~rO};uU>TMCÕ@’UlšÄÄÐ5â-ÕDü+/ø È0,ðûä)’]¾“‘õ w•º¤;ˆÆÂœ?ÞÏ\ºD¥Ocvx|1M|tt~;>›Éä@? 
Â璉Y`rK2.4BÕ:µêmó^„irêó$W‡Êh†Ì“ÖOjž¡ £L¹T¢´tîFù–£çøè½”47šd’Í™lzù-6µ‡8÷áo9ß7‡Ûe£˜IáªßÄ–fã£CäÜ2ÓS³T…h 3RËöW£$z38ÔõÔÆ\»Ï‹×a068Ìú–Š£³\d¬ÝmgiÛ–ÛÓ’¦‰¤¹ÙþÆw©êgx`€ž£qõê(¯¿±í‘^³$ÉÔmyžo¹ü\Í^!Ý“åGH¬kh¿Cê’¤]Óˆöy7ñÉQ——‘á "mÛ)ÌM3;—Æ4Œnx5?ÁÇïýÙ¬„¦²Ù2k÷½ÎŽ µô9Ĺ骣[8ú»ßÐÏàõûK³ŒÌÚx÷¯ÞE`2pþzÊÇüÜé¢}o¾Êl|˜ñ‰Iª&â¤ÛšqhößÃ[Ã3¯Õ0qá Æf'îúu‹éYN}ò)ÃÓÉÛ Á_ÕÂî};ñºôïçŒÖ³ýÕ³=Çí½ñÛîpϽý.ÁHU‘ðúÜ(Ê ¬ð³MCP, ŠKi°bIÜèM÷eøÙ/æÈ© vYL ö¼bÿÃïÍòñÅ~D!càªwñÊ•þ¡<3š`t¢HSµŠ]¹ÙYšìÍpìJ‘M»=œ>}âÊ⩺0Ë\;ùÿï¹Ñ“-fÌfÀ4òt?΂­ŽïýÅ›¸å,ŸÿË?rñôycÛÀÛø<¯½¸#5ÍÈTHCµ 5”+·±¡³ íFÎ}q¦µGÕxÚ7÷´ùb´¯kæÈ©Ï9]ᢳ½Å,põäçœ<=ÊÎ7_#ñï$½±­0ÃøD‚Š5›¡ëR?±ŽNv4¶ñÊ|xpœL¾€, òù<%ÃÄöžýI²Juûb-0—֚ˊ‚,-.Ó=av¼ö-Œ²’„¢,>»“ißõ"-;ÊKËÐeuqäÔ±÷UZvHŠ ÂÄâáI]ÕT Ó \.i[à›¿ä ÕñÉŒ NÐZa05•¦y÷3ÄO}Âðè ÊLΕX··šÑó'œ…×ð]*}2gÿø¯\>~ŠŽÖÂ,Q*ÌôÒÛ?Í®ïÿ˜Î7§ßûgºûf½hqVÔ²óoã.ÆùÕ?üŒñYƒ–ŽV"ý°mÏN*<¶ÛGغ¦ñ§6°V^jÛÖXs{¦Qµûq:´Ûßo)Í~y$¯;ˆV93’“ýœ:ÙGUÛ*ö•Û\>²Àÿ>•çú´ƒB¦ÌhR “Ó“Äÿå?UÒ¿ûÙ$‡§ÙÜè£ïjo£—¿íÅL—è3¨iÒik¶ xػՉýzl‚bºÄ'Rø]ìl—9sÆúãi÷²JåšV¶=·ÛÒäÙË$O÷c–óLOì܆ÏeC–Tªjb\½8IYwÓÐÖʱ3‡ùíä5ªë×вaº¶¸iˆ,ËÖ_Uæª/¼J¦øGNÿá_9÷±Ž¢ÈHªƒÖ]ÏÑÚÚHHìbîÀ ~ÿ?F‘D ÃYÍ®­ë°É9¦ú.qþü9ì6r>C]Çz‚‘¨ŸóÇàó9Ù¾¥õÔ%UCù ‹È²‚¬+w¼.Iª¬ß¥þ)hK¿_.[B8‘* .¶äëx»ñyÖ7®E–îÞ5§—šº(û™¬Ê’njP†ô÷0I™‚ :ê¦ïô(Ó“óœúè=TÒÓS,äó¤ 囩ë¹yÊ6/•U!4»FCkžK ‹?Vl„«zìH.·J1_@öÉ‹uG¹¿­UeY£²©åι’|Û•¯ ÉÜp?>DÉÛÀó/<³Ø1^¡“<$ êšìÝ‹sÉfSýYæ>Ïb–MƆK„[]„½2ª ktV$¯*´¯sòï‡Rüß“yššílÚêÆa“Pddn›˜(„àôÁ$#Y™ïìq!Me !žœÓàžš”»'ZCëºõ‹3…`RÍpâì²¼xdéõ*mšKÏ_$޽FeÛ8£ƒƒô_>ÅÕ®a¾õƒW¬€úFé•ìýöX?9ÉüBIÕñ‡#dI¢²c;oD˜™™G(Áh ¯Û„—ç¿ó=¦â“ŠºÓK¤*ŠMWxæÍïP;>‹3\…ju¦žZ™Ç2nž£ƒ×÷ÐÙÐö•2_tžš¦NvŸ§û|[ ’€ßƒÔXùƒôdK¸+ëx(ŠB0VCÇ–­ØU !¶€ê Âccò+%)n¢„rcÏÒ‡è ©N~ò)#Só·u"ýÕmì~a>××O¼B¸È£G[yiÿs–¶3^És6}1[¶ºpk‹±}ÌaòÁ¡ìRGh1)v3¶ ÌÅÃغ?H]g‘«½9.MóßÏåùÛÿ)t× W˜/röTšÁ9™÷~9M!Y¦»;Çß›ÇûŽŸ¦˜úD<¸xêס˪hmŒ³ýÌ̵ã‘2Œ MŠu¢—ç¹tº _Ckwì¦Â+óÛß#“Í#Ké\†|¡„Ë®Ý<ÿË,‘Ï)ä‹a’ÏæÈ«›]*×Ì*šŠšz*jîÖ»–q‡¢¸CÑ;F»6·ÚfßãðGhòGÚõI@vaé‘å)£l ¬Ø´»„&)øM'’¹<»µ•…AR¹¹¿¸,$ªܼbÛÌ«MÏþÉî·Þdõ¼æg\î™cí ß[œ”VÕ€-yˆ+S&_݇Óf#\[…25…áºÂÁĵnÒÂ…|Ë›y>”Âã#“xëœô÷ Ê”¿T«¾| Â,’Éd1½öÛ–» £D>_ _("Lƒ\.G^Øl7O[ÔÝ~š6l&Vº½î(6/.Çí÷B%ò…ë1ȸƒ¤B‚£ŸÂð5°kïòâÏt»Õ¸²VVeêÖè\éË16ã¢B3éî-©uà4Ëû"ƒ¯ÁÉŽ=>jƒ2ÿçÏ’,¤MdI"›5ÈäM4§Œ„„ͧóÚ"<»Tç²LÌt¬s 
*OÌ,„§BèšÝ]Wï‰ÓåBVm´ïz–ø>ã“ù%²$r[wmÁi×ÈÍŽráâ%tMÅ,©é\O$&U]ÉÕ“'8 °k÷Fœº‚&½8|à8‰Ù)fâ |ôóŸkîdÏþçð;< Ó´6Óxˆ„Ãa6¶w`˜Ë'Y»®£®ÀC…I¦Å_Ç÷“Ïbä—§¼úFùƒï29§fÊÔÏùyͳ—Ûž¥êkÖ ß*X‡·‚ꚣ=jk£È€Ó&q1>bR×P‰$Ë4lz†Öñ9ü»_¡È`˜Íϼˆ$¬9p84|5­´5_áôMo ˆVNbsëH€fw¢k7ï­îp¡©2._·”äЇP_{™º˜oqd, F»NsøóSÌÍN2=“ã?ûÿ¨éØÄžvã±)KYXÓ×?bÂd¼÷‡œd.1ÅÌD’ÿÇO©jé¤1ªÐ×Õ‹ÎóþÏú³hÞ0ϼò2 UÁ¯KŠ<‘è—ò剂¢Êl{ÞÇÀôÿô“)I`J*/¿âÅïÉÌ–øüä4º&#Ê&]ÔWkäêuŽ~œâ½O4ÞzÙƒÏ.!)2­Î¥1£›ºT ­ÍËöàñX0 Iz0G¬z¡KŠƒ-/‹’æÆ¦Ü¸ã„×ñÆ÷c„CN4¥WPIbfO(¼”úìxãš§§ÉÊ(6'¡H›®°n߄ۧQ=¡KP$I¦¢¶‘göûoÛSµ¹ñؼ¨Kù<.݆ßç{¢¥^Èåo{ù¨)>àéPªªR_S³üõrvÊ$I¢¹®‘Æš†eûÌÏ|Ás—ÑË íóaÞ íboû3÷¼Kœ¤{xæí?§eÞ ².¼ø·6?{¾ýC:3‚ªšö@Ïçû$¦g( ìÞ¡E†¶/Í;q¸ì´mÛCmYÆév“=ËøÂ(š+À¶ýoah¡ÅçÚö»_ÙÁçVyûo~Lº‘ ÷-eªiha·½â–ˆ!¡9=¸tù>î‘L¸¶™/oAv7!¿ƒwÿ¾ó¶ßW Gü_#A!•¦2Ay’:¢²ÌÞ7Cl“\êÍØivòãkĪšƒ¿úød‰ Ÿ‚„àåwC¬Ÿ(‘Î 4›L4¦á²K<óJÊvªGÅc»{1ÐèäÏÿ\¥:ôpdžÍê#hx -¯~¡K2þhÕ£ ›‹êú›»xÙÝ~ªÜþ;nœjs©¹s³æôRÓxçþàº3@]sà¡R>MÀ¦µk©ŒFŸØòx<$æçY®C€ÓáxÀ:be<–3¨K’„³¨QŸ ñÝØ>v¶oÅcsÝ×û¸ƒ1ÜÁÛ_óFªð~©ÍkvÑÚ;?Ãáá ÉIºOa^ø©­ö3|ùuk xí8õÚ[C0Vwã_Áª:î6¶¹CÔ5‡Þ¨ÕõÕ1èn±®Kû«^Ï'STClZ¿í :ÓT’$B•6¾\rª®ÐÔ|³Ž:<*k<ê]b»BMÃuYs¨4·ýéï©è2¾:àºÌ³Ù546¾I,Öù@ñÈÚË}PÌç± Ø´¶“Öææ'V@²,Ó¹v-æ2ϘµŽÔ\½h†ÌútßjÝÇ3mµTòH IDAT›±)×EÝfã®t]êev*AEë:¶l½¹YÌ*áV™oß¼™` `u~jùÞ.óææíÈòƒ)Ùú ¹'_æ·JÝÒ«Å⮪Ž?ÙXß"?é^I’©hX˳õ7F{’$±š\w]æ±@m›6Y2_™?ø+KèOºÌl^!2·°xØ4.=îzÒêþ¢ÀWg{¼)óÛ7o& Z±çÈ<“YCSÓÓ¹%ô'\æ+!Ínañ¨Åi±¼äS)*ý¶m²dþ(¸>2Ø2_ÑB·ô&WYÿR¾€ÓfcSg'­M–Ì-,,–IæÉÕá0Û7m¢"z¬±gµ…öë#sIZL³·´ì@’®‚W¤ÐYB6L&®õ¯:Ù y9yÊÊ$Ѩ´ªbûøxžry»Ý@–,»¹*„¾(óa.- jÑW¡Ìãƒ=äL‡ýÉŸ (©6$ݱ,Ÿ¥òËw¯MS0<:FÉX>«kŠD]M²µgýÉüÂÕA®Œ¦QµhNkÉ7‘ùdÿfff1aæ“ÌÏ19yˆhtuʼ«kIJàó=øcÔ/ô#ó¡yÔ@ݪ–96˜+o]­0 LÓXšÏ'#+ÊŠ½GÙl†ÃHê1dòÊ7E<Åq‚~/^¯×²Ì}ÉÜä|÷]YÔ`=šÃcÉükež]’yÓQñXRDB˜Äãç–Fæ©U.ó<ÉäS.ô›2Ÿ[#óø0ñ¡%™ë+3˜—ÒŒ^<Èôp…B ›¿ŠpÓjÚÖ¡©÷>â4K&§ðU5 Ëç^ª 9؈$=ú³„@Ì&-Ã<ˆÌ»úèšÈ£U4¢Úœ–Ì¿‰ÌûÎ33;·82—e˻燂xüñøÓ!ó‡õÕVlïN™V™Ì‡n—ùJûnBPÎLÐýác ë*žÚÍ4lÙ‡Ç)èÿâ'ôž?ƒa „01JÊÅ<år鯳p!¦QZ|½XÀ4M„0IäêÁ÷I¥R«pÛ_‹‡Ú™,œ»ÒKW¼€n¶dþ âN!—&ÞwnQæÎ%™?†ëˆÇÏ21±(óÊÊÕ%ó±±G#ó;B¿MæþZtWðºCVþ G0">t•œ©îY‘3¢&s×16šaí[ÿ•ÚúêÅ×۷ᦠ8f‰™ÞÏî¹L±PFu©Ú°ŸªúZÊéQúN|ÌB"Ü•k©mZÃè…ºÜVYGóöý] ’¬`˜À#D(ÒݨI)=ÃÂä0ù|E÷à‰Ôáòù‘ïë¾ ŒbY³Yòy 
™—9s±›žÉ¶H’,c–‹VÁü©2+䙸ÄìÜ<¦3Òã‘ùÄÄYâñ/¨¬L‹I7ÚÅÊ—9ŒåéîžG–x½ù‡ÚW¤ÐÇâÓœ8ßaócSäR‰UӍЅ3ñar¾be€Y"9v5ÔN¤ªò†œ$ÕIåºB"?K÷áð¶¼@ýšZ¦/ý‘îÏ~ƒûÝ¿%×ýãô<ûz9ÎÄàÙR;žp5þ*ÊúÔÌÙ¬`¼PàÓ³ƒ´¨l ¬k®Á¡Ü.ó…þCôû„L®ŒªëP.`*nj¶½CãÚŽ{–º‘dàÄ!ë^" X³Ûï7FLLq¦g Có w[ò È¥ÌgÇ&s€Db”K—>Çá˜'›•¸vmõ”o>o26–DÓÌW¬Ð3éy-ˆìª$»Ê²®…|šb© NÏÊ^«$Lʹ,ŠÝ‹ò¥´$)H¤&®’5½¬Ûô~Ÿ ‡’bôWÿL2¹€Mµ!J’Ó£Dë[iÞµ»ÛKbÞ¬e±ÛD.¾* j-ý©G[Vfr”HÀE]ÄsãµrržÏÿrh+›_ÿ3<Fn–¡¿!1t…XÃìj‘¹Ñ^2é4²æÁ_ÝŠÛëL²3ƒÌOŽS6À¨!X#qõ(ƒg?#«x°m}Ó†Å}´£B©¢ ›¿Æ*Œo–";p³\~l2H¥²øýTUåVa”)—s„BùGÚW¤Ð…¨ÞJÔUÖX…”M3ã+ᱤ {}”Ç')•[&À ÌRžR©D)ŸGÒ¨º‚„„bó ‹Å’IUç+´:ñ¡³\î9ˆæ«§mïwn¯¼î0z`yê@¹|ç®bé‰ËÌ¥d6¼ú þ` PµM{H.[® üc£Ó¸ƒQJÉ)/ŸfÃk?Ä–ë§ë“"ŸÎ¡*&Š« £}3ùñlv(N\ 3Fó¹ “ ëª„Ãé|àïQá÷‹U®˜tþÔÜ JÔøïñš…@H*BZ•»[?Ÿ ˱rãkn••%jjVßã—KaxX<ÒÐnÕv‹Gƒ¬á¯íD\ú#ÃÝ—i^·EQ0 s û-¥†Ã ùAòÙ&ÇgqÚM$IÜ<¬FÂD’$TW”pû‹”gû¬Šò5$½%ŽÓOöÚ{dòYvµmÅ¡Zs ,,,¡[,+Šî%Ú±hûó·ŒnåǺ*;iŽ®½>¬çúѰš;FýŽ·n¦Ö%i1-­ÅhÞû—ÃÀâ:öùkŸÑñµÏý5kסª …¹~®ý€ÔÔ‘X„âÌ5¦¯R,èÞ‘¦õ86ÌRŠ™¾ó,$æ@ÖñV¯%àS>û)×fP+ª©_»õ®»X»+×QUû9ý‡þ ÙüáÊJJ©QOüžœÚHç®-xœ2ÓWÏto&;|†lÉNU0Bjô<ÓÃD«"·ô# r3ƒÌŒ\%“J¡Ø|øªZ U×£Þ× W‚RfÙáGy›ð!ËMÓ??йLkDû“c BÎipV%=ü>©B†×îÆcwY ÌÂâiº‚RvSv`wØo¾nÉ%°û*îs½ðWQaɧ“˜BBsxÑtý©^~$-Éøž~&IHHw™£#!ÉK#fû/ Wl¤¾s#úÒhÚj¡}%B¶c$º¹øÁ/1œUøýfΜ`jl‚/¾ÆÜ…÷é¾ÜG¨¶rÃLÓ´m†‘EH*Š¢~å4!Ù¢eßQN¼ÏÐáŸÓoš(6îh;mÛ_ÅòÒøÌ+ \:Á¹÷N2•›^§ª®s>‹J £à¼!óDÏÇt9€©ùpú|˜ù>Æ. 
Ôùgtì|í¥näfèûâ=ü^'ZUùÀ÷Ñ&—¯uóÏW?$§——¥î”Í2¥¨ ”l‚îà4?ü€t)Ëk{ ¹_›m)¤¦z›][¼—Â$·0â¬@ו¥_+“[H {*¾¢ó$0òóÊv.ÇW¶çbf¡xÑm÷Òæf¹@!“ZŒNš¦=@̘å<…LS€æð¡é:ÂÈ“K-ÜÑžl¾ðŠ™SñåØžK$‰€çf—[&³ HE}Ø»I A!k°…Š å!»Ãú×—¿‘fàà?’q´³þÙýhÊ¢RC'¹xô-¯þgÂ!Ï=ÍíÂ$=rœ ¢¦ž›uF`dã\;ô¦Æ'0Lмu´ìý.á°µ¦ø‘Ü_³Da!îÛv›ð$i±3…0˜8ÍBVgÛ?$àw0sùß8{ð4É…’S”%Ñö=øü²óÓ8ƒÕ5 L'‹Tµn@×Êwï Öжÿ?RŸž£\*#«v잊"#Iáµ/ã«ÛJ±P@ÖØ=~YFT´S»%BaºQ\É!z~ˆR¹›Ï½†Ëå@”³L\ø#“3“äsyd9C¼÷,Éùy$ÕC°a#U1$L’#˜ \6±êˆ6uìùœsŸvtû«xúœ^7‹Œ“$¼…eìÞü_S…኿˜=@ê|–7ÚŸ§6Tõ•j:ú Rj÷íGUdrÓ—9ýï?Ç»öÖíܤ‡rþà1šöÿˆÊÊðñ@˜%â]Ÿ’È„hß¹÷®Û‹Ò<ý‡ÿ‘’{»v,vHoÄ…Y&ú6oÁnÓnÏ¢¤Gé=øfâÓ˜l&Z÷¾K0轘!(¥Fè=ô[æç’†@uFizöÛèé®ý€la±6›åù²‹Íïü=•U±•×øM“¿aP±ñ¾çÇ­-Æö‰Þ ?ùuŠ×~as­z±]0Ù›e²¬°®Ã~[GGÁð• ïÿqžÁŒÂßý× 'ëøÝ§"å^LÍ'÷¥Z ˆcŠÅ^—QL“K§Èè.?ºÍ¶84 é¥âõ`í‡Â #g>&£¯Á¬Àå\ÜRRƒ™Þ#Œ¥hÞûC¼ö½Ÿþ¿ œ=F`ÿ«÷<ÂZ©(¥4raî‘~F©œÁT É(ºŽYÌ|iâ»@˜(“]˜Cö„GVH8ÕH¥/È— ܱ—™Ù÷¹òÇÿ Ý!ܲúŠš¥ ðõ¡@’$$Õ†Ó_ù?—±y*°yîü;YwݸÖìÔU’‚uûŸÃí^Ú¢TwS½åmÂù"šfÐwàçŒÇ³TÔ·QšîâRïE:_ÿ[üò]þ %²¯KfòÊ e ¿d ÍæBÕlÀÃU Äc]Ý$dˆWäøõÜæ/¦øvËK´V5Þuž„¤9q8u†®]!›ß‡Ç©‘¼Jr²‡¢·›bi;6Í$9ÚE® áryÀ(MÍc&šÃÍaG’4*êסeƒ¨Š„iÈ-$’ŠÝí!ŸJáp«Ó“”<…ä4åRÍÀf×I œ¢ïØ)p‡‰Ö®¹ñD˜e¦»Ÿ,ÑþÂpÊ tòS/6àÛû<êõï$ÄŸXØ!Ý¿0 fº06^dÃë?Â¥f¸úÉ?0pá [žŽu¡–Å÷e&NýŠñ„—/´bcMz¾ÌŒ¦}i'˜Œ)Kœ¼IbÎÀàòªx]ÒRl‡…¹2Ù‚@Ñ$©Pæð§I†LŠ˜F•±cޤãyJ±¤fßÏJ¹?Æô BÜÜÿûÆ3@Aqî*Ý_ü–TºBB¶‡hzö{D¢^âgÇ@O/¦0AHøšö –ï=NÁ>K n õ›ÑoÕz:Û©¨©E¼AÙüS³ï¸×ãfc•†aŽ<ÒÏ™Ð2Œ™v$ņ'ÖÀàùË$f_ ö_.1Ý}ˆA$Eż¾< f1‹‰Š"ËèÖ½ñ÷äç'™í?Aß¡_#;BØ—êÆ2ÕNJ©LÍÓã¾í„$kØ*…™.&úúïù;:Ö·c¤8þ‹ÿƒé‘~œáÙdšªíÔ46R».EYö¡•4®SDZ·áñ¸0ó «§¢Iù užlW”_¥£®å©K²†¿¶ ÎÿždbÝCblWíVŒÔ8éTÝkÄÛ….'¹vàߘžšÁ4Md-@ýîï« 2ÙýãóµøB{=þ+F†Pìz‘TÖNç ï‚ùþ#t%O“š›FvÖÒ¶ë%fûŽ3Ñ[Ïiœþ~Ÿ{éüu›Y{žPu’‘Áãw3#f,~£¸ÀøÅC¤Ò™Û A÷5PÛ¹Û‰›‚r1ƒ¬{ñ†"Ø”"—l¾² §ßBŸ¾ÄìLŠªM¯âqé+<¸/†sÓ·‡v 3ç×ÿ2ÇЬ‰(v•—ß ²¥IåÒó¼$·(f5ëÝù çŽàݳx÷ ÔÊ—X·};¹©n àŒ6¬iÁ=KmëúÅ4þÒhÌnűT«Òã˜ÊÞØxŸ“šV‡í›×?âû ç¯11l"É*áö½{ÿ]ÿŒüæ½x¼NR£g¼p–ŠÍß%VÓŠzõ&z/˜è9ƒ-ÔˆÛ!¿ð©’—ªÖ‚µ­Œ^:‡Q* É6Ê…I’‰)l‘Ø—\"P%’ù€N2oüŸ¢ëHå¥Rié˜Ù›Qa” Š%p‡*G÷ö ‡l6…½r+k6mgôü¿1uÁ†;ÚBÝæý諽ÊI`ºd¦'çHfR˜ÂD‘”;~ÉjÆí0IÄG‰ú£,ÌÎi‰T÷G$¦§ðPb>‘%ÜÑÌBÏçŒ ÏÐúâŸô* þGN}JèõocæÉ¥Cä&/3Üu‰êÝ?¢ºÆË࡟34:¾xAFS©¤jÇ÷pcœ}ÿ™ÛGhMÁ1‰5›öáóºnËà8£8Y|”—9Ëü¼Ix{×&2JKÛ¾H·=ò¹sЉJ°~ 
öžçÒGÿ‡Z$1#¨Þ½U‘nd'ã—Rv¬¡º±ùæ|”•érú/¤øÙOŒõ=5S`&+¦É©O8—ø›GˆÙM~÷‹i>ú$EcÈÙiÔjó¦›ù±<=qAE­ÖV;Ÿ‹çw8qiÒòל uõ2צŸÜ]잎º$¡hvt—ïÆ3‘›B–Ó˜å,óãxêÞ&Ž"K!Â5u\í¤,=‹;cdè8WIQQÓBU[3v[ EQ@w¢~)Ýs½‘:NϱÏÐkv³f]ç7yb5Eu=ØÆÚ—ÿš3<òK SÂæ‰Ûö.õë6£I9Z6Î2Þõ1Ó—Š=@˳¯áñ0!âçŽsià’¬ânÚCUc Ìeqi—ÑÎÈ1qñ Ì`'_yçÿgï=Ÿä¼ÎCÏßycçÜ=9f3`3)QEÉJÎZÛ÷úËnÕ~Ø¿ak«öãzw«¼¾×–%]Y²-ʦ(‘sA¤Af0¹{Bçð†³zDš!Ðè§J%4ý¾Ý'<¿ó<ç èŠÍè[ÿ3ߦ}h5>S%?~Š"ÞÞÉW]êõVHçj?Ï<»7q$Ï¡“ó Á²@3.ÆhºÀv$Ž¢°÷Û Z–8|´Âkÿ9Çk1/?þÓÈ=w=i„â%O25s–jí Q$;;…'܇æ˜?C k'©¡Ý,…}¯¾G!¿³¾wìÒu‘ª²”#©Ì|ÊñßÆß¿—áí»ÑUé:+R ìn·ÔÝ‹G÷~Î Ž ômÜuÁRWŒ mùò_ªzI m'ýþû|úÛ4ºâŸ›¥uÍ×ð™:n­@%·ˆ§­õzi~Â_¨âª9ôš*P4…­»ƒþU–úÇ4~U23+¹ïñANÈsüµ©¸F)k‘ì5I%uæ“:o¿›ã…>'öøñ›¢žÎv¶ÄK¯?WåÜx_ý$M߀‡Ç óÞŠÂM7ÿŒ†ºP½tnû–‘äÒ¸4_ËZÖ>è'ôaÆïgc “Å™I\¡Ó“ì#’lA0øÀ÷™›8MµRCíÝL´sÀ~Ïw0R§1¢±‹Á+ŠF´o7C«/S4ú竸N…ŠƒOT0 ƒ»]T!p‹leeÆB–PDpTãJ TÑLÌ«”C½ôïë&¿› &{èIt¬Ø<ÿÞ}“çËQ\Aÿ|”§Ã;Ù;¼‹ŽÈõ•óf„þþœÈ|ÖÎv@ #LÏŽï û D=l| J5ÓsÉ<)ñU°1¹åkY`Fâr×7G*±5O²9:ÈbzW*´oê"ÖQ²“z€ŽmÏ@d躬s¥–%žòÝ’Šƒ·ÎPSØñp„!EÃ{‰G"Öåå{ß ¤TbÞ ùcƒS£5,W°÷I“þnMÀSÏÆé?Q%[p1|>ú=´EU"Gñ¶Wð$5.µÁüAž>mÝ&ÛîExÞ[@QÛV©T¢˜æÍ•4n|  hï–+ h¡cmËE7jj@jðŠÏ¡6ÚBW]ðD{èŽö\ñ®pßvÂ}Ë0Bî<{ÖµÓy'¸ÅÒÝ–à~§†”+q*Úãt¤¢wý¸ !0„¾‚7ÓÑèÊønò\{/1_ø†@êÜL¨órõéÙFä3¿Ïé¤í*­™ý­ðVa‚©£ïâøh‰ªL=ƒ'Þƒ7èÇ›Ø~Ñ&‰þ>ŸÚ}•œFtp·re E'Ô¾†Pûš«8ñU»®›ÕÖ¤tvnZaè·ÕZì]í¿B·{C:;ï½ø=SRž+ç3j°uÇ•A0arß^óŠw&>|ë{ضÊÜ\ííOÓÞÞÛz£‹‚CXÎsßúv¶oZW°¿«¡¡€«¿’wß²gÿk³"qÆ®wØÒÑ\…Õ I¾Ö¬Ý…ßð~õßÉ—"Þ9ÀرCœ·Ñý­¬Úú^C¥Ñ®¤•ʰC½=¬C¡è$×>FlÕƒõ7ECÕ´†ƒÝE˜¯kÂ|™a¾aÃÔ[°¾›@¿ÝaÎ÷oè`ÛÆu7t§Ö”ÛWÂ~ìÙ¶²kJQî(Å<ØÕÏ EÜ^k_Íð4èÊ”(•Ö´MË|E`~ªzkPÜúm s‹‹<°¾“­×6aÞ ^]×›ñ%¦¬´e¾ókiK5a¾\0ïèx†õë÷Ü2˜4wËm(ª´ˆ²ÈƒºØº© ó¦4¥)+pÀÔê|æw(ÌïX ]››À¶Ê 7á²°@T«²{ã [7¬iZ(MiÊ éps“ÈKŠ÷4åKƬ”au«‡½;ÖÑ–ŠE0LNú¨VOïåó Åb€5kö. 
ÌïX wvtð âÁvpSá#ä餷³µ ó¦4奣½ƒ1±ÜæX\3 ú7Ò–Œ¿2˼½½Ãxh¼ƒ˜× --Z[‡—æw,Ð>ë:xkɦ«kIªÕ*¶m¯ðÆó6Sw¸ø¼&ëZG,Þ¾RÝcš>::¶4îK±¬ã{‡ºÜ}c5aàº.oz˜_Ž[8ÊÊ,Õˆ[æ¯6w±ª¯‡æ™êÞA͹»CõŽhhý·Üë²á¢Ü] ³ùUÛiÈaª‚TÈ×´c+ÃOõ5ôÀмoUaŒØM?m#KÕv8vvœÅB‘†í-» ‡ë×dãÐà-)c}£„Je˪6äkšÀ0â7åŽo( ;Þ=5É‹'¦˜·Dcô¤Ä»0I›WåÙÇ¢/â½{” r…ò]¡Mi\˜¿yè?«2j¦pÕfæî—Z•Õ2áñƒìôUXÕÛCÐk~ßB’Éclì}¤Ì5œ&Ÿ/37'Ø´é[ 5~æÿãx†ƒzÅpŒ†!ºt Ξ¢œÓX[­ð”s7[ì*z9p%R3±½!¤Òô±6å`~ø$?;Wc$¼ Ëð5åK­Lhê4§‹.~ïWäϨÃ|bâ ZZfik t)aq±Ìyl[Áuo®ƒRCýÌe8h´QôÇi¤Ìž"7q–ÓŽŸ^q»ƒ¥ÄH"yè÷2“(¶‹k)ulcvãTýžëV¢œ!4~†Bïœf‘—††ù[‡Oò³±*#¡Á&̯æçΤ9¡%ØKî+ùÜ\æñø,íí¢a†¥”d³eÌS./’JÝ|åÁ;èŽ+y÷ÌtÝ27Ú(ú s—ÀÌ)òg9îø(«¿;•‹”(…Ó´½óLµ—™A9ÄÌŒÚÿ")ÕÏÄÖq…ƒV\@µm¤î£æ×׃”ˆZ½\D `û"8ºŠìR>…@Š\KOS‰7*Ìœä§M˜ßÌ3U“+våõYàÍÏg|¼óŽŽÆ‚ùy˼X\$™,P.ßå@wäÌ¥9¨7"ÌO.ÁÜOAõ Ê»ùn×Å?ö¡‚‡Ñ'¾O.¯ëhÕp†@º%b‡~Clô¢^÷ŠüÀ#̬ێºp”öžÇ¨Ö@JéÕ; ޽GôÜ ¬SÃÔ‚I¤ÌX‚ÉŠ\Ö]@ÒÍhìù)~6Ú„ù Á\K` U®¬gðR˜Çb óR©ó[•%{Çý¼eþ‹ciê­s×%0{9Ìïz‘6fæ,N°—Rø’~ØB¡Ú²Žª”Óï‘ùˆÅMÌ|O/þ“ÏÓqð·ÚãÈÖ{ø¯©)yBcG‘FœbÇZJ³ÉÖ$‘âq iñó#’_ŸZXÖŸÓiJþf{/-ñXsn—æcŽ4a~í0;Àl&Ã1½ó¯Ò2Åfèìl\˜'·æw,Ðë0ŸáÇÒÐ[)ø“€hˆ AcÀܽsá^¾ŽT äg7·”€‹9?Ž®DÈ÷¬Ã x(toÁ=ðžRb¸›#¤öýš|ÇŠ[(Åøs&B¨xÉ™`'UÍddÆgÓâYþ¸X‚&ЗEj¶ÃÛGNòóÑ #Á,ݲ™Žø…º§V!8vð6„94‚r—êwæËó;èRÂÁ©~óÉQN‰8®*ñågfSyòiæ§'9îøîX˜g*6¯ž£pÓÞ$GçËõU*T¬H uv£R¡¬ùë.q)Q*s¥ŠkƒP‘K»D*:Õq)w?À˜‘ <~˜À™7HùÓ;ÿ„Ú…7 òFª¶2c^²üM‚,£ŒN§yîÀÒˆ”Ž7äZ`P˜çt¶ú•Á ŸŸæðá×ñù¦1 ÁìlãÜIYV' T*Ëó;莄±ôŸä$Ó^ ¹0ß8'd ©…1ÆlíÎÍ3/ffùÅǧ9©ÆoÒ[!IåËÐ bûf¬ÃÿHêÀ«LmÚ‹åõ &H}ô¯æ*æZ"¸VÏÂeOsá Šâ§â1ñÌž@'Êì¶ï¡Çé~ùïLŸa1ª€´R^yr”ný¿…¡4ËÝAR*—ùƒÙÏY_{s0®Q†òû™P¯ æss9lû,~…ùùÆß|Þfa¡BW×òÀüδЪœˆ¬bÔßÙX3.%›K‹T­;;øMcžVÝäÇesm©IƒX©-Lo åðôO}Œåõ¡Tó ÇI¯ÙN!ª²86Bü“Ÿ8BÏNQêÝC1–À{úcZŽ~J,ÜŠ++èJ„\²K×Ö4žÌèÙ¹ô<Ó À»˜-@)5Dvàª~ÿ ˆ–O Û0›•}¯Crù<5Å&î‹\ß=ª”T/Y-ÐÄk¯š¢ábå_ep°Bww©á†øÜ9—ÙYÉr¶‰h–IjÊ#ŠAvÍ7)·mÆ—C³ll_’RªŸš?¦vÿé“èÕ*vÿŠ-8ºA~ðœ@'žÜ(: ëz)&;‘n;瀕]Rl.¾Ó¯ÐõÁKØÑA ©!°³OüŽÐÔ(çîÿ>UŸ ®‹nÝ ¨­w)ÁuÈzªPÀÊ‘øè9ÜÎH¯Úpe @S>WŒd_ö8OmÞË`¢§Ù´¨)Mi½)_íéßE-fðdgQ× S‰¶áÆ @]£ë§ë¿ú«¼ ò}‰«¬t¥Î-\qîWüä{wS›ÝWwTgIz'µƒ±ÝßÂò˜€Ëb÷¢£c«†(-yÿüƒJë2«wà¨à{‡ØÙ´š…íM]u?2»ŸäÑ×°sU\¯ÉbǪëà8Ίt’Ó´;w›—”¯–ö³p¨È³ƒ°®sE4;áÝ~¾¼êÿnJèMi¨n<þ"©Ãï`Ô\UA8µø03ÛŸ¥ÜV_WÍãŸ+Þxo梞Ï^mÙÂLb=RØÄßÿ%‰é*™uà–ÇIú5è!Òm>RŸüR»™ïn!0þ É,¤bX¡¬ä 
«íiªé,!»ÈO?Uñ›YÖßÓ¡;|móñxüŽ]Bóѯ¹GÈ-ñÝòÃÜ3¸MQ›{ë6EQ.xOš@o½)ylGŸùˆ¶_ Úûãö`y Œ¹£´~ü<‘³‡)‡v!í¾Ì(ZÍÆñÆ)%:q5¤ƒp*žuU¹ÔÕ}ÓîX‰Z« ƒš/pùó„@jJ1Mxâ4¥ULfh;¸kñNJpò8ó©M»Š¨•‘fœôæg‘ª‡ylÃK5Þ‡å-q(¾[Ñx>“›w‹7·€§óÇxôy¤”,r¤3é[BéÅ90 °y¯ršì©"Åj™û×ìÀÔŒæ» D¦i!Râ8z³ŒrèËa[H͸\ù;5„Б·º ”àX(ÒE ©êwot´´Ž~ˆiô1¶õ1ª¾zJX¥mã{;Pñ#k³´½óS¹*¶éA+.Péyˆñmaž{“àÔ!zô ¥¥>©¨×B±]j¦‰!QÅuØS\‡ ]ŽáÁ¥ŠQ̃Œ]|ž”«ŒâTЪ•À’gA1°<~‚•<2ÐÍô=Ï’<þ>íoïÇ5¢d‡Ÿ Ó½ì}5ÕX‘þîR·°›˜ƒËgöó“Ó/ó×Vd 9Ò¥ìu@@ÕãrHæ¿{žR­ÌCëî%è |é~N„qq¯K‰°« š›úHaךù9ûU‚k!\©ªŸ¿Îœ*½ž:yÍkQÖc2»ž‰©êõ8‹Öõ áØyþyÊ…4OáXé"…ŠTµ›ÖOB Ã@mÛ¸îÊäûK)ql‰+†&.û÷ZMb·º Dº`YW‚ª 4õöªöØØ@—#s˜Öƒ0¿åY Ñ%ÅêI|ü+ôÀZ¦×Üsºê‡ßô*­«pUõ²Ãƒ9ý É£ïáÉ-àšrƒ‘X{wBݵѦ°BÛ©™ÆeÖ®lÖß©7ˆLg˜|è&›J8ñkº?~ßÀF´ô1*ÓYìÔza¤Ä;ñ6ññ<Ö†HMaÚߊ 8òÆŠ )R\h˜ê„º(ÆDN½E¶5EÍë$Fú©‘ä{Öc›Z~á‚×Ëyls)k8á!Æ÷nG-Í;ðïĽJ>ñµ%%+?÷Ày>ÀÎ=ð¼׋”T\‹™P‘…pmåß/À6$'óüÓìKd­"O¬½d0þ¹Š[Ô²Ä<‡©1µqR¨¹St¼ÿÕ^€žþ”öû˜ßü-ò‰«Ä`¸¡¿Ã_J0³qîÕ ;GòÓ_£z61³vãÒQsIgØe|³ã”[/7"¤DT3ĽDhz ÅU¨¤Ö3»ñQjÞɈ`ç‰y™ðø)4GPI®©?Ï£á}‹øÉý¥Ž¿•ù5Om¿¹l!EQƒ”Ëå‰ ©Ï‰Ë[/,0©|ëÉ ~­Þ³!}¦Ä¯_)²çékÛµëB)%…tŒ¥ÐÛ®_܆RbWÞy5˾#U UIªËÃ_ÐSo›íÚðº°sÏ&·á™KWžÙTbÓã1@â:P©ºH º¡ kb©_“¤V•ØŽD(ÓÇåý—9í|ÿ{aÂ傺y?ÇïÞ+³ëþ-^—×_XäךÆý^S»=6[ó]ºxϽMòø>ôj WuPËüô·øçÓ¬p/™áíN¼Jòä{ˆhîæ')F¢õͯ…Y\ýu*m©y=”<%⣿A«”ÐÝ7¶B§ëÄ8~O¡D)Xrû¹™ã˜UåÂ\0­]g‰C‚jûFîÅ?{’ÀÄAÚßú'ÔÝuÞY–ï¬PꜳÞ$ÑÓû œÛ‡ÔCûfap'5¿—Úæ?ó üû@ñ0¿ñ˜ëBb±Ø³šÈä bÇÛÆôö¨†ÌßOjü,fvIqº5"ÿ•öÃÈ÷ßËì`¢’&rü :³‹Œ=ðª^áTëJ¡âèžú‰zjœbUR"U½•â$©Ÿ£ºêëd׳¬ ¯_õ¶U`:^æ¹Å÷)®ð¡Géo»JZ›0)µ !N½'›§–ð㟥šXÈc”*”ý.éÓÔâ먙’ÈáßžEµì`éMOQšø§*´RHÅ œx‘ø™£`D¨LôŠBzý^ŒôAÚÞ=‚w1ƒffÍ6ÂÇ_%qân¼wã”CÁ¥ï§RI­g¢å!ò­`eñ¾…QÈ-z—~SÁ?~ݺ¼_¶k&É·÷_fõ;vf¶ö±0¸)+˜Sï^œCõ‘ÙøŠ]±u§v–ȧgPk5¸   !ðûý(ŠB­V£Tº}rÈ­¢Íï_X`ä¬-!œ4yìkú’ £ó¼ôV‘lQ¢ «·8¼úFŽ aÐÚ£óðn?½~•—êôðì÷|lZk¢º.“‡ ì›s°\0›.÷•ÅÊ>ñ´Ù¥¦N _v5š¶žÃi¹Ÿé ëðŒý–þj´ gá}bgO’Þþ*F‰ÈÙDUÿÜù` ßü …þû1ÆÞÁ¿˜¡*lüó‹× až{ƒ–‘ƒ,¬’bP%vð?hÝo2¶û ´Ü(}~„–ƒoPê’lK˜èáÿ y®ÆÜÚÁ-a–ódÖ>N¾ešö^"ܺ¡®3Z*,öm£\¶Öªm›©:‚§ß&0} O-ÀB×ÀeWJ5‹òìÂe?®_G9Ù…¥š—¬ÝíT–ŽšÛ«PêiÇu’ µc,œ 1v˜àÄ) ][©n¾iM(ÂëõbYóóóôô¬\+âôÙ2¯¿šÃ«Õ·]æ\™‚U7«÷¿¾ÈK[|ã™mÉKÿ9ϼ¨ð—ß ðö+Yæ>žý¶Ÿ™Ó%ÎN×0vxéè1 }lÛèů]<´¤ú½$ɉO Œ«ptV²ý!ÞÛ(þïîz5‡oæê¢qÁÍåÉ/PFbÌŸÅWŒ­y˜bnÃgë¬ÏµñÍ®Ýlè[ƒzÕ}&p¼)бþ©S±*fYc¦kþÅOñORİ}Ì&ãøG^À°À6zÅÁÕ 
üSGÑk]¸®3æG1d€ÉÕ÷Q ˜P8MtæÀÒš1)¥Ö1?°Y›&~ôôb‘B8‚T½Té+cv„‚° xÓÇNžDhÑ+j4¸Þ³;þ„ôU~Ÿül ŸP@J´…c´~üŸ(þu¤‡6Õßë:‹„ÎÆ,–È·ù.¹ë¿q‰F£„B!&''I§Ó8޳"õ$07kqø`‘ó^ïrÖ¢P‘¸–Ëé#eâ«‚ìÙîÃ0w¦È¿ï«·x< ¹t“c:ÃC~Ö„U¢~ûNX#V?“è"¶ÃøX…CGÊ…€O¹­lµ»èv éÝ?f!QÏÅNžŽßýïx¨V!4C¯o͇«¸h–E¹g7“Û%‘±Ã´|°ô(éíß§ò¹«ËF­V¨´mc,µsfݯÿ+Ñöu̬º;ã„J©w/£­›0ss(®Ä1CTC ¤¦^fwü)ÙìL½ò›7J5EPx”jj=z¹Bà ¶`y} wrö©6zJsu¨JßèH=Àü–‘Úˆ£©óÇhÙ÷"þÙscQÔìY‚S§Ð,+ÐF¡c5¶©ƒSDØ+é&”(V ç³¥`—âÔ|†àì ù?"ß6É8áoáO2ŸìÇQÞ™#T¼›Èõ?ˆH"k£¸ŠŽLakZãnn ÁšÉ–b'϶ïeÛÐF¼ú4ÕÑüÛú‰=Nè\áMR‰´AªàøAÇ vS ðWJhÕ"‰ƒH!"L®«í²7Õ¶ês§Ö=3¶?Œ«kÖ»ã ×çT¨ *ù¥œ¥D©AO0{ÏŸ1·!CË{?!yèu ÉïcŸ®S"8º£ZýÌa¥…\÷ðg‚s%Æì'´í{ áíerÛÓTýpj¨¶C¡o/Åî{ñŸ}•®_"عšl*uSSbš&ñxœ©©)æææ(•J„BËÍ(€áíAþëã¬éÉ#9þÏ¿_¨¸%ž>e)ÎÔïW©Õl,MåÑïÄñ¼•gäÃï¾&i ð£g‚Ÿ;OÕŠ‹«ª<ôõ»¶yõ¹9^ùÝ"ë‡S´šwè+j)JE»Â#åE‹ÈöpÝ2F>O1àA)¥QAÍô¢•°âï»-?JÇëÿÑSŸ2Ó£×-Fyù„뙣´zŸìúgÈ%bÔB-HªhV‰»Z„ÀõÆ({cWß’š—j¼—êU\öV¸+üÙèTTsKJÌ©8·ÝDfx+ŽQŸçZb“÷·ƒêGÉ£ëÍÿ¢'©ú}„O¼Ixú~Îíx˜àñßY˜`ØŒ`):ÂuêVSEu¨CP¤ƒ¡*õô¸ñI‡•¿°î¤[ÀÌ/’/ ’²Ñ Y»¶T·ܦè8ºŽÇ®àû™ÜýCb§÷=ò< [’~št÷ò+PE̪ŠYY9¯SÕt¹4`KzÛ³”^jÁ8Â* Ù7Ÿ±`:tˆL&ÃâââŠÆ E ª -E¹«ª¸ðï‰¤Êøtš š*™±ð5¼ŠËBv?ãá'%#æø§Ëszg½)Ög3ï\Çeßk œ¬h|ûé0~¿F"¢ÝoQ¶oU«Ýí ©%V“mIûô_ÑÒ½xfâÄÖ’O¶à™z‹–£G(u¬Áe4× Šá×Y ~ôMìõ{©ê­<_QÄWÒ9óäŸS y ýÝŸ~€wÍV´Åqææ²5J ˆD ÿìkD2Ùþ(¶ª2PçÛ>†[b74›Ò±yñP–WØÑ~б@Ä IDAT±ã¯Sˆ=C%×Âæ5R§F™[½š©c,¤¡³ae1JEjÑXYT%ÆôÎ?G©.ß÷SÇÞ'Û¶‰¼²sÜ-ƒ¹Ê@´‹ïfïÃqV¦Ð¹Åi~=FÙ瀄ö|û•5<¹j뺮µ ¬ÀvSñÚ$g§XØÚƒ`…º±Ä<±,̵÷#ƒBÏvb“/Óúñ¿SúðO Ú¾‡™µ[.<«– ìy‹äG¿Ä‹à:ú…+BàêA¨Íy›™u÷Só-uST<غ 5òzn K):wŽüÀÓXúŵìzd¶~ÌU¯.Mƒ³ˆŒüžèÄ8óÑÕÄF^ª_儺(ƒèói;G)–Àœ9Œ¤Š_«ºü‚ÿOÐÚÚJ0¤X,211AggçWZ{_Ñ6nó³ÿ7~õÜqÍåÓ“[wÅð9.¯½¶È¬«0Ôg0?^%×BA•G ¼½ßd÷F¦&ŠBÐ/8ô‡ù‹„Ž,3´.DÊûèö†ºão'³þa*ÞK\r 7ð e_®§…©ÝJäÌ~ÌÒ"µÔ2=Û©øˆî{ÉØ*þ…Y T×~ÅmØ¢ÈÌæ‡ñV,” 9—××Éäî?#zf?žB;2Ìø†-ä’±[Âóˆ[!î3ðk·ÿ¼ß*ªf—õ^«LY÷"Qp5 a—/ †¥¨6Fvh¥æ÷×rÑ.¨}€n ò}{˜Ÿó½A ¶“﹇¹õØv Ï"d< l]£ÇšcKk˜G“74¶m³ÿÔhýkyZ™Ùô8ïÿ'=¿Ÿ o;wnŠJçýÛ†ÑûÖ‘8ù"mÖ¢pSÄ™îFÍ#õÑ‹8‘>*~Þ|J´[÷c9þ•`r2yË­óÕ]ƒô¯`°Ý«¿É«•c(Rоà)ß6ÜC_ªëº`!Íó¿®’MµÕ½vž$s›¿EuQ’K¤@ªí»8·ÛOhâz±@©s‹›qUrûN2Ñ8VdÓ[¾Fxr ƒbÛj…q¤ê!×û ŠÙYwÞ©^ µRIú˜Þ”Æ´k(¶s)uÈ®{|ï˜9‡* æ6ÿ‹ý[/½õ¸/ÿ¡+ÔÅB¿@T0«KÐ×#ØÝ;ßý#"£‡1*E*í»H÷l§øòVÍ­²H[ÀüBïT<§µµ•ãÇsæÌÖ­[·¼nw!Þ$®hè— U nðÈ#aZC 
]]aþØÐ8|²J¡&¸ÿ©8Û·øðêðУ!>Ü_&=caFL¾{¿ŸÁ6àaJF‰JÑÅ‘½kwGø¯Æ¡cUŠ5ÁöG¢lßÀ§‹[ðSŠEå&ã­èB`{™Ùú¤zÈ­yê¢ìfncwÝŸW¿üª{ÈŒ0ÙÕ‘åH„˜ßôìÕß$¼äYÜ’»ó˜[b·È7ÖõÓ4oëaüüm3Ëö œv‹ü¢ÖšI%ÑMüô¾üCÃÁ¥ô8 ÿ™÷0Dµ^–u øÂ® P‘B`%7pî‘AÌÅ cÐòþ/f÷*!qç‹Ñ)7R‘N|Æ Ü³—Ñ@'¡ñ#õBD¾vfû!×9„«)Ìmùvä#¼‹óÈ@“Ãß ßÒ 2Az}•àÌôR‘r×ýäúîÁò¤·|ƒÐlº~G¿,Vº‚G]¹’«š¢bH•ÖL§Ã;ylí}´†’×où)ùÁ'Èra#L²Ã_'{ÞÒ•¶ÍTÚ6_±‡KÝR”Ê þô8µÔr©$‘ƒ¿Âñ§°|ŠCO\\(Z…5_»`HÏmùîçüH?ÙU]u t†j’_ý5ò«åU,yÓ²‘™–Wê§/ðêt¸y¾txbÃ>óóCº½^/œ:uŠ™™¦¦¦–Õí.…M»C<çK¸Åäß0—~²`xK€á-˼ ©^/_ïõ^V5ZAë€ï÷û–ÖÈ%™ºÊú{B¬¿çb¥é[ñÛ„PH§c„ÃÑÖÖÝú ù‰>S§›K7õ—ý;_Pbôs?ss0¿ÏSà¿ÜÓÇCëW¡)··û>óì½›–×ó"áÅ#£üòØyËýDGÿŽw~ÊÌú½Tý¼““:ò!ùµße±uwôMB玑K† ŸÝé£bJÂG_Æ$ÁBÏj²½›ˆž:ŒZ+á(Š•ÆÌ/`G˰6Ujñ!2ñ¡«¯!#Lvè²Wœ(tŠÝ;(vï¸bY¢0°´¹ SuCsqíÞÅÞµ÷õ†n\‰^ís_øoW×®ÄÕ$±ÃÏ?è"5?éõORó\¥Ìóezf…t†Ÿÿ¬ëz—¤Ã-ðí¤Ã_íbc߻Ѕôöö’J¥˜™™add„îîn¼^ï²­ñ9ó÷Ùèô/úì•S&¾`øÄ­²Ñ.ƒy0ø7>†×{s)„ÍÂ2·¹ÄÜ{Ì<³½G6 aÞîv!@_f˜(’ i*_ÏÄž‘:ø*moÿ=Û'»æ›¤WoÁ¡Àì|†ÈÈoˆW1³íëTýQ4¯Ÿè±7z ©¨T»î%Û9ó‹ØÎ!Z¼Îäö§WöÀyÍÇ+?ÒHÒ•hçþ§Ø:´€î»=¾”ægnó÷É®ZDu\#€åõ7X‹¤ÃÍóLÒå¯v³©ÿÚÊÃÆb1˜žžæÌ™3ŒŽŽ2<<Üìcÿ90Ïd.Â<ÞüÒlëí óòÌ{ytãó¯nw¨T:vp.µ­RDHpu?¶ç|Q™0sÛ~H¶”Cq]3ˆcx@@qàaÆ:¶¡Z5¶7Œ«iàÝÍé§W£ Qó{ w‹¾js¶®Y{ õvk—ªèØÁ$vCŽúy˜Kþz×0›ú®½Ö»ªª¬[·ŽÓ§O399ÉþýûI¥RÄb±æb¾|‘ÉÄ ž`ãÆGo Ì›@¿Ýandù›m½<ºiSkö¾TJÝ¥û?_ ®Ñ+oÇ{å!ÁÞÚÀ2¿"Ym§W¤Ûš"XJ“»s•©¢4±_•e~½0?/Éd’ 60;;Ë©S§Ø·o{öìYV×ûóPè 6l¸u0oýv…¹¬°Á¨ð×K0÷4aÞÛXQxz ÎŽòËV‹þ3b*~ÂÁ`sð›rMÒáæy¸ÕsÃ0?¿Îׯ_ÏÌÌ `ÿþýèºÎÖ­[ñûý!îb¼ÂÜ\œ`ðÖÃüŽº ø…KP85×BÔÖøÑÖaÛ¼ÞáøÅb¨—ˆ]³f >ú(~¿ÿ*Ö¹K¥Au» ë:š¶|^£;èMiJSšÒ”Ûù@%9tè¿ÿýï) Äb1†‡‡éì줵µ•P(Ô</ƒ4}ºMiJSî*q\ɹÙyœJ±9×(®U7èíhA¹ONNòÁP,‰Çãìܹ“õë×cFs0›@¿v±—³³`WïÔ‹Dh½­èM—|SšrC0gd”9¸À¤íÇM›æKõŽcœ;ÊÖüÕwž$èû‵jµÊ¾}û˜˜˜ ‹±k×.6lØpÓýÑ¥tÈd&)•j 9ΊâHtÞTµ¸†ZÍ–ãòòÇÇùùH™œFŠÆžp" #$µ*?þΓ¬mñ75MSšr0ÿÙEÑKÙi°ênËsÛ"´xˆ±E…¶ ò¥É–RJFGG9~ü8>ŸU«V144tK`~úô>r¹wËh D.)ab¢Äؘ˶m?bxxmè5ÛååONð“‘g½k°5Oã̸kžabAaUPÃr›Š¦)M¹~˜-Á¼›²mʗ¼F0}˜ùÙINÙq ÿ¥Ÿ©T*:tˆr¹Lgg'ÃÃÃø|7W²×uΞÝG>ÿ6ýýEÂáÆ9„I)™˜(Q,fñzUåæBÚ„­Ù/r‚©rÖ»ª!a>7=ÁH-‚Õ {hJS®Kì%˜ÿüÀÂÌ›eH¯æ ³“³cT¸¶ÈìééiΞ=‹¢(tttÐÑÑqSÁou˜B.÷6}} ódz(J–Xìæ3vîx ×a~Š©rÆ3ˆ­5PyÁÏÀ¼Ú„ySšrÝ0· ó›‚y•k ds‡“'OR,I$ Þ”«ÝuFG?!—{‹ÞÞÆƒùøøy˜/–nÉíÏ ôó0ÿéH•³žAlÍ×8»ê30¯Ñ¬xÕ”¦Ü ÌKM˜/ÌòùèçažÍÖ-óH¤q‚›/Â|EÉÞR˜ß±@¯Ù./í?ÍÏF*œñô7 
Ì™ž¼£`õ\ÕwÎ,ò÷ïf9©v¯È;Ü þcŽíÃfIÖ¦\³SsüæýcLWøÌY|åÙæ | ŠsœËd9fÇ©^§Þq‡©©)lÛ&‘HJÝXùX)ÆÆê0ïé)4aÞè@—ž™äùOPPR„­Ñ†ÚTFežÑLᎃù…Ö %¸ֺ"ï Øåó4åΑB¡À[¥£JTšãq-ÒWžbÜ\7Ìjµ“““!ˆÇã7\ .“™àر7sLM ¦¦§—ãÔ8w.‡ad‰DÊËâQ¼ã€îJHç«|dw2®¶Bƒ5\ë/N3Vó5ï̛ҠR­âŸáE\‡Fà’VãL©-ÍA¼FK)%Î`cßÐÇ‹Å"Ùl)%¡Pè†ÝíÅb™––)ÚÛË 7Äé´‹m[¤Råe»¼#]î®”¤ÕÓZªá6U«ðâ4Ò)Å­à›?Šþ4šmc{[)Ä×P&n¬¨‡tÐ*lO˜¦½ñeÿÈF²gyb󴇛p¾]e~~ž|>×ë%™L¢(7æ*—b1›TÊj¸1²,‰¢ÈeõiæA5eùÄ)’8þsS'±|­X†‰oþ0±Ñ×H¯þSæ[ú®êR¢/~Jjô(éµß¡fÔKP– y²ÙÅ㻦ø}¾fÞ HÎ.òŸsï2¿?Ï7W?Ä`KïuYêMY ;DR,±, ¿ßO4Ú,ÚóUIèMY¦]îâI¿Mêì~†ÿ‚™îM¸ªŠR™&yê·˜ÅY·µtŠðÌaôZ ÇÓB®u;¯Ü"¡éðåf蔣kɇÂÄF_!9vh#ݽ‡²„çÞáïÔpW Ów þhïVBf³žþJH:Qá…Ú>x¶ò›º×^Sƒ¦¬Œ¸®K.—CÏçk¶n½)tæ ¿†Lç&Ü¥À®§Ù5?BH Q9CÇÁŸ¢jmÂI|S¯š›`tó÷0¦^¡íÌ~ò©MHgŽøè+ÈÞÇ©ù°®¢"…³e“ßqP—ýgùÝ"½¹ãT»pJ¥¤bU)V®KY©\`1bñFá(ùc%¾_­°}`ºÚT_·ƒ8ŽÃÂÂRJÂá0¦i6¥ ô¦4еÒ<ŽgŽz h…@ª>¤tñÏÀ_rÛñ#òÁ žHˆý/á+<Õy4W£ßH!”@­.b{[p«=Xó:‹­q4õò È¦U°¼Š—÷Žíã¹S¯S2W掳èV¨Å]P 8|X¥túyŠÕ2»‡·ã5Ì/vÁK­8‰ÐX¦çÂÚ4òS¸ÞVl})øÔ­af°ü¸Wíd(QªitÇKÕ¼zü†”h•i„Å2<ŸùY¿ FùüØ)Qjs–CÅŸº¹õ,]ÔjÍ’TçŸ%ë1(å94»†£G°<›Þ7RJÊåz›a¨ªÚÜ,M /›YVšFQƒÔLÿÅÅë”ñ2TƒHEÜІñ›A¢Tç0«UÊÁö»6BÁÕ=ˆR!].%”õ±“z% F„šÇ B`ûÚpe ö™o{€Åüùï$µ…äf2=©kšoÜz9ƒf׊å‰ãèæ ÎÁ5(â»ÇD'ç”8šb!ü´°PõºÐ&)ÿ–\­À#köò>ßÅk爟ü>e5£ëŸÂUzî}þ#¥®ïrnh7žÌÛt}éE.’¼ò9®Epê-Õ8Sƒ÷_úN–Ô±FõÞÇøÐ.ä…#•"Ó'(´ïÀ2Ì«Ž­¨MÓ¾ÿÿ!lÇ8½ío){n°w¸[#0õ&©3¯`ÊVÎìø[*¦Qÿ c¿%uîSTׯÕ"Ì÷“ùÖÁ›ºeYH)QeÅ€.¥$7gQVZ¢ê…5àX.“S©vC»þˆ )%®z­ã:. s6ù’ÄRIDÕÛêú§ñîHÿ¦±Ž±5OàŠúæñÌ}HÏÑ7˜Üò¿¯æÞ™0DœljÕU½¨ÍÑvàÿ&VT9½ã¥èópW‰Ð©DÑgZœe!Þ¶4vÉ7ðÚl¡]Eq@C±Š(h8ŠŠk¶0½îB­ÍãË|BëÉ‘F‚¢X‚ìçG-‘<õáÌ(Šk# ÛÓÎ\ÿ×XHõ_?Ô2¡ñ÷°£›(ãMà6[—Ïñ“ôËäk%¾¶öA¡ØÕ¡®ù©ú‚$&bÔ¥bꘋÇ1KçPŽ£;»°T‰gþ(šôPõ„v£”Fu$¶'NÍ¡Sˆ¯Á­EqvOa! 
jÿ?{ïý\וÝ{~öI7ç„ 0ˆ”(‘”¨Ô’(µR'w²ÛÏök‡ªùeÞTM¨ùuæx5Sož]Ïök·Ûí¶ŸÕêV»ƒØŠT$EŠ9DŽ÷7§sΞ.Ä R$àùV©D\‚çÞ»ÃúìµöÚk{B寵–AÓ*è…Q4³†éNRs¹ñO~@òü!„;Á|²ûÓYà²Fxh/ô0ª[½zŒKùÙãþòŦ”hù³§OƒêG-þD+ž#9ô!¥ÖgI7¯&8ôñW)F:¨ºŒî)%–e!eãóÝh†ûõÛv›7~™f@uñÃïFðëï?u®Èûi–gÿ¬‰mÚu—á£E¦,•­[ùp QÏx™øàk”MTÿgŽ_|×Üi\–‡‰–{¨y=d[ï'>öOx‹iª®æ›ö–?ùoÉ€T 9]~Êß’ÌÍ™Xvcñ_ÊYLÎ4~Æ4E€mJf¦êäŠ6šK!Õ¤£VLö½žeDÄZtV'<)™(²÷í[öÄØ¹Vãã÷ò”ó¦ Æbîš=t!-„m-,d%¶.†ÃôÜZNþÝR‘Hfû¾Í|4Jxà_HL`«*ªQï$ÒÌE7*øç7Rõllxþ GŸÿ˜èÌ(ù–‡éÓw©ÉØîvÆûÿ„ø…×ÿš˜-©»“̯ú:é¶mXŠÉDwšèÔG´Ì~ˆ­G˜\÷eo+ØA0ý&M'O‚ШƷ“iÞ€U÷Rð&2ú>Uo’Ëëïh…³„g‡˜ëûKÒ-Y &7ü Þ|–šËÀ?ö*© ï#7ØulO'ãë¾IU+ÒtâÇøŠU¤–äÚŸ¡Z?M(}ÕÓA!¾+rhzǬÔa.RãÈØY¶Mö‘Š$Ñ® ÷ L7øçGÈúšñæÈµ>…gì7ø²Ó”©ã-–ɶwãx‹È\ž±DÉ£?ó#âÞ$þÂ* ×ªÙãD§Î‘^÷çÌEƒÄOþˆØôlc¼É2Òh%Ý÷êÖ]ÿÑ"ŸXO)-˜íz˜ŠÛû©hÞ‰¡¨G¶1ç/¸ÐQ êžUÕe´ÂD^PÔýT5/úü勽šGjÌ…Ðl=‚E WuñJéÝŽìviK, LÑ»uYéÈìh™Ÿþs†™Š@P“‚GŸ±k½ÎW3üö£*†.°ê6ñ^?»×(;YbZ³8uÎKKÌ‹GkH†Î–(û Ö¶©ÌÍÙôÝ$×Ñ'ä¾Ä07ñ¿NWe¹Ðöje—YY%<´¯•àÂæ? ¦h>ú7ćߧè}€àÔÌÈ“ŒtßVÄ[2©„[)FºQ‘iÚÐÁ]\IÏ’¸ðfh3騋à] tûâ&7t2S¯ ©èXš{¶*ùö=›v£Ha«A-²™Ñ`/ªUG¢`k^¤ª‚{=Ã÷ý/(Lõ̥…Yq ÍöS wpù>ˆíJPpÅ•qbƒ{±Ã_a´÷ADñ4]‡~Ltj³A“àìókÿŠÙD O檠î§úˆ¹öÇɇ[ðÊ"Žî –Jï|œgÚvÒ×Ùs ˜/ô¿¥JIŸÅ(ã²|ÌÄúÙðeÎá&šˆP †ð9‹»”%<öAFaO¥Žnš—³iT5B!¶ ÓmM­%1s`aÌ{¨„ú(âHÓ¦îÖÐj%¤ßhä•\ûW†Ú]V€ñÎíˆôë 6˾Zª\ënr×X8#¾Œ{øIÈþRÞD,ØC¹Sýö@Mgäù¯¥:êÂG(gkLæ%Ò²9ðN–³E¿úaœ¤Ëæÿ8ÍoæY×äÄ‘žŽò|€òl•Óã6©.½kÜ$Ã~vßçųs—¶dn¢Îø°Éïþ}žBÖ¤X…¿ãÉíî;¦6Â]t)Tj¾òÉMØ ÙžÆ¼‚·<‚°KxæG¨&¾NÅFÒBpjEyœRd5ñé7i«ŽRˆö’OnÄÒm¤¢ U[Q¯ µ¼†§æf¢÷dá£Æ´‘Ö]lv(ÖgíÑ Ûðc_c1 5/¦võ붸ú÷¢#B¤×È0µ6§T"Ý»ºËZewq ;µžb0Fxàgs}¢}äcmÈú­¹ÏuêÆßò™}ó ¾™z˜kïÅïö}ÎtSŒ­!~öáɶ§…ª7 Ñ"ç Ök˜þÕTÜ>ü€é‰Q´c+P¶2¯†¨éÞ‹B#!ö“¡ í2H lÕ}lÅçS¢”†ˆŽîÃ-ÚIžþ Z~_fŒÄÀ›Lt?BÍí^ðâÓÄ‹¯˜½bnÕk™Yµ Sÿ¢=pS÷!¬qTË]C1sh¶‹ºîZÎÖ…hRgÝF/žSœ\˜©`›6Ã5’=a:’*º¢Ò×ãâÀÛ5ÊŠBçj§>ÊóÅ:k{ݬßè%ä]U¸´+!]¯‚áÓyø™(«c‚×^šáÍ×rlëw‘ð9@_Òn¯†×3Ûõ•†7-%îi•èôh¤P.…¬„hLZá!Ýû=Ê‰ÓøÓg ¿BtâÛž½ö䬌݇·#qúŸPÊcxçΓ8ÿ{ê=OPõzK| ûØôÅ1e_vœB pÉÈš|s£XšÙàüeÆWŠÆvŒíjabãñ¥OâÏœ%qæÇæŸ`¢£ÍiÚOPÅ(K¸Fµ?å€Fª6[y¡í!vômÅÐŒ/ŒUý˜Ö^¢SirßÃTUõ!Ëûˆ-²«ÅÔÜT-˜¥,ÅÄVÊ®ù“vK½dLwifqç©j^ü³çÐkõ/p*u«Þˆß.ŒAÛ•b¶ï{¨VƸ(à+•©ú’XÚ%ó,5/•à*loýSчÖ§ÏãK a›Ûl8¶‰*Õðjêâ þÙA*Mø¦!Œeïò®ì–èòðècAüzö¼ûqcÁ¶A\–3¨°l‰­(ìz6FÛº 
'O•9òî<ûö—ùáŸF?kˆ*„Ò*Í:~7ôö¸xõp™l¾;£-îúsèRñR6?‡^ÛF]ðeG©ù×aË,¡‰“T£›˜Žn"аêè»Õ|ãXƒUAØV# ØF’ÙÞï1o6ÎÍêój0ó IDAT>Ý0ºqW´§¡€O±ˆhæ’¼_Àºô>uÿrÑ$±Á_Qu»É…›ÀÊ|™èlñµOPq»ñ¦Pb)”âžr‰R*…Z&™"Ÿz€|ê>bgþ–Df­µ°PÍ BÞÝî¹"Ú})žžÚLm~iˆ>UÎðAlˆŠÇ ñ’—V_íz-ÝýhÊ— 7 Lw %¿—ÀLžb¤µ±Îó6SqC +ÈÇ:@hä[$8÷-GþKUPke ­Ï6ƒÐ°•jxùФŽý5a_jÈ…ù-ýŠ=m[èH¡` ,kŠæS¯0Þ÷¥@¨±<Ò‚d[w_t ÜZ™`±F¶iÃ@GõRhÙù% š{v?©Áwq.àÉÎÒ~èÿ¥ÛÂt×ÒM=ÄÏÿ„ðaÁ\çÓ”W¨£¡h ­:g.T˜Íû‰ê6ç«D›]ø¤Å¡ýebÝö<祿;Çþ»³ó&B@µfS·$º²LWÍ«=ˆ‹œ®qO»Êùž˜Nè pÜ!wEo„K?5ÉmÕÅÍ|Ç£øNý†öCÿÕò1Û»SwúcÃï`n”z–r¼ŸŠ/…áO{•”/Âtǽ"'ª|óŽ‹“Ó0l"™)r©~,}qšZÞÑ6&4þ´=CÞ:°$ïiš+ T‹2Ý÷ ÔÓ/Óúñ¦I÷ ‘H-Ì\×3bkQ:"9ò:]ùÀ,c6’I­EZŒí%:ú6–¦¢Örä’RsŨx\ÄÏýœºö-jñ$w«¡°¹{ºÖ.ÙŽáÞƒo±¿:ŒÊúØ­màٞݬmëFבM­…™ÞðC ŹhËLãLõÿ%Ù äà ËßÇè–âÍŽ ZË„J¡V¤dÛž£ZbºcÌt¿ˆ»\ÄÖPŽá<‡­G˜íþ„ÖÚ˜§Z”éÞï"Ý]T=.†îýK SPýœíjì^Æ.*Æ:‚š¿ùÖv0%î¥eÄ1u?™5ߥœĨ”¨{[(›¶’äçZ¸Z!æÕ.îSß)R5~5a((šÂ½9ý³yþûßLãR$…²à‘g ?_â7oå úê%‹–^-)\³ÎÛ¯åx¹EçÙÇüÝ ¨wõû¹ÿX…_ühŠß¹Õšà¡=!¢ÞÅi”ÅðV>Ðéžo¡jÑK™èBPnadS‚¢Ï­lgäž<ùIÕ@UOLmøùì0šYÃÖ”Ã]˜ºA¦çÛÔbÃXž¶ËÂqWN¬zdcëb”Ý‹q]’EÚÃn¢ž;óì”°±#Áº–ð’½gºXgô•cPl|€zp#÷tàÎ`T‹HÕG%رp Qëx–Jt3îÒjÁ+¾ ÕÈzªŸĶ;NÁ¿zñè¥(åQ"#¯£éä"6Á©3Ô=ÔÜ^LÿæµÛtñ§r|3å/€±tÅÉ»â75 mO+ÙÖÖÏøåèzÊ×awRj™§šÊ|eÓj¼nãNŒ<øt”MŠŠW»ôý“«½üà]M*~—?û ƒ¡‘:u)H´´$4$_ývœþ 5²%ãÒÞé"ìD÷ĈvWÑÃ>×¥çê>¾›`ó`•\bM-ê"$Ú¶J©D¿ÉHîʺP¨Fú®¦z„|òÒÞ‘ém&ïm¾z‚aЉ«e»“äš?ß[“zˆBb1Ž9IšEžG›ê¼¸}-m¡;7‰EÐõ¥»Ë]ÓäU@êAʱ ×6ZŠN-¸úS†}!,{Í1õ`7Ù…ß÷Y-áx’°z6ÌWÚ¶óðºh$nûŶ»™\Óf¢ãG‰ bzº™è|²±/¿²b›¤Ô2{šÊüÅînذúŽºtEAS—‡¦O½®{46nº„¶@Ô ?j\5ß ¯ÆšõW#ÐÐÙ¼õÚ6ÌðjônX\lÚ¶J:ÝF"±‡––è+[—`þÃGúxxC»s“‘£»FÍßìzŒ6ÜKļCH¢RLí¦”ØyÑiB]a¥¯„ùýwÌWŠ,K%“i#ÿ*7îÄ0nÎYs€~§Ã\Éóhê˜w8“ê3¤ –Ä ªNó/©Ö­YK}wÞÍjB U}…¶úÕ0w®«½u0O$¾JÿN\.ÏM?Óúód>ÜÇnæ×”®Àj_=æÙËŽÞÂ÷³*túmtç*ô¥Y¨©ªSmw‰åÀ|)aþìÌç®èw¨.Âü‘^v÷w:“ê3ôè|oW/ß2Í¥zGýîÛq—„¤VK38ø>¥ÒøÊ—vJÅCOÏS¤R­Ð¥„ã¹/à]é"—lkdЭÏ<=…29BNVytãú;è““Sü_‡Ï3à‹.Éûùm“?ÔJ|sçvTÕ1ÐŽ>æoŽÍð/ƒ“œ6‘‹5­(qKìªYÇî8Y©ÓfÝ®ë›%µÚ,çν…¢œdõj²‚ê:U«5Ž-09 ñøýÐ¥„c¹/à]Ü̶®f%õ¸+=Ie|”ÅC|ËŽ„š8дfIÞ/aUyfê°c}}!Ì68Áé@Òù—†ù12œ÷FyD½u?.Á\ˆ“tw t}åô[¥RãÔ©"¥Ò©”릱µì©wÌ¥›Ù–óÙIäùS Ö,&¼!g£Ø‘£‚ù´ó€yzb‚3¾(µÛrÍ•0ïéYy0?z´Àôt†x<¦Ý|FDzöÐ¥„£¹/ä½ s›IOU.Ói£2¸Ò³(¦í R‰§°]ú OtaÚHUu8޾æ£ÓüìÂ$§ æ_æg‘žœàìÌU{©mÏÝóX,‡¦Iª‹ë·l~%Ì]̶¬ZQ0wÏN` œf°f3å 
6Œ\–Öÿ‘ß“üà\Å*RQ@BµeS¾@)ºî‰®NŸ#rf„ùûÂt­Ô+,-Ì'8L90¿A˜;žùâÃüÈ‘33âñÌKËèR‘\™Ÿ_ó•“ åžÀ:ÿ)˜/ÓŽÒGÒòê+ÔÖ<ÎàÎ]Ô½.Œ‰4½þk"'ŽQÙõÒ,â™A«šX¾(å¦lMÛDOážË‚¢Q·Qó*„ï#qx;ÖÄ|ï:Ç ;ú˜'ÉÆš€eº(^*Y&þ³G`s`¾Ì`¾,.%œ-Vøùñ3¼g»˜ié[‚m®ˆwÏÍ\í™/ßå>þSûqi ?º‡J°q³P¹{;#‘VTüÈê M¿ù'3EL·­£¼öQÆ}ãì>Ö½öKB% ¤¤Òus«Û %TÈ>ü3^Ÿ!) œ¹0´(Yîm©$ljæ/cKÏñÊ¡cŒë>,Oÿä°Ó(_‘›IsƧ®Þ4Ôë9NŸ~8AO@‰¹2L;¦iräHñ–Á|YÝFF998D=œ œ;´‚¦”Ä“™æâeÊ^þáA»Ž15N=¶•š÷²cvBÁŒµcJ‰çø¯‰ L1ñµÿ‰ùŽ8þNÇ›¯3·±}äM¶`äÅÿ@Épƒ؆ÁÌWÿ‚YK"U©©d¿öµó·Sn²TH[všÿÛ´ÙڽʉÎ.c•³Y*^†‚)¨9åc¾ŒÚsYÆ]Ûs€ññ)Ž×8rdeµ¯iÖ™˜(ÐÞ~k`¾<=t oÃU›е®¸Iµ©þ!ùš\9{}¸VB”€‰kzáKQjiB•ö>dí\å:ÅÎ~òu…ÁÖŠÿ’|Üš¢bÊ•wuãrU¡\Ä’€Ëw}´¤d$ÑÆ`¼ÝiÄ/Ù^Á¹)¬òíu‡m[Òß_ ££¸âšxlL’ÉØ· æËèW;ª†©é+nRÙBYˆC¬):µ¦6ôÏãΕ)E| ÀK}ê<î²h„µ¥}©V¬”)A ª«ï§d8º{uìÔqNæ.ðè¦]t†[œZâwt]¢¯À\WU•·ÜOSœáãèÖ.ÂÚíTäÍ{_Æ;9‰ZÌá9ÿ!m¿ø"g©Å›±Ë3øÆÆP*E¼ƒÇÞ$¯†ÿÔ‡ø†G>µ„7Ñg†y‡è78‚‘ÍÝxñx)Q*'YêUÎ*ñ«ÌûüÓá_qrò<¶s«Š#G+ÏCw´$¨·lfü©I½û?9„Ôt¤¢PiÛÈÌö”=2CCDßú)a]G©Ö˜Ûñ¥xÿx¥4}Ìk÷ÿŠÔþý(èX.¥RWŒéÇ¿ÃüêÖëÛªuæ4ñƒ'™{p5¿Çé²;P3± {ëGÈŸ(ñÍÊWØÔ¹Í©ÁîÈ‘tGKÍtbÿã u݃+=ƒbI,o˜j<‰­k€©=Lvz­jbbTb1P ¿BÞ}à"|‘ƒ4¿ùåMÏ2¹su¯:7Bêõ—ñ_8G¾£ YIã<‹^ªbûcºú¨ûÜ`×qœÆ;=ƒ*Õ¦nJñáCo‘øð"œböžíN}îúG’¯—˜+g‘KÒ˜«ä@6Tg_ñ ų¾Y)qÿš-š³ãÈ‘tGK u+§ˆ_Ó‹G÷Rií¹ú¯T ©¹hbâøÝhg臨½˜ñÕL<û'¨¦Ž]¡åW?Â[P¨D#Óo9ÕÏð³/¢ î£}ß›h¾8Ò®"ΓÙ|/žRÝï—OS)åÑÌ:ç*6®ùâMïwAgÀ‹¶B¶}-l=ʯνEE[šä©B½„™  ä·Ø¯] 7ô ùj‘GÖ?€×ð|aF)eš˵`î¤ZÈ!Ýl}ÁÓ—Z!å !Õku˜„ZÍÔ1=®Ï¬P¨”s x° ýËW1´ªhÅÒ/Ùžà¥ÏvÝUC+^™T&=A,]mä¨Ô+¨Õ*RÕ±<^§ØŽôeãR §Ÿ‡@ÅôyÑŠ%ên02Ãhr=j1‹0MlOËÐðœÿˆÔû™öÇɵµ^6°%Zaáïdì¹ç° ‰ÀŒÜh™Z‰Z˜B¸[öEL—†D`…› –!ùú/qkíL>ñÆÐ¼£çÑ»{¨{—aZ¹´9øv–Óš›õ}.ü crn¼ÂK/ÍñN/ ÿõ T)%GÞÌ2jë<õDã²>q¾È¿½”eãv>„"ˆ–í†ùwŽÝ]é®]ã´ü´õÉk%7AðôQ\ù2¶'BaÍfʱØu¼Gð£HA5ÕC¾³•С×IìÿÕcæþG¨<Cg ™^dzóe!dåî:L  x¨0EïÄÍyqBJj…YNÑÛ(ýÚ² õÔ1|Ó²íÉ‹Gà\>Æ—K °L”…sïª!¤@* V¸z"‡T?/…÷à趯 c±"ÿ:¿ì‘ßX÷‰Ö«µ ƒbWúît–ªÛox˜jûfDv #W Zø‡©¶ÜK]«{í—„F&PLË—dö‘ɧO½Oh¾•RÓNBï¿LìÔylo˜šWâ*L<þ îý4ç?Æ3;‹^Åäƒ8þÑǰڻ©öPŽ.9Õ¶¿‰j$Ö«R½rŒŠZžàÑq+W¼nÛ˜ßÐÙÖD©¾ÕH [U±Ü^¤¦  ˜Î3óôN*± •èrªkyÂü²yv­©fò¢-8UâÔù*u[lw±y£¯Ź:‡”˜™³Ñ½*k7z VküþµcèDZ4¶¯wc,lÁÔ*6Áv/ßþ~œ¸O\ô:”;hË⮈ i£ÏÏ`xç»E½€Z_øs%Mó¿ÿoQ§ØÚ†gh‘Ógþú"§Ðúê喝®£®VˆœA5ŸFÚÉÅŒí‹a5³†bY¸¦‰Aº#;û¨}wÕ­`-©&þW—릟cÛ’³ 
ü@(×l'è#šó(>E)˜8Eòí½˜«ajm5>$xêõµøO~„ð¦(½øN¿otZ»¯Œ¶”shåJcŸßíÇò¸=Åå"æ"uŽŽœãþé ´ÆšÑ>]úW@=ÕCÕó+üccä#I¼S3ä7íÆuüu|S3T0ñÍ(ì^ƒçä$NŽ0ýø7(FTâoþ3É}¿§ôüs(…i<™ úäqâ?"÷À·™ï {ó_)2ù`•Ь8ù­_%mMÐþË%0»ƒÒª5/ØÌnÝE9ä¿r – x†öÓú‹qÔZZ²›Ù]{(‡/³f -?‡k>Å—JŲ.Ûo—¨¥îѳ´¼2‹Z­R­böÁ=Èô4z>MøÐ^ÂÅYTS!·õI¦·Ýƒ\ÆC¾V´˜™1)-Ðl6m.@^ráP–¿ÿ9’«=D Éo?È34åë»yí¥4¦aýjƒÉ¡"çÆLžÚî¢lÚ(n®‰K¦@B­bQ7mN,`Y’d‹‹¾5.Œ;èÀÅ]tmvæWþšÄÅ}/÷ði mO`ŒŸ"t~‚©?øßI÷4¡ ûþÿø8åb£h’î»\[™ü Ò›BñôR?8ÅüÆíW”4uÛ.à½p‘ `ÌŽ’8ÔÎð‹? º{ŽD©ªB<[ Û¸Ý®‹ÆËw1þÕ?$ùÎïHýæoQl°¼!Škv3½óajnÉôýcÄŽïÅT"573?O1C¯1Ý“—ÅmË„þžØÇpåò è˜sÛ¾BzÓ¦ÏHŒúüðŸkð†ˆïêt`»òØ«3a^h «Ö¢^3ò"°}qŠÍqÎb4UpU\dV¯Çž:ˆï EQG·Z"xß9«X˜@Í€Z«ã:‹^­_´Æô8º‘Û°™JÐÅÜÚõÄ-¸knJëÉvw#«~j!=_ÄJøÆXèò}$‚jÛf(‘_³»6Cò­_Ò$] =ýÌŽz鉒~ø¤¯í²\ñçjË&f¶u_³Ûš'ñÖË4½©0Û*Ð *ÍkI¯ïÆwüUšÞþ%¥öÕäS¡e9¤„ãäø&Ê—­},FæÁ6%XÀJyùÁÆðk’ßüd’·yx«Af¦Ž+æã‡‚D<0•±èhÖèjÕ™‹zÙÒçB¿lkÄ6ÅL• tÛfïoçÙüh”o<é¿ì÷ ßr™ñ.¦ŸùS²M È3OÓÏÿ+šè…,ªæ¡’hÔN·‚MXšQ*“é¹—ÌÐ0‰½O̦¸j#éûš?5—.u¤]Íè·ÿ7loŒZЇ>vˆÕÿøßˆœÛÉÄÖ~Çó»éP‹B­u£_ïEÏçPl‰­»©_4’’ù_£Ð?‡Z7±]~ê~A­ã*µOªÑYøþ†–×ß&¿åI&ׯÃU‚ÿžä›¿ I’ílBT èù<é`¹²›m -—A­Õ‘º›z0•¢¼ŠÛXE-o•st˨»èÏ7ó|Ó.v­ÛŽGw}v9ÍK¡«›è»gÊ?E%žD¶·:~’`ÍÄŠuQ ùðWk $Z1‹¢€ïe¶#‰¥]ò€ËEÃV`{½ÈOñ¬•Æv›üœD E£ÐÿÅ~u!Ìn¢M¢åôŒê“TŽæ‰ZŽÐ¡wñ®Ì†¯‡:™Û¼åR¢¢R\÷(¥uŸ<ÏFIŸ¥ýà´–õ˜¡fò½›©&‚˜¶“|ïî|vÙ]X¿=ÈŸ~;Œo¡ &NøÛË!-›é)‹H¿Çh„Æ›› J'«Ôt• òòïòüÍ)Kl{ H×e9Ò‚+^v†ù?7„H6ëH^û·^Ù—g×NíAèK·Š*µh3ÕÄ‚×X÷a[Ó‘¶…R3Âæ•(Té‰1ýÔŸ.Ìâ:Fêíß¡âe¶ã3VÉR‚P1ý¾Æÿ£­X††V¯5öëž/Î Ö=Ô£žk÷¢b†â\óPÕ'ÅH*i"‡ß¡Þµ“‰GŸÄÒ^P%ÑL¹ûÕX5}†æW_Æ«¢1ûù– Á_&qô, Ê}37‰œüÝ“¡¸º›Lÿ¦å¸„ŽK,MŽ€-mê——B–ª¸¸¯²ŠÚv³mÍ&Œ/*-*ík±jï=™¡´áyLCƒ¶>Ø÷S"“Ò=€éöPM$©•`桯QõhéaŒº—º[¿Ø¦× µF¾DÅãÁ3:ŒZ¬}ÑÀi6n†”—ÝÕ`V|ü:.¥‰ô¦ÍH»Œ+›Ãòİ.ßG·%ª#Ìú•ýa[WVI´ªø¾‰ÇŒ’¾g+¶¬àšÏbyÂT›:¨zßÅ39F>@ŸŸBº±¼Ïó»ü*MM:~½aƒíŒ†*Mîõf‹v£Ù‘TÊB¨Š mƒŸ¿ìõ15Qãà{9^úç ±Xü3c õºÄPñ 5¥a}X¡zÝw—Ÿ¥ÔRí”}áÃïSÛ¼ïÉ÷Qõ8¥Xï™÷ g™ß|/åö5Tï¡V* ¸V÷Ôå ¯±%%úô9š~õ3êk#³¦÷ù÷pÕ¼dMÌï˜Õ¨…y<éùÍýX—?2ä×ÞV•ø[¿%˜õ2üüSUó4ÿûß“Øÿ.åGï#|t?vëãŒîº}ò¾9¨6¯¡Ø¹%ñ™ ýØ+°™"šÜQ,¯¡n- Ðgjó|¡j4’X/Ø=¼°úú»Ö~ÉŠq+ÔF9¤¼0ÇlGR€i£ê*Ÿ”L¯êB*:ùõ¾ð ­¿ú*£1Çû§’ØB … ÚÒK1üÉßü"Š1=ˆ¢i ΃rE$-wY™"µo/“=A%ì[hT¥2Gü£wñŸÝ4ó¸gsÌízšºëÒw“ž0s»žgî ¿ª†¨f‰~ðþó±í"î™Yæïÿ&¥Ö>æúV{ï_ñœK¢¥Ç¨­º—b2±"§ºª«t÷¹9t ÄáÓnš 
‹OVYÕÀ[7y}o½Ùæ^ƒ5=.öŸ(R7%º*ȤëL¤M:â*Bl[r|޽ÇLžz:LL³Øw DK·—„ÏIŠ[:û­º)·´7Žš\6Áë±6DÀ‹íaò‘gH9BÛ¯aë^fy‘|KUkBœ8Ló¯7<îp³[·Qõä)6ˆî‹Z F¾= BPO­an˽Ďì£ãØ^lÃÏì#_c¾«yQÂíšm£)*ú]ºW¤°ñü×Âe0R»Æð—a–ðŽ RYõŦ$RDÉwtÐ|æªò•D3ÑóïÓ$óä»Ö0¿¾Ó]ÇVU0ð ÌäQ|ããÚ™Û±–|W×Ù E#ß ŒÄºñ"¤ÆÜöçȯêFª éG¿K­õ0žôè×=Ä5Õ„B½©‡LÓÕ‹–J×}T%?NðÌQ÷’kIþàêÑVêÁõÔC—5–‡\ÿÃÌn{â3ÕM±÷>н‹åšº(­ÙFiͶk,lBä6í&w…ìg)UÉó¢«Îž¾5xî¤ð¼¢ðÈóqjBàû¤£4õøø³?s©hŠÆsßI/ØX<>·ÞÈ^ßþ•(ë°¨ÕAÕ~¿‚ª 6íŽÐ¹9„b(=—žHºøÖ§(žåUð¸”E0ï‚|>ˆaÜC4št€þù®c†"WÞö…±/ÿÙľƂ@º|˜.ßUµ½!®é3 ËZÔ P «Î¶Rš¿ZÓž{6âRïŽs튢ðÐÆ ì0o~“ªhÃ[ì¤/In]?M‡_#ÖÖD¦¯)lE$rs§ƒœz¤w¸\V­¥4ÕÓÌs÷l$lèw×÷w¹p-™vl‰úÉea0·ý\óÿLâÕ¿%ò~[hå å®{Éöô¢¸Ä÷Ú´¿üwX¢Šk^!ýàý˜¢†ÿÔ;ø>ÚG5@ÍOQiS‰DIxDS‚ô¦~gð.‚¢¾0_ë|”GúwtûºÈÞ÷…!,‰Ô]X.׊;Å’ªxÁ¨ñç›zؼª EqV©‹¼$!— ¡( ˜75µ;!÷•óm¥Yþ²§…ç¶Þ}0¿…K{ì`Ïý9ócxfgA1¨&;(µ´7’×<ÄH° ïÄÙ¦U”R)0ýä·ñ  •kHÏVнÔ>2~³e+»rŸÔÑ kSïúÅz õûŠŠå ®ØvOU ¼ UžùªNTæ‹ól6„¦í ·÷)R©¶›†¹ô;æfmeæ·êÒ¤Ô½…R÷µþ^¥ÖÔCí{©V¨™ÜÆæ«_v2m•QlËiãE¦9&êvÀüù˜oYÝõ{ݨ¤„\.„¦í¤¯¯óE›/NóÞ‰ž¹ÉÆê<ÕÓʳ[û˜;rähi`^+òˆ›Ì»˜ß ˜g³at}k×>E2Ùº¸ àeê\¡Y&še®¸Þ6l“­¥YþhÃjž»g#!ÃYs-Ú¸aéÆÍU?-¹¶IÄ6†øRsC5k<¦”ùÞÆ~¶v¯B¹M07M¹»Í²Ù¬ŸPhëÖ=I"Ѻèï±ìh¡­.‹cTJS+®ÓÔØÔßÍS[˜/ê@ÐëÑùîÌYdzi U·fãS½ÇÛ2 ?«ŒP›vãË.€ü’-ëúÙÚ³ú¶Á\×Ý ¯af¦¶âÚײ,ZZºX¿þâñ–[³0“rù¹U[2W·X‰þ"%]Åp’ª]EÓ&oÙK6n4 nhN ÿÛ )%–åä1\W›šzýÅMÊå2?ýéOæÞ{ïeÏž=—N”\w¿ÙXV°Wh+{Ñ´[WGdYº€.EÐär¼WG×'Ÿ¦àÓœ…ÒÝ !„“P·,ûMAÓBNCÜM@wäÈ‘£÷Þa®X'_qöׯ§ÍÜš$ö,Êñ*GпôÀ«T«ÈzlHÛív&•#G7hFf 8v޹b§¤ß—i3›BfŠ–¨‡çžxûö•ž®ÕʘæÊL6UU‰®{n*A[i“uplšãçF¨®ÀN/ÍMáui<öÈn¢Aci9ºn˜ç9pây‚„º:œ…ñȶmÒ#§ÉVþBù¶ܘŸ```?–UXqy)¥R•LF²yóS¬Zuã—i+i²ŽMsðì$W žDrŬ¾%’ÜÔ& ³ø Eê¶sÊ‘£†¹ôáOu!„“Oñ¹0·,2£g™¥¦‡A-ÞV˜¿E4:J{»X1@—òù*GŽä©V¡^䦞§­”F¹óžpÓŠYyK)ÉO 192HÉvã3œÌ]GŽn æIæ_ì™[¤GN196LM ‚j·èóó ½E(4JG‡X15奔 UŽËS(Ì‘J¹nz¡¢¬„É:86³baž›ºÀÄðyŠ–º×±4ŽÝ ÌÌ¿ Ìg‡/‡ùíÛ3ÿæÁàʃù'žy6;G"Q@Qn>òª-÷É:8>ÃÁsTŒøŠƒyvrÉ‘Šö'0·k³ˆc'+1›¯/Ù¹tCödÃ9:·´0?>è„Ù¿4Ìmf‡O156LM 5ÃÁ3 04¯<˜P”.Ð< “Ê18‹×Æ6ÇŽŸàè”D,ÑM^J-Ïžmí¬^ÕåtÀÌ¡‘ÙûR>üÉUÌ¿ÌO^ óÛ lvåÃ<—[\˜/[ K)O_s+¤tvæÃ”.‡ù2R>ŸÇ\âbÌ¡Pèº{Ô,–Z‡b,ÍV†57„“ϸ4JgK¼wà(óuªj1ë4ÊçZ(ÌŽ5àn#Ì¥”ärƒ 6`ÞÕõIÜòŸ8Ÿ$ÀÝ*˜/K K ³Y>øøE5„Ç뢚Ϭ˜IUÉÍ29>LIº—åž¹eY|øñ ³,ç+½žåá-]tv´ßØ3–Êp9'¤–è3SŒ§ 
¨ÁÕ©Q§A¾„J³ÃÔ?h·oϼTšáر7q¹F™ÌÊY›fÓ§óäó’É·¢\þò:05=Ãä|-¢<=¾rVÉJéaJ¶\Ë7®l)˜±õ(ÆÒœ•×r‹æXµåÜ<¶e£^<Á(ª¦Þ ‹%Ò– §žûøj©€W»þbJîæu¨áv§¿äدVJP¼½Õóffæ(•Î V˜˜XYm\,šÌÎVho¿50_–@°¸šÖ¢…ÛVÞ¤* ìM»>{1ÚÞ&7ô!ç¼Î|f)AÑ\Z7ÒýÀ3„Ãþ넺¤žcrà‰u÷ãv9wÚßˆŽž:ιâ(oÜEÊw Á¬ø…ôõ•éè(­¸ï62b3;+¹•Ù9µÜÝú˜U£’¦R.#W †ÛçG¹Aã,mkÁë‹fEjéSœ|ígX‘Íô?ó]|~/åéSœ{ï× Mâßñ*5Š™)êuÕÀI ª RÚÔ ³”ò9@ÅJâvkÌœyŸóO ‚Iš»Ö8ᔫøÅÄ>2vžçÖ=Fg´Åº#GÐÝ™ÅI>ø%ƒ˜–…PT4W˜–ÍOÒÕ¿í:³W¥]gúØÛí%ÙÚ¶8Þ9™™¯FضûkÄcAà Åñ„[¨Ø>+ÏÀ»ÿÊÄ說cÕë„{aÝö]T'q|ßo1m¤î«·ìdnäcfƆ"˜lÇ©"p}#`2Qâ—¥)-óâšÇèkYâd¬;räÝÑc«ÌðûÿÊÐùVïü6Míí`™8üïŒyƒPS'ñDˆjvŠb.‹Þh ¯!Àª)¤'¨×LTOˆ@4E=s–¡ƒ{Q[óxB‚EÀ¤mRšAw ø.†Ö…Pð%{ðIIþÂ[ Ÿ:IçÃÿ‘öUm¤O¼ÂÑ_%Õ½{äÙ‚àžçÿ#>½ÌÌàŒ@ñUë‰LûXµe7Ÿk~™÷§””¬ ™zž¥Ê:ÎÖ H ­ñ»Üa²'‹|£ò[Wõ£*ª3É9r€îhI¼óüãgŽ“Üüº6lFˆ±êÁÂö“x›“ï½­z²Ž­†é}äûÄ# oý˜ÉÉš®bÕª„{'áÉ03z·ˆÝHÀ¿jÑ`%„rU&º”°(ÎŽS×cÄ:V£{\D:ûÑÞyB±L,±r†S¯ý˜X{Ñöu„¢aæ¦u*ª®ƒ´AB©ù[œ#¡(à5ÔEOij°ùhà(¿=·:K“çQ(‘ ×2äƒ&ï”Î’=W¤T-³£w†¦QÇbÛ&íÒYf)±-”«_ªö™!}i›ØRAU•ω ™H”ë>B)¥DÚVc*êMݸuås-lËF]h§O¿Pçð…ôå%Û2A|j’I‰e™(Ÿ3oba߂ɹÜTËe(U4Ú[»¹Ül¨F€X[»’áÌÁ×°‚›¸ç±§QkþÅÿÇðÉÃøû[™½p†Ðæ?¢oC¥™³ª^BÉ&"M]„û£¹cÕâôPñÄš¨Q,”1Âþ…âX’ZnœB±ŽmÛ Bˆ…o¢ ±°¥M k[üÍÌ dnô(G½KÏ£Œ¿1ø(FukH«Î˜¹¥íîÓ,vm^M2™\ôq=WÊqHýÿÙ{Ïç:Î,Íó—™×{Ü{\xï€ÞH"%J”©’TRuwm›éíé5óe÷oÙˆý²nb7bwgº{ºËH*Ùeé ï½÷×{“™û EДД$ó‰ÐÀ5'ßsžãÞs;sûçêv³d,2ýºe2 Îgx¾õ¶¹"ç,÷þ‘œ±‰úî$Q _`âÛϰÖ>OmKÙeª·²#oPR깇äT¥ÀÖøg„S^zŽ¢»Ï´?µ˜`±÷CŠæj;ZïêQåñ5lj¤ï•™v2o°41@:™Áh¡öð9,fÃÏ{\JŽåë`3®Òtúml™ÍQ†®’Œ'1yj¨ì~§ÓþÄÚ¹¨¢  “„»Îi¡¨¢Ó ܶ«ªŠ\ÜY˜%JÂ=ÏR#ô=†*§Yºö>Yc5õ]Çw¼ª’ZdzhˆÊ“ïàvZºƒ9[%§Ú°9wGBªB|©—ÅÑ^R‰$æ’Fj½ŒÝö,®;UoÙáïE¾*ª Å|‚x(ŒóT+«,AÜ¥%¬…–̇pWT³:ð;r x+[ðÕ· Ã‚€¨3Ü5=JQAVF1ïH :ܵG°ôÿG¦.HóÉ—±Ù-äÂ3L~ý[ŠÎ.j‚^t¹~b›Ø*ý$Ö'Qt.lf±Å!Ҳʞ×)oî¦ÿßþbë˘Ýª’G.0:tFò@~%.‹ÎËöì= z…¢þ1Ý `Pólñ¯}L,Ÿâ•¶3x¬®ûoQ§§˜Xai6N°ýƒDzsŒÅÞ÷±å›šÐ‹*ñå~Öæž0€ª È»¹¤ÛÍÜè°zÊ)šÜˆ’€ªª(òÎ>uQ’PdQ•‰-÷‘s¨–‹wü½@jmÉo®Ððê¿ÃårÞñYUrÛ#Œ~õ; þJ*KÙœíeÕ¤®µõ§OHSU —™½ö!|Ôžx 9µÆÄ—ÿFÞT‰¿¶…ðÔ5ƾ)Ðóê¯0>ãˆUYáò§ÖD¿8oâ۱í[ >ø"ÅÉ×Ü4tmÛÓ¡¡‚HЯ»Ë¶+E…Ëq®dˆç Pcâå—œGvO?¡«2ñåvuwzý™ëS}ø¼½C0вÉ# HºÛQµªª¨rUUv¢|Ir–•$©¯£åô9ô:é;åŒL2öÅo‘|]øë+Ùšîge:Hã¡îçÍí5ôv7fcžÈêÁªÒÛFL)f‰,Ï Û@¾oˆUF08©?ûï)Y$´<Éj￱1¿Hç '¾#–è ‚R ˜O2´(2–â\¨¬l'Á¿óþæ@-Ï¿Á䯸ñÏ7ЛŒ(Å&O-õ‡Náq*”ÏO±xéÿeÝd¦ŽQÚqO©—èØ 
æûÞc©ß…¨ÈéK)Ö`$ÄÂÉõi-'õû3°CÐ=åWäu*3%þ¿Ðç$R¼Ùö"~Wé=gIx*›˜™ºD<šÄ\j!º<©¤Žbt‰t*‹ÓªYžÁ\ÚˆÅ"±9ü)«óÓò2&O 5G_Án•HlÎNø(­ ú‚…±0:±¹läÒPÕu €ìöãŸO‘ ‡°øÛ¨éèduàSæ‡J*h8ú .—ý¶} Í\'#VÓ~æX "þºTcÉ]d¢*yâ+Óä‹wŸqÑàÂU¼Ç¶S«Ì \Çà®Ç–Í*©µ‘<ío¿ßïÂãÒsý“? Åï÷>‘ÃÊt†q=¼~‡™OéëMÑzν+»ˆ]$I¸ÍïDÛ*Šˆ:ŠÊµ?E™-øÍ_:±ê¿ûÝù8ÿü~‚ö6špéóÿZùoÿÒQ:¶]«¡£¢dÃ,Üü˜íµ5UÂRÚ@ÝÑ—±Z Ä®1?ÜK&A29ðµœÅ.¬3×÷c=f›êÖô’€ª*Dæn’"ÀáS¯c3ëñÕt èÜ?ùŠÖMèŽ*ÊZ™ü#v§•ŠÚ:%ÃÚà™[¤ñÜ»ØÝ.bK“äšêskD·¶±UŸ@ͬ³:=»þ0õUØ­0Ò¿@.߀B>“ ŠcpøQ 3@æá"˜\V½Ýy.:J;^ÇY}˜ØæÅ¢ŒÞêÁQÄ`4 Ðtþˆ¯/Íæ1ØJqúÊ‘$‰Ò¶W0—6’ŒÇ@ÐcñTb÷x”=oYPSÛ÷8Š\@ÝMã‹¢A´ëX«¹"l”døfi€fg5^‡½¤»'eañ7aÖ}Fds•RG)Ñ­u<-gÉÌ]#a¥Hd3Œ»«™ÌÂ7Œ_¿‚¯ã,åNK}3~YG÷‹çɆ&ÙÚR¨3qù,Õ/(w²>ô ó EÊ;N€œ&Ù$Ðx§{“ÉkÂìoÄVÀHâ¯íÀf·Þ‘3.Ú˜CVœ,^úϤã1Œž:êŽ_¸› “Û¬~ÅöÖö]ßÍèÂä*Åj5Ý•j_éû”¬è§¶+ÀØõ¯AUHE6Q .¬N‚ `tU¢/ÆH$O(¡?@J>[ä«O¢ Ïä)(Þr#/¿æ¤Â%²:‘âÓ¯“„â z“Dû1;µÆ"Ÿ~cY0PR¡ã¥ÓVlú=z‹Ž.¸xáyJv9ÅçKrE0þÌg„ÐUÒ›Ó¬Œ]Þ½&¥’ZGVTPe6úßcvl…º¯b‘’Ì^ûIÁJÇ‘v®ý‘´µ“†$–{‰®Nãj®ÇZZ‰Ùu_U#·³UJ‘ôæ<Å¢ÈÒ•ÿB&AïRsâõÝôó³e°ÉBõÉwÉþ•Ù/ÿOæ/™EA²RÖý*þêf,Å—˜¸þ ýïÍ‚’C±ÖÑØÞƒNˆ[¸ÆÒøeôz=Å\’’úãX¥8¼6Æ?§¤­²Úã?i¹Šª*R‘ïÙ~££ Ÿ£ì¾#츫:î÷ØMØßÿ¹wuéÅÞ»ŒíöÔeV&HDB ³á,k"Øu·ÇõФ®ª*©µ G«å™šHgP$ê¢.”£¥²Ý}»Þô6?î'‘ÅYÒ%9I‘ú“„#C„—qS 7QUVJxøOdd=6—Q/`u8XZ%›{þ–’“Þœ']°Ðräe¼.ºÌ2ë«#;ú-p”·SÓu)¿ÎÚØ7dbI<ž$ýŽÒ tw4Õ©ªL>#“Ðaì<ƒ§<ÅB,t½pþv4©·—ÑðÒ?ÒpŸ‹xÇùWwSík+a‚§ÿkaü¶ ”墤¿Í‹’IR(ŸìAVá•W¯$±ì²ÙÖ\†LqÇiü2Êûßæxå.ÊL Ÿ}åwªÈ?üÒÊ—EX5™yëu ëÓig³4œ4¨0`¶[èn7aÕÝÊô›­”7í4f£×ªkõ‹>„®ª2‰õ)–û¿›>$§7ÉæTÔbŠí¹Q,U/QÕ~‰™õ!æ–&Èw·" PH…H§³”´œ'`°b21™ÍÈÎV»ý;#ªÊ’‰å,m¦´Zf©÷#&/éyåM ºg3Jo½ð¨ ­I$@gÂê)Çâp" ®Æst•6‘Œ„A2b-©Âl1#à¤õÂ?ß^¥XPЙ]8|èô /þ{J¶61ʉ'înôÿ‘Ë—qÕŸ úèIȇYý†¡­Mº^ÿ;œv3ªœG.î”$½ QÙé;Pó9UEõèôzäÌ&s—þ€¡þeêu£—žLSQG[ÌÏ[ÞÓœi9†ËâøAgHÐÛðkY`{>Žb*Á^R(gjyŒm¥ˆ`¯Äá´KÆÉÄb¬~¾Ó)+8Á;Ö‡ªóY½a§>nrznYuQ‡ÑêÞ™&Hˆ:qgÒ9½‚€ÎdÁnh¡¦ó:I¦™ajnœlþ6³xÛŒ-ßÓ!š¼”kv;ïU”ìó½_’UJ²kl­ÌO'-McR%9,«»ñGŽ¢,¡7èž`ÛkóY¾ýBåVÕ!—,N©(…ñá ®F;/ž¶bUâË~?!vÁ‚NHÇŠlÆš:è6‹¸­àvˆ¨n=ÏÝ5tAU•t8Ï{¿ ³¥7òWçl&Ýþ̺ èðu¼ÂÑó¿ÜõxUâ³_pí“QÕ¹t}µQƒÅN±°¢÷P÷Â…4ø-Ë7~ϼ,âª{Ž–cÝ?ðF"’É‚Í[EuÇ z 1ÏÐÀ8™ÜkãÒƒÇ QoÁhĸ߳1»ƒ˜ÝÁ{#+«¯õÞT ÁYA©ÍOjñæ]‘I!µMl}žT"Ž ³`÷Uã( I?|5G-dQöð>óΡn=9µÊÂÀר/Ðñâkv{/Jƒõ¬-,# *ùÈ Ó×>#‹ƒ Ãh§áøK˜ CŸ°<3M¡PDgrè| }|ˆù¡o0§Àì°SY×°·ºè 
"ºÂþ±¢Ný.¹¥‚­` =à/Ê^äxsÖ?³3@$œÁfÄ›ÿÂÒDsùó˜MF„ŠFä¡Y‰°Õ¼†ÙlÁìpã¬Òþúßc6JÓò²«ÅDxW:ƒµ˜%Ÿ+ Z$ÒáMйÂ]Rîî½½/°øªPãä‹2:QA.$ý]J1%4×G8¾»œPÚ½´‹eǶ(…,’ÙQɱ2üÅÄ:ñ­%ÂK³”û\H… Ñ6³—Lh–¢èÀns<Á¶ÚN:øÿуm—ÍVFüOÿGdwU©Š¥ZBwœ'»]"Ÿ+RÐéxù/Ò— ®þ)ÊÅ<Ô´ÛøË_Ú~4[ËòÛßFØVu¼ûWn|Ë~fj肨CÔé‘v»ÜÅÝZ› 1;¬D£[ÈŠŠ@L,„ÎäB"O;u/ü=õÅ$›#1zý"†šÛiÛ»ßD‡ÅW‰Š“+0è$ ¹‚d|&kèû祫¤×úûê}âɳAÉ“Ï+”´¾LËñçÐéî%uQo"·ý‹]T™èÂúÒz¬V3¨ º|”rŸ‡‡ ‚UXɇ•|d™X¤HÓÙ#tßyÿ¦’Fj¼õ §™þê¿°sÐtêWˆ©y&.}¼=@m•…ùÞ¯05ý‚úš2B³½¤Â[T”UâðWãn8AiEõžÖáA¤ÂîãÜf;…øþ¤iC©×K–ÉXdPÁ“5Ó•­äª³kì¾·fþnˆÑU‹Ý”ei-Lûñ$LîjÌê«Û*‡^¨G’ôxêzXZºÈìõOq¹­„æG0–§¡û–/bñÕ`5|ÍÂÕHù„ç'ÈË꾿¨³¡ä·YïÅØy“Ù€°l”4ciæßÿò_qXÖV(iy“á;GSoõSöï¨Sï "ÄÛ©€ÞQIë«ÿÃí„Brö ²E=õ'/à6¥‰,ްpå_I•ù‰Î b«9†Ëã|²m»$ × èõ;¶]¯vª¢€Ç#²ºU  ƒNTÙÞ.`´J˜D•´"òʯ¼¼&+ _ñŸÞsøðN/Â=kŽU•\,Ï~!n0ð›wÜTzÅ×÷òÌ7Å : ¾Æ¶nö3}Ý„‰$ëümg‘ æ®ÿŽ‚Ñ7PF*Å`s¡3˜1˜Œ„ûX_¬'PY…$ ‚„§î(öñÿÄøÅÆå2±5=ƒ·þægm9Ç>žs%bæÒïIRFç›ïàöx@N³6ø!+ËS$ã]8í÷”ê IDATzÂK£Ä#É„£¼ W …lx‘ÐÊù|ƒ=€·º‘ìÒe¦†ob©1PòU\6žô/Ÿ>„ÙðàŒ.Ë2ÓÏ: ò(‚ƒÉt—|açì’a¶WVð}cgö»RÎöÄ·„Wf©®îA§ImLsÙ)i~“Ý‹NÙD§Ócvù1 {*gI9TÛFm°zß¶SÝw™ùePÁŸ´ršfÞ¨?CWuÛCÍwÌn*z^A\Íà++ß)gXJ©8ü ÆmŸß‡ ˆ8êÎЮXŸ›$´´ÑÛHEsz„µ¬‹r{ fo3Ç_fu~žDTÄæ«Å]G ¸kŽS4ùwßÔHIýID[ ’šÎR± ²™,¦[wÌsÙaÚ^(°<5L<*èz“Êöcww®¯Vþ£ÎÃíßS18+¨h9†Éh@4Yizá/X¹Jb{[íi‚g0è¥;J ?ðªr³QÿDõhˆz‘Ž+}¦xïC‰½ÂÉ]GÜXŠ2~!¢ÓÑZo ´YÄêÔa6‹Ø,"}Si®š8ÞjÜmxV¾çR–c/›˜H2X\zº»Ì˜uZ—û¾¥Û=õǰîžû­·—SÙy³Éˆ½ã´êÜl¯­@Gùá_QÑÒƒ^•'Y%´8Îì§éÜ1Ü^?RÇyòcC¤£!”ŠÊåŒ%í´½ø.˃$c |¿ Ø~âîÁ?õ»ÈyLfýCÏ?ßo˜$]xA·w£"@a§'"žeke‹ªWþ%>ߎ׬w<òž¦$&»™ÕÿÄÌè46_5dÃ, ^¦éü?PâÈ1qñÿ!-”b³HN]'{ Ÿ%I:ÁP,ìx뢈€Š^'axˆ;»²ÀnvF@o±¢#K2†2×í5qª*“KÆP yнÅr;ã£7˜(ÒˆöJš_ú–Ç®³Úÿ (i½@C[Ù¾>[½¨ÃcÜ¿ˆÎ¢3!æÊ#6^6uñZÃs4j:ã%ˆ&ʺß&pˆ i ™ ù5êÎÏv~Ï€§á4îú“;$·»HJš_Á« (¹mR‰Þ¦( xY¿ù_Ð[K0Ú}³&nEæ·ÈüÏM­{#®ïû™;}#á{ÿ· ¿ÿÏïûû¢À¸\„Ã.Ìæ—éèx›Í¦úSíŒÈYìJŒ3•;Ô²Sï:ÀP…‰‰ ¶¶·9~ü8&ãÞg î2,Æ,[ó£ÊžÛ]…Ä*+SSØJ܈ »ÈªŒ,v"&…àñßàkÛ$¶>ÃòÀEÆ" ºÏÛ Ï{Ís~+ÀÆì0ñpŒ.ªOÄ_׆Á £âø_ sÝ ²µ†šÎ½ˆ¿ª ™Ú#I6gHE#XËRÛt«ÃJÍñ_²¶´B!—}êÎ{s+-B fñà4 ¢Î„­´êéµ;Åô™÷<™Ë²Œ,Ë;s&´à&óPȅŲCæN§ëgËN#ôMæ9lr”Ó•ëjÅ ?ØKQ¹zíHû䱜5›;™ìû=zI¦¢¶5f©÷CÂI ¯¼»ÔKhâñ€ %‹·ùî×yZ`45CpÀÉ\UUTUE’$ŒFíy=.2×ý€“¹Usº½’ãÝm8™‹Ez{{¹téÙl–žž$i² ‚h¢êôoÀø!K£³Ö—C4X±–ÔÑrîU<¾2 §ÅÔÕ‹ ø¿"ކ³Ô´´bPB¹u&/þ_  
ˆz#UݯasðVV±2ü›“ê†ÚG÷yTq…ì$þÁÁ)ZD¤áA”3ø, çzêéh© ³•Ëå(‹èõzL&“&ć óÎÎWq8\L‡5B?J•ÇBŠÓ한èiÃxÀ'9år9nܸÁåË—I¥R8œÇþ ™¼ÔœúkÊ»bòyIÁâD·{ßÛ<¡’&r™ ‚¨ÇhsíL×R4¿üßM%PÉ`ÁdÝYŸÚôÊOe*ÞêE~îö2uw·ú^› @Õt@NGQõVMx>É%ñÒœ;ÜNGsýëo2™$›Í¢×ë÷´oæé"óWèì¼€Óé~¤¯þĺ$¨H(O—J ªÌ…mNu5p¢§£á`ß_O¥R\»vk×®‘ÍîÔpN§sÿ»^E £ÍƒñIgr 39îqD½‹ëÞic’Ñ͸óûÊÏXGjÑ)ˆ[Ã÷oÎÜèä$¢P÷L›MÛESIš‚¼¢qȃUNj+h®¸E©TŠl6‹ÉdÂnÿù»ÕUU‡¢myy9GŽ¡··—L&ƒ^¯ÇãñìKsŒ€B1±‰ íý1VòAýIçÕi·à´£aŸíÄA»’Íf¤¢¼œ`0x¥ö“”­­ÀÊétb±X~–³RYYƒÇó·Oí¹4 ö°Ñó‰L¹K’ôHR;~®Ñðz½Ô××333CYY±X dzç qfƒDO})‰L|ß¾¯ÓVŠQ§Õ5<<òù<×®]chd¯ÇóÔ|¯B¡ÀÖÖŠ¢`µZvSœ(ŠÄ¶' Ã3×u¯5ÅiøÙ =11ßïç…^ ãt:÷|È„$ ´65ìkJU´~ ?EGnܸÁ•+Wp¹Ý?;-}‰D‡Ã˜L&ÊËË1ÿhØíím.]¾DKs MMMÏÔY{ª–'«ªJ,#•NkVdŸä½°°Àúú:¸Ýnêëë)))Ù—¡ ìLÝݯÿ42×ð°e™þþ~._¾¼S’Òéžš«]ªª²¾¾N"‘Àd2á÷û{o@<çÒ¥K,,.¡7ž¹ó&>MŠ311ÁÇŸ~Êòòò¾\zÖ‘Éd¢¬¬ŒªªªÛsîµk+4ìØ¤¡¡!.]ºD2™ÜuB…Å>ªÌÃââ"çñŽæM§Ó\ºt‰ÁÁAÌ&&Ó³7俉O¹«ªJ6›¥¿¿Ÿ›7ob2›q»\š5Ù¹OOO“H&9qâz½^Š ßÓo¿ý–h4ºclu:, :ÝÓQé …B,--!Iµ¯)—ËqåÊnÞ¼‰,ËèuºgrÊ îIWš­­-nܸÁàà ²,SS[ûXî@?kˆÆ¢ SWWGYY™& îC2¢("Š"Š¢ Š"‡ã±,1yÔP…™™b±.—‹`0¸o“!ï—)¸qã×®]£X,ÞæÃ3˜r×=Éjzzšk×®1;;‹ªªØl6ÊgòAî·#51>¢ª´µ¶>Jƒ†G AhkkÃápðᇒËçQd³ÙüTèK4errUUñù|”••=– JUU&''¹zõ*¹\Øé”×ëõOM&ä©'tUU™åã?&ßþ¹ÕjÅï÷kÖd±¾¾ÎÄä$‡:;ñŸgbb‚@ @C}ým‚y’‰FUU–——Bjjjhll|lßIêêêp:Äb±Û6Êf³=“¶é‰$tAðûýôôô°±±A&“AQ|>f³Y³${ˆååe–——9yò¤6ÞUƒ†!¾••"‘/vw?5éßh4ʵkרÞÞÆëõÒÒÒr †|mmm¡78ÿÒKZÊýIC6›erjŠ`0Hmm-‹‹‹­ž»Ç2¤¤¤„ÚÚZ-:× á Ë2“““x<ÀSá „B!._¾ÌØØ‹…C‡QWW÷Øí@>Ÿgjj ¿Ï‡ßï¿Ýœ§EèOEaxx˜í­-Ξ=Kuu5õõõ˜L&döP¡çææ‡Ãœ{ñE­ñPƒ†ÁÆÆëëëœ>}ú‰ÖUUI§ÓŒŒŒ099ÉÜÜ’$ÑÐÐ@GGÇc’£ª*«««;véÜ9$Iz¦9@÷$°ååeÆÆÆhkk£²²A´Ùî{Œd2Éðð0ÕÕÕ+*4ÇIƒ† 8¦¦¦°ÚlƒÁ'^W666øê«¯Èd2X,š››9rän·û@Èzff—ÛM xæíÒGèétš¾þ~¼^/íîã³OLLÉdhmmÕd®Aà  ±¸¸HOOÏSÑÓ#f³¯×Kkk+---¸Àð.UUÙÞÞfeu•ãÇŽ=s‹XžxB—e™bÑ(çÏŸjf"? 
jrr’ææf|>ŸkÐð#$333ƒN§£ººú‰×Aƒüú׿Æd2a·ÛTŸÒìì,&£ñ©È„a}}ù…ÚÛÚp:š@4hø3A‡,Ë¢ûûQGê ‹‹‹¨ªªÝ¸yÒ½X,ÒÛÛK6›åpOV+Ù'äóy†††p:Ú þ Òé4“““ÔÖÖˆ†±§Ùl–‰‰ *«ªðh²~r]UU¦¦¦˜šš¢ëÐ!JJJ´§¶Or_\\dmmööv­_Aƒ†?£/KKK¤34çwe½²²B*•¢±¡AËÖ>I„‡¤®®ŽææfMQö ™L†‘‘***¨©©Ñä®Aà —Ë1>1AEy¹Ö8ºÇ(‹ŒSZZªíîx’=ŸÏÓ×ׇ(Šôôôh;·÷Ñžšš"ÓÖÖöLŽPÔ áaôemmh$òXçš?+ØØÜd;¢¥¥E³MO ¡ßºû¼¸´DWW׸÷ø¬ 311ACCƒv›@ƒ†Œ½^/åååš¾ì!dYfjr«ÕJyy¹&'…Ð777¤¡¡ú§¬cô CQFGGÉçóÚm [[[lnnÒÔÔ¤e÷‘H„¥åeZ𛵾ž'…гÙ,½}}˜-º»º´´Ê>áÖä¥ééiZZZp»Ýš#¥A߉'&&°Z­·ÇPkØ;û4;;‹$ŠTUUiyýV„¸±±AOw·6£}ŸÓðð0f³™––-:× á"Æ••ššš´ÕÍ{Œd2Éìì,õõõ8Íy:è„~ë:Âðð0ÍÍÍTUUim±²²Âüü<­­­Ú®s  ø˜ššBEí&È>pÃÒÒ¹|žÚº:M O¡g2úúûq¹\êìÔ–€ì#r¹Ã##øü~í­ €D"Á ÚèÑ}°O““+*(ñz5ûtÐ ýÖŽóh4JÏá,‹ötöÑû™™a}mŽövm×¹  3³³³äóyê´Ñ£{.ëµµ5¢Ñ(õõõZ)𠺪ª,,,0:6F{{;åÚ¾íýD:fllŒššmk‘ ¨3³³³·Ç¼j:³we™ÉÉIíZà“@說’H&éëëÃçóÑ®]•Úwù“J¥èèèÐnhÐð:³¸´D<‘ ¾¾^+ î1¶C!666hllÔ®tBW…ÁÒ™ ‡{z´»…ûl˜"‘ããã444PZZª Eƒ†?ƒ|>ÏôÔ•Á >ŸOÈóÃôÔF“‰*íZàÁ&ô[µÛ©éiºÒæò>eE§ÓÑÞÞ®eF4hx›µººÊÖÖ–ïâñ8‹KK464h}UУÑ(}}}+*´Å+Á0­¯¯3==Mss3‡CŠ Åb‘ÉÉI|>Z¯Ï^Û¨Ù¹9Š…‚v-ð z¡P ¯¯EQèééÑ:«ƒaÅåri»Î5hx@lmo³ººJCCƒVÏÝcÜÙxèt:5TBWU•É©)8|ø0^¯W{û,ÿùùy–——ioo×RY4<nu[;œNª««5'xmÔòò2ñxœºº:­x }{{›þ~êj먯¯×cŸ‘Íf¥¬¬L3L4< ¢Ñ(‹ Ô××kÍ»{ŒB¡Àôô4Ú~ùƒLè¹\Ž›½½èõzººº´¦’ÇàùNOOFéèèÐÒ†4<Earj ½^¯mÜlnn²¹µI}]v•ö ºªªŒ±¾¶ÆáÇq:šbì3âñ8###TWW4ùkÐðH¥RÌÍÎRWW§í9ØcÜj¦(cbbEQèèèв#4<` 25=M¡P ±±Q«çî1b±ËËËÔÕÕa45DBO§Óܼy«ÍÆ¡C‡42y Fik{{gˆLc#Gs¨4hxd³Yæ´në} :&''ÑétÚŒüƒJ芢022Âv(Äáž-eõ˜elllg×ys³ehÐð€ŽðÜÜÑXŒ¦¦&-Ù‡ÀoqqQ+mTB¿uý`llŒö¶6*µñ}Å(-¯,3?7G[[v»]Š €B¡ÀÌì,U••”””hÙç)ÉÐÐР‘ÐS©½½½x½^:::´‡ô˜ŒÒÈð‡:­CWƒ†‡ F6wƒhÑùÞ"ŸÏ3;;KMu5n·[ÈOÀžÞe™ÁA‰çÏŸ¿}wSUUMò?CÈ·¼ÞÍÍMΞ=‹ÙlÖ¨AÃÚ¯ééiüÀ]k;5ûõèí×-çikk‹žžž»?MÞ.Ï=#ô[ÓȦ&'éî¬ A(‹¬¯¯S,µ'ñ`0œéH§Ó QQQA0Ô¨AÃbccƒååeNŸ9s×¼†B¡ÀÊÊ …BAÒÃ1&£‘`0xý’e™é™ÊÊÊîºN«ª*Éd’ EÑdø=F*++oËsO]UUâñ8ýýý”WTÐÚÚzû­¯¯óÁ€^ j©ß‡"Ëè‘7ùËZÛ¨ª*SSS¤ÓiÎì%UU …Bär9M 4v»»Ý~W¤#Ë2333¸\®{Öv.--ñáG¡7›5öPöKÁ(мûî»÷ÜØÚÚbyi鶺|{ù2f»Mâ÷ä)) ñq»D±'„.Ë2ýýýär9žïêºkñJ>ŸG•$šÁêК³±Pˆé›}äóùúýH$ÂÈÈ ·WÓ&“I>ûì3"‰8:m!ކgÙTŠCí<÷ÜswÕÈ#‘ssstuwßS¦* è-fÚNžÀ¨•°‰H”ùÁ{"mEQ˜žžÆérÝ3ŠZUU ….¿–cGµþŸ;ŒÆ˜¾Ù{×Ï9¡ßŠ gff8q⥥¥÷> &#mòCAo4žç[×ÔZZ[o§d …©L†Ê–|U•šP5<Ó˜$—ËÝU§U…™™L&ÓÞ…ƒQ³am¿îƒx<Îüü<ííí?8#_”$ 
F#‚ÖT}‡<³÷üì‘z8f`pÚÚZššš4ê1accƒ™™ºººp»\÷>xƒƒ6…IÃ3 UUu:¾ßr•H$˜žž¦¡¡›MKóîõ3˜™™AEmQ×#À#uwòù<}}}‚@ww·6Tÿ1¡X,244„Åb¡¡¡AS ‚`æææPUU» ½Èd2ÌÎÎRUU¥ÍÇ8H„®ª*ããã,..ÒÓÝËåÒˆä1¤Å¥%VWW9tè¶ë\ƒ†‡$˜©©)*++qÝ'³¥áÑÚª……’©MMMšótP]UU677¤©©IK|Xk"yŒ¸µë¼¶¶–ŠŠ -:× á!°²²B8¢¥¥E#˜=Æ­›^¯÷öÐ1 ??«kMUUVVV¡¥¹™ÊÊJTU¥X,"Ë2ªª¢ª*f³Y{`û  är9Z[[5ƒ¤AÃCàÖ˜WÇC vRÂÅbEQ°X,š {„…B,..rôèQŒFãmÞ¸5AÔd2iòÞoBÏf³ôöõQ,Ðëõ “I§‰F£$’IŠ…N§“S§Náñx4iﱂLMMÑÒÒ‚×ë¥P( Ë2Š¢`45‚× áG°±±ÁÊÊ ÍÍÍÌ/,ÅØ…ˆE£x<N:¥ua?"Ü* êõz<‹‹‹„B!677‰F£ÔÔÔpìØ1Ífí'¡«ªJ,css“b¡ÀðÈ‹³É´3_6Äívãp8îó§áÑG###D¢2™ W¯^Ýqª $ŽcGR]]­ Jƒ†û36·¶G"Œ#Jv»‡ÝNss3>ŸïgïæVU™ÐÜkI#-íuè%U)0?8@Æè§¥%ˆ((… SC˜ƒMTú÷D©ªª’ /1µ¦¹³“^º¯mŽ®N°6ÒÒ^ƒî®ñ´*ùT ÉlEzLckÓé43³³lnmñùçŸS(°X­¸].Z[[)//ß32WU…äö:Ë K$’TQ·¬’ÊÚ ø s»PU…t*‹ÅjáA ªœc~lÉ]Ge¹›½H@üdBÒÒRÞzóM z=f³½^û ‚ðØÒ&ùLŠ‚ªÃjùnxŠªI%R˜íŽÇv˜÷Åb‘\>Édf}c«Å‚Ñd¢¶¤Çó@sß•ç]ȦÉå ¨‚ˆÞhÂdÔˆÔY1—!Éa´Ø1îo,”BšTVÀf3ÿ$eSU•l*h°þà{<ñܧI&2X6Dán¢È& ·b $Ó2v»…L2ŽÎlÇ ï6ªñ–¤‡•••¼ùË_b±X°X,X­Ö{lÙÏ„–ÆùâÊ&¥Uÿ5—‰||¯þðÏDlTÔü5N³Djkޝþøoûú÷%˜l2F4œ$_PîKè¨2+cWøfÒE]sº;Îb1áúÇ_8r†ºªR‡jJ’D}]Á`€ßËåÂn·c4Eqïì…ª’X™àãß~DVïÄí²’OÅèÿö2g.ðÂsßs~îÿ*[¥·o™£¯¿ŒÃô`4ªä“ |ýƦ_,w³ßR÷sÌ­áËèdøüVåR.¼q³NU%º8Á'Ÿ\¡û¿¦©ÂõU‰-ÏΙ¨®«à ùƒÓ§NQÚŸ¦½£îÏ+Ê}È1²0EBpPYøYò–sq®üî__‡cç/ÐÙ^¹ûz*…TŒùé‚-¬ô_¤oNÏ¿z «éáUCÉ„øæƒ÷±Ô<Ç©ã ?ë3+ù4s£S”6µâ°”™û*‰µ1>þ¸Ÿ£Þ¢¦Ây›”lŒ¾¯.BI•–¾írîåÌ߸³ö8‡ÚËÙšCv”ãÖG¹|±êÃgiªõòØý=AÀëñàÛílß }_e â磬¬Çð;MÄ×–Hç%äô&á4Žr¡ÅyÒ‚“²²Rr±-æ&§‰%²˜œ¥Ôµ4ã°ê1˜,X¬*zI$Ûbrdœd¼?¹D”’ª&PUäbžùá~b¡F—ŸÆ¶6‡npãòU‚гõ9\RšÙ‰âÉ,«“ªæJ=6öò‘˜L&Ž?¾ïAŸªÊ¬ÍM³yç?ü† ÇŒZÌ2qõ2QA!_TH³01ÎÆFAgÄ_Û@U¥Äús«yŒJ’¬l 6{“›ü~ºÂPˆ2;1E$–Æ`õPÛÒŒÛaFÎÆ™#Ëbw˜Éd {¸ ö©å–Žl’MwuTä[++äeP‘ 9R‰4Š ‹ ³iÇ#WU…l2A._DôXív„b‚‘Kß²’÷àôyp[õ¤ ŠEÉ`Âj³|/bÙO{$<Öúžª*,ö~Ãg]¡¬ó8çÏÕc ÇLÿU¾ùð#LÎßÐTåE)dI&Ó¨ˆ-6LFÝŽ¼™L2A¾ #ê Xí6Ôl„Áo¾&j¬ÂYâÆaI'’eÉ`Æj3ßGÞ*Å\†T*ƒŠˆÉjÃhÐYšcrboÏ9*+}·•ª*¬N rå‹QN:œäSa67M;ÙTQoÂnÿ®JÎgI¥Ò(ªˆÙnÇ “¾GF*±­5”Ò,©x¹¨`²90u»ç-™H¡ªÂç ä|ŽTòÖ9´b6-LpåÓ¯è48im®F‡L*ùçÎ›Šœß‘1¢«Ý†$‰ªJ.“"“É(aµ;Ðë”B–xJÁb„t:‹¨3bsXU•É$ä 2’΀ÍaCvÒ†«äóYâ‘"ˆzl;¢ÑAmK3‚£Œüâ8ëk!D‹—º–flR ñ z¿ø±ª‡SÏwQ×ÚLù®Cp§¾ÝùÝTE&LP¸ã\ìeD¿§Ä"€ÍWß ËóËj,a}qSu%©V—Ö©÷U²º°ˆµ¬—çËß¿G¨`ÂWbg~tˆé¹ ÞxûE¢+ã\þzƒªÆ ¦.¾Çèb†Êš2¶†é\æ•¿ýï°¢°5;̬_@Ró,\ºD(õ6ÕºÙbQQ 
I®}ôó)3•enÖç§XÙHðÒkÏc7í-5<–!2‚ˆÅjĔ™ÆÐ^‡Ãa§ùô9DŠ ýé\X¥¼®5¹ÈPï §ßþ5ÆõÞÿ×aÚºZ(¯k¤PHSQ@ÉÅøæýß±’ñû=DÇG™œ^æ_gþÛ¸Ò¿FeC ±­Eæf7éjÓýѧäck\úè–·’¨ª€Þìæè…×h:Yêÿ†ËׯÉ‹‚„¿å0-‘‘›lÈ>|5>,éu¦¦WeQg îØ9ŽnxèHô)H¢d£ŒÜ¸Ž¹º›_ ›aGYË*Ë ŒÏáqšÉF–øæ£OYgPU£½”¯¾JµßÊ쵋\ Ë¢DyÇ ê]Y†û{IYÔ]H‘Efç7‘eIo¤éô+ƒÔT’³|ýÉE¶£YTÌ®2Ž=œí±æfÈz—Y]ÝÂå" ’‹l03t“‘á9,Õ•xÅ ÙÐ&ß~ø>áM ª‰ão¾KGƒLx‰KŸ^d#œBQÀæ«æÌ«/Sê0ÞEꪪ°8tÌüeÂá8Ö²F^yû5¬òß~ø +Û;„®·x8vá5*y¾ýèÖ¶(ªŠÁæ¡çôI"#} BY%v‡‰ÄÄU†'VwΛÞ@ý±—8ÖSwÁ©dÃË|ýѧ¬n%@Òã«?Ä çO’]å«Ï¾%‘-‚ â ¶rîÕˆO^âÓ‹cx}>ò™™ŒLÓé íªdæÚÜ蛤 «ˆ’žÚž3œ<ѾëØ$¸ù§òq²y•ªÃÏqæh5#×.#” 4ì–›‹É-®\¦¤Í„O]`¨cDÁnL°0±À!ƒŸ¶z'³7¿åfßä®m¤þ莮gîêEn ÌR”eQGðÐiΜî@ÿDꘀÎ즪:@ÿü<©d=+ ›”7>;žaj~‰t‹ƒ•å‡O_cz!Âé_ÿ Aë£ßòÑgƒlœ9ŠP•LhéÉš_ý;ž?\ÅêÀ׌,²›|ÂR û•_PfSùêŸþ7—Ã>W‡Û5BcW%f¸¾½…µæyºÎtc’òDâ2fýÓY.(k=ÊÉí8ƒ7/2uås,./Á†fÚÆ£K0Ò;Féá·¸p¾ 5½Áïÿ÷ÿÈÔø,­.0Øt_xƒ:¿ia‰éÍ8]mäúŸ\£û­¿¡³ÞKx¶—Þ¿ÎÜt-3#Sø»ßàÂË]dÖ¦XÝÓï¨{š‰fsj„‹¿WØåR¡bÙ"(E¦®~ÍðbžWÞyŸµÀ•~ËÕ¯®xë7o {:yãüQ’k3,l±úª©¨©Æjj¥¥ÊÃÅúב×9ÝUÅúÌ(Ñb–BQ¹«^õŒð9¹x”ÍÍ4ÕZ°¾«éLNš»º@)ÐûþLnH¼úίqII¾þÃï¸v©’ómŒÞèEWyŠWŸï ²8ÁJ´€£¬ŠŠšj”ÀêýV>ùrœŠ~Å‘fËäóвŠA·ó^J!ËÐWYˆ™yõWob•Ã\üÝïépº½“ª›“´œ>ESCÙ. 
]~*ê.é8râ(éÉmém¼‡9yÎÌ•ßÿg†ú&hª²3üåçÌGŒœó5Ìù-.¾÷!½7Êyå¥n¤;”j!‰b2ÑùÒ9tÑÞÿÝ—ÌÍF¿ô5#ËE^}÷]¼¦—ßÿ-×¾¾ŠØddbzƒÓ¿þkªœ “Ãc1l¬£¼z“î'(Ñe¸Ü;ŒûØ/8Õdmz”X1C¡¨ íž7U.0úí—Ì„Œ\øÕkˆ±9®]›ae¹)ÇVщÃíäÖ§øä½/˜nk¥´bqqºçèi-côË÷¸t‰ºª $“YjºNÑÔXÁÂõ/¹rù2õmX€B:Š©¤œ3§_ecø¾üú+jªÞ!ÞDpÀzGÆb{[A ¬¹Žòê*JŸ¤¥ÉÊÀ7×)È ÉY.yÿáó9TÍòÀ×|ûÅ”¸ ŒÞèÅP÷<¯Ÿn%´0ÁZ"G.'£7?¡fKÔQ^_ÇÕ±AVçØHÈ®®Æ‹2ðÅ,ëKBi‰ÖªÉù6WWøü=Æ$¹!_È“Îå0ìÊ6ŸÍ)x}.QĬÀå6ÝŽF­ŽRÜ6#¢TÀb7"‡swV08ß¾?ÈúF5²,ËæÀÓÔÈ HNQA’Ÿ9WE‘ñÆË8øÊë”ú­`šä VÕ`ü¢@!W(îQ-O>£í ô/k`‰Ô5óÂ[¯cß Š[¹ÍôÔÛ€A¾ #[Ôm†—]Ë£‹VÚ_ý ®Þ^FG¹öî]”@ ¯¾yvÛ/;œzëç öõ1>òáÛkðÊOaû¾ÝÐPl66‰¥Ù94³eKc7MƒÔ⨅‚ôˆÞB‘Þš¾†!;9òÚOñ÷ö2:4Æå¾.l±f^}íè6~ÕãÜÏ~Á@o#} ܼF¼å4/¾|ËVô´i4‹Eپ˲ŒVÈ£éÏ…")Žm·‰(IŨVS§PМÏf€DÓA\%ÕHOÜNQÂb±n¡BÑGlèä zqþ[ËªŠ¦åq&yågvï?`ôN·¯hìñ5j¼;c–!μõsúú˜}ÀÐíÂõÇxùÕ“ØqûˆÖ :’ºsˆ;<ô\Š›ï¿ÇhÆEûÁlúc=÷‹Ê­¢¤"o)F‚$ƒ¡“š¦ÿãPJšÙ·¯šŒœax:Í£ H*ªªlý­ æŸDãÝ‚EÓ4 ÓÄÔòä2:¨œ9A,^B°ògz{årÏM‰V^}ëù£Š¿›EõD) Û¸ÙÓMÙÑ7pYd%BÄip»¯—Êãoá¶©ˆ‰RÜRó“s„›ÊX{ÈlJ¥ÝÜÆfsyð9F{ú‰Û«½{ŸÅdæ ¬"–ççYqä¹£gù>*Jâ”W&™£°UÜAÀ,áøJ¾#™{\ë]娋gHD}h™5&°xƒx#%~&îÞa&æ@_bfµ@M[)йöU ¹uW GãìóãÓ”økHÍ 1>¯ÑÒ^O(ìf²¯™É‘^æ–Öñí ô¯x[‰*þ°é¹Y29 ‡¢±0;ÕÂ&ä™™˜'\ˆºö£Ì=ìä÷¿¹ÎÂR ë¹t’¹ù jŽœ£ùh‡×ÞçBç’gaSß7j¢¸‚T7Vq½ç:÷«"ì«-A æïpá£.êN¿€/äetf–l¡+çqxKPÍ43“K”´§áÐq&ï]æßÞ¾ÇÒj  iÙe–s4ž|‰ÖYzÎÿ‘ŽÞ¬ŸnÇâ´l b+þ€‹ùéYr…Tc“Å…œ¾¬Šøy×&LCC7Œ¢¼z† .+6‘xË9öòY,’ÁÂä¢+Âóèo‚dÁö17;C&§a— ,ÎÎcóÑ×ÈèNŽþèu´ô þùŸî&qÔƒ€Ž¦ÒIæ6©;úÍÇò<¸ü.—n=`íÌAl^ûöã yžš&[hAÊ.s»³_"ÌÜÌ"‰3/°ÿ`sýäµ<º¦ƒÙô K«ë„Ý «‹s`sC:ÅÒ†É+gÏRR¹;ÓK:_,R„ zn¥…%Œr›«óäE+»…¥/Öý(è†)n›…Ý^^›Š–#´Ô„Èo&™[X'à€Ù±eJÛNÒpø8w.ñÎ{÷YNÃýËí‚&Ê6Êëky0•§¢² ED¥u• ,˜TÕ”#‰Þ²}œ<³ÌÝÞ[¼×Ó¨Z)o;†Ý&³©:‰D}XšÀïUð¢ˆb÷ î”ᶸøq`q)¯ˆ0Ðu·ã4«@ÿµOèÕM$E¡ñäI¢ÁïfQ¶Pô$©ôEî]xŸ[š† Ê8ýQÎýø4‘PëË?äÚÅ.þþ7¢LiÛ Ú[«X}8A8\Œ— ÄÊ Xû¸y¹“3¯œãä §é¾ÝËûƒÝˆ²…’ƃ¸AöŸ=ËÚÇ×øè_'ñ<ÄêëqÚd¾®@÷ï¬@„gç3 ¢ˆ «Ôìogøë|üûßãPtæ§7h:v »TàÞ½›L]é$ò‘MÎ㊔àñúØôz¸Û}‹k®©É1ô.~µ…9B¥å8¬–ï¥É]UšOžcqé߸úû¦7À"A*™ÂSÚHu]Yù“uñÑï²X„, ËMgZ±˜9†o_g±£‹PÀÍæÊ,Þx n¯×IWo'Ò+ãcܾåÇë²°:»B´¼»ºÃ¾¢ÅAÃvf/öðáïS(ú& ›*­'š±Ê…âº?ëÆdó@zžkŸ^%,eÛc&RQÉâdßÁ\¸x÷³ŒUȳ´’¡ùì úá„ÇGEDÉBmûFß»ÎG¿ÿv©ÀüL†¦§1“3t\è¦'Å.é,fD ì6 a“ÎO/ ·Uñ°ã:Ý·ºñ¹m$çç —Uâ°ª»LýVj÷·1üþ þí×IT}ƒTÞÆ™†"ñ 
#·¯òñB?h,c÷îà«1Èo¤è¿zén˜"Q’²²(§I÷‡ï0îVÑ7×pŠ›ôßíÅ^-b:c=Ýd¦ûHNá©ØG<âeD‹A@ØŠb·è.HV¼+7¯à2j1·ö¦§¬Žúš>î~üoÌöøI¯.b¸Ê8uª‰¡[×Xîì"èw³±<ƒ¯¤—Ãþ¾W$jŽ¿B¤ù4v·§Ô)(4œyÄÏÖg’…ºã? ¬õ0ù¼Ž¨¨[Ñÿm8ÊKu\N•œ?Æ¡—ʉFƒäšÃîvRÛøñ ›"!M'_¥ZSq8TÎýòW¤³¬Njs5õÇ6Ñ4QV°;ß©Oîw«7ÎÉׯÍtѪ$ŠXlv,j1ãÆWÖÀ+¿('“É!H26‡Yq6ŸÂW.» xÊšxëÿ(E3E\Bð$ñ}ûÉå5Di‡Ž‘Ú¼¯#—×Pì-ƒ 8¾&Kÿ÷ÿ÷ß$IWWW%R^†j±|m §ZlKJ ½Û‘Ð’¬âðúI”—ˆ–H„‘°8½Ô5E´¼ ùKt4D«Íö˜à%«ÍúX*¢ (ª«Í†Å¢n]P„âûVÓÈ3rû:]7î°4?Ç@ßÔP´`·Úv¹ŸŠ·SuKhIŠŠÕfC–ŠŠ—j±nÿÆ×™z›ËdY™ža_C6Ûó‡=ê²²¾N¸4ñ•à ¢„j-ÎÙjµ"ËÒ.¼’¬`±Ù¶ ÝÛ®%‹u‡ž‚ qX-Û¹ôò®µwÜþ\‘%ÕŠ,5){ùl–¥É)šöíÛ¦§`š¦ùM ô‘‘>üô<-§Oáø’Béû É¥%ú¯wðú«?&‘H|)+++üî ¤±è^9Øï-˜¦ÁT÷;üö“E~õþŠ Çò=¤ÉÀí;xe…_xY~>ƒåƒ¸ÔÑAÛ™SXìö?ç(¤×b5•F²¹)­©+*=ßÂKvje•Á7ùéO~‚Ï÷|ždÃ0èììdhfšæãǶ-?{ë«Iú¯]ç?ýé6=å=²ìÁ|?A¶y)-÷šZýe‚  8¼Ôì?´G‹=Øè{°ß_Y ­?Ê* l6ËA¾“åéIR“xyÖ¯!Ç4MtÝ@–¥?Ëü–¦Ç‘\1œâ&³Ë:‰²ðgÆ|™±ê¹Mf'§1,>‰à¶)ÝÔóLOLá‹•}©”f±¤$ý»-+â·‹M´BMÓù&Z¡À7ëxøüÅÕµ†a~K ­ ýIô2 M3þ"Çþé’ŠÝný’‡ˆ‰¡k躱µÆ:Z¡È·_ ÿ>¿·X_’ÙIMöóoÿí¹võ.ËÉÌWÏ÷¦Irüw®w“)|ÓûÛÄȬrýý?Ðu{„…‰Aôö³‘Ö>‡ܾÚE¦ ?÷¹5Ôñ1¿ýõÿ¢ÿþ™]9þùµ)Îÿîw /þ;éj’^œäöÅk¤ÒùïÎ Ý4M–†îpõÊmœ‰fŽ>€ókË75IÍNa8ƒØe.}rƒÊƒg©*ûs7Š0Ñ3«t~z {éAZ›K¿eæP“äd·f9ôƒs=Ÿ% LÖç¦ÐìA¼N™‘ÛW˜Hy8qj?åËëF!ËÜôá²’ç.±k:‹“¸KJÙœ¾Ç; ùÁYëwáÔ&5;‰á ãqZ¿QÞ5 iz¯_!m©äÐ2î_ú€þ±uÚ÷“]Çmý“ù×4 –''°GX…,w¯^¦àªãàÁª§òý¿mJîæÊ +膀+%ò¢o,19Ÿ§¢*Ž$B69Çìª@Y©™ñIœ/ë+Kä ™HY)…µVV7°yCÄbÌgâ}’†–gyvšäZQµŠÇqÈw‘8XSj:©Å9—’Hg1pÒ"ƒ^`en†•ä‚l!/Áã´‚¡±27Ãjr$•@¬—’§ïÆ úFÓØ£aêªJÉ&w7¸5Þ¯ñÓrtüñRJÑpÚeL£ÀêÜ +«ÆÇ¥êôݼÁ½¶h„úÚò]%„M ™ 榊i¥V—hImešþ{Ab¥%OX8L²™ ùLŠÉÁ5²%Êðzì`ê¤çY\ZÅDÆã÷90H-α´´†„7ÃïR¼ÝEWÇ(‚?HsK=¤W™›]$¯Ø="±0Ês†Å{º–aøv3Iƒ3'J±)›2` #¢È"˜ÆvU$Q’‘$qëÖ¥#Ëbñ;AD–% ]Ã0LDYFz"BϦèþèc¤êô×Y¾Ÿ@Ãq´ByÇ´bšÛ¸Qz¦YÄ444]@‹&A·žŠïoÏx4¶bD¥‰¡ë[7I–E0SC÷ñ9ö}³ô× h¦„hjÈÈŠ†®k˜æÎø ›Ë<¼?Dëé3[sôLqMDQÄÈ­Óýñ'å9v¢‰P,NÁéF254]A–„]7f}+Rx‡NϤ³i²4ÒËÕOû9ýË·ˆøÅÆ&Zqm§í.íwi‚«oDÓkoâÜXäáýöŸ>I!ŸêÓ4Ð5Ó4·ùë©[¦aèˆ2’,bê:º®ƒ°E§m³œŽ¦oñ¤š.ù´P@VÔ­Æ$&ZACV”í"4E>$y;Êv÷Øñ—žMqëÃQêŽrôHãc›~7ß"ˆH²„¸E/ ÓØš§,o§–=9ñQ&¦aìŒK–d+‘D 9Hve†{·±4œ$^VFƦ£|»pj˜ç#ˆÅšeÛóDØÚ£KÎÒñû1Mµ1¢¥%lħæ/›ÎlÕxlï "’Rl†c&š®Y¬l&IÒ×…b’š|ÀoŒ&Ù‘LAàÐ˯ã^ëæƒ++üõÿþ×ø]“÷®ði¯ÄOßlåÓßýÍUGÌk2?1Ц(/ ‘N­0¿˜çô/þš¸¸Ào„&9m 
ï4ÕF·÷†©çé»ô]wF°ºÜè™MpÅ9}æ«‹‹¬­&I..³±‘ǦÊÛÍp¦{¯sáÓ›’Š™ÏáH4óÊ瘹õèµî IDAT ýXœnŒ\Ýä…7_£0ÒÁ…ë°;ù4†=Æñã,ÌŒ2¿ 0?»ˆWÜàꟗìÈ‚N&/pðå7h®‹|½åaM“¹[\êL+‹’zxO¯öau80ót[˜ã'ZYœe~Ac~vòòRkQ@6¸ðû?2±œÇa³^Ki>Fk¹…•ÅeR$—WÈk±Ç ‡™…Mî^»Æ¤]cmq)RÏ¿|ÌXŸ~ÜV7’Q «){í-‚…1>øð*’Õ‰¨çÉ v:ÎòÜ8³óóÌÏÎQ¶Ñýþ;¬äU,2dÒ9jNüc‡jžK©•¿®Îìƒznõ² †X˜š@LÞgh4‰BG¼‘Cí¥Ü¿~…±ÉtCÂ)åàÙS¨#\¹v«ÅNnc…´&S]_Ãúâ ËK+8ã œ~áÖGÅô½ÝÜììÀºEÕ«ÐLƒñ»LßJ’ÚÔ(k9ÂÑã-È‚ÁLÿmîÞ}ÀÆf‹+@ó‰3T%ü;ÊЙì¹Êíþ%œ6™Íµ$šä õì‹T—x˜¸Ã­®^Ö7ó¨6UíGhiª °6ËÍKW™_ÃBåu9s„?ǽÑ4 ŒÜ¾@ïÐ:*9Üå­hÓýã“  ‘@Y-GNyJ »íCÓä g0ÎÁS'ÉwÓÕÑ2›ÆéQ°mŽ1²hec2E’g~pU„…‡]tõ-rðì´¹~îÝ`#ÃêÒrò qß¶@É,ÏÐÛq…(ñ(ÇÏœDX¤ëÆ=R9«ƒÊ¶#´µì4,Ñ6×xØu®›]lzTG Z†žK³±²Hͧ_¤©6Š©exÐy•Áái²O¤ŒCgOpí4_1 iî]=ÏÄ\’Æ£4”[è¾|ÙùULQ%^ס£-H…Ý>e|z«ÇE) +qŽìsãÊ=N¼DEÂÍúL?W® ÒvæEBŽ,·/_ejv…Hu#GO´c‘ ÆïvÐÓ;L:[ÀâôQè(êê7;:±/çqºí4ï«@‹JäÊX7;ï²¶žARí”·¢­!L÷Åó,­ËX•kÉŽPÇ_¯cè–Ƈɩ!~ü³×pŠYÆF¦ñGTÔU3¯ :º§Eܦáü@Ç’¼ð7ÿ‰º2/£7?à½ó7hjû[jj+Q”Jiz*Á4ò„+kxåå£l wð/ÿÚÁÜô“×o`„šxë­ Vy÷¿ÿ?ôÜé¥ÑµÀjÖÂO~ùsB“©‘1ܑֆ:"Sv?‚²ôÉ©eüì?ÓZ`ej”´ì~nEô[áC‘`YñŠrÕµÔ6Ö"åæé¾Ù‹âKP][Êx×:nOPÖz”ƒ‡›HŽtsõÒ-ò¹uúnÞ$#Úi9rcy€O/táŒ×ÑPg¸ë*ãs;eûI&ˆÅ)¯o¢¢"™Ya=§QÑv˜Ê¨…î‹—™]J³>=È….'Á‘3Çqjó\xçcVÓ…Ý®Hr©º;ï‚+ÆSǰ䦹~þ*«óã\|÷Crjˆ#çÎu¸úÎûŒO/r÷“÷¸?ºAñӴ6•1Òu®î?“W ½ÌôBšêC'hk«fæöez† Å#„KT74Qõ³<5ÄüâKƒ·¹ðÉ \‰Ž=†%=Yä´FnmšîÎ{àŠÒ~â(br˜›×{XãÁúC=yEß`zb–¯Þ%/à+«"î¹ô›æÃwΓ*X øŸë'Áh E‘±»¼xœ.‚A²jé d³¾Ò*â"ÞÞÞÂxoz}dJªJQg8ߥ°”Ln¥x ˆÒã–«B6ÍòÒáŠRì…`e /þø¼V“ÕTxM9UÁˆòÚXMm®j@Mñίÿ'W.ßFqp9,H¢X´ŠH2ÞÒ*J—ó?ùàíó$óü~ßdµwQR‰V7`MOñî¯Íå‹·~ÜNuKÑß±žnÙkY™_Ât†ˆÆȲB¸´)—"™Î#nI’¤§ S‰ª‡xY%v‹‚ÓïCu²›)–×UVà°©¨.?±X€äòîD-!KšþåŸ8ÿáUr¢ ŸÇV´nY¾œÑª*ÃÜyï_yç·ï0µT ò=Q‹ÿ[.ÐA@uzð8mXÝ~BA¢(â„h9v˜h@eòá žªföh¦ºµúÚ8³##¤óV»›Ê}M$ªª)‰EqxÔ5×QQ[‡ÝÜ`m3ÏîcÙêñb³(x‚ÜvYuRÞ¸ŸÆ–Fš4"ê¤7Ó,Œ0»œÅã÷¢ià úY›aiuý …DÀãÒØÞB¢²–ÆÆZV&Fa6 Í'OSYWKÛ‰£X²‹LŽ224MIÛaš›ëh8r”ʨ‰áqr¦ Añ†#´ž8BÈÃ}1l.ª€dwáu&GÉî ns…â4´¶`5VVÖ1 —í¬O0²SüD(©oÀ–_`rj™lj‰ñ©*jYŸb~5×ç)Ò9àcur˜•äÆcŠ˜ÇïAVí„"!6ç§™^Öi>ušªºÚNÃVXfv~i»õ„ ˆ¸~ÙB0Å*‹8}Aö:Bmcu 6×’ä³›Œßï'ƒ·CÅ”¬¸*SƒClæžhJ!Jå´ÞÝ\g  ÕÀ"hX½>”Â:ãC#L bVÑz …úöÃT–ƶj¦?{cæ7Wº?Œäòa“ T·«™a|pŒ"’`²±²ÈzN áèiÁbUU<¡n»å±ý$ɹõ$+É4ñÆCœ8{ ¿Ç¢J¸¬Š}Í5T¶ 4`azt˜á¾~Ö5 Ó†!Zp»lÌ 
²¶žbb`OM3mZÙö‡¼üÚËÄÃîíºü®`‹"ãÄp9”-S{Ù¡AL_‚ö£‡Øwô,?üékTWD‰”W³¯©2k¬¥2h™ Ë)l^?ЬàF±[݆4æGÉÚ"´?By]#­‡Zؘgu#‚ˆ'¢q ñŠ*ÊÊ}¬,¬¢ ‚Y ¹0¦x8ôƒiÞWþÕûâG¤ŠWþï8÷Â1\R†koÿŽŽý[7ÀGü¨å Ûõð„¢0,2¢Pt‰<ªÿIï_ñºå-¼ýè»´~AŠî…GŸ™&¦ið¹µEQ0ô­rǘr9 “-·Ï.Ã40ˆ·çÍ¿ûØGaeŒ÷ÿù7 Œ-m¿àˆVòò_ÿŠs/žÀ­d¹þÎïé¸qó•%Ѧ#üä?þ ‡5¡¯Mòþ¿ü†þÑÏ.L,n¹¾­”iÅþ_|h>¾§·º°‰»i¸å–4MðT4òW¿ú§ND-¬páw¿ávÏXñw¶ÖOq†8ýÖà‡¯½@Ô§Ð{é=>ýø:9ýù‚¿ÕikŠÕ]ì(edIoæ±T:‘„âAms8(äÈi‚bÅbQ¶´Q›ÕŠ$niV‚‰ñBR-8ŽâfÚòÝš†A&e#µÊdß]æeÀ0ˆW•¡ÈÂSL$[œÛݯT›Cϰ±™ÅdìvÉbG ÖÓ›dsw±l¨ ÈØlV²›iþ|ÁàªÕE0òÉkò<÷»n#‹—X‰ÓÌÜáòÇxUDCnAÀøœ Øü¥”Æ\Œ ÊæY×]¯Ž³z³›Ô*ã}w˜‘Šæ³’ʲm_û3`>‡!ÊØíj‘¶ªE‚tîó#E%Õµ]2UVe0 Sg#µÉúZš‡Ý]º•DEäi‹‰ a³»‘E¼V`3¹†1?AÏÍ0Áâ ØH/äQ|öâ­@”°;U„”ðL“m1°©ÀF2ź4MïÍdq¬î¡¨ÅãÔk¯q÷Ö]n}ü.º RuàGÛ‚Ï> ,N¼òc„Ž.v^àîe“puç~ТŒÍáB„be;U!—ϰ¾¹ÉúZ†Á;]ˆšL¢2Š©kd6sXvDгbå˜ù/²’ÙÌ¡ØíHR±’V0Q¦Æƒ —¹|kœDM >g±™ÌgòiÞÌ!Û]¨JQZìNв¤ó:@V]¨[.5Y•0uoy#g”á~Ï.ÿ[’ÍKÛ¹Whi(yîÛÎóšzç‡û™\Ò¨oÙG¼¢ŒÍ¹ÿÎÒÜuDm”Ôú&Agfj‘Bá9˜E¼ÓË»ðÎÿ¿,Í-R0@’ŠÂÃîöpJL<¦&bgsj˜åM–P‰ÍÏ8W„Bnf¬’y@g÷8-Gšù¬L=¢±ÔC~aŒÅµe­nÆîv£9¢Ô´¤$ágöÃZr « R(dH§Ó¤ǘ\Шom ^QFz~Š¥Ù¥ñ~ÁšZ–á;}d-!jZPR`îþk+IÜ¢ŒVØ$ÎâP¹^$üñr× ¦Æçp”û˜@t ºìŸAÁϨáh€É¡!’-e(Ù¦fVU5°4ØK2k¥¾©…’ò8«ÓÿÄÊâ Á „aäØÜÜDL­20¸@M[3J+ÒËÜŸ]$§Ûnã¿Xþ¨.¶ )¸½vÆ—–Ñ QÐX[]Eµ»·¢¦Ÿhø<ûÕ¤¨¶õÂîÒ´/§ÛA –àè_½EiÐŽžÛ$™* {ŸB–Ϭ±‘ÍãsH¬'W-nü^'²–cm-C‰G%¿±JNñºÜd*©¥U ³A˲¶¶ŽÝSÅŸ³« ƒ†DIÅöc[yõõS("l$—Au“Ÿéxä@gvh€´%›o¼Ž“5Rƒ],íÒ$õ'¤¡¤:¨®¯äãŽûÜY1p•Õñ;ÐÜ.‚±RŽ¿ö3â~+Zvƒµ `Èý¤Îf¿ÍnC6ò¬%Ó˜> ùÍU²8mÖg.¶±ëfôô¼¼Q{”—ùvU$›Z!kØðÙåÏäKEµâ ñì?Á+§ö!˜:kË+¨N÷gî2¹¶FA7+KÅÀJIDÄ  oþéµ…¬†¤Xñ†ø÷æ¯^lGÄ µ²Œìô£šY kÓoþ=½FÏå÷éî¸ImÕ [s3ŸŠ‰ÈTÚ_zÃF–ñž>úð&S % X_[¡`˜(Z–Í öˆ¿ÍC„^øÙ[¸m¹õUÒšŠß#ãòØ™ÙÚ{Ò ö<Ä“ˆ3ɸ¼rÓ+äò:VÁ`¬¯\Æú`/?ÂK¯ŸEON0zóÒ®4·'æ#H¸¼´ñ5ÒY ‡"³‘\ÆT8T í™kj’ÏlâŒ×ó£æCl®Ìqõ¿ãÞ{ÔÖİ•›L‘пs•»7“‚७¹†HH§Ôßǵwÿ€Çã&5pº­˜€ÍáÜŽõD›sÇ7®Ø˜Š„$¿»ƒWÃCkS q笲øJ8xê(7owñlj»˜šF´ùõ%lNcw<]5N´¸i>u’¥¯ò‡ÿ6„`šø*Ûˆ–× Ÿ>IGç]Þþ§~L]ÃSÓFkK ‹w/sçF7÷®¨¦†½´ž²Šº8‹vå—ÎwÒXábü^÷¶è`zhk®Áò5ÚU›E$‡CGeÌ\Š{·è¹¦"˜:–D-å• „¹%̵¹øñUνt† ×Š „k[imœæÞù·¹¯Hh–Óg‰ìL[¬XŸYÞWÀîpl­"ˆØd‹‹–Ó§H~r÷ÿùŸLÑ[É‘#ûç{¸q·Ôb‡BgŒ¶ú |æ,6ý—>¼Èáö*†î0xÿ.²$ k: í 8ž³†À_DaA¶RÕÜÌÐÅ\½`Ã%f_¥fÿAœjîK8Z6‰±¾;DìŸa U5é¤ûÓOX­ˆ²46HÖ–àÌ˧ñ:_äõåî]¾ÂbÐÂ@÷±ê#”ÕÖQQr‡ž 
‘«K°8Ô‡©¢º®×Ú7ûnÒaÍ!nÌ3³.Ñz¼õ[§&YÔ4íãbg×.Šx”£#3T:CB1·Øá÷Âà8}7oÀæ2K9Ò¹I¦öa·JŒß¿Ëh‰o—0‰Ô4`ùôý³"?øÕ_a‘$ÂÕ „º‡é:ÿ •åaÆ(8*8ýÒ <ö:+Šms‘{ݽìoLPUê¡÷âGhóå,ÜG VPQúxD­(ÛIs¿»›*ÿ³[K ’JES3ývsíü%Â^…‰ÁaüuG8v´U~öš¨îõMÕܹsNR(¹ƦR´ž{‘hM ÝtsõãOq‰iƦfѵ(v/!¯ÂÐ훨›a¦úGYÏ(uMµtövq]Ía7RŒŒ-²ïÔK”{²ÜüðJ¸ŒXÈE2•Åéõ¢Z¬8l£}wH”©Lø-“¢ïêy’¦‹òŠë +Xn, : cCܸp9»ÈdÒäÀÉZ*T'ß¹ÎõO/ؘÂ]y€ãÇ›¨h¬gàâ=.|``É/12¶Æ‰7~üùV]Q&^߀üà2—Þû€€-ÏÀýI_ú!΀Ÿñ…qz:;ÙXšc=o¢²ÔІE*ððN7[Û–T"VÛˆ·ç#:?ù„DÄÁè½~¢µ„¼f?C[_äVg‘Úz¼Vƒ´&àŠ¸¾r“» „jÛx5XB2¹ˆÃëÇçs!?üÛ¿ey%…d±ãq#¹nâ{øÁoá F‘DÓ!~iÄk“qràÕ71\Q|N…Wƒ ’ÉõÇñ Ó¹æè9Â5ͬ¯§-vü¡ EÆY¿Ÿù* 9Õ§Æ­?Äëá VW×U;h‹"áh?I ¢žµÔ&¢bà aUe'_$ZßÊÆfQRñ„B8LÏ ~î¯@“lDc!J+ªž1Þ¯ãLm~N¼ú¢#ŒCŠòøŽ×mÃ{üµͬod%w0„ËaÅôáçÿ%AA´âsíôAm^Ž¿þõ Kdr6—À‹$@ó¹Q%Ú±Æ_•T³–Jƒ áô𸦟7ÿ‹—LA$\§¼¼Œ•å$ºV—Ð÷Üiºß¢æ,&…\G0B,ÄÔòÈv?¥å1TYÆMàuH¬.,°™3)k>ÄÃM(hè‚•’Šr6™B6Í"^A0u4C TV‰ÏeÝid!©XU‰Íõ §ŸßO¼¼˺•¾¥PRQ?& M­²¼¸Œì±ïÀ"~Ç.&5Yìg*i£ºÜOrqG¬†#gŽã÷ùˆ'"6“,/'±øâ:s†’°‡`¢ «T`e~‘œ©Rø-M•ˆè &x9Á'`¾¶æ,¦‰VÈ¡:C”–G‘%_¼·M`uaÔFžHU-µ(h˜’ƒÒÊr"ñ0b!Cre ›?FÛávd=‡Å%ó“ÙØ@qùñ{íØ<¥!T«É,`‹UÐÚÞŒÍ"£:}Äc2kË,/® º#4l'ìÛ‰H°ÚíPÈÉC¸¢–ºÚR´ôËË«(ž(Ïœ¡4âyŒfŠÅ†ŒÆúF§?ˆ×¤¬² ‹*¢òˆ¥ ü±•ÔÒ«É ¼%Õ4·7ã´)OðhG „XÜ$É„ËʱŠ–æçIçÊöµP__Ž/Å©š¬.-aª.\Nµœ—ƒÇöòÙI­®Îø¢%Äqå”ÕVãPt–ççÙÈè”Ô5³¯© ‡ÓM…äÒ+ËkX|1œ:I,ìǪˆl¦6°¸ƒDBÅFD¢bÅíTÙH.±¼°Œ©¸i:~’ÊC=wQ}ÕøíÉTŽÊÖ#´í¯ÃŠòÛY_^deew¬‚¦öÜvo¬—Õde~MvÒtì$ua´‚†+” r¢"አ¼v…|ÁÀ+§¢¶†€×ÂÚâé‚HuûZ[ë …}hIR뼉jÚö×bärxâåD¼66628aœv žH‚òºba+Ë$×ÒøÊ8zú‡‚VÈmŸŠ$PÈçPê[êQɱ<>/vÛNÃÅjÇãóár;Q-vÜn;‚(áôxP•¢{OR,¸<Îb– `uy±YäÏÄû,‹‘ÕáĽõ;²TL7e—Çý̆+p{ü~ÜWñ-\û#\®âíW(ZïlNwñy¯§Øl«QŒÓëÃív"ŠâçŽ÷«nÎ"6—«EFRm¸ÝÅsùé±*[ŸoÕãÚ¹Y?R$‡ÛƒÇçÃá°muÁPNö§é.ˆ.§˜N½¥X¹½ž-Ú×Ýíóáö¸Q•bºà#Úz¶èc³ª[|"bw{ñl½¯XlEúwåYnÇo{sÓ00·6L³HPlƒº³¸†ûhØf1ßTØÊ™-¾'l-˜‰aÏl¥jšF1pA,æ[ ¢´ü8¾­ß,fûwwðè _ýWÞ¿©óÿëÏpÛäÇž{úý]¹½ÛóÙ×,šfŸøøz›³<¢÷îCmgìó‡wͧhvÞ5a‡þ<ún‹¾ÅçvÖÄ4Šw¯Ý-M¿ˆÎÛæXc÷xØúÿIÚ>þŽi˜ÛùÛÛcxŠÇƒøÄ¦ßÅ£}WÄóä:îâ1S§ïüÿàò`€ÿí¿¾†M)ÖOxôü³ùm‡æŸGŸÝ|, Â3Önçy3·Æ;ÿýÿBOüˆ×~ÔŠ¸E„­Üð'æ_<„„^5vÿ6[ë(}=‡Ýük> ÒzäÖsˬ¾5ÇGtDq+¨ËÜù{û<Ø¡Ëccxj ·ö¿@1ÈÉ(®K±¥ëãÞó¿øæ,a°×œå«…o}s–Ýü³çgò‚ HÂãþMa—æ%JŸíŸôÕ®ˆ§ñ _h2T;nŽ 
ˆˆOD|öû;‡ÒS¦¤?ƒÙýO¢÷®xAžè.í|'ížë®¿Å§Ã’ž‡ÎÏ¢ÍÓêñß{\Ã|ÎϵÖÏè«þlEb‡ÇLÃDRx¶rr…­€´“ÿóÛg¾‹¿ày°9½èQyòlüB^•ž¤Å]wó½(>9Ÿ'_žºuÞßÍOÂãëö™óžý¿€ðLÚìÁ|Wa¯9ËW Ë[Ïà®·}œ{ð,%@¢öÈK„›UlÊŸï†!X<ûÑ[˜ª½k{°{ý/ ÷&<~Þmºÿw“ÈV!ë3íÁgóˆb÷ü3[gAÀéì-ÇìÁw¾³ Ó4I/Mrýƒwé¸ÞK6¿Ó)GÏ&¹}õS³kÿîÊl¦©³¶¼úTêÐìÁìÁìÁž@ÿ»´cì žÑéºÊÍ®û˜’ò˜_OÏ®q÷ê¦çSÛÁ>;·ù]¸>ëss'lczˆkï~Ìb2óØçÛÏíñØìÁìÁ|ðor7MƒÕÉ!ô °ž)`u¨mkC]Ÿäî­Ö³.ôBM7oÝ)˜¬M q}¾‡õMx] MM• ¥ºw‡É©E d‚¥U4¶Ö£’eðΦ¦Ñ‘$*©­ŠÒ×y•®Î‡ÁGO&?7ÌÐÐ8ÙœŽÝ¡á@;·uÏg¹{°{°{7ôÏç¤Çøøo3¹”Áð±:r‡þð9ÙŽÓåÆíõá xQŸF2r)ÆÆ§ÐD æÆßý€©Åu†;>æâù.L‹ }î„IDAT»¢qû“wéì`ºç—/Þ»· †îv3:“BµÊXœ®bÚ@jšËï}ÀrVÆçw³8ÒGχtcÓö`ö`ö`ï†þ9×s–F˜[xý¯Rq°³ð/ÿã)N‹I¢T×”>Õ ^duMœ|é¹¹0#ÿðGVæç™é}ˆ«æg_9b¦ÑV¦y0@¨Z"“3ð†¢T•ï§v‹ËÇšÇî\§ª¡55Fz#Çã§t_5õ-Í;Џw?߃=ø.¹å‚3Œ=eýùkãßCp ÓDØ£÷çÒó/Üän°‘Ú» —«X—×î YR™ÏoÒ!(v|ÁàVe- ’Z>Cj-«%ˆ,‚€ŠÇç!3¾N áEö/¯Ó{é}î¡.¯åÐÙS»1âŒWqäìQîÝ»Á;÷:p‡â4Ÿ8ƒßçÜã¾=؃Df}ƒþ›]HÒ^ªêóB!ŸG/þ¤Ì"AE‘•ù9ú®]‡=çå= y´Bá»$Ь6 f.K.¯a:$ ™MtAƪHd¿à]ñÉ¢(¢‚ͦ]ß(¶4u2é4²âGV´¿ôm…4 c¹üÁ%ºÃã,m9AeÛ1V¦éþô#:Î_%‘x—å[FjóÉ´¾=؃ïå5çO~%‹qöÔ)òùüýþD°Ûí8Ž?éÊ­²¼{Ö§Áf³át:¿#] ”Uà•ûé½Ñ…YeøÆ-,¡r¢~c":Ùâ ¼¶‚{è/÷aË/32•¤¼m?Kºx8¾Á¾-XìN¬e«^²#Ÿbbd Í¡q§ë>%M­„=Öb§£Mø6ÊÍÕÅÅbãÞ=؃ï1l¬­á †þ¤wû÷ïßSˆ¿Ì‘ý\!>  ÷ˆ÷ôùNÝÐ=‰ξ¼NOÏ ×&°8œ{ý!Êr8N\ó>]ÒTTˆWVm7Þ$ ¥ÕÕØívÊϼLŽ+<켌L¬ù‡Ž·#­O35y®ó!ˆžªVjÄe,QSqŸá¾ûN&°2tó2 ›ËDZNà²|{êOÚívÊKK™›c)µ¾·#öà{ Ó$#ý »wí)ÄßF%à{M«oSs–/ ¦i åó膉()(J±¯·®ÐQù‰çMò¹,ŠjE‹rÙ,ªõÿoïîZÚÂ0 ?û1f% ýÿ?ÌkAj»»ÒBã4b5Èuå`70¸gCÈ;ìMÌsÆqLÒ¦/%íï#ó4fœæ$MúRÒõÝnRÙ¯‡ÌÛädX$Ûݵµ&m×§”þÝ6ã{ g©µfš&__ÁŸ§š¾?(èp´{ùkœàڔŧcè»r’î•ßb8Ý{=ü5ίíú,žýØ¥Ùݯ<»Ùþ{¿xíqvK)v>Àc: 耠‚‚: 耠€ ÇæÃÿ£´išÌã”›«ëÜÿ¼ó àîö6Ûi¶|~ÐW«U¾åÇåw£êTkÍÅùyÖëµÅ`ÿù£§­ÕZ³ÙlLûz£¶m³\.àsƒü‡>K‚: 耠€ ‚: è 耠‚:: 耠‚‚: 耠€ ‚: è 耠‚ü³GÕ˜‹ ÀrIEND®B`‚nova-13.0.0/doc/source/images/run_instance_walkthrough.png0000664000567000056710000047653412701407773025061 0ustar jenkinsjenkins00000000000000‰PNG  IHDR§}¾ç½{ pHYsœœ&Í:4}IDATxÚì] \MÙ?W½Ô ½PR– YÊhAY*d-»ìzB E+•-YZ,-dyÙ_þc˜lC™1̨¬CeFÙfZ,…B…^ôÊýŸ{ï{·×Û¼ŠTÎ÷“çÜsÏ=÷Üû½ß³ÝßýeÇB£…2ºˆ¿…´‰ÿ+r • ´ˆ8÷ØglgÅŽ-@ñ÷m¡acHÀË.k3 á¶õª+Rù»6Ò2à†at Ž¿¿™À¸ú]i`+ƒa}HIŒW툟Eü})$Pÿeì›i⢢È8^Ža*4’ "##qüvhÒÛø!Öú :<ªÌÓN;àêêê´0 üz½è/}áóXžYEwùT 
~ƒa­“÷yظÄÐÏ|uL%/…£gíFç0gãù#«ÇP/<°Ç{cX6pÀñs_ð’è2/°ÅÈLf8t>üµ±·_2¸ û)N4‡ÇF††zЦŒˆˆðöö†a¥§æ–âºLE2êí½æ¶¯Cj2§WÆÏ_0è$¨vÞ&‘šk“^ÙØ¶MZL$Ž^å&ZÕÐäÑ¿Ù"r©ghY΀¿I ‚³'Äs©€yD I„¥ý\±ÈÔdâð@ßi Ÿ?ÅußóR¶ëY/­:RÄÀß×A±WéÔåÔ-`Ãó˜N#Ö¨ä}é¡‹h )wë0óLjJsCâ!Q9Æ+2‚ªiú†XŠ–[wðR_*éΩJlÇì —8V4·¯w/ ó µ;ÑiL ëŠãÿÑ{K3T7žUž5Nåàóx ¥+ ùH©O@íÒ£þK-aC‘»¡Vk…qÌB‰dŒœœò«SÊζ¦éuê¹Àaþt7»«6EÄ'KýZ²•TÓôˆ?Äâ¯1V¡¿ü SÁç÷íØJjâ‚—µu‰ÆÜ¸ñÞÊÊGvö±1®ˆ¿/†°-[G;¹‹_!ƒqçE™©ŽšX|xøz±H+«$I®Š‘GDòË*jˆ¿/IòhˆQø‰_"I^uª\ «Ä}C6þ CŠ'nÆ8&ºù(÷¦ÊG-ݶæ†ã±Ä©·Yô•—Û¥?bì†y4)þróŠõõXßêð§µ:vþð:Oµ¹{ÿ¡Å œéM~þC†nwHXÎÑ~y™e9.TdªöNúyS³1TØ¢o¹üÓÙ SùöúsÀ°jb³WžéŠa­‰É\Ì'…å¼ý´AÍ8¬M* ¶iE…C®•- Ê©—Š-ìDn¸AM*Yèõ0Ê­ Ì~k`¼—•ƒßÚZ²$pxú[ KÀ€Dr:Û ÃD¿L½ HOI1_t Ïôdž㿚_Úd~MêصgÍâõ`‚‘ý{þuûG_ç}/òLÔsŠ÷p˜^­ÞÓíNöwHµYŽƒ?)zêó· ·hþ(ÐÒ¤éf>> ¥ÿÉc»záx)LøtF„„ø»9…'ãÅA¡—rÊ’OÍ[n^ ÀTÈÊÎÐЛ©©!!!SÊŽ±À\øŒ^Æ>¬†s9°1;OÝÙv(ŇäXÀÀžíæÆåp yÄtF›Éù'Y`!}ö$ç“ó&ºäfÈbv8-áñ(è|\1â91‰õ£-he•7BOÖÅø‡CòV,t \ÌЛz†pfßEÛ×ø-Û°5ëv²éØQ’GQ|äW¾/|eÔ­ùÃÄ’E„LvtÜ¿0-ñ¥\¾{Ñj<>‡;·A´ÜØ(ú餧Ⴣ‰€9Å®oÓy:~Üź \.—»O2¥ÃFÃÈkkkÉ«zöÛÛZN°ïÉ#¤à1Ð9ßÏê6<y4%Q‘½¼µžR‘BaùÇÙÎÎêhþå‹a±Ë<*~÷~ïžÝ”•å] —÷j*pïÞå^½†(=Améû×ïÞ¿ÒÑ!º¯*Í¿ñõ6Ùùk³Þ=O¬y¨·hÿÈe~uþ‚ƒƒýüüÔÕ믒9|üt›ª®fo-%%%Y‰Ÿ=éÐQEʳ¢>ôWŸäÁ.¨(yw Ë¥6Ÿø%ÍǪ“›‘Qfbâ‰ø`æÌ™õIž¬]—î<²35›¥ab¢¶yËúË×"þÈñ»‘Qý\ÉÅÔ‡mõ:ÉÚÛF§£˜Îädµb¹NãÓ53‰aØÅÜUËÀÐÆÞOôI?=àÍèÁÄqÜqˆá‰¤ó1lÇ÷Ü•)ÀÕuÎëØ#'€­§›Z4çWÚÎÓÞÀ01'nr¼F»Eý #‰ËÝŠ®ùkbC<]ÛDÇžœâê bcá±8~ù‹\‰ò(Dnñ^暪øƒäí\GLHþ•õ6‹4©>»7w“H–ÚjNæàR1ˆ¿œiƒMÚµò›˜-›Ö_g8ù¯ØÍÔ]s6ÅÅytÄ4‚.å–PäAlÝrÂV¤þă²ÌòÏá`±±_¸Ï9vH=™þüªY{fúsñû"ßLGt²N¿ùÛ ±ÒkòÂK§´‡?™â¦5ôqÒÍYë¹NÄè† ëÖ¬Y×ÈêO÷u{áŸpV‰“5w"ç5¹©C+ïdœú¥æ*ð[„é2Š[9Gt“‚~ÿ8>ƒŽÿZÆŸ©àÀ/ÿa1 °b*Î× ¼žJ¼%øPòÖ£’ý“ü€œø=[,)ù×s\'Âæ¾6µõôç¿ <€ ~©—GDÛBú`Û.8&I"on]ÝV þ¼zõªmÛ¶õp%WN-Åq˜Fö¦WÇ‹‘o-uvFøµÃ'±øs¿\i„ü©w0>>žÞ\8,$ÍÈY„¹ŽÙ³êÇÛ»FÙ†Hät›ÎÀF`þŸXå„JCñ:ݪÊÞW4M] ßþ:÷Î5ÓÑvb‡äÿ›¯ÛMÖ¢¥¥»`-Êd6ç“åìÕ«=ê¿Ô7äÛ_O¬NfóK„þßÞÜ5Å7ÏÂM&S Öج H 6€›ãÆ/DüÕ+jd}ô(OÀx¢f±‡ãã躴‹aTV¶?|À¡:íÉbWtsæL/Yó×$yhü÷-`gÚ-,|óè¹Kå“'„ë‰[§L‘ü4¾1™65ûÝÁ{×ø3 ݺuéÓ«‡œÄS¦ø z›çöYöª¥Ý¥Ñ]oÓ´¿†ʬG÷ß—•ÉçO0hâ=½?_Mí’âOä4cýM¡m‰Œ1“ø zéË€êÑȨ?Aff´±±š, Æ¢úSÚÚÚõv1弦ȃÔùkÈÖÿ=26î €Ø¬š¿ÁâÅ‹ëçJÊÊù*j2ÆÄ(ܵkÓ’%Z±±W&L€üâ”S,ëIŽãþ‹ÂjüÙbX§çãVþ 
'ªæ\ª›bY¾‹ÄÃ7Š'†äÁß„ÁGà<²ÕôoÜí_’ðöŽ%®—ZUòþÞÇì³|HÂÔ†L>ôïàýÝ|*Âðkþ‚{]ùSî ]7ÈÕæ¯)ǘ|ÜXYàž÷÷öòòU"CðîsÔ{-:õ¬êÔðD'æv­ÅصgÆÜ|;¨“2e¿{d“6$ØúóºÃÐ"%uõR*V‹þtlÇŒéKký)d¢Hp¯Um1s…f»Ô¦RÈ-æe ™+,$œbjii‰p© #YZZÌ>˘¢ùóy…Å¥0%³'¼Un–ëdÔk±Áòaö À»;a$KÅôüµ^OÓê×È ÃÇŸ7Ë›¶ÐçŠ`æÙ³â&Ðþ)´‚>«ªÕ|H& LÈÝ—½˜ôs¾øÊ~€9§(ûÝ„Ô'®œ:bºx¡…t˜x”Âñ&ÁwZZ-lQÿ¥¡’=fJâqÜ’Ã={Ø þê5š¿†ÝËâ›ã4­èu †á¸ÀˆmPÎêEf#Ç룦¥¿ùUI¯XRÙ‹‰þµ/õÚìÈL'«Sâ³Ûäõa#‚Íš^MŠ?+sS aóðæÓ'8H&Î ÅŠ¶‚áá/ׂlM°ý“š©4òD+RÑ ÿÆ6Dþ¯7šÚüµdýùߛޣ‡ã\€íˆŒ|éí½ñ÷mrçAKñ¥âºšZ*nííÝÍ_WC ÄrP_’䉊RŒB9ö×|>‡ÁpCüPQ©'ßÂ5³¿–ë?„ÁÀ·þênôûëàm`23¯ qìŽNܧõyU2ü_\¼`ž¢OÃßLMG56þ¾ý5ÛÉmîF_ÚÀd‘¡¡??ªŸ‹ùœÿë*¿’ëμòÒ¹ngu.•œÅæ>ÚÃ6Zd¾60m}(üM ÖON¾ÝùûBö×ܸª±w=¶òí¯ ³2€ŽðÍ?~³ZmÈØÆv;Çï—ö] ™Üe˜f–*ñ»ï|^kMÍF\6RÈ·¿1¨êûÚuÑÄ.øàÙsGÛv.öٖ”æ4åû÷Æ>í¢xæàÁ’yó4Ðø½a¡´¤H]CS»¢›óæ-GþCu× ß`àdT6÷éÓ«»")•”•»i÷²Díß·ÇÖèç»À€n/‹B8—k}âDø”)D{9b¤²È2ô¨þüFH¼šN‘'>™"Ãþš"O2õ_¾p×í"ók1  uv–³@,ÿÕ7Ä–½•¹ä¡ö¯A">áwG‡ &þãÿ 6ñWßÈâNwô?›þœ·¬=s;éÛ78œšWëj1X2½ˆ ¶À›2gJIùoØ0¤¿zGö±t6ØNš`S†Ø®;jê2_¢ÕäH ÏeEõgàòä@ßÀñW¯¨Ñü5édw¿¬½•jJäÆ4-û¥SqÖ“œd±[­ÙSR"]}¶–šXIÙ õ?¿–.v†0øÇwf¥|Ã=kV@¯Hù³hä?òfÍòjD—Üôí¯P“•R©)6NŸ5 ÿˆ?Äâñ‡€øCü! þˆ?Äâñ‡øC@ü! 
þˆ?Äâñ÷=ð׸ÜÍ" ý!þ0¼¼¼¢¢¢à/ Sø«à±]RS³ßg££!aÝ\Â\S¸Ù‘ žr¥>›T$¼ruч÷ñIlÖ-üj¨Øýaìx( ˆˆˆoïÀB>Šˆ³±5Kºœi "å(wo*C O…ɼÓðpÅ/:–ôX÷¹Q£©?©;nÇU<=%Ÿß×Aõàè€ú„ÿÈ”°‘‚,:Oצ`XLàfNäàúÓ±3ºÖÿ•Còà¯û8'2Æ[þ!©©©Õ’14§×À´ð'©ZŽ1Ž Pu€¬GþÔ“z‡uìÓ€r:C…c1ìK‘w3lÀ¬Øòìœ4Ñ’%¿¶i#µ: ÞÉ‘lMÓ7Dý‰®ï&¹Ö›¤ò$ËÌç‹À2àzv ¶lû•íË>0ôN‹!p, ôjÀûÅ?ñ†qc‰W3)ʬ{¨Ñ3LØÀðäDkvv¦}p7W… Ì|ÿLÛ:ÆPäù_|>¼Mñ­ šý×R³q‡TPsDâUWÒ6ºT ìXô¦Ò—Œo«ÛëùZm7üsŠ÷.}‡²i€rî‘þ\EBò„£º`øk­Aòo“mпï×oMº™´àgœÃ¤ÜOž•¾CßÔtä7æ/åØ^ëé Bvœ¦b pÜq ûtþåŸB=£Ësn”d\p{æ³q½ù?Ô«7Ïu¯Ÿe_âLiÓü• Xæ“@%ؽBBBuèÓ±=öEÀËÀ>&DÙÁ‘ s¸´/t÷¡5d;¹¬ì‚éXÁ¦çæÖEw£¸'$/#;3­—ÕÊÕgé–-}{´\¶Ðe[øB¢ß©?'pxµ.FÉÛJVä¤ ÿ#˜ %À î÷‘Él.õ6%wÁï’S¦ <ºÝjæ¢çLºœùíõɃ¿ÔM‡ð&ßiÅŸ ^/8Ú »Ëz;‰ã9$7¢0›à+š@l!G.I© ÷@ wªïàâ#šxºGôéVëd]ÆêU«` Gùiu[¾œ¨QÇUíW­K¢Öœ Àÿ` rØ2Ÿ+ b0ðNá„Z»­¼Rͨ,ÄÇïã÷to õg“D;ì=íM¥¹ Q„´ÑoŒS83+¤·n}áç§#;oâï @ŽÿOŒÿ¡}G-‰qºøBŸ¼±œb×êà ?¿µrKš‡øûŠ&Œ^ -¹ ““[¹ºI»þñGÔ°aÕzO8®Œaó¿í%7µú“äÝþ;£o"F:yUõ䇲·Oó2»u%^AÛØÈL:l˜`çÁÔÝ­ÉN2jÿ¾(òªZ+Š<A‘×pÇ_ÁÁÄ+((¨Þ®BDòÈÎH<|°Ì‰ˆ¢Û,Í´ª)5N¡«ÛªF?~ÿ‚€Ì:t¨Þ.æÖÓb£ÚÎÐÄBÆâG_¬ê˹ºi5®Åê£þtvv®Ÿ+ÙËý±ßÈIRwI£PN­_õ- ‹<ÙÊÄ¡C›W4Jþ¼‰æü !Ö`eJ•‘ H úZ;Ž{¯qøUNøð¸Ä;|ü—Ûo­Z‘½2̦ãû¿ãaÃbooLYñ®·Íê«Ú›Ÿ¶$™GœJ¶rÒ¿Èeüvå¯v]{+˜8..ÔÉ©Ü:ƒÕøôï²õÆ?~ò Šæ”áøåH‚­Üü\Ø£N”m9(Âñ}ŽD|Vò϶¾i—¼â—ïþ;qi'*ÍC°œã7Ë*ÞzÕ•é£ù?àÛtÚY5wyt¯÷¥/æ©¿°°°iFzd“š¿ÞV::ys›1ëÓÉUDç¯iòrÏÿÏÀ>ÙÉsL\ôy0g¡§^NTØÔÆÚ1༠ÈùOjn)©ºa§K<î"ž¾Ú7ñYD|–„h ÷1§¹ñ«Ø¼–¼&ŒúŽÄnÑ„OÑ "Æm¡/)mX±¬£ôÇ̆çúUvG®®(`7ºV£‰´î.óÈùëå,wˆ¦£É·B8KT|dóVäì¬)|¶8(4møû–ø-n§ér?aóæ/·ÿ ²²út邸«_·QÎ|]!k¯¿< ûö»,”ÙIéÒÅ鯾Á`0Zzõ¦™V$G. 
WÄrÂ\Ý$ׯ‚½³…¨þü6è¤Ûv_âÏ$tµ$ÖÎ|ùï?£¬e*ÉÕ˜×Þµ+tÉ¢;zö,Ü8wÔþ}{8Np„t>_ .Y"° 7õ_$Ü¥K—9sæÔÛõK¼ÔÝ| üÊ“·‡Él&&KÄ_<==Y¬ú›Ë€C1ò¨Èsû#W¯Z)‘¨ýû–Âßÿ?IMû{ú$¢ 4õ–c¥AÔ“÷î]~ö4wä(gž;·1÷?› n¥ß}üï>4ƒñùƒ23oçå•Õ8Ûõ¯š{pp°±±1Öç< üíi5 þ‘¯s+îݸ‡ÙÓ Ã†V¦EôCƒÜcøûâ¬ÚzTÞ°a㸅¾´î‘ILbþóNXKS)ÿµju7%(Èf}Aã5”Æ_+[ 2™)”…&õòAäžjRaÊÀ—÷÷ÓµôÞÂÂB2ñnê”bAŒ––•!$Üu™ˆÇZýÖà8_XYµõèÚ©;ñµ$¶¿¦Y¼´ëàÄØ‡tŒªZ3•æðÜ@xÿƒ Á•,’Hª ü¾úŸÌ>(@žD Å'Ý?&Lù/ãS%Ÿ @ÙÑÆ”øZš- >»~¢Hâ«WK­ô©3è*´°ðY¶–¨ÿR¯˜7sªìÉ3u1ûëAƒ¼e½§˜ãóÕ Ä_Û—¡a”“l+Ì äˆuÖêm¸ptöÞê-ƒá„ô÷ ’%Á3œÍ¦ÕG¢IÁ¡P‚¢­]VlwKl^?ödÀt4~ÿFFn:mX¤©ôi×ÔÛ;,ú6'[>O:²‘×ëOïeŠ'¶è»´±_/òø“àààÎ;×›  áȵ¿ÎȈ61Û‹ì¯E`ii9zôèz»Ø#ȵ¿– ˆ|ñÂFG§â@=“'kWì¯ut’@ü50ÔÈÈwm-¼=Ëk5®øW_½1Ô÷W]­ëð±“}l«^ÒÊ·¿¾|9bÈ’ñ¹ù<}]&øÎí¯ÓÁÃÙk2yÎ͈ÂñmégbBƼûª—QYù (lMÇ/ õüŒ'z½7‘  šâøb½½p (´m„õ籿6³'l€ªÜ#•=̾²EÞóÜG8(êÿº¤rב¬žOô¶7d”}àdÊï|HÌPârJCÙ?òQûW¯Xµr¥âö×>)Ð] €`Ø%p2èþù|¶æº ñCÃãôS¡ým-ÈþºÞ!Ïþ€jÆ/†¤!=lmÔé¯ÁPøàúÅ“ÆÉ™8…¤ý òÿùM)„¿ç/%=ÈüÇ{ÑšN'¿žLHØ÷ôÙ›En„ÃHÿFv½M³ýëß×â“Âc8£níZ¶Àé•65þ„õ§’^¯¾T8ñPôÊY.è‰ú³›ñG…OžäOžìŽøû¶äUƒ½³gê³wZJ%O “'3â✜ ‚< JÊÊYOžvéÔQ>yœœZ#ý58¼SiK‡)ûk¹hŒö×6nçB¬{vù—³CÆ“N¬àÒ½ii߀èݤçú÷Jú´i÷›†&ß#¼w·×µì»†Íb|UVl»Äì?é× fºÍ̧ÇN›Æ! 
±{ED˜x{Û!þ¾Š òé0Eá69ÁIßyÙY …!ÿÉ ®.ò¿«&Vâó>yþ“++T•” Š¿|'û«l¤¿oÀâö×r毑órÐô毯¤êví%)ËþšÏç0b3g®ˆ¼&XÚ®Á`€Ápkì׋쯈?Äâñ‡€øC@ü!þˆ?Äâñ‡€øC@ü!þˆ?Äâñ‡P+þ¾ª[+T}" ù!Ô†áÄwÄGe—1µ!tÁñÿ.ù¾ùºõª+É)–±r•ȦŽß–“RñžµhÉ%¯ü+p¼œò‡K9É%|ìÈâµgÔ 3bá±›a#­'²âó ó µô´ë¿aXÈO)Ó‹ŠjíÙçÁcuÉ:$dmñË監÷ž»òÂf\{0b7~a!Ë^aà^@9ޫ׵ƒ‘åÿîQ鶆0,q摈gs¾¶{ßïE~éû˜» Â“·\9á§PÅ\|uuh™wøð6²*×ÏróUžü¦`ÄŒ´"š8«†o­½c** Ã¢†{èB{•0åž8þ†>K°bw50pWE|û¶Õ¿/¦éêÍûðïÃR*< 3Öݸ»8I«»À½’yŠ¥ápã%÷ʨ\Å6k/ã^ÄF·Áy]¢—O¡6#m0juor¨"¼¢7Ô< ½\øg+b,wîq°C'±ù’c®zÓcó`ÀÀã—¥žKÑlë-$?„ú½ú (¹Ž±§ÓGØ—†q1{¹Dßèh¬Mz;üD_À•ptÉvtäŠ,¾"«fô2Ç¢Ó«uÑa¯ž9”í=Þ[x.¨y:ÛÐSÿNê¦xÍû•òGòC¨'¤œæzmˆ¶¶›J=Ó7Žmt°w„a[‡isÚÞ ŠˆÃñR[Û1ü¤$oan;v¾{°Ç4ùÙ†ú{Æ_¼â¹ö>qÜœbmãµ"¬))IÐ`¦%)’m`xt Èf|²à¨¤´T@/[&<ËÈ*áçÄûkç䇀€€ä÷Ýö-÷lÍö‹¿òËnóØ £Ö9—ßùýâoŽŽš¢‘Ÿ>} {¹reP‹}ãú>%åwýúU[¶¶´´òðá7K–¬B´"ù5hì9xÄj õrHjbðøY™¯+€àü¡íŠ.˜³ÑÃC4X@L{Íš5[¹²=íR:㮡IïáŠûÑ£íFFª0`5þˆ/­®®´d‰yeÅ|%ee$?„„›ÿ±Ê ÀßÌ2ìòþ¯¸ð%K×ðy€ÁäñxÞ&êœlð xLm¦0›ô´L3sú@c>ÔAî£ }#xTÎËRcV~_Äçå”ëë±óËìuÕ¨“Š”‰ÇÌÔb`£96-mƒ™™¨=_uä°ˆ¥áàI‰ÌµD/+÷W_m›0&ä–êkI|Ã/â34a ×ËLub“)¸&Ÿøðrr¨²=ÊÉ72Ð8ÍØØœ¸(áÞܼbx9…ù9ZðF) θS¹Ö]z‚‡Y{fú“BÀP/ãñ UÇf·sŠ÷ ãÎí5a€:¤ „¯­ÁÈÏþW·£.L cZêt’Ì™Ö7fÒï7)£êhGÇÃññsµÍ”™…=Fg/v¬Å\g';ö¢¹ÛÄ[шˆ>>«%3§´‡uuÃÿH:=-ßÌ\`Hkëÿ )¼ŒwÕœ¯°¨\S“¥\/€ ”33󉔅YZÚ]¾wù]Ú²ŽvWXMv%LœÃÁ½ó8~ *HÃ"¬w2aÊ@ÔÕÕíÉçˆhÃúâøí ä×ëmGß_ÿ™ô yã }Ž\âŸ$^Jqnµ},L×%óÓ̉J§˜Õ’©&ùí‚á‚KÙû©EÛ_@íÁlƒmßFËgP9¤ƒÞ¯öƒ5° ˜ºŠÐØb.Ž¢R²Ú°˜LƱœŠé¬bîú³´â[XýÖÀK3 ‹<õÁ ˜Çù¹däà·õéi•ï¸mV_…×Ò¡å›B K˜¼KÔÞ”Œ‡øéùüius®ÍU…§÷©ÿLÓ‚JØ…èúZwwÂz˜œ¸AZš-8낳Ƅ µº¦+Ú37“ëRª¨2åŸÅÉ}´“Àˆ[jv2YêJ@«3ÜŽâ š îžY’––VÈÉjï´ç:ªu¥›V¸ µG%x–KäÁÔÒ¼à<16ˆ` Âñ`AíVV‚Z?`'Ô^•Ád¦?þtä>v iÉ)Æù'ÉúýˆÀˆ6Á#ˆ/ ö௽5uÔtá‡õàÁ7”!›M½wæe“æä."ÛyE{Øì‡0ðê½nâÉ0*1¡¥~kD‹4e鮸í‹Ùlêaär +üéÄ%³Èôb»D/Dô#º8~™Š¡>oe¶5‡k Lß%r/‘€QCí Åà奄C±v/½Ì«Øz¯«¨M²¼T3è2rà¤s¨½©&ô3œ'Ñp½ôñ©¶Fs¤µ›w µP,ˆ ¼S"x@cíÖV¿•SÜZ<¢X³fÔb¼äk·#*Á‰ÑëðhúàT$ª6¡ö„‘ÁtdçÎæH~Ÿ‡ ÷oÅãOw׺p”*$¡?awÂç‡Ú#~ed"—"ÅøèÚ¹ã¡c§LmG×ôÀ}® B奋Ì“Làã³FlyJ¨=Øó Z;YKô5TM/¬M†øÍ0›0ÔËó´”YÓ°þÒÏ®ÝÎ]þò³ŸEzzG334ó‰ð­á<}RHX˜½sm&`îüqÖy–³O×ÜÇ;ô;7¯ª€â犥€Ú#º ÑáŸhüÑ£üÖÊ=¹k­ø¡l¶™™úwÈ5’_CÄÊ¢•9úË…žý­9üø®OgmÓYŸ1¹Öï¼”ìòíÁ°f æ¼#æõRÀ™3IëŠWV’s˜ŠâÐá÷Îs}T¿S‹k$¿Œ™ãGRÍ[#GÎY$5MÂÈU+W’Á<¶H(­MK=´¤ Ý•ŽÄ½ubß /õ¨A™Éww‚É›³g£Æ“2 
”ŸßêÍë¶”%óÜïšb$¿F€~2]1˜ ´WK,õX%CŸÀ‰]×bç%5^W—øC@òkLÀqüÇSçžþ÷Úw6œ=eB]Œ­EñþíçO=~\Ö\ëýC;;;Wt·‘ü>G=z‚‚‚š$=‘;v:ŸÞ48þQaÚØ⟫ÙSÆÕ(ç쬆]s0-Z34ÐAÙ¼åÅŠåk‘Nü¤ÀÈÈþúùù5=bn?}«Ì`ˆjO~4ü΋2¢/ªó¹ ^ 0bj„ÖÞgú½Ëu 32ÊLL<‘ZüÄÑôÚ½×oKŸòš)תc EøêQêp›ÁÒÛÒˆ ÞÕß¼+5²IDÒz‘åÒ§Ç™Šå’å°îào<‡…aCh« 6o¥¬YNE½”½Xçj¯.‡·5²Ÿ•u¶vÚRà×—Ÿ­ˆÉ%õLcØbî“6Ÿš˜„Í' Øþ^ÇÓ~ï€Ð[n1m0ï?ïcÊ=a$~… Øœ (÷ã¤q&¡ ò„Pùb*x×-ÛTm©z¡ÇÇ“V—’…7œÿ[ÎÍ;x¦?<5øPª{¿:ÞHQR±ß!aáöÎË`À¬=QõÅÜ|;¨“€#ãk%01;2‰9!hmKSéF('Ï_š<ÆN,²K—çâ–užb+Õ¸LÎþ²Wî7Gu–_R5gæ—ÉßÝ"1q²ü‹†Y€Ð›Ô]á=ðxEfîi1â2(,,”•-Z]VOFä}`£¨Si6^ÝK‡¡©ó64> L„–WÒÏùÒ’£ÔAXZӪÊ΢¥xÚµûÄX¶sïþŠœ‹Ò±‚¯¢¸'¼WÕ”yçï¼qã‘j¾éØRÔ%§´)&ŽZZZŠä `2ÅÏûmQRð‚ @íÙ&ׂ¯h“k¬4áö“à³O);±?>`á->S’žÎ33cJÜŽdÇeβ8;OFšAS/U¨¬¬TjB_I/šÏ¦æ0kX¨;X¨;%êç-E?ff^u´Š.}¯Ñ±ãH3H~UÈÎÎîÖ­I9H6ÕQ«E—½³öVÑEo45[OE‚Aò«†ÜÜÜ&&?JB\¹ÑºkŸuvï¶5kV.•kpppPPûå\þq¶fku¤$?q<~ü¸I3l°ˆŽÙeë8ONÊÌ+¿Îv$¼B˜~^{è7¥wîĘšÊs^þÂߟ0yQiŽ”‚ä' ùùùM›!O%Ÿi*'Õ²5ýÌ· þþH H~²±~ýz¢}ˆŽöôlâ&Qá[¶7³«jeöÊŠŠó‡w¬^U§/Îߺ-ÔÏO§z‡âãÅK¥ æ y ùÉ}xÈ÷C‡m’Üœ¹˜ÒÙ¤/åä.¶WIYyìoz’&)þàgÛÉ*1‡­÷ %‡1íÂçJóó›Ó“4¯ G´Õ2@RAò“2Œ9tèÐ?4µÙðìç¯Þbê´öB)Þ¿úÛÌ)å$Û²uýr?öC[­ßÉÿ‘¹’Ÿœ›%›·EŽœ½¨vÇö4jÏî¢ùÒ?•}øpûr?Ú–+69™ec3 i¦>ä'´ö$Œ$)‡¹|Ò‡¬‘±1éÍ–°5)æe—Ïçñy÷ã“‘ù0߸;ù13Ÿ_˜—­¥ß=3㑱‰¯0¿œ¥K¥Ï/âëj2efQþj=êС“ɤ\ÜÂe<Èg-UÊæ“.zÅ%däš™è›ûýš¶u´˜—^Qï·Ô.xu¥¥‰ðŒ¤ó_â'=3ÓŒð&L8ÿ]¼ÿ‘TƒÒúéN×Z{¬ì§&_ûËf ¸ÝY,'ÄÕ­­Ën¤kSÂÞ%ôfE ¥r1©%˜†Ã&›b$˜z’_Èõ_àï”*3eÁs”üÚ[m0ð@3;‹µ`' ø©‡ß¿ T† Î ýá‚H;ÂÀÚ8Ï„ý–"¦–î·&r’`úcˆùnÊü:Ä’ÚÅ­ŠººØ§ ´Ó^áfµ"¥ÜÈôü_vÚŽqðéhÁjÙòª®NfHx¿Ý4Çÿ£"™Làˆaƒ¬O–7ÈÍðYö [õM¬±wí?4hì P7/×ÍZ¶•ÌYR{Ëk'…ÿ­®SÞÝró|Év8¼ôWú÷+QQ!^^+‘l¾ºü­ZÁßx sjÒ½ôØ¢À°ñ‚CùÁ^Z{€6¶Î¬š½žÈùGt­…¹mN9®Ïbçú·­rÚ,ÍüÚÚÊ8ÍŠhÙ3ëÕƒÀjÉÉÒû?aÌeúêH?*¢fÿ8À¿o5õ–‚P¼\«¶ÒPðt„ö ´ºÂŸ]»q¢‰³lß#óÛ±·oË‘fÂØù !kaä:ú·­O÷¸_ ¦}¨: .^®¯;Þ×}±XÎ;b –’ëŠå0×!")a(ÌZ}=ƒÎê˲§g°Š¬ƒ8Âs’ׇÁ€EPnÚñ”A)% k²;ºví:¤4õÒdѧg·ãçÿ42³ªé´—kìSÅ2 í©Ùjê]BBèoI >‘¡xów;4Ò/€Å*€ñ1!“ÝG…†&ûL4­J|Ùào¯9x»ôBFkMB{ÑQ…ž^ˆ"$¿&ic††mÞ"u9ÛÏ"/ó/;[Ùû]KJvC]îÀaĨãLÆ‹«PñÄÊd̈ð4K؉í­K6²1;ßxz¡¡‘ü¾¬X¿}O«ƒ¢ï»•ðÊÞí[˜êØÊO¦¡A5Œ5øî!>þ££ãRwD ’ß÷„}{QÐðÍcæ.•šæú¹£‹]æ×Œ§í­H~ß1ýWÈÚeZíUaìX™ƒ9doä÷«Ãikk/^¼¸ “tàh¼ùP©»rïÝkÓOY¹6<¾›YðêCCé_í?ðjÁ|ôŠÉO6¨÷ïMU{7s_7'ש•¥=ý^¦¯øÄÒŠøØ¥€ñJ¦E+ø'óK¾óÛRCÄ}èf´ 
IÉOk×®=~üxÓc%&vÿàñ³š«Öìåêeoߨôê,'Me%GI Yü9ugn®1¾±Ûˆ°þY<—0¢èf¤ uˆ¼Í#ùIÁ´iMÍøÜ•T¨½ÚÛªµ6¡ìf0jÞúÂN(Œ5½zT<£Ý ^òÃs«}ï·b¹ÎÏÇ"¦N÷Aš©ùeó€!“4¥.¾ì{†}€$'·Éqœ“dMùFèT÷ š‘6ØÅg†ï[™'ÿoo²ˆ7E¡6دÚNÉ'â(cN¡E%}`–…}pÚy.^vÍvö¡ä“±¥w¢Ù»²OÄ~`¨îÞë4eŒåÊ“jÀ* Üð¢ÐVšÂ„ETæE­gõ[ƒa}H%“"eÙµ”·¬µ«²æÓ–qïà/Èæ°lߣR¿“ËLé+ª\KÏìеW3 ß,9[³{÷¦Å‹N)Ÿ‚)Ï‚ömq0a¶è.©˜:½¹z¦’Í×–Ÿ!ì‘\L85ÜaÒÅ ëW‰‹&žYîž³^{«í"’ŒZ~[G¡?ýµ2à%nÞWµ²Šß|øhz0gô°Iñ\ÌØ8‹0¹æíý=sác¾ÐQï>Ç.'v=00àò^ñ_¾ Êïîù£Á'NÄêQ’0ô»œ½u] m»wú$<#v]÷Ä0¯c¡‚öÁ^¬à‚RX¸uã^¿100e“ÿ‰Ud#©ÚéÝk‘‹µ.›L<ÆoÝù­N”ö(U§ž8îï{Âdîy¸—º"³zé(ܿϢ=¬¯€ÙØm¤›ÝA¾Vé´›]ËÝ‘“ÁÝ_“ϯmi¶ÈL‡ø¸$ý9ÏÌrx’BÙ¦fKyIGiïfH¤åJo²ê ‡•¬§›É˜¥R?z[âv²Eë¶]zWõ- Ÿåd޸쥰k]YˆŽÚ0eŠf‡ŽUÝˤä÷ü-‡@«Ø"ùɆººz^^ øùù5In<ÎÿÐ\“ ÷±#¶W«ƒÁGÚËõß—ÏÍ¡è`±{C\ Ÿyz‰/ønkÓ‚|§!ø$÷CÙ UµVH*H~â :|ø0Ôac¥¬œÿðMjOô2J1ïîu‡Ãä$‹‹ srjMkO¨ªýDþ¼\#ùI`îܹMŒ’½‡ì7ª–‹éõ°sï~÷… ¤î½ôÇ6¨½Ú–+öúµ¶¢Å¥ëE~ ÍËõ£œ|#"Ûܼb}=•› ŸâñøL&ž®¸ðK·Ñ/RkíQ4nÖŸWod)âÄÖ)SêÔ‡0ðL=ɯAy¹†èÐòM!нUìužÂ‚sûg_ðhGûAêý×FX 2”<¶akÔöá3ˆ9ºx¹nÞ¶ƒdÎP{¼Â" ¥IØnóJ“H™™™ol¬Ëã&Y-òx„oßœœ×ddàWRab¦‡WÉb*EK#¡¯þ¾¾ü”—k.—ËlkŸ›öð™Qê-š›KЯä!kÄŽm¤hÅj#ÕÁË5SCz“©¥ê¸añrS–å86æ'üÄ6f4ðøu0ì”äàþä—¸¶A9Éëaý{O>Žãþ0àÐB i¦!Œý¾—k3fçcðà”±Y]¼\'Œ2 Ë9*꥗W»Àø5¼ôKĽÅ9\A…EXºÐ_(Q›IÁ ¸Êð…7oR,( ®¬˜‡4ƒ¦^š,zu58y1Ű&+ûQ ½\â”Є—×ê]ÓÌ®Æu­¾ÔÞÏÇ?N†L`üš4&·Þ½Ãnzm *Š úöãëÅõãÇ=Í›7«EδÛO$¿&_O©î•y¢‹¹„‹ÎòÓ4oN-X/×÷25{OEn?‘ü¾/ î!ø2Ï®•½t‹–g™·Æ ¤Tcÿ+Ä;t~yÙ½ûúô‘ޟܵûõ’ÅD'¶—1¢Éï;†¬Å¢ÉϦÖÙ2TÔúô‘¹tÃ’ÅèÆ#ù!°)4ÔažtoðŽìYáç]Ë|ñÊ_~Ù6~KêÎÈÈ—ÞÞkÐÍGòûNqæÏ{_˜ËÒÄÈ9‹(«k~ù‡~·%Ç~¥=oïvT²C‡^;;":ü¾„„…Ù;{RÚS¸'© u˜qíÒœÉcå$+-Ý­®^³¢³s¨Ãƒß͛狨AòkâøßéD¨½Úk2ÐîÖ“â~dµi±5ÕyóZ†#‹3$¿¦;²­†Ö%†Jó{öº/ÿXvÿþ ÚÖ%gÿD’_SFjZªÅ°j½Ç»¯­]2°3ÌÚ37%Ư²_zîá|“ÐÜ4ËŸôaü¬-IËçT}ßwäDÉœ%µç`è–Í¡—[9ý(f¢á%-¹ ÖZë5†J:œßºuƒŸšAòk¢øXV*ºI|ñðœ—n1íAx’oÏ´`~ß–·áóÎìü!Ư1̹@…„™h}ö\P{ ì?p*Œ°£(ck· ëœà¢‡´jÙq„ä×d±d3í?-­uv‹nR–ÖB#ìð߆ß/T›_y•€¾XÎ/}|ÄýJµ®t'Œ<Å´áê¶q„ä×”qþðY+¹+4züã¬ó,)‹®ùø¬yþ|gûöµ·™þë/þý?H~Mþ+?Í/bhÖâØÔßN,˜;GÖÞöíÝÉÙËÚÍ ¸"í!ù}èÜQ·3ÇOŸ3²Rôã ßâvú/÷3•­= Ô›ƒÍ[Ö¯X®¨_¿ݦu'D ’ß÷…i«¦@7n =R%åj|]øßž¾‹3Óå5ð¶H/]YÁß»Å}IµIѼ¼‰ y …‹+µi¨@òû¾±zÕJÉHS_ï:f«¤Ìp_"ž³žXˆ¼ "ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! ù! 
4Vùá8Žîjýüüü¾Ü\½ô‡zÎì Ãl'7Uþÿ®¯,*–X‹ÏÍÉM­ZTTÔ?oC‹øŒT3&bÉï;Æ,~Ò¬èÀ™ÄôXÜ‘Ørœ£‚a +ˆ„übÄÂ_Åå‡ÁcE gÖ-;'í}9ŠÉÏÃËɯ1a"çˆ%5SyþPKcº¶N‡þ‹snUÓÜ(a@á8KDå8ÎÙ¤õ³Î NVTNtæ¢:7˜°kð™%q½ÂðL™ÁÇKÎbmÇ¥ïÑ4[D¤˜° ?½˜Jo½êJòÆAH~ A¿¿^?ò0B°Ä4'­ÃTîÍÛnWøö­=ƒEEE,Ëæ50¼h _¬ázž¼Mqí… Z¿~=䋪1¿‡õ,üóÝ”mÇMÎSÀa(3M³Á98®/êbº @­BÃBù½=*½ˆõ÷ð ‹„qăš@ª…’orÖÔÔtò\‘ C%€5hµë˜ T+TJ*gú¿å•¼÷ðiö¨ëê{zÀÚÃ6AAb¾ÓNÀ Ù8ÛÖº¹©Á–ÜÕµê’üêцâu'ìv5Š©ÑRá,aL- Á"þgî Zfkφ98::ÆÇãû"ƒ,,,.¦æØ±@HDDu"óÈÈÈÏf¸0fÛà ±;Øš·éîãapóâÞmÿ~„S1ÛrÞ~¤¬ór²æJ• Éï @WI¥=¶P™„l¶àP{8¾Ó £U'Ø5 ¿F ÓÙb–êôáÄäÁ_ë©Í.æ¶°· q@|n0÷‚4&ëKNëÖ 6ÃsÖ=JæàyîÅöqíɈŠWðMt3å².Ú… ÄÇÇ›ÞÁðÚ(L#Ҧɞr÷™(™”F‡í\|ì„)éÈuQq뤴œH~uÃûïÄb(AƉtM‘aíêóà\€Š•“ÿÐV¿³ú­`-•› ‹SR½÷‹7/åT !NBCˆÖ Ûa'Ij©ü2vN0q¶ :‡ó¦wQ€¾›—¿AU§QR‹p|·¿ÿ:ù'J9ͼåÎ ¿>0îò÷0ªø%5è—‘-ðuŸër”*Øäíw`à!޹s'zš‚üŒ—œþ©`õô›ÍKÛí!1 ì7àÀ‰4S¥8®Ža…"³/d’=cüâªåûú#,ÿ˜O´¨Ù60ár"†Ý]müÍhMKOÿ‹«„ùüƒa?€òUÔ!/ÈŒü\ŽRe;±´XúŒ 4¼ž§Äô1L•ì!×´Û,–íq·™³ÿ¶iS=•ôs5„ü‘ü>éëŽÀ?9Óba*€ËžKÛe”‰§dõ_†ãË` \<Ûð7Uzæz ù!-5Ed³újʦÁ‚:ÇUªæ«Þ`XëÏÏļ8Y=AÑôØ<Û6X¤³@V‚s±ŸÌ\÷†c®»[i|¾Ä_;$?„úÁÃjóºÔp7yã b¿ªå¢óUs>Žsî„ÉÊêLi ºàøð¿µI¯¡°´´à®báО ÞÒ3ÌîŠÔS_;$?„úA|Nwç0ãðN‚xÑå¢82o óºý û³-*&ìðCæ[[4û¯‘6õïææDÞŒ+Þb¥ü‘üêiìç´<Ò¦91„žÜ€Kdtµåì̉÷= Ñ Êy…}ڭäؼSo¼ûçÜÔ"#×GïÚj; ø9Ù¬ô©Å …È!·*-Ï9¿Ñ³^®È›ñ¯?’Bý ü§Ú³œzXPj¤Å`¨ùÏÇ.找('ú¨Ö6^+ÂàÈ6Ž|c.(;‘ó¬|cÎðQŽ-t „ãR[Û13N?ýé±ùÖÄœ—ÍöôâÆ^ Ïõ/ñªÖÑ~aT[GÈ_;$?„zìþ,úHMM%þ^íEy|rµ×É;ØIøÆ\ZIi©"̤$"ú976ªê\p3>±FeþÚù#ù! ù! ù! ù! 
ù!|O¸ÀlÕºC7cÑÈ¢‚çYwn¸°gÕ%çÝ»7ÍšÕZCCI429å½F+£>}ÆÖ±ØaaÁíÅ"Ož,6d&«uGD+’_CGô.Žíd¶‘…µä.Míö}GNºó¢,á`ôªÀ€šæ¼sçFwwíÅ‹µ$wÙX· €Ø ”FŽ\P‹bgdD›˜¨Ijbòdça °ÀNK» ’BÅ•yP{ŸMæ0ÏŠÐTGMÁl?U¼i¦µ÷Ù”#GVÞºÙ¯_ >Æ»ru÷àAJP{ŸM©¥}i{ôÑež«‘ü ¢Z°Z×(½" üX–Û\í‚âÙöë§þ¡lªÚ"EŸ8±eÊ”O/óÔþåÌ–ñ–#ù!4 \Ï*PSoY Å~V5ÒUµf7nDYYyÉOÆ+ý·FÚ£0~‚FxØzÿ€µH~ ›·FŽœ³¨vÇÆÄîópu‘?$«E¶VVÌ?ÿüqèPyÓ_è<1¿Ì^WÚ+TËC]ýîäE€©ÉdU½ÞÍÍ+Ö×cŠ ¬MBÜ 1N6ÏÅcj‹>ééi™æ&E­g»Ábˆ–VP$/ïY¶¾‘‰ÀûPE¯¼“Éäñxð—J©2xž±fefN¤Ïxañø99È’ðó øºPŒjÇR“ ‚K+‚ =222¢ +—r–.‹Qµ7÷Q†>yßDï‰âÈØ9ÞÄý—«ÛÆòý…/â€Ýkûµýó_ñ×ì»ãÛ¯ŒÇË’Û¥G/¢Mãñ[0²\µçä\40„Ò—8LÝë³r²Y/ö!/ŽsLá'"d›u ¹û0‡;7Þ!çÖì zŠ”örR¾µ.lQ“‹c…ÕŠÜÂr}âKEà€¹%àœÌ}{]&>/·o¯’™ùÔØ¸#Õ¦z¿„üú¢Î'‰µI„'¨=ðú¬®Ñ¸D¯ΞÊê·†z²‹Ð4YŸcˆL~lÓ F4gª‰zË%:3úÝ0ìP0ýÖX`ØkàL:öŽ_[ Ÿ–BemG K"ÉËH566‡{J P{o­FPå9îÖaç™PÚ07Úu’­0 pã3„ê^ÁÌW30Ž;îÿ7qiÁ±xsêXLÄ· ÍQ'|o]*ݯ À,y,Àœj£~úèAF7g*Mñ­ ¬~< yl‚Š÷Ÿ·Œ,ró Ÿåû´ts¢Fšy£öv7ïmõÛÕ+no4QªÑøuÏšcÖžxvÓ}Áþ¯ P8+­DŸþœøì}`§±°áá™a×D‡a„8á®r>_¥ºóóŸÒò#ø³ ƒÅ%®Î=pü…Úƒ¿P{ÄY&Œ0S¸¨,MÁSäÌr͉ïÖý/•‡ÛÁÛX„‡»¨4'û$МÙ<=ù_3›®˜éE‒ -]“Kþavá ù‘Xo« Í©¼‚|fо³¡ø“1º? —±yž]lÔ üè3õL“OêšÂÈj•¿¶ÙŒ|„ÿ=RÝaŸ «{¡ö ²îç‰f š›h¢i`æP{ÇÙ–¸´Ú$i2ïƒ-Ñ<`ØY•xž€ž×® ·ßX`­Sqg Ù*YÀ=m²¾(bÍÇ÷_˜ ÷æ?#I˜1}JÜÞsDñj®=Bx¤Æ®¾Â^=.TÉ'™‚8äI«‰EE¢Ô ú´rNW̬Úö¬ß•|0Ǭq<-º¨Ò{½ØqR“|ò›Ûn¡6‰˜‚†ønµ']~”/ƒ}?õ´ðwsésج áµ*$„ð–£Çí[:NaÇŸàêÞÙœËÕg€½Ël7'.'ngh¨{à77§¬âž?:s9b팻…mîýDt;ù 4ªÛs7zÉîàÈ>Ú7l×€½!Ëç:;\Ê©ÈÿÉï?•AÁ¾Ó¨ò&ÜݸÛ6± F‹\1ãAq•2…9Ä„„%ܺd°¶ ´ ¤éü¦áUòÛæ9ÈÇ•¸4XTXHÑKÙAØUuQíÒ»…çp¢×µûX«•ÙœKU-¤Áp@z*âK¬!óyð7üt)Guw˜[<çŸÄÜ·öúÄ^³ lGGn|<ŽÿçæÄæÄqkÊŠÞ—#±;´;™Œ=4#32¿PÛÁ£Èޱ¶åkË "ÓÏB¾ˆžm8qkÍ…½_›³Ã‡þ[¹ ÉOĹöQ0%³Ï²Z—,!1^Ö.JíòAù™J=d&–³K±b| œÛ9vA×Qí`MkïÉÝ›¦:C$Ó|(û¤ªÖL4æyî`Qò£Ñàææí¤>ýõZªü¾¾?ñ¡÷~ «W­¼ó¢¬.9 îo.5þæ_L[Û¢1íoÿš;s¶¾2è¡ûLÔ­JôÝí[f}©¬“nÝöÂÏ·.¶cÍ0%%$?„oôˉfCìkw,þñ]kéï6mmÙbÿuѳ陪 f-&˜½Çi–)qO/eeW³±á/{S?ßµ’cKÅñ×_Úýû£©„€y3¦Ôº4ë,Ï® ¤xœëìg3±v²ÏrÓ?æïÿ™~ç<·e-Êü÷ßýûÿ>¹FòkˆÐgVæòjÜË»{ÝTg˜œ¬ö;w¾rwo[ó-ül ç¹¾†Î›×¦¦O`Ÿ>Ó¿[¢‘ü"X­Z´//z^¡ªø!Ó¯L3â³ÉÜÝWîÜ⾤ Ä+`ŠUóæîÚ½i‰4RQY¡®¤<û{&ɯ¢][Ívüv+³]GCù)±Š}:°LО@KV¾{WвåéϦ¼xQiøpEµGaÉâUä—ï?›’L®ýÎYFòkÐÕϸìÇ߮ü¥oÒOrïû—O´[47ê¢_Ól[¶„CD×›×Ïtï™ÇbIyΞ­7nÉðáµ)3õUn7rŽ“†5“L°eKÁòå«åLä ù!4¨©ªN.ø  
ïEÁ³üü¶mÛtéD:êÓé^—œ-T½ò$ýmÉ==#MÒà¸qu-¶»êíåãÇiåü†ú}•H[šåË«H~z:Úz:Ú_#çN̾^±;w6GÜ!ù! ù!Ô 7ÿ¾wóÚ•wE¯Ô[±Ìû[ÙôÿbMÊõëSSóÞ¿¯ÔÑQ9r¤®®ºÛH~ŸGp0±¨]«V­¼½½›*C‡~:aJÁ4og`3©Êæ’z5ÿçϼ—º×.çÐÐàÀ@Âõ€ðîÓÞ‚|>žqW×Ì|R’ßg ©©Ù$¹yUòþY™’©\´¡Sç:|kjÜSñœãã·8:jPÚ“ 33^^¾GEe ’Ÿt())UVV:;;7=bbÿØÔ$ES·Ñ ß诫"g“››BoÆUTšÁ‚äd›öERAòû?{×EΆ3Â"ìÒ”¢R„E¥(`,X»ØNš Tð(ŠOAl€åÔ_=Ö;°{vÐé¥üðL×ð®V :PDŒ )¨â‘ѱ˜¢ˆg`³üD¡Kâ`ù'¦rþ‹ÄÌ <'fMöŸšÃ‹·Þ€¬QY æö”,ŸYiÄ⡟Qån¨Kà¡$’]ü&¸îw[*)ë{N[C™N¢­}ò=9hó¾!Ýi«®¤D¥æž®c„¥%oÄ+ïbB™®Wª”•Å€0ÍN¿ cU»îŸ1Ñ×Q·u§¶Í[€%E8@ÒʾƒÇ/=½ÜÚü/ôËZÈÊ=VíË{®®âͳ™ûºr”ÖÚ(!9ûMùOζ³é3±¹HAÂ6ÚI’ÿ³Ç:¹ÖI Wbî=žÉb‡"”°}‚~VÖ=-­.ÕÞ‰ßîK«³Ñ •ëÙBÞ :Cƒ2O/£ÕOàI”_÷û,‰Öª©¹7óÏPSy¶–[¹ær©CƪÌAÝ4Qÿí´ÛÁyTw·èÅ}eñSMÒëMΪHN=nn.ê)` vðž_‘UÀ¯M?ºêOŸ@}GÍ5—ö—ç‰Øø¥}À˜BñkÑGžÏA‘x‚¬”‚È‹´ÏÀ}hÇtƒé¼£–†ìç§*Å@Ÿ˜ŸŸ/ ™î‡ù%"ÂÁpƒ3´' å ¯ye }tj)î9ò›v8õ¼Þ àºßuÙhö*ú)è\¯>}«í^úÏm¹7÷´À³*>}äVWпÑvàé½ps;ºÞ;P1ã›Ñï‹H,gQã‘(ĺÊíœ:Ô1Xݯûýàà®Èã>{sÇG޶NB™{…[¡­F¨oåàIqë¦á6iM±u¤¶»¹[Å[E'€VònÁNÑ‹“øþ,„ÌœŒo=¾Gú}Wxúôiûöí[X©zœ0Dh¼«;¯û—F{šÈ!ªméOñpÇ0„üù?ìi¬=™ŸM°¤UO¿Ž}EèG7,…¿©F‰Ì‡ó|j#3èLJ¬l‹z•ÿ,KY£Þ¦¤éšþ¨yrÿ·óíÔ)@ ŸäääZRy¼Ï{Öú #~g„Q@5{>\8vl£Þ|þ2qè×’1Únp#ÌT¿_ר±~1DM’â’R,(  _ ÇWÎê÷Ô°sk’>Nß@|S4X^ è×Ò1ïÃg“tºÖ{‰]+nq`@m[UNß@ åí­Eôû)0veÒݧ2Šõ°EÝŠ@ÝÚ×%¼[}÷ÙÛ; è÷ÁÒ ý¶Ý{{S—À…ïõ7¯»}$·bγN3ÈV¯y±`þ( ßO‡.“=~úVB±Z[Ñä¦ß°h]¯˜YÌY;þ·vÚTÙZ;{ò,Öˆó%¡ €~?)t:R3 ÊÊÊV­^=`ü4)Vù^¥¥FO›:E†Å4jgÝ€˜§M¥ÖÎ>}zçâß§œ+x˜›#½s׃ÀÀ¥r°¼è@<‹R DÚ5Ô¾ 0Ú·ïêäÜUØGU BGèý¾_aYhݺu¿~ýzöìÙ‚ 韻îüû¯ÈŽÓ©×äXC¬5ýýÔ©•Ï&&•Þ§Gï.5jª¬\[ ЯFèééÝ¿ŸÃá´`îíÿód—žýÄ5Œû‰¾vÓ5¶@<[×çnŸûëœúƼjU¿¿êСRU9»(!t;>0ìÔ¹7èW &NœH™o©8‘ðæÞƒ ?ýÆã×fë/Y†ˆ˜{_ Ø©sÚ¡ƒIãÆÃN|@¿Ц\ œó¾ƒ»Ž%ZKa4j'Uî}DDtÝ“1n¼Â³§›4Ú{U€~¢hÕª•··wË+•ó·2”ÚÕ{cXîÑÐh/qôèÚÑ£ÁÈ'Я2/^ÜòŠdSÔŽ¾#~iع«×®_P³ù ¯Ð_¦ÑŽ-›–~Þ=ôkáh0÷0;Ö¼$üÔ0îÑ0d?ÂÝL( _ó‚“ŸÃTVûVWß}àHwkÛÆÄ¼*4пšÁ’Óg6":ËŒèäN>Œ¤Mž¥§ç°ÙjîZî‘YÕ8Ë͹§ªÖjH³ÓÏ” ’I’›{6ªÀÒÃP,;¿XS™šƒ”SÈe¼ÉTÖÔãp8ϲ²tÙlì`2™\Üû'ú‘äÅìŒ4M]¾…sì™q?G·­8CžÚÖ#==…Í6Áá³²²ØljP!5-ÛØPä"ÏŒ7'«¦Â¬šžœìûj”± B„’Š¥<·i±ƒŒ”ðù댲ڠó~½. 
AL†×‘æM$3/&—0ºÈk)FJòÅ©'…°¡Ñ¯ •Ú„÷É0ùŽwoÞ¨©6Á¨¯OÑÞD„žÑl$#íy>kèJa¾jãÓ)êö°Aüáï.bÿˆ¶]>Ujœä>A&:üj§¢(ÁÔµ  õJpT´Šq_>gÈ¢¡WÐÇij&œÆ%Ãöû8zSAXT½§í[~ãRIÛ<ÂÐãÄåu#úÌ=Á²ðéͳróŠ»xûÍMs{~àprždët1à=c¸ÒLFMmCìv‹u9öÜ÷|6w`soÙ¨…~˜*~>Nm‰q³½~|›sxAt„y$á¹]· ÿ\Wä`gO¦Ååß5ñ[ÐÚ~ÈÒíÇG^ÄžA»Žë¶ºÉhÓËÃ^¥A­” ^¹yÖ8]lÁ \yÇ`ìã1Lû¿ë³9ÃÖý5­gjRTDmSv»‡fEF,ÃJ=°Éxÿ]æž»»Ó£¢9H‰JFÂN:I=m_z±}P"éììœɶ›²tã…Éì§tžŽs‚…*²8ŒáË’bVºŽYó(ë"à9 üBL‹ÜÆ'gcÓVWÔ{í¿-÷P¹íÀ˯ˆW‹P/I!9× ÉË–JkmUT)Û ¯ê6òTÈT£.ß÷Ègv,>rZXÕ? æpc¢mäéßêý¦ï‰Ú¨ÒÁpðÐþiéa9%›ÔHK9Úéíp‹Yù[ÔT‡‰}ÛÎAïm¨gÙ ­^ž¿­¦Qã¼³îÝÆÐ6&8œOLfkJ!9ŸÌÖôh'‡SVŒÄîq KæG¦<ÿåDNQ™š¼Xƃ—êòô)€ï…~€æ€ap,Ÿpæ¾|ß¡þÝp¯XiábJëúèãÆ'þ Á¦ªŸ%ítœy"7ý‹1³X¿¦¬Ó3IµB{¶Ñc-„Ñ9tûvd~|{ð9ë¹Ó,Ÿ¤õÄ:0|vMY§˜5‚Xrr¤ÕÔ p€~-'wo´u©÷òI K÷òÞAí̪†)+#ÅĈâ´q,ÃCÝw;5!¬ü Õ¸óTzËöûŽBƒUÏ²ß ÇpêtX¹úµdø-h¤™]êÕúÿõ'wä( &ÛæÀµžÃÚ>cjÊGް·ßLÞòÙL½nÙ™°ýö©ÉD„¨¦f¿yGö÷ö:&ˆaê˜ö ôkéxxëz'#ó†K|æj×@¿‘£<éîßxs9„è77’qq”ÅŠnˆ~éba¦fÁ¹‡¿“’²²¸TËs㦂9°ìè×âá0´_ƒÐ@¹¶7ã³L;j%×+ÂäÚ1Çvuúý`çqX*õ=ëæ©X#W§ZtÔ2 óókWߘK¹®âðè÷“@W§cJÚÝVmê±ÕÑÕ¿öÍšþåÝêýü–„†.¯9Åã˜, ( ßOCj4òFV„Ôf~i±>Õ{>|t­“ο_ ™•©¨¥íÀ„}€~?'Ì´”î=ÌÊ+i%«XM[”øÌ}zçæ°!õž%ÖIÇ!‹}û6LšT½ëô4iMÍ!ZÚJP@¿Ÿ]:i ùœOºüòež‚‚¢UoÞÌ2©îj Ÿ¡9iÒ¯åÍË××oœzûþ½ºªZÔ«|¶!d<ÐP-û4G´L–bÿþ¿@öý5âŸôŒkI‰o _±ddzš÷37mª˜¯_ß•œüüÝÛ²vª­ ¤¦a¹ ôûF>.\(.ÞbŸ#{üÉîMM­WjßwÔd?ýnðÜþmó¼mXÌ´¥]ì07ÇeÍñ§¬ŒLNncf6ô«C† 9}ú4u-”{o8²Þ"š{5Áfâ ÌÃO¹æÆõ°R·ÞÞ^ºK»bb„™YAYY¤˜˜;èW ,,,0ýÔZè$üÝv·ZÇÀ­UuƒW­ ô÷¯Kà Wü:§Nïô1 Šúï¿.úúV@ _5˜2¥NÂ?z:¡îÜ£açêº:ÀoAíÁÂÖÿæãÛöøñÛÒ,Ù6Z‚ÅDÂHzCXÊU¬çÔ׿÷â…J»v`d èW¥ýÙ"[žZÝÍp–­Ëœœ¼|5åš\»‰¹‡½GvÇ–çð¼_D@1Y”˜¨n½—f£=á'DËvíúýª´?[^‘œý箲†VÃÎÍû,]K[Ü¢A;ì7¿¿÷ï’ó[eÖ-€ä™DȈæž÷È…ÉU$ñÏ?×î „úµd$Ù`îѸxåz¿ÞÕ¬WÚ³g•£#ßÅÉ8&E¼•ƒÜÂñoˆð“+ ÂM¤Q:|¸4”Î×£A¨>6]–ï tü¶7#°ÏIðlþŠvi(û¼UO©.$Y2¿Í-¬‹Øh3qFcbøÌ¬ÞâËäÉ7ÏäS}BÓÊiÏ'›¯×WµCˆ±vÝoóæ.Î4;ý„íÌ xH;ìœÜÈnÓOÎ7skkxÄÓT ZšX°|¡SP ½Ž•[fRl9xMc{”м4ã> vl79|¹u7÷1}|ºt7᥃+$m*.T™ö…¡È;„{)ŠÅ·"œÏ<õ ¡­vcâßö&ÏÌuwvèæ½‡¾º—ûÐá¿´Q!¬tPÂCRä¾BÃmŒ äŒM' -#È«^ëS>v×–$Xtb^ ßÑW@#¹‡¡ØN£†2mÕȘgÏRÂ|íÆgb!‰E#½”rü3‚ð‰Ž¤ŒU#OÂzX‡Y)f&Uü¶\xéÄÈ ò=ãúݺŠˆÚƒ.QS #·¶Ztñliñ‚%®èƒ¥‰}¡ªM2Îí5´C‡ 2Ëýãb£yì­ _dtlyª¬Ã·ž¤Ý4÷¬*±ú0Ž-òpDT?쎈<éŠÐîcîk|#ŸáÄìNNPCaV„ð}5¤¬ï9m e»…67È÷ä Íø–<§­º’”š{ºîZDÐ$¤ÞãK[7’-]…ŸîÕŠ^…f2[a¾ýR6O²vò¶'ôIâZìÔ= 
ú–H=ÌÏÏrÜ9wÏ”­IW¢vsp%÷ùë¢Òme+vÑåEò}V^ˆÎmŸg3ý?aºj£„äì7I+û:ŸD3ÎSÄ8ò [á …©T!†¡ÓËHr:vÿb!=ñ°¡eR!'?_˜{Tª^ÝwÜ7wÏ$2!'ãZðÕÐÀ^T »Ž_ˆY-¢«Ù鉴ãyÖé–ÅÏYMmé8QXÅù ÿ޾r©H´VMͽ™†Zi¾µÜÌ.—ËkL¨2uÓDý·ÓnçQÝÝ¢÷•5@HÒëMΪHK;jØ•çê>޼e“ºyb{ÄR„^ÐMMš–E×ÿüug§ëàÞAvZ(.3ò èg<û„ÙŬîþa ´äÙr'ÓýÂ_YY+ÆŸR>–ñ‚9["çòðXÓê#Õ×@»ø‡VTô¸|–¬0–¯|¡T’S”WÌ¥žØ i‚<…+ÊRú¨¹ÿUAUMY™$·äS$D‚ ÅLÄ~äø#- XÐñ‹ŸSái)”á;úÚe£Ù«Pè§`¹ùêÓw±úÑî…¡ðÜ–{sO <‹9XÌJV'>–TÚÀxt/NzB9÷Öãïõ)›}M 9ÄÄ:Áš£ïoä³ ûB5EÅ”WaÖíDeåzôIêøâà®Èã>{sÇG޶NB™{…[¡aaî+2Z·®bE„{ZqG›ÚÙÝN yçx +ž3>«(‹nvú"µ múƒ—†S© 5Ò§—ôî}—ô4+ =Nà^òxWw^÷/ö4‘CTÛÒŸâáŽaùó;~ØÓ8Y9z2?cŽUÿ,Sj£GÓ$½¨æFÀhªç¼ª'ŸòqNAÇLj‹»”‹û)?¿„ç'¦¬Ì„rúµp<{®Ñ¹Þ]Mz„¦?jêF2£µ²r…yù>L˜Eôk¹`”}ld í3ZXÕ?&¦À©n¹¢ëÊ›¯ê?tÈx(  _KÆ~}iåzQuÜÃpr ±$Nµë{E¡GÔhg(B™$¹ž ¨Ùd…׆)XüƒÐ ì@¨˜ö$9ó­n&¾H’ÛääU¡€€~-Ù©—5hT"ng¸Q@öpÓ^¡„$ÂRýQR/žÿ*îõqRO•”pI2ÈOÓ]àfé6xMB&êÅÜœ¾ª°¿ ЯÅc”­Í¾gôÍê½Ån«ÏÜ…µÙ¢îAú¾£¾¸é iY+ µ¨­Ý»“èä1,z~1™«#á(Ü7ŒŽ¿ž‹ªj°âè÷s`ÒˆÁn?Rl[¹!ê¦&[{z nò¯“ɶfóG;‹Éx[„lq—/ÔI;Ô©âU»OVÅ?ß@›è÷“a@wßwììm?±.‹_dõ12¨sÜnïÞn‘‘+ÿÉ7£[ípËÖ¿Í}}Ý&nP"@¿Ÿ ³§My’“[€dˆVbµËJNm_¿¥ñ2²³b¢Ãœœk³_]ü^NLÌÖ×W è÷“¢ƒšjžceHHÿqS™Òådø\–xl¯ãäIŠr²Fõä 'gjBiÎóô³çNº¸Tp,/µ}Û£À…KX°¾è QuLÅhvØ SSg»¸Tz˯¢‚B~ý ú‰bûöí´ÃÕÕµ[¹ÆøPˆˆ°™è&ΨØäòjü¡!ƒj·WoLÌýùOòÝ ã>Îçðð—K@¿Úðüùsþ=´\î¥fd#e‚h5ÔÉCäP/»qoy¦æo_Œs™èPÿ‡WðôémttŽŽ‚°?“Ù*0P¡(.—䀉f@¿ê±téÒ   YÙ;2¾u׋¡_Þe¡{?û‹éÙýØšu¹˜³s¯ö0 !'ÿçæÍy‹€'@¿ê¡  Ð" &1=».Ü£!¯ÔË Q;©ºb1Å곇‡JJJ¸‰‰7PèW pǯå•Êþ¿ÎvéÑ·Þ-Õœ÷Æj_|[UßhML˜»w‡º¸ø[€~¢íÏ–W$þ:Ó¥‡eN$Z‰Õ¾Íî]!®® ±áâ¢ðèÑus Я…C¯AÜ£aë2§Ö–BÃm·èèÜFèôkшûû’º~£¶­ YPÍNc»v‡¸º4ÊtRÞË •¶ºPFÍN?S‚H&InîÙ¨KC±ìübMeÊDyN!—ñ&SYSÃá<ËÊÒe³±ƒÉdr’àYkÏÎHÓÔ5¤#Áž÷stÛŠ3ä©ÝäÒÓSØl>++‹Í¦&:¥¦ejâ€\D¿ÏâæäqÕª³o–‘‘¡««‹ÏÍ{úLSOWï$êl|ˆÁdf¤e° ìúQF0ƒÝ¯ê¥Ï¥±fË®^?9rЯùé—Âûf¨òPE~ç Bm”Š:”Vöj¶D@"IY¹¾ëOYÚ,w¬H,Àá“–õ¶\v…¦.þ)1ê…ósz‹‚HIL4¶´„7YtyE/q;{ó|„TzýN^Tĸô_Y@/&¾PïŠBU2YW×4;'_K]…6ïY[aÊ]UB’C×§&%Þ!Ïüð–€:téVËÑw$Ãä;Þ½y£¦*×l ùˆ¤ˆ×ÈR@˜¯ÚøtŠº=lJn ±D;û•Œ·ææ>A¼m蟖†zH‚ÿfÜÄÊJØž§4B‹NåÙÙS¦cMõh²Ú©òÆ";¬Gåç´ÕÃÜxTÄ–»Êqçóá)Û¼­ZÐÛ©´Í# =N\^7¢ÏÜ\!»Þ<#ó9¯¸‹·ßÜ4·ç'çI¶Nj¥_!‡+ÍdÔ$o ïË) Œ/u;·åpʲ²^²Ùjˆû);¿LSY 1Zs8Ÿ˜L¾Q³œ¢25yI!·Xöƒ—ššò8ækÐWn?§¶Æ¿Ä¸Ù^?¾Í9¼ :¿‹<’ð܈®[Ю+r°³'Óâòïšø­?hm?déŒöã#/bÏ ]Çu[Ýd´éåa¯ÇÒ v^¹yÖ8]lÁ 
\yÇ`ìã1Lû¿ë³9ÃÖý5­gjRTDmSv»‡fEF,ÃJ=°Éx‚§ =÷^“] gœ¶¢çèØB¯üƒä:…åPTÄ—C¨å¼4V¥d.u.ÚQnd>å ߟ6+Ø»Ã0ÜàÀîI«®Ü‹°ÁŽi«ø;@)ÉWÚÌéñÓ;kÝ£ÉH·”Åò ÉÈeIŸ¶keÙ9!Y„Z³üõØ~gFg#b>{ÊÚaòæh·çÿœ5;÷ù…T¿Ð°Úa>r†ùHÊ¡Œ?ã)Jøº˜aúÅÆÇQžÃByË¬í¢¨ð´'B”%Ÿ ½Ú DM­œMEHÎâ[ª›`¡Š,cGxIJ¤˜•®cÖ<ʺˆ¸D(¿Ðϧ±ÿÔzPÕ±±±”n|âlªúñYÒNÇ™'rÓkU26–·y ¦t\5Fë‚¿—yè"DgúLçjì_Ñ湯%=4Üòé£TkI( _ËÅÉÝku^=±4,ÜûôújWÍöÔee¤˜oÏЬz'¤PkíW¯y¼`>Яå"ÀoA#­\s óª†~ÇŽ};¶Qâµ`>,}úµt<ɸÓA·kƒOïß§ú©acÇþÚ€ùÖ?V:rЯ¥c„•Yƒ°­X1B5¾OO×f³3óÈQ³¡h€~?>æÜ—TÓ«ïY§voô¯y¹›m³zÍòóÛÕ7æ¼¼**P,@¿Ÿ&FñçþV3´¨û)'wmðÿò’¼ó—¬Y»|þ¼z0ðÁî;u‚BúýD°³é_VVöoÞG‚hõåÐÙuáùó–$%²´,übÈ’i ‰_:õ€~?!ÄÄÄŒUY'Ï'´Ó7­ÉÎ|â‘è_g»£vúõŠÙÒrþ^¿~…¯oõmÊOeîÝoÓ½û ( ßO ÛÖ´ãINî¹s òedå,,,ŒØåcëÚ×—ÿ"!=ýü·Þ½ý¨ª*gc3RA±}kIÔ½;ä=ÐPŽjªS'7GÌlö@üújõÛw_¾Èk£¤Ð§GSj÷SÁŽ¿{ÿQC]ÝÐp$ä3ÐÀGvî«»YÏÔ´©÷’mµ:¶¥f´Ð/‹ß’E/úö4iXÌû÷¯ž8‘ZHÁhúô¡+ÃKúÕüÅÆF3°¾è÷#éîSÅ64÷ª‚%£€dRs‹ ÛH0õ°Rñ ãPgÝBš{ÕÂÌŒ…PÌí9Ý»…O ßχ[#̽/†$ˆV饷ÿ>â2©Ns©CW-÷ó¯Ó{¿îݘX K¹Îâ Xéôûiñô%æ^½NéÞľÃÇ&ÕTÜ«¨"Œh„`si ßO£!Wè÷ò¥6gl}¹WŽ(` Ðï§ÀÞ£qì^vîǯÍ:*Öt´³îë§Š7Yt ”Я…£ÁÜÃh-•û2_µ­rÕC®ž0^¾Á17`¢6è÷ƒaeHˆý”F 6^O»7ª:úרØÅnçà eôk±h$÷04Ù=ªõ¯ÓÔíZѽ è÷sáÎNו ï L}Ö¥%oÄ%*ù;x{ÔøúMÑÕ% 8€~?"3·Þ¡l~æßüßàž‰.°¾ZÀ@ü½tÆ  mg¿ÈÆ’Zôªä3zÂïèÏd %•ö„{N˜Ð Šè÷SÀR_£MP£v_¶ QÊugÔ¯ š­0aÂ8( ßOÌ¥ó©JªíëÔ-#˺«ÖiX7Ay+÷êªû÷˜8Vúý~> 4Öݵw¿Ñ—BOîÞPëÖÕÁ-4t¹Ÿ_í/Ö‰‡»NœhôûIá:y"þNºz3¯è­ŽqoáCï¾ÈqioToîQðó£ÖÎ9²U¿ ¡oPi.õÞ½ÅÃì'ÊÉ«ÂÎ@?²ìÕ³ßvÆ72æ1cfVõœ<²èýÐú ô@?è€~Ðý ú@?è€~Ðý ú@?ô€~èýÐú ô@?èýÐú ô@?è€~Ðý ú@?ô¾ú‘$ ¹h{∠~ÀÏ‚ xÿMIòŸ ß· „\?üƾQ£žRýhoÛ5·âçuç‡)»Cˆw£– /%®èÓÔI+$ÅšVNp=ãL,$-å›8¹Ö‘Xÿµg\suxL@ü€ï $ùAè#”ì¶ïIÔ¤|E”£ÕÎ%jb'ôñ¹ ðÉùFEóHZ5Ê×lPÌeK lâs;ŠI’)âzüÿí‹û@ +;×韜‘zî÷-‡ž|Ð14öòóQcVcÇŽIHH”|’5ºo#“+]‘¥üdç܈R7w·Ð` 5òùz¾gƵˆßÿ—ûú£V Ÿù³å”çùcÇ>IH(é÷6×¢r7õ|\î'•”ØÅ;‰{,þ¬*d7Šؾ>.ñ–”¤ÚÜ•Kt•ù÷FŸU"¡ioôaÚÌ£)!¾ö¡†ƒø€jÑå~ÌD=§ýÛ~é¸y‰Ÿ­ñþ]Ë⻄Ã=¿à«>`½Ñ$/¦ïø…òRõ·Ë]ßüI,ùXî@ˆ~Òo¶#'§ýø¹)A¸’äûÐ4úy*ú(ï¿Î­OA ›Ós=§ï£žÔ9!öĪo’fx2^‡ˆÏDz„À3ëÂ]„†Ë÷ÄzC‰­y4틳þîûÙ(ð‰t%›åúTŽÞþäå„ ùÊwõM€…,vDg’é‘‚ÐDuª=xâŸn nKå?—´`{fĸ“V”ò¡Ä‹ù—•Gº› v ®`aJ^ñ3¶D1Iˆ§‘¡Vƒø}Gض`Õæ5a§Ž×5A>Œï)yÎNîŠJR­uÆ…ÎéµêgC4IÆPìK¸ …Zþvü:MH&_D[OC5ì^’ðêÛ¥—CTÇxá¥Þ»F¾ÞÈÑGU„×þf\ˆÒèÎ"^ï™XLêœóT¹ûGÝù0ÃP² WÃÊcþ°hàªØ€cSiw 
ýÏÀ–ê§÷Àé±WéÄóg.ŽžŸôU·]ß©‡íc)8Òr‚ô[2¡Ú‚ø}ßà>Ù'ÑñÚí4?ìøôA)q»}}#–ûbŸ¸<ÒN¹)/gOñ¸uyåC@¯úñ9fO‡ª?þë‹pòªÔÎdéB¼7Ïiš¸¨gÅ’¡P*)aƒx¯ß샬”ðï<ßg¹_7±Lžr¤®ìK¬¤~oú}Žçìèª?Aø“þ!¤zT9E§ÒØf R±‰ü0™êíÖUÊmÒòÉÍœfI–„:> ¦ |µ&íÈüƒÒBœ„6#DµŽÓ¸)Ÿ¡ëZÿ(9»ï“”³ûü¬ã/µF®³bu[ˆß7_ùºú“ÿ†Ð>lÝÕN>«k'9WH”LØË¹oDô%D͵»??•ž„}ÈÎÊ8>1•r 'Ï8 fÇö– äQ"õ÷Q&Ç)/#«w±‰³øojCùd?QÐ’V–LˆóÓfemœ˜*``Å\ó&o|#Ý®ÎU±£_õ똠GtSwÝÕ"_ ¹¤U½èñÆ…±é˜jÓχ,ðZ½:‚Ö}’ü øÐpˆõªþ*;HØßØû é-Üü]sÿ3¤¿HTôº³6ÔA *’ìÕ„²÷娵_®êѪ>š#Ö’äÚjO÷I$} ƒø}7(¤ÿÍœöå°ªþdNH¹–+Ÿ—OÿÞEDß>9ß,©ŒÔ1%2Ò¯]KÎ())1”^±:þ1:‹H^ùY’ðŠ×0/ä+_wg/kà}D– ñÎËò'ò­KþàkBõ ùó ®QWáfTL1O|"»ûH’sùL9-ÝõD«Èt?ÊËvI®»žt>#;ëÓ[â9‹ØÜi[}Óýº´Ã1"¡÷.tâ§m H÷ ±"jâ¿àV¾OŸú·4±€îŽˆß·…ýoÛ¤ÎQ+=ÄéUA›þJõ°7¢},§«ÚÓ ç†ï®t¢@Þ\öÅD¬Ûñ?_S1^,áõJ}OmþØi«AèäY~óUß$©9l©çÖ­\ï2ÆÒEÐa­²vŠßEÓ˜I>ÝBé–çŒjưÊX ë5u044\Ä ~ß°D™òæt‰Î£ÇjÊ•Oô¬W'ˆ6#èÁC7÷1Q‘GxÞÔÈž`9ajŠRR§8{ûí¥ÝË­ÛüeŠÎÿCÆùÚ‡¦Íí!>™Žµ~}8! ñ¦3Tz©SOœ÷ïjÛhbéÐC¿½6Bôˆ¤Oùè—@访ŽéAÍøç<ÛjÚïzJù)>ê·€ WÛUÉ@^Žá|ë4öpÌaA~ rrs‹‰Šª©Ávï¡3"N‹x^{Iš«ðÝ6o~EÊzx:}9®Ò¬í»Î›Žp5Vi–f m?í!þN…4 õ¸/H?į©ðÅ÷"Õ<µ•† {Fn­8WŸ×v«îu›‹^ùD‘x„_º ‰32²–›õýBj ÿ¹µJiˆ®1µÑ•¨ åc¶QŠ./Rè»Ò¢-±àì«Y2×nüýÇÄ€?°}% 3K½6¨†…äÇâ϶·ÁÛ'’n&Ò£øk±Qê…ý[wÆ@R=æÚYè6<­oÏrƒo ܵŠQ„ÌÓ«b.ýêðáyXè†ÔÌ\EUíYséñD%ûz\µ÷…8ya«×áÀªšK—Ïn⹓?zú ~@‹ÓX”HõŸ“yƒ:Áƒ§O]J’+補|c´?€vwëf„ÄåjYHnÖSŸ©¡ÓÕȈšÏ×Ëo—ð­ˆU2‡V/Èò§OT'&"d5ÆmŹÂ-ž* à/~@½ƒÈËÑ!VD—¶AtbT´Eï }L  ¶YséptŸŸ>,£‰G~ôô@ü€ƒé±ätºvéèÊ…¿Ι8‡ˆ7CJ­cÛvÝÁ?••©ÞJ- ÉÕ”ùÝU- MM%„ò/k…‡÷OÎ7BóöDfÒrnÑó¨ðÕžA¬ŽD ÷û«$Ýß g÷+º²” – B!$¯,z_ËŒûÑGÌïK̯¸dHÂÛkÙ&Êì=ý? 
E èæ³رéξ£cF kÕòÙ›ƒÒ–T>£ö…ä|H’´©¾Á†F¾NFUë„Dzü¡t{ ˈº…|,• è¤Ò’“}hšÖøÿÕ¹²*B·P³v•~ôô@ü€ùžó‹Ó•Yì)ž]¥è^äÙÚÓ`oÏäu=—Ôë6CB‚T¿mVÏš©ðäÉíºCÍñ~^œJ¼öÍ•CVQ%8dU`@&eÜüç^Ñ÷|´¶a\?ÓÂf;B}‹Ó:ðü+õrèÎ\Ñ‹Kœô÷<¿‘/ð?CFÌú¾—ã¯q[Ùh«OѲŽîAO¨@yd¤ÈR;oo•²ÒR1ñº>a8ï³üýÛ~…~ꤛˆˆ_9²/¬Õ8_hØ„d6uRr.í.1wÑd4*’0+âèà¦Ù´ÁI""±ìúà7³ó$y±‘iˆ YêT¯S¸ÿEJÌÔîãýèRØ7©UE7ëzïMRè͇äKÛëV<SÖ÷œ¶&]ðsÓõ·}:Tà cUæŽGœ»“™›ŸÛáß[à¿g4å>4ÑÊöïÔÜÓõJ‰¡yߺ+-y7y²|uuÒi%îúºàõÛ¶“mÐ!©^:¹VŽ–«£t;¿éÛfª©k-·Û4н·²¯É†$ÿàK¡¿Óa–=Ž\VCJZ·nuàÀ– æÔñ÷Ø?mšREjûþB&Y ~’U’Wy„ÐFˆÇ26ìÌÈI¼RÒ»·fuO¸˜à?k\O9y²€øñqZDD§UwÌâíiêå®yê\išyK–µvVB&/ÀXk­Ã YøÄ+"0 i[Ùe&ÆóÚ›$V©¢¯‚¬pu/$Åâôê–SL,.ØûʇéyyùyE„F8îºã¢/rE'»²˜ø[y$ɸɻ"Ò±3}ŸLI2ècIeV´5±uN9 ¾,¾åm^¼•`yuó:ÐL’Ü’}ÂSkäf'7·˜¨(d°ªøPÛº$‰É„T;7¯{Tl$D§±ÖŸq&TÛ\àÜÞ ’zƒÍ±nn‡q&í‰V‰qŽ8½ÿÒ߇⎳è”÷Z®. ·9å>Œ”è<³J¹Z Ÿ‰óDƒDù/R°ÒÉkALÄj„¬± ï‘]bQ™I'¸„$¹•“wÿ`‰ž— ™’þû(Cã8q B¢æ­Ô éÜvsyd÷½Rg½¡o5Öéýá˜Ü„*ô«C™ÙÈÍÍ1*jÏ’\1ZU9· rQîâÝ‹Ž—{g\ýÒ¸$»F1Úë²…J´VE¦Sÿš‹Ýùg|›[§æ^Æ’öÞïéL %D·SKÍåPín%-D]‡"îý™˜ ª2ÿwp}ì·Ko‡éˆS!;XK{2qD³Ò/«[G›XÝòÛ>ãÊ ÏˆÇ¸"õŠŠ¼º4q½Nìvç w÷_~p(Η%2®Ë«B,–Ø÷õ»X<€ø¡Ôlôë:»ªÏJúyŠÝÁ³Ö°TfÞP£|2/ÒO=ú™ˆŸPEIbÂ,¼”¸¢í“^*z ¦«šâ"ÎR¾8Iù`™¡Ë®1."KŽ‰çµ» |(Ëü4øi†SOÏí¶EO$ïí¼ÝôãXðÜ×áÉÀa¾VmM/Ý ’‰(åøëÏ4 ëœ$6¸¾P\äp.´Ct¥´eþoˆÈY]¼DÒ@ =’–ò¨hº†‚Ù6'ò¢s—(œrô§Àn¬|ÅÕÂù}Ë\%.ûÑ}P[tx¨ ÛiŸÀO0|­¹VNw4‰Æù(„R¾»«%~Ù]â8&ân핇Ns—»Äö˜kÎ+ú8™ ˜Ã1ØgÆÚ Lƒþå9ŒR: ”°òáCËgmJw9· ±¯pQrþ£‹""ñå\±69=Z‰‰ÕÈÍ^EÖpTЕ}wkÅ 3Gø=?fñ”²cóè¡ÖCq)[¦^§z„ÇÜôþ¬R¬•XYY™XÍéá÷ü¸¥ßÏÃELŒ¨GVW–lMÛÉ$9럠KŠ••…ýqe4wâÊÃä<È®šÂSk;í‡OÆUë1Âʇ͹JÞòrÞàçËIO×%I':ž2áÆ àg¿’ÄOà mÛÿé¿#žAÛ-W\H\¸¡%nëN-.¯n´ øê¬µD’´²o’ÓóÜýS°×ÚMäkm>åù¹±êZt€÷…¬ÜÓ>з¸ѸçyáþCÕcRÆÑÏâs…¤U.UÅÓ²²¢=ú­Žœ~ó_D5Îüv_š¥ÿlu6ºÿ¿!qV}D|ŠÏÑ3¾m˜•ž¹ý—À˜Û$ùZž?B,,®¥e=Oß?BÀ8IÄÈu–svlŸÚVÏx¥î\n-IûÆÕ¾F#’e] %œsÂi«zVÕ4T‹è¤ëΖº¿Û¹‚Ÿ îÕ>ø|ni/+"+/oßÜAt¶× U“W’±S‚ w>`èwBûgü”‰Õ¯{„{&á_Êý¬’gg,X=—äçðÂ1«Ѷ³éZwÚL_‹îþWõÙS¥(±.δ³ÝÃH]««íŸæ Ç«ç«Œ„ž9¸Kº¤äíÉ¥K©†þt@ëF[H›o\MVÈã^øò­‡.›I]^wM¨çWCȈ?NwÊ=ôçS“ʶ3‹r³ÄÚ|1Á:Ú¦]nÀÒ³7ÿævÌnQB@û&É=yɺ.+«ê§‚5Œ~¹HUæ%ZVònYyk÷Í ©Ì¦„;¼ñ ”л¢rO\µV…Ühïß}uÚv.H+ë²ÍŒ3èv.]µb’²,EÇu™Ñ©SOÐ?Te”ÌÖcÙ6ÏrÇb’¤GÞª¢z~–zjhééñÔC9E“NѼ‡Iy€„*Žèd2Z8yw’¤˜—Êï}^•P—¾¼0ÔdÝDÒ§ÒuñV·x5é1÷¿Jò&°ý÷Ž÷ß[ÝíÔ˜¤üü|QžwŸO’óy#Bü ¡ž‚Üà¥ËÉ/*®œ§ŒZÒ 
_~zÕTq8…gm×ÜpÖ£úU£ŒÏå½™¸ì É{'-zûÕŸU‘`^°*ÉctÆ,W‘ôT“UïÃ/—>¤ã>ê¿®Iº glyÈ5ä´5"ñûˆ”i•¢¬yܵÑo¥JÓ8 =N¤zðÝŽnüWàö{^«o/¯'èÏ™å8ò†Sùþ}Rsð½Ssý…ü‘#QÕ£“ž¾Õ@O´vŸ£KáqÅ›Ïô3þ²ø)·e_ºtªo_™Úƒ…Yº¿ ^¿Ì’…/J¾•Þ¨},¥=ycΦQ‘ÉÑ[4Ì5Vcû%¦ÎE ¬ÃŠ{û!uÏj[[sªï/2H+ääOÑ•y1Ÿb‚‰PøGåîÑ1Û±äô±"qVu`œ:Y6ÔV´įi x~5ñ³Ièyý5¡¬¬\ïsLe妙$Äd*?î’˜†žÕÜQ}op4îøÅë»tû#YårØç¨¤.#GŒ®c4yyMCjZº¤»oß“ýê«%þ AZMÂ]±º˜Âܶ½hÆt£º_N_𞽫'û“ÄÄ™âüäPjõÍÓp:f³ßüyu KGÆŒ:^»˜²R©º)(Ô{«I)Ù¬GFšÚ·âÛèßp@ù@ü“F_¼rCN‹ýõʧ÷l©¯òÑ6lþÆM+æ4ÑFLãá$I-ŒÓiG’µmûì©R›6ƒTÕøÎLKÇâÝ;í©(íµŸkÖäÍŸÊâ„Я·þÞ½¯çàQ_çŠeï ÚHµò›×ðýæx.:urÇPÛ²¯–K¡¡/üü»1‚ŒŒŠŒŒÇšµËçÏûJ†¹ËJ¥oþ#9>léâªÃ çIOs_\¸|£{ßAÍw•V¥ŸRO¹þÒÛ' µ†¿ƒ‚‚–.m^›™Û·½™6Õ×ÏO¬©"œ?oI)·depH3§\<(èéÒ¥K-, vƒø€šÑ^µ‹Ãˆ%%†8y4mä÷®žëfh`ÐY§Û/M¹qàRÞ’Äsgc´µ‹´uZ7aÌ$Ùzíº'óç-ž>£žM :å«×à^ JÓŽ9ßJ•ýT"in>Šwˆ¨$%$èé—Xׯ[o5ÚQZ^©aQŸË.Úik?¼K'M£ÑÛ/Í6ƒøæK.îUð|ìX™G•vGúbÂcOÏ‚@už…Úp,˜Ïßbã¦þýd %“ØîÝoºꙘÚC-ñPAá )·Òÿûûü9 I¦–~×6:I´–ªúsYQîãÇþ{ú(üՠ~|›Ý½æ|Í4[÷«èV–•–ž=“œüÔ´‡R7ÃÖjêŸEJ¥ÿÇMIy)+#io7²2µ¹’aWêóõ1Ç3@à.xýädÜ‘‚‚÷Æ&JúâÊm*M@ýü¹Uv–Ôô´;¯ûöélÝoíïâuâ45ŒØúøS[562e? :tÊК×qÈ+ >½©Ï÷%ÅŽN5î—ÛªÒÖ¡>#G@­€øâ?À×ûwï6nÜ(â9zôh}}}Èœ7îd\8y¼]GÎ&,…š‚•–rý{óAêu «þ6–½¾eŠÉOqñ[îß7`€Œ‘Qm-³ºø÷ûÏd«q®22m¡¬ ~€BFFæóçÏeBû¸tìØ”ï‡ÃÞ#µfJw2¢v*’Pn?ÔÙóË”gè™ôÆì¾õâ¢6Nz’våœ÷¯`î§ëÂ"½½ÚJJµB²·gâÏÏÒìØÚÕ•^#qœöÙ³çuŸ^ýµtÌ¡@üõâE‹‚‚‚h·¬¬ì¸qã O~>yAJNAM» »÷ÀÆÇÖF½C¿qS±~âp®ŸŠýu¶{3tòJCCƒýüÛ1Z#ÿ&X6î計ÐmüÙ·¯pèI Š V@üuŤI“öí£ö·SPP`±X!ß?"6GZqÔ1n–áÊÖL¦Õg¬‚q;Ã4‘ì}ø}ËúÙ³•±ò5OV@èTöãOe¥f:Ѐøê]]]111 WWWÈïO^æ¼áXuþ ײŸâ%ðáõ #íOhèr?¿vXùš;Áš[ã^ࡃ‰ãÆÏ‡ªñ|‹!ù£Ã‡eO^ù«nðÝÉ|ÀªÕkü4HNHîå+›°ò}Í/WVùè![W¯/Ôˆðc+P]f²4ðu7Gíðp›V?á+{ŸýdgŸ>ß` ]LŒÐÕ»{íZž…Ũ9?@ÝÁÉ~^¢©._[ˆüœ¦š<«jH¾Ovv¶¦¦&de“`ÇÞƒßJùhôñËÊz½ü;1r@™o˜f3³¼‚‚l%%¨„€]ü mv'ç;›´Á¿œ "†2èNÓËÉH+äl6ßb‡Ãe¢Âô¬|ž7==C—ͦv¥är¸ fqNÖów[G žƒ‡Édºk#/qìÔxæ9y8-6[d62==E—m‚ÏÍÈHWÐÐUæGƒ2ÒÓ‹I–±!?—Ë`PW|Íeê d#+ï…º®‰|ùN™Ø£ð“´®–2•r^Xï‚à9ØØQH’;²|_‘iKp?EàS´âU•B…,–"ü, µfRʇS窠·g ¹Á´Êò@…,¡™à0GŸ‘£tÙÁNxÔócü…gvöæôéy$¹g „¯äòÌLyµB:1©IIz––leµÔðÁ&Û’é¾XpJ°0Æ8·wŽÑ!ß,¤/§¬†Îûõ¶IœP¸á-}Gµdn hÉ­‹¸…$Í$ÞcOJù><¯&I¼L O¬ª|åˆCLccS‚–ðÆG*BÁU‚L÷ÃX,/ë$ŽZ]:1’Ì¥ïglÒª#µä§‹µ)ùƒ‡‰!çæo„6–ɳ¢÷ËN@-ؼe›Õ§úž•ÿô±rûŽ žs¥Ôe„ÚL|VqNN©šª\½â¼våJÅoÀ™ï!ëÆc=xp¥sçÞP‹?°øùúÚXË 
4àc2!Õ£øîV„thÕŽÚåa­kŠÄx¨™ÀÍå}çf"Jü8‰òƒü'†ì ªÂ#PŒ~ê¼A!KT„Pؘî'ìì+…±4¶<_hbeEÇöø¿<J¥èACC SSÄ}ù\pGC¯T?ì·ZîHР!“úÞCþÃ_}’FÕeY˜)ý-Ȫ»Ù5®¸‹¶=>ÖM[—T Iß/Ôþ/BBªR?mó§øÁ©qÔ(èåu#<מKÍå n1—Á¢Ë÷‡#…{æJT?;vzç¿ÍÏíð§žþ÷Ònɶc_˜Á÷QhÃ~tﮌjg9~½ÎÉ| ÕNSQ"ˆ“FwK›º$øáÃSúRµ‡!w\Í“£,åHú'IFòŽ”¦¦ä)´—×ärp9ŸÌÖÙrJ2ºšå‚Êy›žõ^‹­Æ¬CbΜIñüØâ7pÕrJ:°-âÐi¦¢Æ¢Õki!I'?w§´g¯}×ÿAúÈ¡WÉÁ»ñÎ^OŸ»>8˜~íÞNU~ôŠêágƒO"¹ÌÛÍaìǵû£—, ÆŽå\œö«sIP$Ž<&Äëlök-ÝáAsÇS ðûyš m0l'8˜ÞÔ«ÇŒµHGÎ2‘|ã>ÖÙùÐò­Ñiq‘˼–¹Ìݨ¦¢Ç ¢À;K¡0㬃»»ŽÞÀÐÈx{ÇgIâ …-ó+’5\d;¡FgW’tõvûdõqñw¶1ײ+¿#nHÈÚ€êß»p–y{ãL0íDÆRݱ¥3Ú»ÏXY[’Pbôbgw÷­‘‘"|Åu»ú[}.Ýã^‘î,ŽÛloçàì¿n÷Jêö{{nÔê¤ÇêЊ—6’,qÀ«¤±oçòàÂy™LgQy~œÁkõx!+Ý/Ôþ/B^¹Š)¯”ƪ hç¾GT»mÏxå÷~OgZ(!¢¨·Ž–C36;îñ›tÜçÍz¸R‰ÐÅk™£w·VYÙþúâ|%—£ÓÅ`Ïhfœù¹è‰™f½ÜèÆªr&^|á¤!£¬V—?þ¤S§/#FênKæ%uæ7¸w›sa¡Õh9,$#X¿nJÙìa¬Æ¹þ'¡•I’^Ø¿˜Œd³eÑ›4B~c¹dÖˆÂÂP…?¶øÑ°œ0*û1C#c*~µ1 ð¤{-Œ€þp¿O0d4»)ÉHW ŒÐ²ð(ÚD…a¨ZGGó»MNƒM|Õaä§ÜY:ÍåÇy˜ÚÎ-ÜŽ××,?>K¾³Ml$¿ùNëÖå›Wó_ÊýJ’»èCáчk¸£TvÏaJ°°\q>¼Š=/¸~J¢b¾˜$K§å–NKšR›Jñùmõ¥¶ë;Aèu‰Ýô@»é<—u#æãéùmøó0±ñqü;£.¸_AQw1×´¦ûÔ‚O‹E½tKMàW9cUæ¦;<ûd‹?T\5†ù™ñÇE´ʬ|TÐGÕàcÑ3A„PAåeÜq—þb‚Y,ÜE{W—[{DFóZF|¤¸)ä§#)¦›6î|’å#¥C2r†_T>D½ù#  Z‚ø5Ó£oOÿoŸ—Xàb]b££›ðòqñ±ßõýþôxr/­{ß!‚Ÿêý¦Ïe<ÞEY$Wé`È륑}Žr"—x.˜ñÑû÷í3}){uAZrˆ9u½zG=™n½SÓ‡ü:czÛfnÇ·eÈJ¶û5Ø‘Ì_ùÆízÌ\¯ÒFKƨwj¦Ó†ÅóÚ÷sêaƒîh(VNÉm³N_^´gj<¾¬l»˜Xmª¼„¿aTÄ2rÑðí]x£8‘×'9Ì»ÇTïH Û¦à1]xã¢òÚÝ‚×áF•8ö 9pöñmÝnË|M¾˜cc°yøéÅP ^¼xÑ®];ȇïC†þ©ÀáXÝN´îË7ñó©éãçPß Ãgv¡}7lÛN;y“nͧL¡þM™Ã_½Þe?äǂ̯_9·ý¤û_÷~3­¤yÏŸÕ%Á„˜ØöÈWîîµ™t ªs¤ÜÉ'€?Nj>Ò2v¤¥ÐpKy0厾ˇd&ÔqþÏãli[Û_  @ü5B^^2áû„™{õº°Á“g~ËIjXn?s¥ªÎÝäÙîu]çî0æ’‡Ï&étíñ­@”r»èÖ=¼’²Vlì'©o˜ik×½œ7w1Tˆ F¼~ýÄï;ÇØA–GÏ_Òb›~ýK·*ý$ù©H]£~}(¯˜èuNÎßfÁ_XX(Äð¼yó¦}ûöß9Fìºfý§Y_ó¢Ù©—† &ÖQq'ç¹^k×.UZZìk¦yëÖB°Õñ| ……… ?üæûÞ}ðèåBAEµ¹¯EŸS.œ˜2yRc"éÔÉ!‹ÐUË›i'?ÄÆ~=jÖÌ™ðH€øꀢ"°´òÃÀ ³ŽB7Rÿ}‡$•T›¥¿.†Ê®<2cŠs÷Æ)_…fûSæôèýš)[öíãŒ5ÕÁAjÄPW¼yó2áÇ‚™1mô…¬ 8i†Dë&˜ZB tjÏÏÙ³XLé®Sš~¿\??J3î_J¿{môè¦Q© 7n>Ÿ<Ù{Ò$¨?ôü~øûÑŽôŒ‡=ÒgÄ$…6õ8¿ôÓåøXmÛýñ¯îó|›;Áºz}ÛÌîÝ!/OÚÛ3ëÃY'O?šã1GŠ©ÐYuÖ…Zñ@Ïïg[·ÛoˆçÃÇO?yZøº¨„[".Ε•ÕPk×¥“¶¸¸€tRFnßÌðÑäÉ^">œâ×Ùÿ}ù2‡Ãá|.#%¥Z+((iv4h£¬%Óµõ@ü.q(‚–‰N;àÏ”`&KÑÀ ŸÄÐüÀÈ„’$o¦e<|ð àenñû7e¥ÜV­Ä¤XÒòmT4µ´û˜te0¾»m„9ï³ÿ½sññÓüWùŸŠ9Ÿ©žŸd+ }½žjÆP¬?@ ((HÄmfffkk 
9ó#"úà±2ò³±uEñI(w0P®¾Û—^PJ•žš™–ìå1ë+¼}óïï[ŽNœ¤¨Ù±5¿Û',záOµ[ìÞä}(”•‘;wé÷è1Êâ¨7–.]*¬ |? Þqvþo‡Íê¥]7«! ‹D‹mŒ?i¯¸qÓ®^è¢ýöΞŠõãï$Û‘²¯Ý¢Ð‚[H¨¨,-B©[–RTJ–êºE´H;i£=Zn¡E ÚÜnhÑ¢B«ÒB‹C÷ýÏœ9Žc)*÷ù~çwÞyç™9ïÌožwÞEé÷~Z-góÑc«5zó«© uî‚üý¿§·"1}ºB¯Šf±ðš5¯èv¤â4•.]ºÐ­]ÔÔÔ&Akñ¶Ãö½‡´ Í…håk.´ †‘ÿ³_–%í\·°ºió€Ë6n^í>[ÆzlsxÆÏO°ûFß½SÖ‘LÚ€øMÂËË‹tþ ƈ#àl´ &žQéó»Î°Q-ºK繤žŒÝàï»à‡e¯[·|®§ ©|-gpo a„nß¿ÿ„¸•´Œ*”Ähssó‚‚III8¿>›¶Å²ÿóʆ£Çù›y â²Râß—CZê&#ã¤òýƒÕÕ…:¶*B? ôØÀyøÅùÀ,»–ûôg*¸Œ|a%ºžrÞÒlè·n».rÙO“=^üüä'¬¶±Åñ€6LÆ—o=”SêÑZ(hé_¾–5hÀ7t6ؾ=´)ÊGnhð8Ý„»ˆqTýd/Ò.W ¤Üä[…­èž=+'O^…hWâgBiÕá)Ë’ã™7»M!»ƒNiû綘 $0ÆyÆŠ±ÑwÌKS™s¡Ãt3¥V?±ÆÄã‹iË ÿƒWÈú›‡ØMm]˜„`Ó?{|fÚ´¦V¤ç/|VGó˜YgEtÚº Oˆ:ƒÐ°ÒÛ]MbuŒ³ýbfLèÙÔ'w‰\»ÜÓkÜaö!~y¡ZŠ1w4@“Eþ}~±Ì°åž¾q‘á¤8b|¼Ý“ñn6qQ‡É€¥ƒkR\´ÅÊ¿“|zz@­²µˆKHÞp½Ì]§ŒVú[²ìÍ?Γòó.<–šM®ênd×ëy|R¢ÓЄ Ó‘±­CZBµ¬mÕ=+1Ya|ÂK…ˆ,@®®S¢£w/I}lÜÞ£«›MtÔá]÷*Õ“G¡cሒc3Ùý‘QùBx±kÚáh2eJ16x¼N¤¿§­«gBt$B31ÞLBµÌ¨÷M¦Ñu°¬ŠKÊ.ÂXºöÚúg©äÚŸâ—8¸N‰‹Þm³.;aŽhmÃJ þîd†‡}üâ§Ö±§A¡­}˜|l{T=ÝzFF¼Í¾jªô‰M»Ÿ±2Û`•‰‡òöÙt뺂?füM ò¬úúz†‡Gj/>—ùg:O¤¿v Í1Ú¬¼˜àó_QÁóY­®|$R]UV®ŽX0ß»)‰O¥\sqiê£Ê“1Nê_qµþõÑ9Hþg+ɹ|õ(Këÿ—»Q—‚ ‡p…ñèFsvž*·W ½ˆß‡§ä?ÞqpùÔM‘¼§ÓžMìÚ0òæ˜Ãî¿KÇhß!ÊÃßèw.™ÓUÜùòéÁÕ-7VŠô÷vÇ!uv’xg;!˜š}ˆÔBJoÓãóØñ¶»&8qªž„2Zt1u™aɵžâžø²÷î/Ø ¡D*^:+&8Ç•»Ç^wˆ˜¸«ŽÁÔCqfr,U B;F ¢sC(Š´_€¼þûSò•†²4§r3÷ˆãz\õµ!í-©»¼+꟥1—ìŽíPl[«îÕ6Œq5tø‚OIË UÙÒËkfC¿ïaÚM¾ÍŽ{õˆüÒâwÆeœ[ri™¸BÀÕ¹×§ìx¼{ªÚ‡í£Ÿc¬Gî„Î!,l!AÈ¢?ßqóŒ0öJ_>˜Xþ5ío—\<“8zz/îbæš.+s¸‹2>vkà—Жgl{ļ3™q^/e›M€Ý㨘µæiÆç³ O}“%jýš6¤-3$kÏOÎ\eþ¹<~SÓä]”Gº³NP­9^Ÿö¡g’Ux‰”´O~OgêK"¢D[N!«I•V--D}̇÷ÎÜD:ÜæMìÄä·£u|ìÑ?/~¥Ú‘JÙÍHãIú„t$RÏpâä§Z=ÎdLVâíÞõsÕž‰ÐÒ-‡. 
¾´ú*ç÷…”‘{Oõ(<|ü©Ní‡>=ã“ëÙ¨ÁJJ}¹ãp~7á–VmÔ…s©F¹µ\œðÍ3×w€"€ø@Ûƒ!"»¨åþW–;'<ŕӔß*’iÅ~»‡íÆqý9#Lªe-æ,"d˜UH7”UèϦÐ)Q)ù×C½·±©ZµoŠ“!oMã?©§÷m\üä´ÿþûÂÀ\³ "ˆªÒ¬, øCcs·8ô$T ·5Ï·Z+Î@¨»§#2êvJQ´nÞ_âú'È B®®ãlZö`¬L+¢Z¡òJÒ<‚ ¦ÛuuÓŽºNeÕõ™ÀØvHZÂ2>¥$Ú´K­G[ ó¡P„?h{Lwü#ùÊMy•ž?cgX¤Z»±µ„ÄdȰ&fóàAåÀ5j2y%3X†æãu*ÄL‡ÜIyh°ÉÓcì· ”wâvRDfñU7f!BÆb®ìÅ(Ò Þwìô«Á^9n1» Þœ[i¼Ä?5X¡I¤³XÇËÛ÷iŠ à€ø@Û¤êÓ»V·!q{Ä¢… ›˜xòäûö­˜4‰ÓÓ }#A¸#åaÊHpÍ`D¨í+ÂQìfËnnwG *êa{{§y3aW{¾%%-ñùÖ/íÈmݸy:+’,ƒöÏ ª³êsy';(<ˆ´UF ²eÛN}+ûÖ2à̾¨¦+ͤIþéW ¦çpèÈmÉéå]ۇ㠌澎§>„$Ø-«·ª“3‹êN£¡.é86Îú˜QMgǼ|ÅÞ½¡Ÿâm™™.Î %©l…7XI;"ødžFƒçïÙ³bòäT †öhº·»òXKŒ-éÈÅ?—ëöîÝŠ âmÛ1–•••‡Ï¤«iëÿœ=¬òò¢'ß§|4“'ûïܹÊÙ¹óO;K6¼õðp…ò€ø@»¹T:vœ`14=ãZU'i1IÙ–ÛQÙˆ~ÿÁ¬œ}ÈÿÁÁÁò-zrRRøŽ÷ðè åñ€vˆ‘Þòÿó—EÇ“Nê[6ó$G_â ~À·ðêÕ«mÛ¶Õ‰tvvVPP€“ÓvÉÎ}túø_Rò¿õÔÖÿât²U••ný“›yåwCcó!ƒ[Ób\yæÌæ¬ÅC‡ˆ ò•„Ÿ|NKýTQAØÙ:tS„ßñ¾YYÙÊÊJÞ .äååAùÚ"ñ‰)¬ª{4¢:+Œ˜<³ÑMø:vTÓÖ§û f¿,#ÿ¿{õ"ó|¢×ÜŸ`ð¿•ïÂW­÷ð¡š}høÁá#äÝJ©› ƒƒ ;˜HÇìß_¬Óß@­—1”Äø–,YL‡edd¦M›ç¤ ‘”vµ¢+kôï¡køã¹IÈ*˜MœA !«¢üâÑ=Þs=ZÀÉÃaaúùËuèˆüý›¡ÏßĉâÝ#?‡J¶–‘UƒR€øMbäÈ‘§NQ3—JJJvì?DÛ`ûÞC}G*¨õk‰Ìù„†Np!UðĶˆÅ‹6O¦˜¼4$0PžT¾–°ÙÆV ¡ / Ï”kjh €ø ¯¯úôiyyù &ÀÙøõyþºøÎãBa?£Õã(oRo^HtœøCs#D¬ùÓ{ž,=¼ A¸9xšI ”™’‰Ð}¥Æž¢;\õW¨3½m}äùäsOœ¸>jÔ|(*ˆÐK–,“Ð&ؼ=ÎÀÒNº«ÊÏÜiß!V«""}¼=¿Ëá«:u:‚T>Þ8§†¤øéÊ–;.¼Øµ¶¶¹Æ.r=èl¿U cg‚ðÍgE)w¤Ô1“ÅQG?m·ðìþÏú¢fÅxKv¶’¶¶”ÄÚ6«#×›ÚOo•]›Mr‰Ý;ÝñoT¾Ê[·7ŽYw¤Í¾š Òik:Œ6!Â0¦_.R3È“ÊGþŸíc¿u^IFBÊÔ½A„ë²§·EÏ¿¬|Õ:ÚA[ûéÙ³»MM§@É@ü€ï¡  @YY™'‚Yð¼BYQ¬Ñ _(ÔÚð+´DžíнGN´–òÑü>bÜŠð•þ¾ š¾É‰£Fw©/C¸qÃÅ”ª•²ƒ"ùIDõª´¢ÍbÒܪc8Ó»§”D›vÁº„[¦ÒH\`óuLM™OžÜèÖ­\Å@;?]‚ÈD&_ ìÂ3 #6Ü*s×"—²2sÅST–¦n£,&“ŸÁÈÍÉ餠¦ Îÿºà~1–PS‘¦îµL&ƒ½JFMSŒqc{3?½¿. 
6¿¢ô ?ƒZ]{»Khª×îÀb±øùK î}%W±JŠrŸ•hjqÚ›1_¿È/z©¨¦Ã›NNŽ8Û: i@)ÑÖâÞÍY¹¹ù*jj,®1ˆ•““+.­¢ ÃàQVf޶޿e¤ÁCø”ó”Ð¤Ž½V†tâ‚û¹¨“Ì´§XEEcœ““)®¢£À@%×V« fǧÐ1t>¦"%ËÞ÷ÿDeÎàOd|Ê_ñ¤w1ùJÉ<}ÎÃl„©À<‰møè4þKˈ^G1þGSô)¹k2Ÿ¬ôtu##Mi…¬µ#t¶šÒù—bLÞÓ#Œ‰8ã‹™‹”“Î=³´Ò£­%9éø›ãöŽX""¤l,'Ì/—Ô:"ßêcüÅ £©‚Lˆ2\rúMðp5ôá8AŒáíóÇËk„45uÈ O)ÞÉŽz{œ¢Ò“R“äÕ×êÌd|¶'!oKݦ£­›_u;zM2ûóXö*õbA6☾â0B‚èâË:‡³~ƒÌÒìñù1ˆ²ªê–€H_2@°ÏŒ—81*õ}°±&óÎ>¯/m‡¾ôµ¶îÚ3ÐÜö[·zýô±ôoJåoŸ³„E<#RúâE¥‚ü·¨4Qü´¨‡Ñ¯ux£¬Üp?}½à?qpã9XY eg%÷‡—@{?!ò_èÕúœw v!•M*¢¯jÝ+¹WWj{ò*t˜ð;å‹ Ðå®’fÿ—ëÙŸy7yù”JªÇš/,|¢¢Á™úz=m0ÒR§¾0Ò`O+ÃG§,Ö16¦…äñÝ—ÜÄÜûO'R8Gvë÷—eõø‹¤÷ò¬à™¶®zõÅ3òQVIˆûìOFý.ÙèªhÒ_ÎP¯?;CåF3üMK ¯ö/‹^ ¤J‡å•¨£f¿æ¦Ÿ1m‚"ûho>¦¦ðÛ˜b¿Ê¼^®ÔãílÊvÕ@èëÕsnž¦a—iûÒBõ‰áÛQŠËÕÒš“OþÖÓ$¨_ˆ¡flkSR¸cõW~£v@y“wñöÆ1I#²©We—VñX•’UÈD¬R¿]tʘLaCL’úÝâ§÷<¯—²Í¾w;»³œæ¹œq)7ñ£{wDå{Êtᔽy„å”Åù+¸yÒô3Þƒ_fðŽÕ’¡Ÿ¬ƒÓ©+7'ù¼–åþ5W×yë ²ý7EÅšš ìzˆÏ †`u–‘áŽÊ]™“S¤¨® V}«(xð¢14{6^U~æì? ~@;©ö¤nÜeÏý¼Ý=-4Æ%ž}¿‹sÅǶÚ-8ÅPü¾³o éž4y¬ÿôu3‘Ñ_íËÈ‚ì#> ctÇyàÛÔ…Q”sÜn¼¥ª³OìÎk2I‰ œñ›ÛŒ ¨­Ï&”Ü·³´bHvõ[½QS¦#ëÍ•U±æ9 ™¿^A†’+‘®f«=«Ø¹ †,߈xqî;77UuÓ°¨›¾^^I|!!!ô~©­d»Û/ÃïÝl-Ý{;1*È3hmvÊJôòôsZF&“è„aœ{n—Õܳ]»oˆ âç9¢¬nÉv4Vjøy¹Ö¡±¸»®Ÿ!e13×Ñn!ª>iõè´dɚjáƒs†’‡²¾/£÷XŒüÜn?{7oÍ^ìÝ…ý»8x98ωz¬$ÿ”û‹OWãÜhŒŠ?Äé;eÉþ]¿˜z|R"7%¿”AÀ<êÖ0ÖC›Sõj0Ï•ss oñb=Íâ£Ì訰µk©<«án•ÀÉÑÒu-{—ng§§_=®7$¿³¹!µaN‰Ãœê‘¶=•I¨ƒí]¢ÖXˆò¦^õ? 
z«ú’î’„´Zl|<§ŽÈÒNRŠ÷¾ÆôÛìM9IkŸs:aQu~ÆÚ¸¸úÖ* œ8©p¹££#oÒ7Ÿ’¯àžØ€9œ¡ׯ&ÔùYÝyì¯ùMÇzÄ傤öoÔΨ`–ÕRû3+•Ó•M[ž±á•àÍãDЧýHmßñuûצPA(¯ÎÅÄS9Q^òŒÌ’^$½¼·už;++Eœ#©6"ºÐž}ƒ˜ð‡¢ÁãYEï’«vôýÙO^&ÆóÝ6e˜¹Ïø×4„¡}ª ‡¡ÞÓÖúsáöÝ{KU;ú÷,-­‚;,ÐŽÄï‡`à_²rÌÈaÑu‡EMO—P?2ÌéíQíó˜ßB‡¦zf/:VB{ÝÖN=Ûyž:•úš:‡Ó{¡×HNÊò·y73.§Ä$»¸÷§n7Þ]Ü¿=ÊÔ¨)6oßöÆÛ›Ó·]ZGÇ[§ádkc]ë>ÒyãÞFV;úCãÇåqôåâ9“÷M\rø oo™b‰ñœG­7oElm'C@ü íÑ[Uyyè «©ž?gwB]bN_®ÿöñ½ùžsš˜‰›«{EÅAfáV^ðâ_e…šÊyek«Xë/¦>ÿÜÕ Jâm“Eþûþ:Í™´¨5 þ­R”kzz†ˆDR"¢Ú3³*YüK ^±5{бJ>ä>/ÓÔä8…/ ^½zW¥­ÃéI7òÌÍy!Sݰ“ɬb òœü÷t‹PRše¤+¹)sr^ˆ+Ê*ˆÑ5Ÿ•¹Þªô”eU·ݸé­ûì(<ˆ´a&±ÿÄ™^¿·Âij®b½.è×Oë›¶²´š™°þÉÚ¹ód¦á=þ´„ZÆ~šÞ„ÆQégo™j)(#+Âm@Úš #$"2¯G©i*œõ[bvʸ8ªL\ÿ™RSS„nóé%pZ\?ãédÊR*^!ÂÈm÷°¯¼Ž£ØM@+D榕DßÞû”ñ€öÀÄQÃCÃÂ-œæü̾ʽa¢¯#$¯õÛÚÚÎ Z9w*»q¦ FýØm;RïK2Ž›ÝÁ˜ÊöBª7©î¥ú:E÷Žà4R¥ÞrVTq;à¢Zgñ³‚::²Ü›Iüæ×‘þ ¡À ~ÐNðó}ôäi^Q©tW¥–ÞW]:¾oÖôšâxlÀúÞ2ÃWnòž¬±Æ`Çñ…„LÓ3NÙŠÝfî4ýcÌ£âÿy%1¨?YY>‹/ëŽг5HDHêèšã[Ͼt¿½ÚR“ÁzÝ;dƒ·!(ÉŸ‰Šr‚£œ2oyyíi@Å»ú€ò ~оPíö›j7tónå”ZdÌO~>t.>nÎ,×¾?¦|ˆÛ±Õ€š6«âÝRvœP@€1ùe:}Œ)g°n‰µkÇ#TN†”µµã9 H¹“õ¼ÑÖóª “îÀnæÉmª=v(:v÷“ ©ïÿµ›ÛM@ü ½Ñ··Z_v`EøÊ¡v΂ŒNÍ’íÙ1.S§Šuî¤9˵Ùmöó£$0?ÿŸ®?¡þ°¢Bu†½fh›âüFò|òDòLJ¾Ë4ÿ5®þP,?øÀf(÷Ñã„Cû FÛ‹IÊ6}sâßÊ+'ää¬-G‹ý=[ü…¢ŠÊïä‡8°žÁ¨=šñM9ܽ+ú×±³ÝÝDEe»uC.Ó  ~ð_EMU)À¿îœ>?)xò´ä]q‹ÅÇסsçÎ rrj=øùk¬î7͹µl¶·¯«µÌÒâ‚Ç7^½zQZZZU…%%%•º©ÉȪqÓôîM}Ä€†é¡Ôü´!ƒ"âC44à§@üÄ€¦P^ÁʾûàùóçJJ*+*:täctê$+#Û·WO) ±_Óæ·o²óòo½.*)e~®ªB‚‚%%DëÚ]Eu(ü ˆÐ<ך »sçή®®"""pfÚ"—³ï¤Ÿ9idýC´¦!¥¬Šª¬J”Ï*г—5Ó$]?w‚Ÿ¯ƒ£½í϶³vÅ®–—ë8bdgnœ¤õAHý¡©BèûSmÿ³Š=»ßMu¶—‘S‡ßñ¾==½ŒŒ ¸8(_Ûc¼",Üt’«€ CNe¤Ã¬ïÈDw5“e6[Óî±µ£ ׂÓ=¸0ç^õX1D ''‰ïÈ¡kW?9„RÉÏ»·•{öÏ™» â4sss®ø)))9;;Ã9i+\øç ñIwU±pžÛŒÙYO.B¨èeYʾhïfžG",l©ŸŸ\OuÔS½Ù*]%$;Ι+Ptê…OššÖRÒÝ¡l ~@ãwîÜyüøñp6Ú×ïå—”±$»öhѽ˜Mr%}Áó·yÏõh.Ù#?-g°ÉN¥\¹ÌìÛ×A¤“$”Äh==½—/_B…g›€ØZògµV:Á%ùÊMC-Õ΢ßY<ž=IìÔùq‹Ê/ƒ%¬\Y´`Áb(-ˆð5ÌÍÍá$üú`Œ$¦üä)HäUzæ•¢â›W‡êë¶ññ+íìºüü |Á™]»Âœœü Ø ~жÙ÷×i =ãÖÚ»¸j¿nÜþý[¦ôKN^ËV¾ÖÁÉI|ÇŽ°©SAÿ€ö%~&‘Vž²,9nQóû.!»ƒNßøXj5Í«um(auhzâ2fÁ0S¡Ö5xêTñÈÈåžžÐh'â—Gª¥s‡Âe1©I/_Ÿ_,3l¹ƒ§o\d8)Ž_ jò07›¸¨ÃdÀÒÁ5).ÚbåßI>=‚j`í`k—¼áz™»Nƒ1&##Œ‰’eoþq˜”Ÿwáà±ÔìhrUw#»^Ïã“ò†&Ô˜X˜ŽŒmÒâ¨em«îY‰yÈ ã^*Ddru½{Iê›`ãô]Ýl¢£ïºWé¨þ˜<  G”›É®ÑBåWáAÆ6®i‡£É”)ÅØàñ:‘þž¶®ž 
Ñ‘ÍÄx3}DÕ2£6÷¢7iÒ§„8:[zK[‡$ÒÈá›ñé™ul{i"%”Pi#gG H㇬';”6†]'Ž@¿sÉœ®âÎçO®~l¹±R¤¿·;©³“Ä;Û áÀÔìC¤RzËÛ]œ8­ìÈÇZ£ES—–\ë)î)€/û!TL‹œ`7„ ©.dx鬘àWî{Ý!bâ®:S­Ñ2“c9ÏÔí1ˆÎ ¡(Ò~„úô§𳕲4§r3÷ˆ¾L/Çã«ÎÑQÕ›¬"YRDëØ†ñ+2>²òrrÚ`´kk>rí>-“…/ #H{Ò({8´Gí¶t®éu¹f€ËÊœ™Éø`Ø­+H[ž±íóÎdÆy½”mþƒ¸ñ»ÇQ1kÍÓŒ-Îgžú&KúènbJ©ôA3Ž‹ëçaÜxO ¾}û6Nš4JÐöů³IæÆI¤<Oñ[2IßÕjí—¤‹ùEEûæ'SiÖβ¼‚U'Â<¸(ÎQ¦¿ï”¢æB Ýÿ÷àg楣%ˆU©Þ'.Ìë2dWZŽ.qSËhÒâÉ=J.-|ãje÷žwž†¶žÉQy¼Ãlú*tç.«ÞAÄØ÷&ÝÓüü(S•^¤ý× Þ»­Ÿ×pÒ¨ÛûÝ9¯Uö†êë\½ÿí=BÏ$«ð)iŸüžÎÔ—DD‰¶œBV!“ªaÕÒBÔÇ|8qïÌM¤£Ç½ ¨Ää·£u|ìÑ?/~¥Ú‘JÙÍHãIú„t”J7ÖÓ¢”ù‘*aùî‘*IY"/ÄB´"ƒ¾ˆ(wÍOÙ­ëýd+Ê@JùHnR›…ìÝ^£Ñ”òÝ'àðO,s«í†?«Ë-5Ïm¯·˜Ý£-köU;þ6µ‹GÔ.blýâhçuž@{¿’þïæŒ~ÿÃød@yì"Œ›ôŠž!&Óè  [í€â×/ëF]?}pg§ŠŠÉÔ3œ8ùé†V3“•¸@»™îDµI§é–C— _Z}•ÇóûBÊȽ§z >þT§vÍ"«ô½´jãoòº)ê tV@e£CÈôN*s/éŸRL‰Ô¿¸¼YjoãûÑêêÄã¹S·:‹¨ël dµ²²"×Q€psˆ á×”D»ÝÉœ CŠiO”˜Þ0„NøÅÜÕzzeÆácvÊÇO•P„?h{HHך™]Ëý¯,wNxŠ+gw«H¦ûíð¶ÇõçtŽ0©&˘³ˆaV¡/;0(«ÐŸ'M¡S²•¦‡zocS´jß'Cjãj²ÓS¨6ÞZ\RõâÅ£´OFŸGJ2H7R#Ì¡;BÝ1ÀuÚx¨§!Ɔu#‘7ìÅ®P¡êêÅSÕ'Z}dG[š¢¥óÜTúwÄp](Bˆ´=¦NŸt9[¡ûO™Á‹TËa7v VÍA·&Ï‹ûô)®ç–nùj¡j9”`þMNþ×Âb!ÄÚ$¬o[݆S±›šØÏdÒ$ŸÃì'ˆ·ªÉ|½{Û€ø@›e¬¹é†è˜Ác&·–Yg6]ùhì'øed¬ÑÓëÔZ6§¤˜™ø ~Жñp¾û@‚–‰e+¨Èþh¯ï™ÞHOoÞáÃá66?{ÖxŒ;fgË›™Y@±@ü Í3ÅÞ–YV–òwN7uÍŸ³G¡ÛiÉß§|466¾7…¸Ï–úigéøñªÑ£]µµ¡¼ ~Ð^` 1ùýHâIÍ‚BŒÝ×±¨ðÀÀÀ~l~0÷Ù Qõ”~-jð³§å]GÖ‡r€ø@;dœ5’{Ö­»÷óŸöhÔ¼™w`•^8ž0w¶[v'ÂæÂÏo ùÕª?çÍ“îСCóÚ¼o_Ùˆcºþ¦eñ€vŽvŸÞä‡ ¤]½–ÿøi?“zÅõéåãëÏyzÌFH¸ïl·²ÙÇçt€TA'' iiþ‘é]»>˜›(«èNšÅñ€ÿÆúÈ~õæíÁCñü‚ÂjÚ%䕾¸MEéÃìk÷ne an8@‡Š‘ë5¸¯Ÿf3WI.]J¸p!g°¡Ô C>~~âK›Ü¿ÇHK{Á°ç$Ì :Q89Á€ø€¬”äœYMñÛ„ûw³@c~‰ö††¶ä§Ñd꽨€øˆüçÁ'§^κœ&§ÜCI½¯”b’}*yû4÷N^ÎuÕ^}ìF›wìØÊàËÂôã‰é|¾ˆF°(xüùÚßÌü‚Ï–æºZ}¬à‡@üà¿Îó7%±Û¢M'Î"z韯¤ï$&Ù{ Qov3ÑÛoXQóþ]I:ÔGKkð@ŸcóΡ}úéêŠa9y4cz#••É}¸E“_E¯X;cß.˜·&j@üà?DáÛ÷û÷î:~B‚Ns~07Ëñäÿì—enü-'.jØ2*¸5&äIâ""|ÎÎ’?˜•Œ,¿ï9„¶‘á°/ýü—@‘@ü =s3ïEáÛbÙߺ³•¯™Qí7üýŧ+íó˜9£¹²  ”oÔÃû>üüIŒÞ½ûÝ„ñsE „ ~Ю¨¬Â{žèkh&ËhÙ ø:ð ¶žråQQ—?jôTýAoÔ¼o$Åç4mes„´³ /5¸>Ñ•!µ’©×}|ÿ^´K—ï¶DÇxxS’=|”¬¥Õˆ«JÔ¤Gq¹áæ%)阳ó|(Eˆ´1$å»~u}©|ç ™©SÚö[³Ì@Äkm9¥Ä'Lm9…}O™§Ž#絤”÷RÈ*djË36Ü*3”—Bz0?¿™o¨mO& ÓÇ?aª~a¢!qy¥ªª*¾ÆFT)È/è¡Úø¨+G’šw›¥Y}êï\2Ø0\Q‹%åÒÈþ£pÖè£~aã•1¶'c¶ÆNWˆó r\§Š±Ã×÷òäÉG(Bˆ´=ªªX_Y«-O5*&OÏêî™2jšÔ.„ŒHÃb”¤Ñðä‘õ*K[V8«è¢ÇúÛY…ï°a 
>—Î$WmØs˜EnÒ­á9âqe%_ãoªl‘ Ñòæp`)½|¥•Ø’ûaô4D(6£ŒšÞ(íÂ2fˆ ÏC¨”Œ™¨þ!)¹XÒ||¢yãûèEñ€¶ÇÓ{9ýlp•/©yã¶gmšÈ£…Œ‘O™3 –jsäÐ(«ðTßK/Æïpý«¿v´¶ÌàÄ—L„ÑÔÃÌ#ò ÉÕÒÈÕËz<ÏÍÖîÚøÄñºÚ¤\nâÑ‘ú·ÑÂ-Žþ“ \éx‡Õþz˜rl9þ-©¦å‰¥ÚÈGÐq!i+-Ë¿_¿ß  ~Ðö0þE'¼žPÑÒÕë0“wN¿¹3ç² ªÓ«ÚȲ'Õ†“à¯/ /·ÿ¹:jXãâÇè$µñĉâ_ס„ ~Ðn™5šÃýõÛ’CGŽ 5±¹²}y/›U 2¸ÿ÷Nà÷%ø:ò;;SRajƯ Ð<“-°XÂkó},Ñê…ñ€ÿÒ’b³§O¥ÃÑÛwJ*tSíoð­™|zõ$=ù¨»»GgQ$gÐÒ6;8zs„öå½;NŸ.%%õmw€ªJá»^öíÛkÀ€Ñüü¨Éý/ÄÚ®ÓœyËÊ˯^ÏÎ{øðõ«BÖçÏ|üüU•¬||â2Ý”•u´û*ÈHs’Ê©îç÷ó –“ëåï_«YÊ›×ù·n_yòäÕÛ·¥Lf%ÆHP¨ƒ„˜p×ߤz÷¤SíD"øÁ?ê!,$4ÔPŸü´!›¥¤U†Ußñ?ñ?ñ?ñ?ñ?ñ?ñ?Ä@üÄ@üÄ@üÄ@üÄ@üÄ@üÄ@üÄñƒS€øˆ€øˆ€øˆ€øˆ€øˆ€øˆ€øˆâ‡1†³€Û |Ê |Ê |Ê |ðKãçzû3£¬ìmTToü‹sëÃÿz„ÓwmôO×ǃŒdò÷‰^éÂMS’½3hg6({û.*.¶MïF»þÛóù2_ká'»à×@ù࿈™¡ÀŒ±‘dÀbIœµbMü$Ó¹iìÀÚµÑ)ë¶&UÇó*ß\í©\µl å Õ&fqmJ16ûæ<+Šndf’ߢÍ*?¤]†ÔÖ”y8JÊ¿ Êc6Žg‰¯xVG³Ò8¸»Núù•ö*œ+.®…mûܶN¥@ÃÑYG‚â<Æ ° |ð«àJzrä×U/„8Ê÷ú|0¸;¹Nâ‰l´g $ënTK”…ƒªÃAP_úky䡲çaë²ò %ä»Ïš¿X]†÷VÀŒÛ´ö̹L$,i5ÑÅÞj`ƒ»`½È<“ý’tXZK7“Ù!×Ët„è°.AP&J- 2¸O×$fd#aቓçZk²$?éüÝŠŠ kkkÚø£IH%ì6ÐTKŠ\Î9›øø3”-Í4¨(7sÕ¦-…ïÊõÌ&¹;Zp™½UÅpKëè Ïó·Šãb¡xƒòÐoVšFÒK»þpþ°åô*MžkËÂ749<€+Ëÿ˜Iþw øy¶~(çºuµ~Î]Œ Vñø+oýh2ü"y±¢årÝÑ*ŘQ7»çŠºÔw×™ø©usÙXþ©!Žò]½) A®G%HY å잣尡ưhê‘£ciOVVVd¸cò(¹2Ί¶ß ãä—£Ù(RAµÝ“IåS%ˆa¯åå !ø»MEˆR>ãÙ{ð^ÊÉ£«1mÖÕzÉöIšô-(‘;[‚LÅP0{劓¸àV±šÉ‘½AÁøÒ’Pcba:Êß0­§¯§eOÛe[f y\L‚!EFlÁ§gòfA]iÇ ?ÝÜBVf;A:Qu¡yÕ²§Z–>8!®Fé´ Ñã‡t²=™åî:B›=HûUI¹DhB”ò±G´,Ðm°@ÉÈÌy;Þö§ÀßZ/ã¥ÒT%eÀ™S …FR6"Ämõ’µ-öŪ‰ bŒ†ær‘Q¥«OÂøŸæµlé¢Eh˜,¸¸4í:'RA:©Æt¸?ä'íì9êjèpý€3l‡Sç¼zã ÷mvìv=-QpÊ©Dš•Wè -¥A¥'™IÊùu½ò2ÑqB‡Xä _wÎÎÆì~”0J5(_ÃaG’c\_J-*Ñ.kúEÚøzuƒÈ=î/©1è˜ËQYª¨öüdTA´–Íù©ÛÔ-ç6¸jš—N~§*Š‹Ð1Kv¦; æIrˆöœT&mk~Ë.F-½X+âv)%½îrêf¹'VÏy&b+Ù­ppZô„íùÛW m´@Zyù¨'­Y²ýn(ƒ~±âÉr AláÝE)ä'Æ€ìòÐ(|}èïEk¯%P+1 $“Mc×4Æ­0G•©­boɵ•ê–¾ì jÈ÷®ÄGÓðähK·?ÌÙhé©Ýt]n-ùÜç’;M»yo!æ^kFt#¿Ël2ÎHG™ŽúêFÊã!´ôâJ®QM‡,V.*EŠøa·u=”÷Ó1<Éu]]uk–˜eE,$ÆÏYå‡ Ê÷Ëð"'Íßwq\õ,ªcdùgx„¥¾Ú¯c^Aòªµ§ž••½ÝÎÛÇj¹K^0°;B¤? 
ß¹Ád!Fha:'Lµ©lk÷Ò²‡è7d‡\ê$жpÅØ•–+B„ê¸/Õ(Ÿ Æèö¢:üDóNm½$`±·±dýx)å®(ý j&Ù1û9~[Ê_ãÔUê\Bþ_>›[Õ¼%œý,²Ü‹·òY.** -(߯AÔªÊLO²2Hb_·¸…öõ­9¿ºq:2’ªx ‹Šû1‚´‰àlv‹5(›mÍÙ{‘;U7HU¬øR²€Ý{*ýAT¦jEk;KuEˆR‘Œ"¤ÓáºÊ„ítüõd*t…D†w]í¨CºMŒ–(‰ºÚºÖɤ"w‡€ÚT2à°ënœSï–¶Ù!–S¥,ÐÍ ?ÙÅz[ß›~i§K_q¾Êˆ9ºV›QÃÑrö}.Å×À,ü U£™ùfºŽT„Ò¼„'-t'@ù~ÕÙS=’²G©S…×dã´G´JýÞÌ/Ø«®´úñÒ­þD `¶1–Ñcµ¤$ùñ®xÉþÿ©œE}u›„¥|ûÖÛ±ÕãçÚØ¥ZE"69Æ!ú²ôÅeEŠùe&NøžyãÀnžê¤+íijé®{Éu2ãïél‰¦’‡¼ÛYc»nùªŽ>)#Ì‚O£§±QÓß ¸úà³}^ø°5ì ÇpÏÍóÂ-èN«TÓ°Ë(œ:äºÒ3¸Žxê(¾ |¿os:ßôñÇ7Cé`êCœ¾b„1õr›|HE·‚µŒƒrÈø´@ãà+T3köÅàe¢™VÀs…`®7æeBD¦Õì$ñÓR]8H™~\ãù©Ì?Ÿ·jB,UB ¯&­Æ'µšÓW6øˆÓåqá§èHß½Â&õàäBI<ég¬:=hÉ¥Åâƒ9o#D(½§„Ó¬œ ·gíž.¹Rö07·Û,ÌíF–sl¹–õâš½¬?í1”§o›c<½¦kÔ®ë%Ž:]àbø¿à7\¯_˽èlòs¼5BBHTi$gYrtÅóëÖ“¦úH˜{­ p†Êòí0$º:öí¢‰ñꜴÅaëþÉÉG¸“Ùxûe³iaû}ƪ5ã:”ÿ«X}ȯ""öÃW íõåÈD¡kÖP*5¤—ä—’˜‹‹‚|æO¿ƒDäf͘>±¦ÝôÐÅ!kº !$ÚdžŽQ0÷[³†rUôxÏFÒ®¥aÔ˜s’ýÇl‰ ªn2#²f ¹¹*ÃÄÕ ÄÈŠ+{–––IìZ'¢Ö›•¥¼鵪߻rѪ®Å¢Ù:gسòì$Ÿ~Y›¬ÝQQýíŒ?Æ“´“®˜z)ÖcÀåÐ> ð®ë±)è$¦fÖ, «ÄÇÇרˆ¦qT¬qý|ŒæÕŠñöön&›”UG™ µqA ¯¨›CÃæéY:%Z:Õ÷ÚšíXP¾ã\ÒÎãÛW*A†‘xW©5–Ý©ö-{KRßwFÑ+h÷ÈïÄ˰QrÁœÆRC*"ªóPDÚÛà,F‚œlP[Àšb?){ìz˜…-¯³jéá¥9Ì \¼î†»·"¶ö›!ÒÚ’÷/\LŒ‰©øüéç7ÍðÍô¢ïºÈàþžÕk‹9’y˜óF$ãêw#œUˆyÇËË‹üæ¼')\A(\€òµ2Ò’UMOÌõÃî§s^âÅE†×Jq§Õ{Çö >qìzÎ _âÑå3áV¤ðÆbæM‚Ñ—cáº`òCÙ\Õj$P§]k}Œr:“1ÔyÚm—s^MŠIÖ{kQ~¸yÊ? 
|¿ƒÇÙ£hê½·ë¾'Ñ“ºÕ¬`ÏæÕÝÂíVÒ–ú[©›!DUZÒÕ€µïû©´ìM‰ÊŽsí÷uiQîT­UÍÛ A¸•!«8nëæ ÷E´F© ©?KÆ :pµë‘ná˽„|Ó˜âto*ùÀª·ªý½úŸÊyº|pÊ÷k¡`¾!Jù¶þ¡4Xî`„PÙ=z˼ä(~´¥~Ëp±>fQZÝ÷?ŒžH5§t´´S12Ôè;t¤çÅ¡óJörw»|eïæ6æSés¢»Í[9º™éõbí¦­øÝç­hcèÆƒÛ–¦>ij’Ÿ¬µ#t¨žIÏxÑ#(ÞÊgÒ‹zìÚP·jÙc5¾Nk™u£§G²ÛãУóªøüß=æaª~ølèŒÓ%ŠššŽŽàz”¯õ©ÈÝ+ FuªÓÁˆ$äô›†{ è°hß:©çõXÇ’äXʯJNH+ÆbBeô+73qBGe>âl±ÔDOhçÞÁåœúM‚ø wÀ§O#oÊ#T'WWzV”¤8„ÝÝà€d<.¥}MWWײ·OãØb,?ž}´Wa{¢™ëí©å˜ê9íÜßȉÕ}< ¢‡zD7ãS™öù?ÑOË*ì6{ôÂk¶&ïRóÉH£?ÿv„«áëTæüÝëÄuéÿèd(7AÌγºcœµe¿¬lÜøwv÷ph!cc¼ÌgDžªyõÖ“ùv¾å¸À~”¯àï9 ³Œ þºCH\-bé±\G ´R¹Ž1Ý©.3™Ó°cו×ìéÓ B,ÑBv'){Fsâbú%©O߇У…~'ñËD,Ag€nŽa{HÏèÙXhÙCìùÀçÏ/x½±òôÒ§:ÉÊÙº†Kþ!Ä~'ÑÕy¢~Ž8›}÷ì<œ7½¶Gz9‡p‹™Ò‡G:»cOÜ]ÃÙGyµXíüÒËFA—”›Û‡‘÷ŽìÍÙ–¶Ž:ÒrÅ­ Ń=¢/‘‡™À9ÒÂ'.iñ¸¾¯ìétïž™G=PåZÑÝePÞ6êÄt7šý|‰Ú8m™F_³&zxPÕ-£|+ fD^åh³ŠN^>§ƒ¾,·KÌ7ð-Çö |Íf£âWÞ´‰.ÃxYƒâ×`ú€D\{2Ð)Øe/w¡ÞŽ›ò’Ï(ð6äÙEV]Ôζ‹îWòÄ_éøŒcMMV'“zÝ¢'¬89á i¹G]t‡‘ ¿‰œzòýϱ}uc*ÚÏß>mÃG“ÎÒ“Þ]KI>ú®g~ð†æ=/ÈHüû<§È=›$Ðu ‘:»ŸY¾šL,¯¬¸töô®ô  eC—;È·KŒ_ÊÛY¢W4 #éÀö}‰e2¶tž>q áãB(ëÜþ-;’ÊðD÷ùÍ>ˆn[·å€v“ãÈßÉz‚ºÕ{.)geF‡’ÿtHf¯úÒ¼çGŒšW=nµ™™•Ñ¢‹iˤb»Íˆ{Z6.üO÷ WJÜõ¿s`êÚ†ë > F×7  wÐÄž¬|Æ$ºXÇE,AHÔ$Þš»X[·å€vœ9ý½t”ÒR„Œm\—-šo¤ÃõÄŸ=W”¡†õš±3{¡I—¯Ì{î–ð\1ÄÜ~í-reQÑsÔQ•§Ò²g³òb‚aœ·¡ãÚËbîß{;že‹f$p½RÕõ!Ó§“fp^‹×7`£%G6ؽkòB• «Ï9Nêt½ãBVÕ²Á«"–«n$ùôk®“ÝÖí@ù }À(ÍÙ+¢ù½v8Úø0ýšVµ‚=K¸‚4§T^¥«²2õ>é‹óžK+(ÉÊ!Dݸ¥Ù[Å9N¡·%eQ#J_r\KÕ£ÆÞ¯¢CøV¦Ç㓪D§W磅sìΡBs×]ŒœcȨg€û±ŠÉ¥¥¬R–4%.ÝÉD(ÿú„F×;.&§_Ðz¬?ñ̈á:Þg’Ø"Ÿ‡Íu®Ûºý(´éÓ˜„ñ$VÉóèµáÁ몣  ÏQ÷•yÏëðè§OKF¼Ï_–"õÎßgmü#ʤ¬‹G–/ú3!-‹Ž\7w°X¿7 Œ@ÄÏ?YW<)¯väû3æÔD´{j­ƒhÞ³ÝÖí@ù ýÀ/¦èI~Ȱ—ɾ'×I56ïyºp]G®®®µV¼{…Pç±V{ð¸øÔql¸BˆQCÂ.œãW'Wq<}õ´´™­ÃEV{\­‰†x½Õ¬]»Ñ2C?“ê×+ÉùÕ9.„Äéa"Òÿ7ðõb,]óL JküÓÖí@ù `õL'š ;T3Óç]µáJI­¤ÇfÄì¢7ÇQÕ½½ÁyÏ}ôúÐCËÊ·]8ÇÏW…g¡¬m.ĶjïÐ`ÅwÈMiÎMê-VÜJoòóF5óOjvâîµ  Å=ÛBÔ@¸6Íñ˜½¡T‚÷&}\_¯¼KtìÍÙ¶š¢f•¶n?Êí„ë³^ç¯Zµö䙋Ÿ$'«bï:ÛÑz7Æ¥vƯ ¤­g!&9ê+óžKˬ¾z@bvØa2ÇyÔâa™8 ÷êÜ…Kß’wc,µ,2J[Qø»Meh8cìœ~46rÏþü¼Ww22³˜½`®š4§¼ôÐZØ›ùÅyÆ]{ˆ°dü±Xi~$U‚]¡9hlÝã"—ùz‘r¸6î">šêìÑÌ#¾¶uûP>h?ðK«„E„}ñŽŸ–Z³ôÕyÏõ&,º>aïÆbjú±ñIÍh­‘µ#ùùÒÚ:LŠœÎ³Ö>`ý—Ž‹ƒw°C ÏaÞÖí@ù”@ù”@ù”@ù”åP>¾‡*Œ/\É|pÿî»—ÏA3‚¨b±Ê™ŸÄ¥åº÷P2h Cø—²ùí›Ì¬¬«UJˆó •øã‡*!¡**¢Zš}zª™þŠçWݺ;çñãÇe;¢ø:òŸ+pIq¥hg¾ž=%tú 
“‘ëeå€!ïEÑ‘øƒbR²z&’Ý5ÈÏ—RÞ+©zp>åaößf#­ôï×z²SÓ¶düýzÄðÎýû3̆w4þ¥²½(¼wúô~ñv³ùZY¶™¥÷í?$"ÒÁÜ\´Oߎ}úŠ!$ö…´i7nž/€*­-†ÿ›wì:¬ÓìÙß9R—.|þþòUU©+V˜çí# (òsNué§;›¶ÄûÌ—ñ™/÷}9ÐXðxË…óeÎξPzAùøfVF¬5±q$eïdzRÓ¦&ü _áíéÁÏßRIÑ«Ôô‹S§JþxV||©·ßÈâk=»¥Ou\ÜŠ‘#DøÈýxVÊJ‚Î΂‡… 2AJZŠ1(MâcÙ瘘˜á“Üš7ÛSfþuîrwy)í¾šÍns|üJc#†­­X3æIªˆ² [êç·¤¥Î5f­\º`\óæ:~¼xaaÒÑ#„õ¸ÙPžAùh„'¯ÞžO¿4tü´–È\µßÀÅo’RÎ[š mÆl£¢–»ºJD‡–°ÙÏO.2r™§çâæW½ª’­Û76»ìÑÈËóûoÌÖðé3 æ”€/óò݇´ËWû nÁr¢âR‚B'Ï¥š3i– 7o^>k–t‹žOO™õ–ÍñhVñÃåK—EÊ·œÙ:t˜>C Ä”€/RVÁJL>©k:º¥w$ÈèÄS¾‘s·ŸfïÌjÿþð™3%ÂÉ™ã!³råÒ š©ÚãõëWµ¨ìq™æÒùÈ‘-ãÆÍ„Ê@]ÖFDX8Íù9ûê,!sïú%Mõž;~ÿúìiòHóN-TÉYŸ ä6m=+àdzZ±lÞ<ÙŸc6éùb½Ê•‘UƒBÊ@ ›¶íüi²G£®k¸jM¤¿ïüïu›ªnÞα´èü3mvq¿w/½W/£É$ãêVooéŸi¶Œ,ÿáÃÙØø@9å€ÃDzϪ}~ÿùû5wœrá¢ÙÁß±í†M¡î2?Ù`A™™W~Hù0~ý¦„ ºüdËml:oݺrÆŒPÚAù X¿6Ârªg«ìº¨äýwlÅü”÷ÇD‰V1ø?ÄXooÿþq̶ÐéÓ¥ZÅr«Q VEY«Í€ò@ëóæC©‰ckí]CØáI6£,¿i«[v{¿ïò‚ÿ*+0~<±„xÕw;|}úµÖ©Vç_»6ÂËk!”yP>ø¯³uóÆŸü†¯ï?–~›|T–ÙÛ×uøT ·¼ê°å’9‰ÁZu”dœQÑÏøI®í×ÁHI‰33søÖ#=|xµmçV<Õ––bPàAù@½·®ÚCGå>*PSUnª~Ûh[[?²Ö¬ÉC¡*+ ¡¯ƒ£ØmH*srŠÕÄjnå99ïÔ4ª‡P£ˆËH)H Ð˹9OY 5F†XËË{þGú¹¢²uOµšZÇÄÄmVV.PìAùà¿ËÙËו{÷ku3ŽŽ÷[ÐÔ–‡%%ub*Ê „î°¢ôJXã(¶ßv\\ÿ„ƒ›A\ÔÛ 1T ž;jk\•öºG±²ÎŠè´užu¡aÛÛn yºéDFe"¤ñìí»TUVò}K—Œ2æc;;ñV?Õ¯ ØƒòÀšÿ³w&ðPuo?c™±oe)†¬£Be©hAYêUQZ½¡"¤¬½¯èÿ6ÒJ;íx[Þx[©·Ô[êmzKEYZ¨P£bÒ îÿÞ¹c¬1Ê’·óý̇;gÎ=÷¹çž{~ç9çÜsofeÚiè÷ºJ5øŠÖÙÙ·œi±Ä~¹0ÂYhÔØ°èü"uiÔÊìÔŠ¬MQ";ëE$çÎÜUÆþ"ˆOÖ{d¥ñôë[ÃM…Àg,¤8a²Ç@¤pR‹±lw8LJJðüùCVV®üŸã_¥M™*ÞëY=ÑV{¨|ȼòÀïÁŒAœ—ùñÃiƒÛèŠLçøyÌrFôÒ-Tï<4¡óÚ‰PO¦™Æ­Æ]À¦nœ0/ˆ0óå \I-îJã`ƒÇPÀ{!Öýš„¢-—¶¹~½Ðª3/røèã÷ |ƒ4‹ oR5L`á‡Ê×—(..VWWïU˜Å%,uéÒâbåÞ³¤´´TAY¹·^ÁÙ»çÞ…¨ëuz¤º´´VYIªôÅ3åjͲ…Rþâ™Bóð!‹KÞ¾÷`øPƒcæÜ-4ÜrzËÆ°?T8ÏP +ÈÄ&ýz=Ù'ë ®òw, $jß]¾ýy0ÒFj‡KwÎPªoøÖ8Ѧ,ÿ-áÚ§mKÈ ‰NãPCòwrѳsnCåƒÊÇe ‚d5l»þv:y¹m—›²‚žáúmi0¨T*Š¢½hRå­õT“ó(zQ…JÍb ß8Y¬¶h×EùÖ«(¯Æ²«Q£^ªLðs¯F-È=sÑ»‹ŠÊ ªÍrÞH©É)éÿœs~së½>ælv°û;çõa½œWÌ&W¥’™`¢·»€iÜÉK“——Ïò×´ÔAs¨ç vúŒ×;›ŒÝÈǵHÔ~Ö¾cóWºkGfžÍX×r¯«ÔÍ”½hE±¹Ó\"n"§iúüÚA:î×ÚçêÛ9NO—û<Ãå¿Z®,#ºU‰¯5s bµ³ïFçР´Ê†o†(ºûs3*¼-«-ßuQh¾õ{5Ÿ<©€Â•g‚dƒ1XmŽm³_Vž`æøÉ×+©ìœì|™*ê 
x)e3™$29?7W\Y[Y†T^œÇ@eµ©ø$2&“Iæü¤¨M“æx"DàìÆ&‘Áû;+—ïú%Ð…DÆ.ÎPÊÒt”[šÂfææQ¤U0§Š—Hq^>Wä„ÈTWW·Ú£,¿¨œFã¼óŒÍf“HE¹ÙÚ4cì[yiQƒÉý‰ë¬ä½yWkdŒ‡°ù4©üÜl¶¬¶pãIÂâX`.[H‘¦óÅÕ˜ZØÀd²É¤ªÜüRm ;jî¡ß¶Ý>{Œp-§Ru™Lâž`Æ5÷ìØœ¿El–:çÚ–æ?`°‘¦éW—Erë¬Å/vš,R?⪵0wÉðãrmh²ÙVu5vI‰Ü®.-*yǦpEd–—¾dT©jk“[å0–WÕ(ÅÈ@w¥ZìÛìRrrsóe¨ÊŠÝ¢ðùEÏÉJš-¯¾bŠq;£þ G­™ª'ö‰Éã–äj6‰"avõùÒ¦RÍÛü’òZM=®ˆÞ,¨àeNÍëge•,M]-îù|*/}U©¦¡Õ˜&ÏÍz]ÊÍÊ*­íc£ Ÿ‡FžÙš4¬'¡é#±@«Ø•×Gþ½"0uòß;¾C@ù3ú®…[èÓ4)¨rDxžÉµ%ËŽ«ªôgqdE·:;$,‹e°Ø1ë:˜ BUG:‘×hš×raÌg-“A¶†}U‚ÜÛóI)ƒh´æ÷à§§˜ìñ¼Oñ =ÏümXytĵåÆ$²§h•V#¾xwîÁôíÌf‘ẖ“W1(dA( Pù0 ³±û˜#{$%V5 «ªÊÿþEqü*·€äø5€£‹Â ÁÍgZrŸøýææ‘œh·öfƲA Þãæd—œvzËO¾ÆŸ°Â9ÛhE©ü­â¶Ç ì@Ö#g_º›ˆ ²κ%©… ©W|Âê¸ÕÍÛ;91è¯FsC)œ#Ú;¹e¤%›íèY^²ÜÆ %tx;[$¦^Æ| 3¶Â—9¾!ŠÊàŽ‘f€V|™l”&D¸JÃÜìë’3î–¡¨‡QÇ&ñpF|Ú›÷´øDìܹï—1#!N®Þi)‰Àz7znÞœ³FTrþ'c‰…8©O»TÄB« ÜöpÏùhqcaù&gàÿ?w¼R PÖ_« 2k6£á7îŽÏuœw$ݘ‚ŸQ€–~f:¬˜HЯ½7kùàqùœ¼½Ó°ì•‚˜ÓÂ< ¥EÕ91©3tþǺš´+{‹më¤2èÍŸðÜÆò \N-ÄëM4Aâðö™–˜ð§ÿÉWOýy9‹ šÆvîàtR6'fŒUË}[\J'›(†NÞi‰X’ Pt{×û|oß l¥|Üy÷(žWÊXóaŒÔT&|»” U¡/æˆî´äø|DÄÇI.³BOÎñ[ôû–mDˆ‰¦üŽÇ̇žR›®¦SÁ£/ÀR b:»ÍNM>8!Í\É÷ ~l–•mûŽ–V˜äÝ"ÐÔq\ªcËÔÂ}¹›¾á‰ mkótkó¦ÕEj:7R¸okÄHIÕײYB$a¾zJO¥e•Ò~}TÆÒ¨¨ËÕQ”&7‹>äø„™e‰V hÓk’S^o¤ À‘g\4vaòi=jߥœŸäWcûQ ãNÑŠ7ÀÖ7Ó‘š#.o[L^%’“‘…•k²b7&hÚþ$Zè¶ãW…œ«ˆµ–KŠÃª-„èkH¼_ëe ¨÷Ù9äLú–‰¹fo ¶^Ëðž LK0?bnØ`k·xßÜ–7aúÝ=ˆÔŠKw\Ž Ìâ ²_sÃõ÷¼š]Xs^Ö£µºbI Þ2¼ö<[m#>LG¤~À¥E²X]‰‰æ Žuýû̼DӢёàÃ9ÎMòû;Yj¤ET>¼ëªí`¬7¼ñÕ_%œÆ°®*^h°jËËo*ªŒÆ‹@tŸ™Ïr±…m×”n_¸®_C@â«z^9¯r¯*8`鸾Å~Ö#86Hš¶e#ÞL&J³Kl îÆ‹)ø„3fIaƒËÅmÝ╚µoÏáü™Ä»UyŒkçy®mCÚ±­M4yþb“ãs!,ÇòØÓ½S¯'ÜvŽ»¿tѰºµ²ð./µ±žn ;œrÓô[3DC”wÁ[›~å2ÓãgûßqLJñäöŒµ?ia2¸é×ÝÅ'…ÕµW⛚՘¦òV¹þè0ɬ©ñ­ömy)·1R¬ÉÌC ÝÕ³µÛx¿··%²ÑØU9—|ÛOk6ªª`¥’ƒ[üdlDkÓP wÄIê3h©|h}=?6×ÕG•‹€¿"(ÔÊ5T7 PÛƒž¾>ƒã>ç,Žõ0›Þ6kíŸÅãM•?O¹Œ ?•DOrnh”vpˆiQ´ZM^å¡úz@ òI¼®Œ¹þ×?†5ÄîâÝîŒÆúF¥yJ5,v›‰ÝÎ4ݦ!¯_€¦ *z*T­í…»Ûxÿ§Žãú£/Ñ)*Ø>¥9Ü^ â8umŽ:ˆà58Ça-ÍÉbHi¼„°Ÿ4 ‡†Ûßrë˜GGxHšH³†s;&µ¦,¯¸±‡ª=Û¾dC+Zu½œ¹°\q¼ÿpíûj{ÍqEÎÝ“wá oþÅV æáyùв6D³ïSè@yI§JImáqÆ8R V¢·¹2¼qœž‘߉ìÍ“›km—òP ê¢Ü÷†„xs4x«Œ¬9O7·:@£ò½+mwí±Ùø†¼ìðˆï™BAm^WS„¯.¸ªª¯[3“»gqq…ºz×,Íf 
Š‘ù}6N‚¢Ú:0®8!ñÙ3T¨c…Y’ÓeÀ)Ì€¡:¼h—“2WçöO5$‘¾NJeO<Îgñ†¢eì¿øN¼ACi³{ªùäÕ†<üȆ•ãÞmel.uÆ54r–™·ÃT|ÜÅ{¨G©¹µÉ´¬ìàR^{¼Ûè²dwƒù]÷>/—1‡nŽ8·¦DbM1¼N¾tãÆkw‘›ä‘; ù×ÀbV6¥ÞøEfôB“6ÄîÑŸ×OÇ—½J¼BŠHIY™—"V6¾U¹òî.˜{D87ïÌÏ:Æcè×Þó*’–žöÁ)»| U¨ƒí6Ý=néÄÜââ$ß±Ø æÞú |É$£–Ù†ßáš¶Ñç×ÓÁGæ¸z›¶/ÛpÀ¬ Ù»¼$æ†ý–¦a ã~ò; ­Ý«GÖÃîóÞëÏDM–6:N¿–@|'Tª•y‹‡j@ªPY¿RÆdyäÙÎÍ‚ÃTÍuUr¬Ç¨;9÷€è,ÞE—P9[Ö/Õ¶¢â­¢;Å­_P@nq)Ę™© E%¹‡‚]Âtá„ÞÆOÿ~­»;S÷%P°¦Wáý½;÷èfïüÜsü–¡àß{_´ådŸpkjjhp3=ؤƒ‰òKÍ@1-õbŠs[¯%’’áKÊË¿¦ÖnX‡s>•º¼:Å›Wª|? )J–aTÖÊ´rÛ‹Ž¡N¹d È4ìž:à³ñÑŠI"ÆKÿ¥_Ÿßد`§`9w³C¥s¤ç ûßôZY´áºøŒµ3Þ¸^„­ìMç‹—(XQ#9E듸(šµổ3Úœ¼ÊÉÃ( PùpŒ@=׆F„mß“â»?#ÈÝ ´ŠºzÝhç²… É*3Qw·Ðéºé·¹j^#PÍÖ‡p;‹VY8מ˜îÁöË=¹pET¥¹GYÖ¾[Š(æY®ðrݳë´éÎ(”ñØyvP¦œêƒ7l¬rgë8Ð×ãM<}çìðÛ6e¬šÛ²q>å#³8/láB9ÿ4> khÒétü¸ »ÜAÑ­QK"7½V<ÅL2géfeEnkŸpáîýëÙýçrÝ­õš¹³dS̯ãC/¡>®ØÍ”Œû’I­AQ–³ý”e«KYì—ë¶ÿKä†gàÌn ÅæD¡)+ÚÌØ–6TÔÒ7sSlwM PLw»üsÆ?…SšXŽáí{à—®×:]˜›~u¨×’w™+­t$A…=/ýÖð.Ÿ4Õž¾N§­,'Žm>ñÿ“±”¾A(ÀF®¤Äm–!® fOT` ¿ÿ±aKŽ¥ú9b!D'¤¢/}œ"#W¯F«/&îܸqn«}[\J`v-Ïdÿ’eaª*Z,´[º§ô©ßzQI"‰ðBü#bI,¦×²ƒÝ¯P“Ç ù…‹i1ëé]/ßùPˆJó§`Šè¯‡2#™ÉŠüyïÚð°ûï¯îõ0"š*È 6(ÊqN•&bºeRnJ?r*çU¥‘RË–¤ª_O¶°Ø|e“ù™Læœû3»ñ)ÕÕœ¡fv-›$T]ZQ¨£Ñ¸ïF//­`°D´Õ%˜Ì:2³‹ŠYªY ¨%cŒßZ¶cÜÞqO_Ýqöºœü)Ná‰õ9ä•Y™g¥%Ò¤%8emˆúõØÚu¥r*òEŒDui,tVx›ïÙû0!-á¥OBdä™óÌ„ê¿/ïÜx!.Þßíê璘½j5y•ÛŒ••¯1•ØÉ-ÂL½R½x_}ùS;Lgø5ônÙÍÄ ºáÔÔŒ©ÅURê‘IÄ6ñ –¨Ä†ô¥uR3Òyóð b‚™LjZ8?| öw¥?–ÒNJMåÅ çØÐdàÏë2rôkúdr\RZÓÓ±š¿Òª¡)÷E“ØÌòÊæÏN $Ei^„ðÅ*¼ÜàÙV^^Þ"÷¤H-lî7¬yf’’¸§<¾ÁrvÎ+±cÎÂëfeY5»Y÷ ‰"½¤áB`®RlBrëôÛÜ‹wùðhK‡µ™E4G¿†±)rø’ð/å‘”B“Üç&NŽŠKl——ÃQ ²÷ŽãˆVû6»”Úã禟ۭ·ÇË'©Mf÷\ÜÆÄ»5‰vÄ6g©SOO"2÷5§®+¹Ã±ƒƒñœp™‰ t“ÅCRlÈëX\}~t¯÷&gÏeóaèëÉñ«šB "\œãýßÓ7 7 ïqhXŽY·8kˆ;Ýì&gàãmü¼ÒáÁà ‹Î¬õýàþ'cÎCŽ ÆÆó›„/i˜hŠy†± _è  D­žÃ+ %Ê£a£¡“ÓÞ‚(ZI©ÜbÓjò*§ÓÔ 0@å뺫 ¥€¸pOgYqÆÆÈ´f#1UÊ®n/¹ åä›Îe†îH¥uþ9´œms×ååUsoïøÅ !”¦”ƇÇz赟È×íÕ…¹ÑW(¸‡ú˸|®ç˜ìÈŸç»L“³ÉyÕìyÆ‹Ã|)Ÿ™¦Á™ßh >(`ë^pÓñY@-oÅN·`÷þjN”Á¨û®x}½°ÉˆÉ•¯+ £ÝÓU€öÂ,,uÇåIŽÞ+=#µKŽn´è@Á¢Æ¯áÉi_‘È×íÕ…¹ÑW’êÑõ‹=Wîöl+üîÕ¿Çñ§|rrׯW™™}똖&MçáÃDÿ£­³>HùÀO cÇîÔÇŽ À«^¿âç3?ÛL @a€Êü¸LŸ6åé¶©wÍPTRá?ò… ùW¾×/€Xg,yù’¯×äÞ¼%b2¼Sç8tèäâ Äz7«ïÞ+·™ >T>H7sðàÁY³fÁ|ø>‘–¿ñÇÞQ“fö¢ ¬Ê²™Np«§NE¬rðeðч˜?ž¹kžW¸„8œÈR0ö»ßኸúžf2¢ÓgzâDeï*Š 
Îžå‹=T>H·“ŸŸ3á{¦Ÿb¿Þ5àÜŸL"Âù¯§ošÛî[^…Êø-Œ:Y9rdÙ¥¹·ú×S¤õ8‹vŠÐéÓ°ŸÇ.›©ÒO‘Ð<"E6®8zü_AÖƒéÂôI~®÷òöøŠ3õpŸVWwUP°×–¡Ù»1Ïs0,óPù çŸì?–A3×+GGj?Ïûy~g÷—åMqlÚàÔ#Ü*ž3Ûq±hg8g"¥‘ã¸&kññ± õ:í£¯KYbìßÁò›6ÏþŠ“UV¼)þ¸€bo]k= Xà¡òA úÚÞ:tzòŽˆð°Îîe;Ñëð±.3ºlzŠÆ;;l³³8nM_®ÛNÌj¦¤››åWÈqŠýgÖMažÏê;ß{yyÃÒ•àÌšöSÜ–mc={ø¸Õ¯‹ƒ–}ݾ´aõõOºJBDSÓù²dëÖ§!Á_?,ª¦f»úThXÿÎêÏ5¶¶f°¨Cåƒ@ ¸LŸþøÍ+E¥;"@qÞÃQ†z_·;f½fíÕà•S'ëB‚ùÆDB‚—ŸËŒ³±–èIËwì( „Í òA &(õS¸pérO*_úÞø¯èçl.!‘))«]]e{Æ`C~¨á .|AA õQUU9ââ=ô¶ íÛËa!‡Ê@Z2gÆ´µãlfùôÀ±þþc÷7ÊÓ´…—²v±ïnƒY,ñ[7?L˜8¤KRÓdvòä={{´æy;Êöš¿o¨|¤m‚—FGG;ú„tëQ.¦î]âï×%I‰‘¥ œ98}z7.FÃfS|í17¨ Óœ<ÙçðáMÎÎ"Ý*~§O#“&ýÌç‹ã!Pù ”+V¬Ûo=«»&žý}GÈÒ®tAädNœàqútŠ]·ŒœU}”»|åC×Ê‹‹ÿÉ“‰VVõdr·Lõ' /‹üý÷¸áÃttºÀËa³)ëׄ/G»wжÎh­Aæ«W¯ î×%=Ÿ·n’KJ«¡ìAåƒ@ FANsþŽŸ>ËxÿÁp¬ÃW§SQøàÁ›‹}{Àæ9skÙ¬ØÕ«==åû}å2Ü(*´#¡|‚Íа°9=“Õ˜¸†…E>yz-3óâùÊÎÏ¢é´cyK#F˜Â •|%ŽvøÂþýõðþ¿¦öÓÉâ’| HýƒËÕ0™ó=\­GŽè¹ž$Œ¹;lÖ§ø¸ ÃGHÕ ý+.–ÓEžªÁ¿å‚çΡEEï¼~þyÙR¨y¨|HW0qœ%öa³ÙGÓÿÊϽ§e`¤Öc¾›iš ZÔ¸jWEyá»w¥,V IJR^QQKCSXCL™úÝå¶ùÈi؇«ÐuueeE>TÔÖ±E„ÅädÈÈö3‰Å¥•éA$(dÝAê}Ëfy ìÓç²TT„}`©ƒ@åƒ@ *@ òÁ,€@ T>@ òA þ(-gÜþ÷þëÏ?T¾cÕ0Q„EÄ¥¤û+«1Ð4P廳­{úô¯'O J_U1*Ùl*(„HHõS$kh 4 M‚k:C òAº€Õ«W×××Ût:û+&&¶d |[J_…Å®=tôäëźÃ-êÐÒF`ŸÖѪ8wûq^ö?ˆ«‹‹¤¥m..:ýçÑÛš"–£Åi Ò@¤U¬Šºº½W®TeeU¯mf_ß ÊùZú÷ïÿìÙ3b›Ífà é»<)yýÇþ½£&ÏbiËÏëéTÕ°¶‘Çø|eïVó‘£Fõ°Í§Nm¨¯¯ûé'©  ~FDF–À>|¼sgãÕkÌE B…HðÒC òA:ÇܹsW®\Ù4ÄÆÆfKßâÝGæŽ-›læ,p˜ø»“„EÆÍ˜‡mlIÜmme¥«©Þ6gžß$Lª4é+ßU;lû¼|‘pìø{??øsT>Hg@f‹-iii1fKbÇÞ-C{Ï€oOjôO³?²jbbׄ‡vã;oY5/wíóóSüö¤Tûù)œ;»NEÕX_<, ¨|~QSSãuxà é+Ô¡hlì{ÿ.L“$,j7wñ¶]{]œ§ÉIKu¹Í7®'Š™]"{½$k>åoÛ~cÑÂpX* Pù |Ñ´Ãvuö>~ú¼oR×Ê‘“f^ιG£*i©«ua²þ¹ÎÞN\TŒÜ勊 ,Z(»24¾î•¼OØÕÙWøÄªÝ±c‡M½¶MÔõ†<{ŠÖ=ÓÖìñ;p væL)î³94´?±–T>HÇ 8ðùóç°«³OP‡¢‰;wv«ìôWôðÑ=99)9iéoLêÒÅÍÓ¦v¯ìDD(­]÷¿àe¿ÂrÊé•+W®Î>All¬½G@Ï óüþH;¼ðgoIä}åu ¼C²gl^Öïø±Ç)>°¨@ òAÚA‰»:¿’Rõ˜ì˜;¸¬Ž]ö ³=Oœ<ëæ&ד6›˜Ö¾}÷\Nv ,0¨|öˆˆˆ€™ðÃb×JÊöëùãZÍö~ô¤HO‹úûîÛãá!×Ã+)‘ââ’e•é۬߰Ánîâž?.‰$ò÷_¿BùjYoÇ T¼tñà˜±p‘3T>¤ÏRý‰efïÜ[GÇŸs¸qÇÂtX§öÚº}[@@¿Þ²¹¤ô%,6¨|Hf[Âv›™Þ½h@öí›S>´vÂD©^4xöl©œœÓFFv°ð@ òAø¢´¸XY]½ý8ÅÅÅêêê­c!ÌòRYYšó²k êö®£~šóîýY)I>ãŸJßf¬Þ:E3âäê–’¬w£çæQ(Šb-‹Ê[ÿ“ F¯zó²§æ"6ÜÞ;à1n3àä§ðQË+'/ȘD é4¦>íR EI­Î×J–ÿUTþÌÐ6ŠÏ˜¹¹©4ƒïâýéW®ÜƒÊéËÊWóûÓÔA 
Ü2Ûñ«BÎUÄZË%ÅÅ"RÉù)ñ~­— ÞCdç3é[&æš½1Øz,„ýT†¢˜ 37l°µ[¼onË)éw÷ R+.Ý=r9j$0‹+È |Í ×ß™qÛ°˜ w_JWù¶Ìèchö)PwŽÏMÊ¢[X`F.Ð{c¼pW4'ýjÅBt"qÉ×Ý—«§Ÿºnï` À~ÌÚrÎxÏÀt) ;ÐÆj ;ÃúÕOáæ¢ !ŽsFÛ±DˆsübæÔáî×¥Ô$’ŠKËÉÊ öÀ3èD:&*Úm™DdÂÕç‚«VdüÖvuÆ‘I9°u"õóšýö¾eä?ùêô„PÂæVÀSÀy^>«”ú·<_HǾ ͸Óû Õ¥¥µÊJR¥/ž)hÖ¡M„”¿x¦0 skrJ÷SÎ/(Öæã~Ù9…4™ï!뤤E`ùôeåc}á nx£$”Ôr§ª‚„Dy¹šc*#ç¤)pþšÏr±…mõñqÿß¾p \¿† ÜW}ªç•ó”ÃÂ@‡æoIPƒ?ô‹åÞ8¤¾ñŸ§Öø“MÂ&™5Š7Å/-¬†7|Ãy @ÄH±ˆæg4ix»#‚ÃÜŒ¹kO{­»¸tïTÚ4IKb=„sDIS~®_É£ ¦ÿ…Ç4ÍÝÖç iŸòw§ó0RjÒÔÿ9çüæÖ{}ÌÙì`÷wÎëÃ&z9¯˜M±’™`¢·»€iÜÉÎæñ£|Ïž} =廼âWË•e 4Aºá«GÍÜ‚ØA]ž{4=)X„ }Yù$ñ 4æú‡p3îì2gK¸‹·aµ@š›‚Jó”jXì6+ºhºMC^¿à8s ¨è©Pµ¶î¤™÷ÃÀ4†è<æ3dê•6¢o§gäw"{ódÐ𒄪*?¶ˆv¨uédÿVÒóø˜( ƒW-Eù5Iô‹ ~$r®î /¤Œ ¤I€ùó«[¾_ ; *uh›ç i‡Ç…Ū-ù®¾bŠq;£þ G­™ª'ö‰ÉãŽXW³I ð«Ï—Ð8[ó6¿¤¼VS+¢7 *Hdî uÍëge•,M]-â'ö§òÒW•jZiòüÅ—|=$§ªÚ§%,"‹(d;Š.$¾ªAî!ž”2X€FãÞLf™T“›ÿ^›¦Ìk+?)­dšVÇåêê‚°Aú²òaîÝÖYÆæRg\C#g™y;LÅ\6Ô{¨G©¹µÉ´¬ìàR|ÁåoÄ6º,ÙÝ`þA×½O7,æÐÍÑçÖƒHÌ Á=ËK7nܘ±öw©±I¹Ã ,fe³Qê_dFŸAÑÛ_N¿Ûwe>¤>Û;5ñ>Z«®„"ÈÙ²¾x©¶×Ý;Åï6¯‰0´Ëp}pÈ×€ˆs ÆÌL1(*É=ì~ Ù£²ÓêïÂuc[$›Ÿ2KÇíPv^ëåU™ªÂ"¯J°|Z{&‰‚ËKbnØo15n£ï ˆ””•y)`b7#öüoæT» À|u‹Èœ³øérPÑ•Ø)íçëó…´ÏÛŠ·ª-k‰(>¬¬@c¤¦2áÛ¥ÌP¨ }1Gt§%ÇçãÊg’ˬГsüý¾eb¢)¿ã1ó¡§Ô¦k`€éTpãè °ˆ˜În³S“NH³öPå;~l–•íøŽ¶_•±4*êru”EãPñ@#À‡Ÿð ³,qXá ³Sôw²¬KË*g¡ $P A–#t_ÜÊ(ÂZu íE^¡!HßV>£EPϵ¡aÛ÷¤øîÏrÇŸÔ±ŠºzÝhç²… É*3Qw·Ðéºé·¹jÞb¥(š­ávz­²p®=1#܃3ì—{rከJs²¬}·Q̳\áåºg×iÓQ(ã±óì L9ÕoØ4!ÀÖq ¯Ç;9Ç.ݬ¬ˆoPT­×ÔšG_µ2#ÿÜüuñš:V˜\…ž+¤ÓéÄqñ½úi¸ü†¾ôqŠŒ\u¾­¾˜¸sãF«ø¨’lý5û’–œF“û!²â€v-Ïdÿ’eaª*Z,ŽìñÎÈ6l³ŠR‹Hi»Ì–µ]±Œ,&÷ø>%³ôâÏ¡» Ní˜d˜îvùçŒ [+vDÏÀ™ ܊͉BSðž¡èwçE‘ñ }³ó_¬Ñ@¤CØF–Î>&°qSŒéÔ}I†ƒ¥°ßfê é¢ôÍCxijRyhp‹ó…´ÏçÏ5­-;1•ØÉ-ÂL½R3ò ç†ÏðãÞáj6Asñ®?¼÷ÃpjjÆT^dýI©G&q¡œ¿Q‰ éKë¤f¤ób’äÍÃðQC#G?#"DÉ<ˆ;E†±ßCË:5ÁšŠ‹ÃÓl0†·WTBZC£×;Î0ïýOÅ8²ˆQ͸´ £qrB{üÜôñs[Ÿ‘‘ ž³²¬šÝ¬f!Q¤ìç¦Ú7îBR“”D °}Ñ$,VRZ`3ËËË›e(J"Ž˜”šÚ$T‘÷5ÜKÿˉÓh›ãÒ$GbÓ°IæË‡û kzmž/¤½^ ¤ áövà¯D6»*ç’oû‰`þœª ÖøÃÚvƒ[üdlDkÓP P ©Ï`qkkø2åïÜÔMÔö †§¯ÏÀ¿U>€;Z<ÖÃlzË%o?Æà‹À•"9@„Pó«ÚW¾/e Ò§”ï›øgy‘‡þš•$à4Þ®’±øª0â/|„4{FXi|x¬‡Þ×Pœ±12-¯iHE•rÆŸ«¿ŸóýÁmë‘G^Þšóts«#X»‚÷Ó»ÐöŒY6¿ÑŽêÞ3 …‚Ú¼6n!Q¾œ§êêz>Ï.®8!ñÙ3T¨aqLq¹½©y€¡:|j_EO\Co^A"Ïôk>‘DÅDa)‚üÈÊGþòóp½‰…Ûò;nËùžœÖ…GWw\Þà«}§çûƒÓ_±©¸©û°¶ÏëÂû{wîÑ 
Ìøƒ3`Ïñ[†‚ï}|ЖófíA¨©¡ÁÍô`“Ö‚Yj‚Œi©SœÇŽný«¤Œ,?6¿etbt­èØê”K–‘€LÀŸVL1^ú/ýú|ζˆv¿BM—½ ÓbÖÓ?»^8¾ó¡<•çO7ÀÑ?$›‘ÌdEþ¼wmxØý÷W÷âÞáÑT)@^°AQŽóˆ*MÄt=ʤÜ”~äTΫJ#¥–3Æð'#o^³Ú a;ÆítCÛköºœü|$EãC}yeVgæÇYi‰€ò!ô-Üɨ[èÓ4)(ÒAÑÎsÒÎÉÉ<(ÛÖaoya1 **¤m:4sæL˜ß!b¢¢¥…w•5tx!ž‹ÛxKŸŒŽÝšDîêÌœµ L==‰ÈÁD ëÊÝ\) ÆCfpÂe&.ÐmLI±!¯`qõùѽÞC0jq£¡|ÍÈÕÑ‘h?‚‚±ñü&_—„OlØMðhO-¼¡×—Gºjº/Ÿ¹÷ða…¥%,D¨|¶¨¨¨€™ðÝRøàNSåëV\Ï1Ù‘?Ïw™"&g“óªÙ3ò•%†ÃøR¾ÃÌÈý²Ž$C òA¾HMM Ì„ï–:6«'ç¹r·g[á÷®eåOùÔ5F=|t[_¯÷Ÿ%;Ö–T>HÛ|þüfÂwˤI“¾‡eo¤deù|êÔû^W¾»wÅ a_'*äK^E]Ì„ïMµûþ8jhiÛ›%„Y9gºÿñGšk6];­WÈÈ(44„Å•òDDà’öß5¬êªÞ5àìIÃÂBù?zôœKYÆXöÚ"=uµ¢sݧÁ’ÊÊ×Wñöt;‘us öà^9:‚ÖMŸ5«³{½|Ñ›óK¶n{áïïK**_æYîÝÞR¾ÓIÛÂBC:»×œ9K/_Þ`aÑ n_]­˜ýxXf Pù í!* WxúÞY¼Ð{ï¡4£±ö=|ÜÚ .úº}?×ÈÀîù¼Z»®(,Ì –T>T¾>®ëó'a‘3yãᅥ{Ïÿº}­m<×®[¼¬OüÏUÑeKÃ`i@åƒ@åû/0ÂpHÜæ­c§Ïë±#þ•´-4dé·¤q)kSMuù\#!!¡ D†¥••ï?Bàb_úêÕö=p¬ÛgÒ–- üÆD…„iØ<{vQM­Æ’IG޼qu›Ë *¤cà —>DDXXtt´£OH·åß‹éîs\¿=)ÕÁ÷î•’„Ÿ(+u«+&ý|ÅŠ°„@ òA Ï÷«ß»Õó»ž~ÈÃmŽ0‰ÔU :1?OâIõm­AÝRÒêëÈû“Þ@Ùƒ@åƒt111˜ }Îó‹Û´eÔ”9$á®Ô€ô}ñXâ]n°¶ÎÈ·o•RRþpu•éÚ”óËV¾òôt…¥•}¾ÿ8þ~7sîÝ{R8´‹6«|‘_œÿ¨;d@NŽêêºnÝÿ.”§P»"Iøø7^óuteay€@åƒ@Ÿï‡ÀÄh(öÙ´-aÐáʃô¿:ºjÆ_G’‚—.;bhwÛ¼lÙ¯¯_çíߟ¶`¬€€ÀW§“žŽbå6 `9,¨|èóýpø/òÁþîØ½WRVAßܪSû–?½矋þþ‹‡…õÜ3pýûë,Zñüù½C‡Ïu—ïׯ5›-úûïåjjJ³á¥‡@åƒ@Ÿï‡fÁÏøkõ?->qJLBRkð09U6cÖ¼{ýôAγ§-ÆÛØŒ6Á>½bðÀCC‚qóì_û²sž›˜Ê3’jà ¬«¾ÿ@àÚõWR’¢Ž?9yxÈÁË ÊùVàS ÿt©ëúûÛ>~|ü´°¬¬¢šY…Ö£"b¢ò2rƒ4Ô•ô©fúÔïÇæ =&Län—–ä¾xùäÝ;«–M”””ìßU}˜¡! ¾r•ÒeÔÕÕÁÞÎÿ$’&FCû–ÍÊ*4ì¯*¤{©©©¡P(0 *ß§OŸ òA T¾Kù`&ü÷xý¶òνû¥/^|¨|ËúĬGQ’°ˆ¸”teÕ!úƒª|w£hQÑ_yùOKJ?¾¯¬e±êq ¡~Šd 4};’0œ‡Êù6öíÛ÷ìÙ3AH$’ˆˆÈÂ… »d©FH¯P[‡:v²¤0_w„…šîýáØ§u´*2ï<~|çs\\¤%Å{ÑæçÏÎþyôÆÀ"–ª‰ª! 
@ë©›uuIW®TeeU§mnפ†@åƒ|S§N‹‹«¯¯œI.Ø_%%%({}”ÂWå÷î2w˜a0Êût_^Em´Š¶ñô=ûJÒ¶a#L-L‡õ°Í§OÇÕ|fM"دÃȘÿ7z´öàcvvÜ•ª-o#‚@åƒt))©!•••0[ú¶ÄÇO˜ãã0ïkÞ($D"uÆܶkï¸1–zZš=`ó¥‹ÛêÐOvv˜Œ‘¿bwcc2ö)-MLM}ïï×p@åƒtÂç#„ÐÍÍ æIßbWÊ!5Ý!ž]ðÒ†‘“fV±?Ç¬Ž  í>ƒÙŸ_oÛ¾3wò$¾1)eea…óÖ+È2d, ¨|¾°µµÍÈÈ ¶ÅÅÅ%$$`žôêP466¶k_TD"‰Øyøoß“<í§IýäeºÜæì;»Qð!€¾Mþ±/ñùsá–­t?ßX* Pù 3bÄžòÉËËà é+0k؉»wuÓûùÌíoäæj)Ëéiuå:/§Nm7N¬;žŸðó•] Ë*„_ÔÕÕ§L™ó¡OPîMØ™8nú¼î;Ä@ÚË—ÏÐ'ú]4ì÷çŸë&O– ‘î³94´LLtx8|9-*¤#´µµóóó_¿~ ³¢O€¢èÖí;¬füÜÝRPU+|’+'ó¶Ÿü·.}íꎉ¶”n•=‚ðp¥5kW†CÏ•Ò.3gÎ\¹råøñãaVô VÇ®±óðï™c©jÑN¤ÿé5wη$R]•«ÐE!÷а!ÁýSS·:;û¢Êù"‚ˆ‰‰1fÅ÷ÏÁc=&{#&NûÆÙž‡ÿ8>o^!ÊË 5a@åƒ|¹™3áû§E…ÅzayÕ ®‹î=|>áÁvBˆñ.L ­Gí»”3Ÿ›gÌ”…u+ä¿¢|oÓ0ÙÛ§ÜÝo~º#Aˆ ±4ÿƒÐh4®»Ãd“#·¨œÂÎÍÍצÑð*—Íd“ÈÕ¥E%šÇ‡à„¸{1Édr´»‡ã•êÊœ[—Y†%B¥ÑZè+377[›fŒí›ŸŸ+£ª­ÀMäçæV£#"}6›DÂøŽM3ÐæycEe¯U´¥d `|צ*à–sÒÁ£0˜MϨº¬(ê(´#õlfn~EZåK^‹\âåB3PoË0¶{ÄJ§ð_È  {ZºÓ$r^Fª¬ˆÍÍX” ˆSäfcÓÜœ@–i¸4xšEx&S›æ<‰L&ý0·Ç• ™Ö3½zÝ Šd'ú &ÙKñ³ê} Z4¡E¡ Ü ÷þÌ>ÞÞš,µÚ`Ü%4Û‘9Ó)ÙÃPQú矴Q£œ` éóÊ8ÜXï&d# E“¸D€a> ñ g2ËÐaÅÿ“1‰Ô1E— 9œÆPÓ.acE‰¸ 4,í ³287ºÑŠRù[E´¥æXP(²Õ¹{‹Àí™Îw¦V,!û¡ñ±ñ®û&ÏÕã¹ XL^²Ø†·›SbrÚõjÔ”\ˆ šÆvîàtR6'ý+aüˆÎàrj!'$ŠÄoo×ÄÄ”ÈKø¡$ 7{Ã䌻ŒÁü3Îiøhagô€ªäpÎúµ÷áf’m{Ã'ü¨Ž[ݼ½“þj4·åcX-rÉJb©y'«  Ãý‰?Å<ÝÁêHz¥¹=´V­õi2ïm¢8y¤%ư :×”ÈØ'Ócú£¥1¹Û¦øg¡( °) оA~XÖQß$¥e7fžÝè;â(9Ûfûü¡œ?²ÄwÑM­=|Ÿ1ŸäŸÐÒéDÒê¦j{ÃÓ×gpZ6/ß6¸}\ "ÜE®£Ô|¢Ÿãeh‚?†+¹Pù ÿå{R "÷;·rOr4ì6öw‚èë±±Œ}x0Zx«3D–¨(±Z¾|1 ZtÁ¥H^HËŠFßÃxH·’Èinoj\„"¥Ÿ<·Y…Ë©²ñD޾D§¨€·ÉHÆí·¦#DÒO]·w0`?öS9qD›ígÆ`ßæ&eÑ-,°dè½1^¸+:7( ¯ÜQ¬†³qàž À‡sœ3zÊiöŽ4°ˆgl„N·“9šó²­µÐKJðFá„¶ŸKèy-^ˆ»“O.³¥ahî÷x$==«µ=èµ–Ë%·>M;ÃúÕOáæ¢ !Ë –þö†Œ- ôWÅ´2Ä÷¸ý0p0¯n–Àï8d„Ya—æNviök"ÈŠ¬Òœ‹…>ÀBØfå½µ˜ì±~°>Oª¾a§÷AªKKk••¤J_.Ÿs•ïYBT§Ü> ¬^!ÿå«Âª¿ &v6 ¬,Äý$b{¬§ˆÇ\e^Hk,¦Xñ¶Kj¿ "OïäA¾¸Þ ¯Ãm¬ þw”ÀuTø£Ã$³q,L7ÝñÐ/–k²¾è[qN  {[g\È,)lpѸ nÌßj猸™SpÀÒqý—~mKLF£3‘”†÷5…¶4¬¡ƒ± {ÚÊØæ§ù€ˆ‘bͳZ˜hÅ€µ 0§›•¼F8úÂòœÿ0/UßbÊ"²³:X¹ÿÒ†æKÆ9Äâ— ¤éÞøøé³U«iˆ‘R“ÎnýŸsÎonc¯œÍvç¼>ì`¢—óŠÙD+‰ &z» ˜Æ!ôè?Ê÷ôi5R=Ó¢cc¨S.YF’æ\&9„ÝÚ¹@Y…ºÒ~ËŠô9XÁ{xãz‘©Yç^¡¯' «WÈAù"C ¬”ây­þš;ˆØðê‡;8Õ,NÞ…€¾9V£¶§5yÛŠœƒ¿*d¼·“Ù<¢x‡nVk6ŽÓ3ò;‘½yrs©h S"Ùc>C¦^iöÛË—x·IH“:ÂÇâêÖ­ÿuxhÇõ„÷ ^@”Z.·(,®Ø"—šJÈåô󃌿dXk{øäP 
ê¢ÜFø;°jÛVÌû%éÍmâHKÕËÌðòÕ+"Â̹Ÿ;üÒl/Ö“7ÂH¿Œ¨½ÊroäæˆökYÝ_}Åã8vFýŽ>Z3UOì“)Æ%ìj6‰"avõùRªy»Ô¼Í/)¯ÕÔãúy7 *Hdî0jÍëge•,M]®¾²?•—¾ªTÓÐjL³W/_òcs¿~Â|ž†í·wÜæ—ºãì t9ùRœ›.!Ð=1:òaRf´›U,d…·ùž½;«|êê¬^!ÿå³Z}Äb‚‚ì<”ñùÑŸ~Ñ»,~»@ÖÀïõg¢&K§_K ¾DrÖÛ]ö*yuÈÛV@^ìô´˜é¤Båy–1«RD¸Zâê5£¤,.ÌV%9{:Šþ:“Z}yäEBälY_¼TÛŠŠ/{q§ø}«(øȮ̇Ôg{§&ÞÇZÒl€¿± ÏÖ5ö=Žyf$-<íƒSvùªPÛmº{ Ñlì¿m͆Ø=úóúéá²WÉf7ý‰¬çÓ"—Hx‹x3ýHÀ |K—_hu+Ã8îàåî-íÉX<´Ã‹u ÆÌL1(*É=ì~³™—±Þñ{…µ=]÷>ø¹ßÉÆ*)æïÙí%5!÷Xîe æþn)¹·¥“­Xr:HEùqÆùÞ¾}§ÒJù¸ý(Þ•§¬@c¤¦2áÛ¥ÌP¨ }1Gt§%Çç#">Nr™zrŽß¢ß·l#BL4åwô”Út 0 n}ݯ˜DLg·Ù©É'¤Y“ñC%?6ËÊ òyv ÆÆMg­, ŸØØÿ™äÝ4fT‚ÇW䞬\=ZW‡ ÂJÒ·•pÂ.Þd7YV5ÁÖ–âV‡úøx-y—ù¸ÒJGTØÓ7!ü6:}Wèt¢×ßnÝ•SB *ú•q*P·$´@Ö{ÙâÈ4º÷·,$³ôâÏ¡» *.¡hrL@Ø/¡ÔÙ‡Ñ;øøû:]“B$Kçº/t:ÑÄîµhJY¤¢/}œ"#W¯F«/&îܸqîÒÍÊŠ:Dwg/2#ÿÜüuñš:Vع„ž+ú„O´‰ =vW€õ0^X?›8Í@w§è_Ïî?—ën­*j›œý 9óÎÙ)à·mÊØ¾»bB6î¸<Åw3UK‡2P€Ø·e.qBÝ|âï EÕø(ckò’~Ý™²ÇÙ´¥=­Ûò4iص<“ýK–…©ªhƒs¼ŒU×ò o`ÌõÀŸYÎܹþ^=g(KÒ¦ºèÚ‚eKßbYÄõl’&÷¬•m×Z¯²õèCß©ú?½QSSÓ:в±ÃÓÖT¾ãF@HèÉÅ'_Ï.¹ÌßÑH{bC3°]-ãOöH¥Qå¦1C|ÍMÌ·H§¾®Ž›E„¾Ÿ d~ª¤ˆËÁJÒç•ÃÂÅ û4#Ç&$7~“î7Œ¸ÁÃ×4´(ùMWhkQÆ%ÑÑx’Ò˜¤$î˜[x|Óåÿ|¹‰`Ɇ7nKÚ§iÜØöÞqöø£†Ý‰½¤µ¬S¬‰Ø¸8¢_ÔbYò™]Ь‘ïé†foÚÎO¤¼¼¼e ZA!5˜ë 懯áüÏ9´|¸_[¹Ä ‰Kn im ®´p­íi}t#G¿§‰¡=~núø¹N[“Œ _½.VóƒxCydu³¤ÔÔ¶ÒlÌp—  çÞh«ÃœÛÛ €¿Ùh쪜K¬ç‰ùsª*RX;Hnñ“±­ELC-¼IDRŸ ÀâÖN¾Ú¦ßS"ìð„üW”ï[P¢6Pü{ËÌ÷rv°µ³ˆ´ZõGb„mÇ;Ô=^¸”NnÖ¶­H=’ÑCæöîÑ$È”6f¡ðfe­9O7·:‚µ¾x?½+m²á]ßBéx î=R(¨ÍkãFæk¯ººþÛ}ÝâÒzuåÆs/.®PWïôkêëÈø<;*æ %Ý›ÿ=f95=«ÑuS“’zÍØÞ=úD…6z3S÷%Px]xïÎ=º€³ŠÑžã· ÿÞûø -g¿†[ƒPSCƒ›éÁ&,}¹Ô ÓR/¦8mcÕ)i¾º ß½cãYWÞ8G5+DQÞ“3ÕTêr”÷¨;ßT”‹*öƒ…•éSд¨÷ÊjçhøGÄ’X,²ƒÝ¯P“ÇeïÂÅ´˜õôÆ®Žï|(D¥ÇùÓ 0EôÀŸŠ›‘ÌdEþ¼wmxØý÷W÷âØáÑT)@^°AQŽ3ŒŠJ1]2©7¥9•óªÒH©å<.åªüØüþ}³á@&ó3™Ìy°]Ëd#d²`“ÀÚœì2™Òêœu‰ØÌÏ$²HNv)µåî”êêMD l’PuiE £ŽFãjZyiƒ%¢­.ÁdÖ‰ó(,dCåƒ@僴A\\\`` ̇ïôÞ*yòx NãhœçâÅ­£ÉèØ­I´#¶9K­˜zz‘¹+ܺ®ÜMl ÆCfpÂe&.ÐmLI±!¯`qõùѽÞCš Os2Ø€›õôdš~]Bñ;¶.ÎQÂYØ7óˆ:»ø …úOY¦¶¢õ·»äøÓècž0…ûÆyÆõIĆ;â“ LPt&…„ù|1ã}#® Ë ëg gć³øÑÀäŒçD"Mû¨ÂÌ"T>H-ô÷0¾gŠÜiª|ÝŠë9&;òçù.SÄälr^5{F¾êÍsá:ü$b2Ü€›¼¯+2¬T^ŒsœÌ™õš À¡_[ÒÙZ¯ÉÜkENŠ›‚ >ÄDHå“oµÃdOEæ=˜ˆ¯dë^p÷Y„¹ŒœÅð—Ù¸»§´´„YU Ë*Ò÷$õèãhž+w{¶~÷ZÖhþ”OYÕðî¿ ‡pç§(ÛMÏB@­£·Œ¾²¿XÀ+åC6›jâ†SxSA‰%~&5†ÜÝpd–…µ˜+jiÒte„Ï qñ#}ò¡…%cÆ 
‡å•é{8NžTÖ#H/?$'**ÊäÓéïyÊG¬d¶ë·t·$×9ÚÒV1°¯FÄÚIŒ: Å=/•–Õ@ÿ;Y†2ŠÞ|Nlyù²ºEÈÇ”!Cm`ù@åƒ@ú”úŸ=pdØøI½iÄçsfNç?úøñ˜‡×¸àËaWI—è‡y¨œ4Ÿm†NÁ3#©ÖÔ…&eë.ã¹nM•¶˜ºbè‰GÑ/MÇÆ•@Nd)ûÝ ÙâÇŽ=ÕׇÅ•é› õ½<^uæð~Ã`þ㛚ÎÈÌ\om-A|±Ù¯Dÿ1g­kʺËYØ–UtØuãËË"«˜¢è¯_iš®Þ·IÑУoÁ—º› s!·R”NŸ†}»l¦J?EBóˆÌ#ܸâèñY¦ ¼â€¢B³f9À’ÊôUæ¹Î:zþ •6¬wt­Ÿ–¡^§}ôu)KŒý—ð’Ú¾ãÍ¢…p•é˼*Êï-å;“²=4xYg÷rq <a½Õx‰nµ Eã¶ÙY·¦/Hç>£Q_/:fŒ),3¨|HßfÑüy»R ·vìáã¢ÕŒyó¾rÕ#!Á~­ÞüÕ刦¦·\ÇuÍšâ°0wXf Pù >Ïp£¡Ÿ™L2¹'zùÌqß…_·ï˜1®kÖ® îߓ߾-¾4(–T>ä¿€!MoÓ¶Ëi=çÍœý}GÈÒ%ß’BpÐòÌÌ8ÞT—î†Í¦ Iƒ¥•ùà¿È‡³ÚÞ3 Ž•sþxPÀâoL¤é;<-87HS¤j’””מžs`9@åƒ@þSD„‡EGG;ú„tëQ]=7ÓiŠPÜ›Jʺ¹¹¯ž¿È8@¸;MŒŽ~±bÅ XB Pù ÿ XýÞ­žßí¿ÒfÏpëÌ¢-íC£+( ?z|COW´{L&'&–BÙƒ@åƒ@þãž_ü–mæ“g ‹tñ˜Öé}›B‚— vñb¡šš¦ŒwÊûöýîáÑůŠ}úTîõkÔÛ;– T>ä?N€ß¢ìsï>|l8Ö¾K¬z]ôønvxXh7,#;ÀÃ#lÝúÿ-ð‘ïeE¶l©˜;wÊ Aа<@ òA ?ÆChØgËŽêzFªßð&#´æÃéƒ{—-[:Ú°ÛWº\¶ô×ò²‚}û-Z$' ðõËpŸù  ˆøù…ÃbÊüpø-ðÂþîÜ›D–’¡´îÔ¾o Þ¼|ÞÏÏ/"<¬Ç VPÔôó[þâÅýŽºÏ•Wê߉ ®VôÀ %eÛ‰nðÒC òA ?4^žøs~ùÏÒÓOЉKj &§ªÑfÌÏŒ²§¹ÙEyFo5ÊÌjdï,Š6`ÀàÐÐÁØÆ¹sI·o™š* †HIµÑ Z_/tÿ¾Ðë¥d²¨ã”)nî°o•4 ­©¦íïGlW~¨Ê/(,{SVÅd¢õu¢b¢r2rƒ4Ô•õÔLõÔ˜úØlcãnÓð½Ò’Ü/Ÿ¼{Ç`±ØBB’’’ýû«RÕ‡*@ òA •ð(¯üp3çßW/ž¿¯|ÇúÄDëë…„EÅ¥¤ú+«¡éRðÝYŒ¢ÅÅgóóŸ¼,ýøžQËb¡B@\\¨Ÿ"™J`@³‡ëPC òA¾•˜˜E‰m:Žý¥P(0gú.u(zøXúó'tGX¨ë Uц}ZG« óNÞãìZ?Ûe†¬”d/Ú\Zr!õÏ«*Ê$‹ÑâêT’:U¹V±ÞÖ×ïÿçŸO—.~3vШQ³áµ†@åƒ| ’’’Ä6›ÍÆþ2™L˜-}”çoÞ&ïI4µ¦on…}:Œ/¯2p´Ê@l£ð;9%a¨‘ÑØ‘&=ló¹³›ß`:;Ëø/îø)Q£(Øî»wã.^¬òó " ÃKÊé^^^˜Û×4dÊ”)0[ú?}Þ·Ñf¦—ƒgà×Ü{$Ò'üAÀí{’,Fšèjõ€ÍW®lÿTô™ À×¼ºÈÐŒ}^½Þyø#0ðX Pù ü",ܬ½¬­­­§§³¥o±÷PšUÛ¡+^Ú`n?½†Í¦¯^Ö‹¹Ô²Ê7oݱdI?¾õ]µJýIŠ/n’Ò52²‡…•Âòòò¼Ïââb˜!}EcVÇv틊0ÿÏÞ# qÿï“m'(õSèr›ïÿ›TŬàÈ^—1v¬8‹õ|ó¦U‹ý—ÃRÊéooobn †££#̾B »vëöÝô~>“‰Ó²Ÿ>À` ÑÕîÂdÏž35‘’ïrƒ……û+ÄÆ® „e•Ò$‰ØÐÖÖÖ×ׇÒ'¨­C·nÛnå2¿û¡¢©Wþúއ‡êëvI‚éé¬mÈ"ÂÝgshh:=:"¾œ•ÒD‡'ìêì+ (ºqÓ&›YÞÝ} ¹þJ óÊÊ”¿uyè;·wYŽÃdY\pî^¨7TC[ê‹1ôIu ¿­Þ~"•7þ”1{‚¢í½;0"B z~¨|Ž!:ÔtÒò¢;›=k Ê €òдٴÙwðϳþ¼"ÍDbï|ŠòQ%\]Ý*s¬¤\\ü+3&‰þwà ê!0øVœ5óæ#”\Þù_fÀm•gEG33hÃÌ ’wí4!÷X¬w·[©~¨ýÔ³g)†…ýae5Å@ùhª”¼§ºöµh¬³ZŒ<{1jè@³ µ{Ÿ¯ýLÅÊ®ÝÆ†† .„År¼ðŠU5 lsrãIéÙýa{˲γæ¡Ø(M˜ßv›žÜˆ HI¡´Q>Š2Яni…æb¥[ô?éR›ð§)]–Ø!žLƒâü²ýwU…_ˆ›÷ýX´“{ÕF¥ä^¥­Ãî5›ÖK÷ð¾>–uL3j´ô?ÿ÷écƒÂ |àãàf¦±Uå8å.i))ªêêõ 
š’Z¤®&WŸ³|8¿3TÔ7Æ–cŸ¥¥·UU©§ÿ‹—vÀ©b‡å˜ƒ,ÖAþ®•ûLc)*ü yQð^o¦3±w¤U9 Þ×IOÔ‰ZÅbýÊZÏ8„¦í$ONòš‘ÔÔ9,µ…3ŠýµëzmÄÆ>ìÓ1ø^”/åâF óEe»ùÅþÒ J‹Ù_dgùq~OÄ?HÒ3иi`±š9|t¡›K=ýÇÅe Ъ’£©×*Ê«²Ï·üW¬DÿŠG”+¸¨Œ¬©¥Ú­ŠÏê™0QŽzÿž%"‚7,húÊ—BËÞþØL;¦ ÁŽÅâ°XüuæÒÝÍ-fééé n1›ä&$gò\Šiëé1ZVÌ-cç§%§¾aéuä™<1A(.›Íö²›:2fœº*¯º†›AG¢¡§WI_ù>â´õ è°%È·ÖVDC%$äSýÎüø‹‹ÅĘ3æëuÖ.3§’3^ªiÈ•ê+í[(­­¡Ä¤œã%—+|EùID¹£RMR_Í¥1§Næ%²ÚÙämBrZ™K5HŠg¦<|‘S¢oPÉOq|Ü#ù6jêJe’Åd²š¶žœX5U­ —ܱB´L òJªÊìJém¹ž¿æy”åUFK¯s©¥XÃ=ú>ˆŒ8ÛßfZãÛ-UêïyØ0Ù¯¬ÄŽ•\"òÌe+Œ TV‹¼t¤_ÿ‰xÂ&¯|n=lˆÅ¾ìÑRT àI`bèê¨äç6"ƒ2LY%o´ŒhôÓLŽLây°î§™ÌÌókÎYM4ͬ’¢ÂR sNÞê,/3Br9œù {’ÉÍ 6ovgÍc;‡WwW?¿ÉûîM)[Í€ñY-½á`kr5Ÿ2f'±XZ–v$<0Ž?³°¹8sFSœÄsqÓ`ù¥‡É–E2§¶a±B±µêv‹~´eû"-WÇôÝ-¦ÔâyWDˆ÷•Wž&Õ/%ZõÒø§f’KQòÕG¨ee˜[£éý«‘ɹò” „çï¥Êרº.ò[ÏOmÞ &6[‡ÉAÆl½µÇDœþ4 "c)êHåx bYR=¬\ø ÄëöVNwWkׯeEmâp8t²å˜C8Ýç^_)ϳùø÷ÚL+6*‘IÒ+V ÷è;ADô›X…N»G}«Ÿ>=§«óu×O¯Îì«f8ü?ÿ<î×/XÐô•ïq Y¶¿J«uöiÞÃp“þ;X²«EŸÜ}¼G!éýÎg±Zð_Ùô3?„žWbä²2—J°;Mµ"ÓæŸ 5—#¬p$ø.Æ’í4¥ÂÓUDQb¼HŽ¿ F©‘ì VØÍlãž¡g®Z 3&d?}(“ÆA;ÏÍâ'†Þ›åmjJG;K÷?ƒÙ»½æ‡ðĉ~˲kcDÈëó¼+zBÿ!Û»³©_îVþ_ÛlU/ïBÝ]Vs„Œ‹!‹åõÚˬ:Aí´.1ʽR^ ¸fñù, …@_¾»¹Ñ²e‘¯è~wµZ²ï"a”OšÖ¤ bXìrÄu$éµ.Ôßøûò”ŒXvwõ¾üγ—$ß%ì´"dî¾'ASÛÏê¾Ør+ýApŠ ùòXÙ½¶³vLàçZïÑw€†^÷·‰òÓÒJTUdÓž?UmÓNøß%óùS¥ŠîuÂi.÷o‚AW½:}ÆÞ¼Ó®Ýç.BÄÍÌ-âÈË}ž¯Õ¾9^¯à{P¾·„äeq ©ðŽÎKbì$þvÿi¶Ä¶OTË\ªb:ª|ÑêÔ’êýð>³s™žÅª)ž²Z½þ¼þ}L £ âo† 7©äÇÔ¨‹pÀCKÍÖG•I‹9ﲿÒp¨Í ”ÄMM*5dÜôS\ËÕri|—Ú#´P'Aû®x™ ©&ÂÑ}«æmËúöPv§/¼S{ÞwÃDßõ„6 ÝN"2nV›ÎÔû©¤]…‰c^²¤·Ô¡8ÿdŒ¼ ASoÒ_ùs»Ý`”››Y$0Ä¿Î{ÔÔ).)QѪ0‰¾Štša[ÕPoâ· ³ü;þåáaFºñéB#Xy|—ÁFº{¹©+÷<¨ò=|ôVxù=ášIËÁ±aÖõ9—ÿVK#=Û|NîééAùÀw¡|ËÜ;[X«ø•™>¼J³ü{¿óÞœ¼§îâ?¤S/Ú«M> Þ”m+óNžž”K˜ÚÎJ£ˆ¤ë4³ª²e€®¾ó©¸m#**0¹´ìñ£=áØutL…c/^ä0z)*A+ß ¨;£%õø2¨ri®>BÁôÁé)ÄvbŠ0-·„È öÕD™ÉY9DU’”¤FßÌëÂô*Њ™ ¯Ìª6 hS¦Êk÷P*5^xNªž¿Ò7<áŸÕ„Œe—ÞQá6ÄèÐ ]†™}Â=jB$£ø.ך˜¨±:'§&Z4Þó ÿ—w'ûþº)ö]ù…èÒÄ,í™_šKQUïÑ÷ôldfe+UQ>íM1ÃT•Ø„•Û[K•oÛ§ôÖýùÏ’»Ìx6ßãƒÀñÝOÿì<çÏí¿ñ]Œ´À½7MvëÒÆx4¹vü9}¯Ó¹|Ÿ6¶“‚ƒ˜±ìEFq^Õ'Í-ZT~¢é¯žJIÆRþ´ –Åêÿ<‚cpÄÚqPˆÿyBæßUàt>ÊWÄ–NÎÄnóÛÀ="½4x&£¦™fRTÓ¼·ÃÒÑù,qu·ðó‰° \h[Û:·ÒÍIÁ»×’R2xÉ‚¦­|üüèûüŽîa·hý0·X›gyPT¾»££ý¼œˆyæeH–•÷¶®üÂïí½™p³·7„‘åÆ˜3›Ý³Zfð^¯¶Tb ‡…s—…Q{W¯eÞìi—f¸ïIÌ³Ž¤¨ µ®KÝ5&¦b™Ië·{{kqøÑ Öt¥]ø=zØo$Z²¦ÁÔ 
GëeËÖ\ȧò/ìÚ²eÊ‚mªÊy^äy¡Ø¹ÎÏÜè§ÕÑœ¾w7·óÉ6+ÜOÜjVtÏO¼Sÿ2Ý쬽~=·ÿ|‚E'’U"tEÞ5eN¥Kë_~êÊæ1•‡c{eŸsZq/ôNfµ‹ÒÁ£Í8ãåp÷yó\^„ôÅêÈ¥—¯êïZ8{6[mEÝâ‘\íµ}¹Û¼áÛNÄ:,κÂOíŠxJbí–„\¢'_!fv·Eq'šmÙºÖxô¾Àîa]䈜Ǖ‡Fûç-ôh­Ö¡¨T×W\º)ûǹyL{%GÂ'}¯ÝlýbYÉùLËhÕ{ô=ñ®  ª£Yy…çPcźíÝÅî§çž~9½‡ÌB—‘úÚ‚:mÚz£ƒšù:lOXyú­T…}.vêedRyQÑz=ªµM.jM[œ·ó},Žx_Ýái,J~·¡µ­HÖ¼£ %DO”ÐÂWÜçÒ^ßµ´XÞgZšílþJxÇ¢ò-H_O]–ܲ@Û:F8@ùÀw¡|4¦ãíé_E7¶Pùž¢¡§³!ÿ÷ôœÇw›çé)0érIÏôÖ7€¿áåÅøSé(h³õô³òé$ˆ„ŽÖ³¢ 1µ] ˆÍ?DàÛÊÁ׊ù_¿48?”\‹`ÁN>¾¾üzQÓ…AgÓá´Ðw çò ©áŠ˜H233«©œ¬xiú#õ…ŽVˆ!cÔü#£æó+¹¸™yù?1ÄèàÌu EXv±Æ#íƒGV¸Æ#ƒyþ™$*öòt.»äy™™•+§¤¤?rA `q$AíSBN©àOÖpÞ<Ã*q²}ƒ*ܾJ÷è{¢Yuæ‚ÚNB\TØúý×ÄGÖ±€mϵV“ez?Êt©tÈ@_¯’ÏîSŸ@ÈÜ*õü6­íh2!†â"L³®‰“p³îæ¾ÄnÁƒØyŒY©/JøµÜÜ·eƒ'òëç9U†4Ôëûm?œò}ÍÕ»¶k+ý­emÇØ jiºÌ|Í‘€%CëðþÁìÞÂíÜwYC?æÒÄÅeIëò)a[–…<öõV5ìØº/ruS§ÌVP¬X÷߃¿ÏÁw_6‡SÕ±¬WÖú Þ½ÌÒ$e‡rRIõc Š‹™UwCÝ+.‹ÈR¤äaÕCÞ—ÔËN}÷¾Æci·ã9k*±›Ãi»Æ©”·Ãéž?_~óÚ8bµiE™c…fݰ]¬Ú“ê‡4Ô‡£„R |dfàí™ßä+.84ê#¼‹èVsuõ?_7êyù®úÈ_¿Úò´¡aÁ(埆ª²bÕêÎà}þ´¾Lº³w×:na„7ó“7º‹ü½÷9q¤*O>äiAÜ»w¾ºÈȨöÓ-0!ó ô‚/°éß·êQùõIsvveÜù[¤ Eþ>r>$*Óz³­Eר›¨Úë%û$ü2~ÉuFÌä˜jù%1$?º|ȼ˜^?Ú¯ÍÚxW­t³ñ's)^³îžÔŒ1C<‚â(jF-)ÉÍ‘’o!ŽR |4%t4ÛÝx–'&.Qæâ²ÄG¬¨¨ˆš]NÜÉjÇ3¦/^ Y»ÉûÁä‹'wÝS$’r\¼;ӊ貄™.l\·hÙŒ½<=¼—©]÷\â¥!Kس6++ðš)9¾ÏÉǹm =z&>=O_¥r?®Vjmë“æââ ÕÞÞã% Kè°)Ia¼Äó¡Ñ•y O¶VkU¶ðìÝãNÝ—à{èímîܪ~ä;Z®¬ÝÛjÅxÚ4¾gÁ ·“WîáotYĸŒã¹Ë™¥S-ãr`{Ó]ÓËÏŽïuè*Ô<- k½ú$XW·‚Úxz¬Ö›¶E¯P‹^Â.z£è ºúãñeS•JúNöfë9¾¬Y73óM¥hå”Ê׃HHÈ60D P>P…W¯^!¾eïÆ +ßWeòynñ²3Ç’RŸ^aŒ<7çe÷NZõ‰Ä¨Ç Ú6k˜OzHAA¸>³ÐÝßQOªtš½,.Ê€òÐôàH7h§üi+÷T;=ö¿W"{wÒ¨O J-;Þ¸Ö³'§Rj_Å­¼®Õ̬;Ê€òÐô=rÄ‹Â÷"ÍyµŠúˆùPΟý5”/-%KU]±žž?nÞ£Çp”å éÑRAþäþƒFCF7bXÅï~?¾þþ--icë¿/ž 5_¢¸¦RõWü¸tô)P>š’Ü5ÿGö/^0¯þþõõG†‡o´´”ár Ùl‰„„çÚzm˜e,§É«µTâÏÏY\˜ð(›#/§ÎŸ€³¸°XL"?-+5Ÿ¥×™K6åñ„Óœï§(«-{Å%Åb¢ŒÏÜ÷zz‚ÙË2Ó²r‹$´Õ›s¹ïyóŠŽ3%@ùhªØM° >Ù¾»Q£œ½‹X ò±¡Þ½“ $ŸÃa¦²6S ‰bfñq°5 Š½Ê hyîOQ‘¶Žý‚ü#I·±Ô-‹µ]–Ä-³n‰Q· o¸:‡Ã,oeÝ;,ä2jG…÷ç¸Dä\îDûÔ4ëI¢nð–ô·a9ò–·löŒNåºgOöŒÓQr”€&LîËç„4Žòû+`¡›ëdž3fîÿÎ1 ŒQþ¼e,O¤î©ú!;È1üæ[­¾QÆšv ü½7‹µ– fnÖn£žD2c3hÏüÅ —GlYaÎ&¯º³ä~ã×ç¯:9Ô.1œÞÈbÑÊZÂ[Þ’í7Èv‘ÝZ E :£Ì(Mû©“ß{ÀÄÒºÏË*|;iÒ¤O +-ÝšÍ@ÖO•é#Ó§/yEÈÛÄkf£"*ù7Y¾!1H󦯦¬zeŸFÂËY’òå-;‘¯×¯áîƒ@ùhú˜ö1yÿFŠÓ «­^!Í\] ´(ßzÚíwìéóÓ¤;ã…C»¸¹|f$áá¯--«’¸Ùç²îTNGFöòJ/ný–è™>éö‘ÒÅ 
‡)9¤fl´W^EÈÀšÏÀ Ÿ` ;7_ÉÀù!]$¥8(-ÊÀw‚“à ïu>VS]à\w£Â]œf^ÌZ’= G?|ºÝ{Lé2–c‰–Ô j³Mðêßå(Ê÷Úc¾þI²„Xmö8³ñDb¶|FéLž)+=fRp±¦üfyÈ1:²”ô j-•ùšçí=†ð:¹lY~üä¿b;½FÍ^.‹r ||W,ñp÷òò鏸«ž%ñæ¥1#,ÅÄÄ>/ÁZ’Ù9Ý­&°øSQ›ÚñW¦ä ÖbœéɨזCämññŸ*^\®U`pù,-NžLS¥‘„¼d\X,G3wǾYò¼w•-o ”€ï‡åË—{¯[g5Õõ+Åá¤õO#ØRR_*B³§O¥ïÞîܹÆ8›««µk#!ì¢IÛŒõÇHQ=º­}_ßå-€òÐ-?m¿ù µ‘`á~Ã÷o[´`¾¨è~*Ûµ3x•§²ç}3¦+TëafвJ«K&2užuO×ò4Y!1¹àæíÇ(ÊÀwÎÜ9Ž·îÇ^½k0ðËÌNÉÍxöïËžî_«UVNeÆtÏÍ›WÛÏTh.óe¦!ýmgöÏG ÔPAyP>~ºééÒ¿ßü÷´îع­N×O¨ðmØ_{æÏŸß»kǯæùó—fe&ïÛ~pÎlEÖ'Çq¾YqI³9³15'€òðã1Çqýw÷¾’Í¥;÷ù¸iÆrž=¼zñ¬³“ÓK°¢’Æ\ç_ÒR‚„ØÚ*ªª~Äà}‰Ä¡Ã9 -†‚[ |üÐÌœ:™þû$åÙ™Ó§%Ùín=[´Ö¬ÞÀËËHL¸ôàN/³þæ¦}5Î"vªjzîîÌ"ïA7n$+õ0dÉÊVS JQ" wů^M5rÄÏ?£n@ù¥´Woë6׉¿÷úí£Ä¤Œÿ2Þr¹>¼——h¡¨ÐA£šN;cv„üô¤ÙÂÂÖÂB°M‚ÏŸ=ÉÉË.**mÖ¼¹L«–ªšš=:w‘ìÜ·@ùµ"'#m¤ßµi¥™6éî€òP>Ê@ù(?Ožÿ÷89)'3óÝ»|êý1 I999vm»évø6L•¼{’ü÷‹Ôç¹9ùEEïEDšI7—l©¬¤¥ÑKV¾ n(€òÏåÏ?ÿ|÷î{÷îÝô_yyykkkäL“æêí{‘çζPQmßÕH^YEE»»ŠvEu!äZRfò½[Onßè¤ßsŒÕ FV»÷oCÃwÝ»ÿ¦O_鞆ìšuèÀ"DZÈK!áÉÉ…W®äç½ú0jÄÕÖß |à“xùòåÛ·oùÛ©©©ôߌŒ dKå=EíÜõG‹–ªŒû±­cQ= ¶´N¾ôÞ¾ÿèêÙcS§MWk¥ÜÀiNN:rüÖô©ŠÃ‡sè_íž54$èoóʉ“áïK$¬­]qß”|³fÍÚ¸q£°‹EÙpbÐt (ÊoÇNmƒÞ}?i­v•6湦­ß4k–£œŒt¤9=íâ©ÓW.hõ ÁG”c¬Û«›Ÿ?;ú | Þp8¾²Û·oodd„liZ\¼þïó”¤þ6Ó>311‰¡vsþ}‘waÙ0_I¨7lZå4GÙÑQé3c21‘61!AAk ÓªUGåõBZZº¬ÂóÙ³gȦŦ­ÛMGþÜ­íë®"ÓB¹ÿØé¶ø9Ïv”’”üâ ÎιzaÑÂV_0N[[…Ì̈Çώ‡ñ | >Wxš››#Cš E­õñùJk²šèp,ü‚…iï–Šò_0ÚÄ'ÁEE©´P}ñ+)‰/¶eóêyó—¢l(¨ƒ² OTu6-V®\9Òqñ׋_¯×À˜ø½ºé¨*™n/Ïž†HH¾Ôj/õõÒEV4¼×®ûª²ÇG«KϨë1#öå°?W®^¾¼XXœÚ¡ä×NóÒ¥ª[ýÖ¸¸þ‚B | 6øžèÕÙTضs—Õ´jÐÒéÑwÏþ —ÙŸ U½};nÈ™†Ió\…sÿÛ7xÈTå5ÂápdddPÕÙ$¸|û~Á#òŒf£m}6lt_´ð“cX¿i£»{«K0‹Õ¬C‡7¯_ý'#Û@ù@Œ7™Ð$ø7>ÖdhCO²3püŒ')ÏÚ«·ý„°ÿüó{CÊ M‰-›æÍÿ@ù@¨©©!¾}vþÔËʦáÏ+&.vìˆËÇIQ¼132 Ÿæyó[ÆÄíÛw,Š €òÐT¡(JAµÑ¦l6c}å†i¯žêø‰M£GË4VšŸ=KA±P>š0ûÓïoÕˆ ˆýhå“ 1Á“&ÉÆÇ†ëZ¢ð(M’â‚‚ÆM€é(Ûÿ²rë?¶ýÖíVV²›æˆ¿oBùÀ÷£|)7j˜/*Ûͧ(ö—NPZÌþ"ã)êb¸5 ñIÍÌ5lØ.U=t(ÈÕyN=ý‡…&vï¦Ò¸iž4A…|/Ê—BËÞþØL;EzÏŽÅâ°XÅÔ«¤=º›[ÌÒÓÓã{är‹Ù$7!9“çRœðH[OѲbn±;?-9õ K¯£:)u„â²Ùl/»©#cÆ©«òÆðr3èH4ôô*é+ßgBBœ¶žöÑ£ùÖÚJ‚hÈ£„„|Уߙq±˜sÆœb½Î‚ÕÒ¸™iÉ/Õ´ äJõ•vÈ-”ÖÖPbR΋‡ñ’˾¢üŒ$¢ÜQ©f©§£}‘û¶µ¶6»<Þ&$§•ER)mü«Þ(ær ›],¸ºyUmUy|4&§Ï„šX6þº‰Òr1™™¹yóFO°ZkQôs߉ò¹õ°!{ø²GHQ¼ ‹Eˆ¡«£’ŸÿÙˆ Ê0e•¼Ñ2¢ÑO392‰çÁºŸFHd23á¡9gI4Ñ4³JŠ 
#¼¾[Ì9y«³¼Ìè/Ä\§E~ÂÞ€drs‚Í›]ÁYóØÎáÄÕÝÕÏÇoò¾{AStKÂø,‹–Þp°µ ¹šO³“X,-K;ÇŸbÑ\œ9£© ‰N⹸i°üRˆƒÃ䀀Ë"™SÛ°X!„ØZu »EH?ŠºÄ»"-WÇôÝ-¦ÔâyWDˆ÷•Wž&Õwpc±üè”8Ž ð?ær:Ý«å.^-+ÃİX¾q\)m÷9]æ»á},‡Ócÿƒ»Ž":ΦԼÑjÌB0ü¨øî(¦Å›¼ìo!{˜ÕÓgnö-ccéo!ÍÑÑ÷úöE M_ù§eû«ôíÎ>ÍÓ°›ôßÁ’]-úûäîcœ©¤KôcÈbµà…´–äÂÌ¡¤ç•¹¬Ì¥ìNS­È´ù§BÍå+\P›ê»dK¶SД öE%Æ‹äø j”Éb…ÝÌ6î)zæªÕ0cBöÓ‡2ùg´3ñÜ,~bè½)QÞ¦¦t´³tÿ3˜½Û+a>-{¹E«Í »6vA„¼>Ï»¢'ôß²½;›úån%|®%srYvì‘×èþÛr¹Dºè)‹ÄÅò‰zíU9m:ô¡úÔÛ<̦ͬx²;8ŽÖæQªHi5²Î=–oÐU»Õ}PL ÍNú†•Ÿ–V¢ª"›öü©j›vÂGø.™ÏŸ*Ut¯‡Í×"î߃®zuú¼v3z良—!i)YªêŠõñ©¡!ƒò¾å{KH^·Ò ¡¼$ÆNâo÷ŸfKüè··j™KULG•¯KZR½qÞÇ+ýÃØ^ÕSV ØŸ7"®)atTüͰá&•ü˜uxh©Ùú¨ÒNæ¼Ë"r¼½¡6ƒHP75©Ôå£M›nµ\Ÿ=)§ÅÕµW2›Zù<Á+ b¡N‚ö]ñêÓ®RÚ¼MÉ–3ÙOæßOåk°8d%’)‚/€²WJhãAë´îPAlôU„*»;͈¿°­31~Û0Ë¿ã_f¤ŸÎRÄ<¾Ë`#Ý=‰\ƒl!¿{ï^}”ïÁý¼¡C¾â*j¿DqL¥êî;Ú¥Ks!ð=(ß2÷ÎÖ*~e¦OA,KªGþ½ß yÁwxxñÒ©mÕ&Ÿoʶ•y'OOÊ%Lm'·ÒgnfVU¶ ÐÕw>·mDEéªPDË?ÚŽ]GÇT8öâE£I¢ô'³%P/žºßXUç©ÅÚ çG»Ö¦;§óÊܽ,¾¦2˜Bl'öØ2@±RÚ\ýý83™…ŠÔyJ·eåNûƒ§P"¿ž±ñ·‰äDžüǺû"ˆþ&¤å_9m溳ÉeqÒz½»Ù”̺ؖoì‚o—™Í[WV>Áëœb™RUbVno-U¾mw`œÒ[÷ç?Kî2ãÙ|#,püD÷Ó?;Ïùsûo|#-ÅßpïM“Ýz…´1M®N?é\¾OÛIÁA ÏEø¼Ü7õIs‹ôÎb­0³éý6ø2¯EÙŸÃq¡YY÷ ¹L†ÚQá}X,GB4m­J‚žePþÊ—dÊ_qŽ‹æä9‰AÝŽ8¸ûHÙÛÄmYC4Íz’¨¼frÚCD^ÀõáN•Üãwl7p¾ãàØ+Àÿ ។Ý,ÿm6G¾}x—ßÑ=ì­ækˉòóÝíçåD<È3ï(C²¬¼·uåÛmÞÞ›ù7{{ó×£³Üsf³{bVË ž~ØR‰-Î]RDí]½–v‰H»4Ã}Obžu$E­uõXê®1é0ËÌo¹ÝÛ[‹ÃÖ›-í«%=ì7-YÓ`ê…£õ²ek.äSù—vmÙ2eÁ6UåŽÁ΢É*º"ïr&…››‹Ë Ãy'‚GòÌı½²Ï9­¸z'S›í*i3÷[A§_q-´Ä1èùó®RÅSô§ßRe´±Èçr«VØ™•Wx5V¬»Nb±ûé¹§_Nï!³Ðe¤¾öA ý£ƒšù:lOXyú­T…}.vêed2·R<âõZiˆ-%BJ˜Ê†È£S™’’&ÐËå[V˜³É«î,9Z€ûDEÌ55ïL»g³wDç/nG|O¾ñüÑÔM:pŒu¿ëmTžîÍáôUÚ%†÷!d"‹5_~óyEwç;ÞW·zKl_ .®ýWiæBùÀ÷ |4¦ãíé_¥ÇÍÇ?¨|OÑÐÓÙÿ€{zÎã»Íóô˜t¹¤‚gBVøð7¼¼?b*ý-d¶ž~¶B>‘ÐÑzVt!¦¶ ±ù‡|[9øòfÞ(ë¢À%×Á"Ø_°0¯/ß\3]tvñ0N }§pþ!ßÀ®ˆ‰$33³R¶())•]H)£æ5_èJ«¤­,ýìvƒæ-TéêôG:룄6"ªqÔvâ¢ÂÖï¿&>Ò©ö8h{®µš,ýiDdºT:d ¯WÉg÷L#º˜úBæV­U¨÷3­n«OxVqØ´À>#ßæ=8LdÕúmQÞBÙ“~4GÈÛ]šèðÄvÅ´ ;wÁ3%Ëô[33R­ö$•Üi˯¯-ˆ”XBþ*ýü€B¾åûš«wm×ö›ë´A›k6ÆZš.3_s$`Éк¼0{·p# ÷]VðѰ _èⲤµ.ŠW“†Ã©faزƫõ¼{™¥¿^Êå¤ò;gU¡¸˜yÐXu7ԽⲈ,EJVGaa}Ò\TȈM`œ 3À4•£¼rõ|ÚåM1?õŒ ˜wí4!šÅ(œO åÌ,ÈS× ¿¯b9ý²å@8i7äc³+'ŸŸø—e.Rl9”"å#3oÏü3õÞEt‚눱› õ¥«iÓJY!¿Šcð>Úzz™tgï®?tÜÂo£?NÞè.ò÷ÞçÄ‘ª<ó€§q7îÞùz袺b\`Bæè_:`Ó¿šqpâ’õªíÌÎ)yÐÑ.6î‘wÑ3¦—2?AÔR36Ú+¯"d 
Gš¶,“R²çl¢}ܽ™FL;hB_(QÛ8\ÞäÄòˆ-•W›û’Qj¿Þ½;³sçµ|—‰æÍ•QŠ”€¦D‡vmn<Ë/——%>bEEE„´ÐìrâNV;EFö.^ Y»ÉûÁä‹'wÝS$’r\¼;ÓŠè²Ä‹>:.ˆ[´lÆÞ žw^]ÞËÔ®{.ñÒ%ìY›•x-Дßçäã\¿¶†=Ÿž§¯RÙZj©Úº~DÛÖ!NáÊÏcl6çan€a##RVzÌ>¤àbMù fvw½ÚºîŒ¹Ç"ê ×mÅ.épêø„SIÌlŸrƃ¼7‹¹š3—Öáµ–|õâx{!LËúYJºŠû¼hÿŽ»#ƒþ¥¨,c'%7ÓE­€òÐäxñø¾†^ycë´¹s«ú‘ïh¹>@0;3oÒãiÓøž3ÜN^¹‡¿Ñeã2Žç.?d–Ny´ŒËAìMwM/?;¾×¡«Pó´]zÉHGm¦3ª¾U¯`«^|)˜Œr­ƒË›çÍgþd.¨~iáëËÌ1¦7j@i«£¤ç{öU%åkxr²dôÃB Ê@¤J«ˆC!»&;¡>Œ7®þÞ èHÈëÆÍ·?%ÏuÆ#Ê@Ó¤¤à]ã&àúÙ‡ivõ÷ob2!*j³™YcœÕïÞ%@ùhªØO|êÒµ¶:]ÍîlÛîcƒ<N5bŽš<­UKJŠEEÅò¤aûwxz|¢ŠXZÙûú®rskÙ ÎÊR=ªJ €òð=`fb´a‹ï ‰Ž vÆø '?Yöø8;-º~}§‘§¡’Ü,âBê„ñcPZ”€ï„EóÜÖ­ß0ÔιΕ÷ìå`‹Ï}¶Å$ed 33ï()5„©º~CÚâEËPN”€ï Å‹ÖmØ8ÔÖ髞…›ñ¬U ùVJŠŸ•ŽŽYttš¤Dns™¯;ºÎË+}ùòå(!ÊÀ÷(~‹úlØ8䫉_NÊ=UÅ:íÕ¿T„¦¦®];®®žÖ²åײü||^Bö”€ï÷E ×ú¬jçôÅg5»vØú§‘JŠò_6ZcãÑÉI7ÿ÷¿¨!C¾ð8‡‚w2ì{êîŽJNåà{ÇÓ}ñé³çß‹sÔ;}™•XÅawÑÑ~¥khöPo§¿~ÚE •¿”`Ÿ;ËRk«Ûq†“&¤P>~TØRR‹™EÕÃ/DÆ^édØ«m経yÏyöøî8Íe&ŽÓÍÕ¥QÒÜ«÷˜^½É«¼ôà?EE‰©isMMVMž Þqbþá^»–n9ÔФ×0“^¸çÊàaiÞþÑ%%%±·Rž¥dge½ãæS(qqqyùÖmÚtë¤×ݨë@£®ßB‚eåTfÌXÈßÎÌH|ø(.5õeN·¨¨DDTDZZ¼¥²¼†f{ަÄÂw@ù5=T¢¢Æ†Ýè_J³’²ýýP>Ê@ù• (ê~Òó—™™oßæꃔ¤”’b ½öêbbbßlš32n¤¾x’“÷ª¨¨DLTT¦¹t«VZmÛ¡C €òj -+ïÌé3¯_åhu1hÝAOŒ£Ô‚þ y¸›Y˜ž|7ùþ­‚ü·ƒ‡tÓÕnl}.¹øwÀÕ«™=zp ØÊÊ¢ÊÊ´k3BÄy‡¹„Ü)xwûö¿Ü«WòÛ´‘>|¦¸7@ù$0øDþë×FƒGYZ×âÕLDUK‡þÞ¢±‡ÎœOO~4w¶£¨hC?†i/θ2yr‹ÅT©Å§¤T3cciþòî\nÐÞß³»uoob27@ùøAÙóçެ|×OZ®]§G_úy'éÆù“ü @æчŽFΙ­äîÑêcòÙÍg)ò*$d½šš¦‰‰ €òðqûñÓèK}GLúÌxTÚ µs>v.º„ûjܨá_1ŵqóªYŽJs•?3&kk9Br6n\é2wê?”€‚ß÷èÜkÀçË^š]{н׭[âñU&ÃÌιzú­¾`œ ¶ŠßUXØÖÄë°(ß5k}Ö[N™û5b¶šêê·c§“£ý—mù{üøa½´›¢ðŬ¯Ï..ÎýŸÉ¡Y“ÅØŸ“˜Ô/êãMM­ŽÉ©ï<5ú—dÿ8EþîÊÂQ_CùÔ5$P„”€&Æ™Y-kW>V*-{m?Ñ`n|úV¾›‹ ;ú)³q=k¤Â˜_ú*ì&äï~Q,3ß;«ÆkÑŽkTØw]#þòl§ßª/œf|úÝZNõ*'«>iVT¬û‰¶r´ ÿËöqþÂã$Ò®E©™üÉlé÷¥âl¹ñ8.”AÌÑ8FÄúRɶôÿ»]·ÚoM 7\wýâ;³mígQRz" |41¸oëèÞrnñ¢½Jgâ,2OéÙ*ūՌVú%þê/'\;©LŽO½¨¯¶<>ýF³IAÑÚ¿\O´[5þ:l0ܳ·~+öð WWMîºÜX_Å!>= ¦sÖ«ÙŒÍiV§Ÿ·- 6÷ g¦¨²n«¯hÙ»Ë Ð“¢ YŽý–÷=iÅ%1gy.,–cB‰­räqZö(ÊŸ|k¾ýT½Z_Ò4}P>>ŠúP»÷À½\@(òs[âì{y[ï­wÈž¿Ð‡Fù=åGHaT3¨ÿZZ›ÞÐ[7ÖѶ•É£7;–\9).Ó™„Ô¨|,«>i®Ÿ¯’yÑþóYŽ»Æð'Ë‹¢m×N´ÈÑÛg#†*[\'VlBºó]hròYçVœ%ÄhË– „0¶â©+…z¦µÕg6kåP>šµ½Ùß0êE&¶*m½ó± nÜ:ã´!dåDÖ¢•“ö$ò]4:u o‹é°ýϱµIZ³fõIsqa}õ&÷êùÎÎqÛ» ¾ƒ8ÿ?1iúC+4!EÂþ è¦Ju˜1ò¡a.*mêÐØÂ I)"å 
I¡¨¨XËÑY?­´Ø·a²W_…}îYÉpB6øžûkùàÌs³O‰‰Où·Â¡_ 0êÅL o¥Bx"UÔÆœéó&zÍΤ>k<´L½Æ*dç”Ôóê䌇X‘cÎwÌ– “ëM[œ§ùîkÇŸ!c¦“R=,ÃzJ%ö÷­¢~¢·íô·¬¼¡W{ü™¢mÛ¢(MŠÎÔ¿®áØû„{„ì+•=šíjÎÆSãÿ»§ß²“þï<—¸×Dò?ZãhQœE[V…Œ %¦>™Bœù¡þ>å=À@`Í=žXKbZªÖk¹†gÏêh,*Ìy[ 0×B©Í,Öü·%D´ÃrSÂb9òÝ©ã¼èã|3OŠí™öšöŽe~ëzs½BÝf¯Çñ^„| Ü)µ²]ÇR7ar=ÝâÓÝê“ÝŽ:õñ¦¤$^»S¯U±B—Èë±Â°"Ê…°Eh:š*­7åùavK=ׇûrMÍPˆ”€¦Fò½[Õ+_ÃRü6§gÝúø4Ð×&$û[ȺÂŒjP>š ¯23¾…d$ܸܳýØúøÔ7°ÎÌÜ¡¤$ÖèiîkÚå@ùhzôdñ-$#ë¿´ú{>}æÕôiŠ›àÇ›ëë[¢ü(Mãî]CÎGiuéÙ˜‰ >Œµ[ï2Í9žoÇŽ=vwGñP>š&‰ÿÆ6®òÝ}ØjªK£œ=!*üçqc>6Ô\'·Œÿ‚”[6J?—fmÛµF±P>š6þÛV»KŸ—E}P×Ðø„€’R2û÷¿vœ¥ÐðyµeËËyóf¢Ì(M‘ÜÔ§ ¯|aû·{z|bGÇYžÁÁëmlt}öôtåiS­P`”€ï)Çz¯]g5͵ÁÎøàòùO–=>ý̬ÿûïlˆ«ó½~3cäTu(ß K<=vìÞÛgø„8WAæóž†úŸ‰’²Vh¨øàÁÄÄX æ S-üå@ùø®˜a÷óŸGOšÿªg)y›ý¡«ÕNûó£6Ì~ß¾S¦H³X;jš½¼Ò—/_Ž ||oHŠ‹ÛµçÀ!K›¯gí}(ÌïÝCÿKE8uêƒ}ÇŽ•úz–ŸÏKÈ€òðÝ"&&fo7iãæ-Cl¾xä®Dô4èþE¬=a&Mr»xñ@›Ö9´%¿lÌ…2»÷¤¸»/CÁP>¾gDDDÜ-Üþû®îý-¥å¾ÌÈõászrÖÉÀ“óßfï °wøbC.\mÙJÉÉiŠ€òðCà<Ë>;ïÕî];Ožý™­h÷þù_›Öm¾žìñáH+Ø;xž>µ«e«#£ÏšÛ,=­ÅÑà‡..¿ (? r²´ñwïqâ©c!ýÆN“bKTðf,róü EE¥IÖ£,Í#~²§ÿï‘ù0xðGëß­ÛìÈKOæLuqGP>~P:uÐêä¾ø]AÁ¿Žéö4m¡Ò¦ÿï¹oîߌI{–2f¬ÍôÉ%Í66L#åýQ§N^êÖ½…i_G¤&Ï%zýZ³‹?ëÛW×Ôtl÷n¸çÊ DJRÒ~š-ûÁ“”ïÞMñìu^‹Çû’©æ2-UZëèhuïj¨9æ[H³®Žýc´íýû„{‘=NKËyõª¨¸˜i&Ó\L¹¥´¦¦z—ÎýŒMdŒMp“”P:íÕé_J0KDD¯Ó@ú‡{ |”€òP>Ê@ù(å |”€òP>P>Ê@ù(å |”€òP>Ê@ù(åå |”€òP>Ê@ù(å |”€òP>P>Ê@ù(å |”€òP>Ê@ù(åå |”€òP>Ê@ù(å |”€òP>P>Ê@ù(å |”€òP>Ê@ù(åå |”€òP>Ê@ù(å |”€ò€ò! 
üXÊGQr¾[«Y°úÀꫬ>°úÀêX}@ñ֬å’h!‡NërÜåjôÏÝműrÐóʸ»Léud?ÙþQU|<(±ï(RmD'[H­ä¨?ÿŸ¸M½›B¾qÃÖzœL)”’’úØ€ÜnrÓk˜tæ®5k!tûEä^2—ûæs·$yË‚µXìÚ2—’mÕªS·Æ= Õ•Ùx’°ú€šPöMB’JÝ¥¥¨&’o…÷DZØ~¶+Ý´Êô’¬¸+Þ$r·äYÄÖ]aFkSäµùf x¤°ú€ªfA¯?¯¯”7ZVæp`Öì‰c.YUµÕ ®Ø[îvXv.Ä´ŠõB[qŒÉg¨Ib“HÂòYcC*ÙIKÆ/æ©Ó~“„¬Ä¦ƒtoçm›G±Ä%„Ü$ȵÎAqL‘ÅÛ=Ú’ÂrVaá--9»dŒÇvs¥¢BÏr/ʸv"0(*EÈGâ‚~ŠœÂã¶En`õ•‘ëùëÕµÑ&žçK"‡Ùÿ‘bz¥ns'\Ç 7¿è/øÛkP5M+´§©©™+hÀ;æh6ú¦° yÍw–Ÿàs=‘hh&Å55“FÌxœ³q×Y'ƒâΗïw²÷tšY‰Ç½ð›‡…Ó¶šbŸ¼:Üÿ—¡U»-¦ÅØ›:ÖÒf¿ñ¢ï‚õíïXk'Õ#HØEÅ#9m­ú·—ÝË\:UlÇsšçHÈ£;Ú*sŒß±îÂê¡Â]X3oý5{Ƥ¸š"ÖrÙºãÜ>üþ¶ÜÛ8Ý—ÓXðwÒÆþå~_Ÿ7”\!&‹=Eç§ õÕMrciù•ïÎK¥6«ò¶ŠÓ#M˜êWÁL-ÇrᑃÆV('‘†RýËÏ¥ì“qOßFqpY'jý_bâV÷Á» X}ÀG`ìqÀ;Lh€ßÉžg¬ü†·*ówcU…axz^g„?ˆ+ÒqÎîµ/\ÇûÞáíÅûù÷üs³¦È˃“æ•ÙE†‡Vž4–ô#euføe«Í•l+«Ž/ÃÂʾò,µ<°”lº’7ßDVàô>ÖF´GHÅ`ffÒ¬7/"ãË2p×»’ŧÓ}„î]upƒì:Ú½r•{Ä´©5B¶îÒ‹CÕçóßK•®©jæh%Æ•×2$nué»Õ…èó ÙÝÜ­Û•ærò¦×Vö7.µ¡£7//½AL5óÄŒ}w'Ùw–,=ãB&}«òLk©Ê7ÎÚÞ½uá?~1üÝðãä7Ë ×Ãöø(ªÐešd¸++â-¬>àsQöpêJ[kÇÿ·Lµæ¸Þ¾“·iëß‚ØÎÏöì°t|i³ö¯ÀUŽL—× ›——Uˆl¿›×㨥 ß<)hî+¾$T¶‰÷7%ÆŒ?T1ÉÇÖ¸+,»¿$/Vøèz÷ý ‡»+Uwqõh¿ÀêêÒ€Õ‹/Z¬´i$ÿñÓZ§OÅ@³òV¢1ý”ߘz}}jOÞ¸9àÒOáÐ_Ü×)-obò¾â£ýCfrôºÁfžç?6”˜®#Åíí>´ëúJëc$Gm¥^å.]<Æ®­neˆjFO†»tw×hêæ„–ë¶m+œ-ËMÞ‚¤”·|hjÊW &g`eM– 5ĽÌÎ'„-6î— ž“¶wúº½‘˽L¢—=ƒvNÓ“e·ò4ðú‰ßáÓá—û“?÷]/lÎîÇ4è=Œ®¼šIßúÚ.å^J.ýVs ßð¾0ù€Õ|!Ì}Ž.>ßz}¼`w‰«Ó­Î Ÿw‘×úW?äç Ù®bÍÿ€_ïYn::Ÿò4‘ù³÷Úº^fžWË÷[Ϻzg§q™½òò –ÊÏ5Žr”êâIùð·‹sݽvåzÌŘˆð Aî¬ë(~¥¦uùô'm9´ÉdµJ¯²6¨õ#TÎ/ø;®æQšßÞ±ï< $ëë[RS½Â¨8’žJÛs•gªÔi–¶ øÝHÛNÜäºv´ß°Wïꉲ!—›ÖMeºÎ*ŒXåÞy˜Ï]ÆéÔ–Ý‹õºi‡#?ï;š›r^Ø,ºâ*†· °ú_„¼{Ç\l­ƒâêò§>l×þ3ÍÚ5«*I>ìöFB¢°@¢÷¸‰úÊ_·´D¯è-\µOZze¼\¦ÔØy}èø¿oé< }¦Œîáàë æsþT„¢ ƒÄø MP¡µ?i½úGEÖjLðŽ‘N'+ºŽ Ù2â‡ÌÛÜ;ÉÜ 6Ø8Ûr“Üq¬bò þψ>qößû‰ÿÞOxp9DqÖ­…Ý´õ-èŸíxßÁåÓä¼Hýï‘«ºÈy?¿ÌRT޾Y‹²‰ñ›°ÎzeÜmü÷|M¼Í¯²ô`mÕ çN_ýGÙþ®IÍMžŒ×òò>Ö¥ÂPU¢¿`zÙÄ-£)m­V-[)+)*ªt3 ê¬>Pñ¿M2pú«¾¾SBíû…Úb¿÷qÀÔößö•q2PZ¸¬«•¾ò×]ˆV\²b5©‚Dã‹Ð³½¦fÓßC½ÖÙŒîŽõ®øZ(Œ8î¤Vqi> Ž:k§ñÑR¥?gÇò]'½n•»,>¿SÿUSÊýHö Í:eXV‡BÛŸ;ìšH ,íz´}áLp×Âùÿ ‹Êµ…ɬ>P—]ä¨1^xVqšÞ^wÏ/Ó«<‹YnغéæŒT…Óš÷0ìÒ} ñÂýÎÓË¿i®DŸ ë¡HÞQ }†™V¨œ~—zíï˜Ë×ã9@H ÍÎúf¦Í:VILåÛÝs¸¾í'÷ÚÉàcaQ‰ÙïÅVÐê4pðOcx×X3¹ Ñ—/ÅDþ{ï%?Û[¨hôhQéZ*çAÜ,>Õ¡ÛãéœM+.m^) 8¦sk© “lo¿}“Eï©+(PèÑZè˜Ú/'ÎɼI$%™xZ˜Îµª·m;aÓ –? 
jÿŽM:ߊ Ó*ìgW².ÜÎ%w\ªaUaER“¯ßþ;ávŸØ±º–u¿LŠ \CÈ`{Ÿuw(ÏX÷IÖëÞ BEÇEWIA7»ã¶Žê\>sIÁáÃo‹„v¬ÖÝI6œ!dEZȳ¾‰YCŠŠ^Vt((úâö$ùõ§ñ«Ëä#.ºòb‡öÛ.îpPõ o^Ö”WÆÈ=+´7Zýº\ÓϦ‚½=áèÔŽ"•êPb)*-fÛS—²ØâÂ+vH¶ÿÊ>;“ ‚^ËÀêûq‰÷VÉäÛtùÝü^Õö‘·ò8N-ºc'Ú5ÁÒÎfô˜¡f½;wq£ËÙk¹n¹l3oƒÐ =Ëa%Ó7†1FnùêϪ~ôÅ.ŸR…ø-÷ÄlájÓâÇéhOªke*ÃCIWÇ3ê¹Ç Ÿ_EÔÃWÙ†¯b6L‰‰Z͉ÜÝvÝ샞WÝúU‚íWòœL>{nî‚K-†—Ëæèu‡~ïv¢²·Kæ2ÿ{_ÉôR²´ð¥jVkê<‰Æô£w÷ØÐÆOÞã#ÃF/«rþÓz!¯n˜ãuîuÃáÄåÃVöç›ð ÿ{<›ž‹b“všÊ‘Y%éKg»†>BŠAXlñ5öòÇ¿8³P} ÄÆÃІši|_8òý4›¸=«¶kÜ[ƒõ*°±ä[”îS¬ûA0/¹ZÝFpH]Íþ¬áÂãU'fa„¢¹¬ªj1‹UVU…c±XJ•”â)¹akcׇóM ’œšùq¼¼E’­‰¤Ù+½Ö_OV©§¤¤„$¯y’j?›_EÌ>’:¯wÝ/ú¿;Ú Cü#VM ûܶIÿ“oCZ£'Ÿêc€o˜üMqʦLtþr%ÿœúã1IO?ZKr:tU5Z'*õPHðµ¹£ôähX*)³÷ ³÷4½"§DA*åõ7¢I+ÉIÒf·¦8^®²g±Ê«ª̲UU¾¾­²ÇÍ×I£ê'ÏÎN~Â@Ær[Þ¤_Ì<ª0z8/TÅ gsí¯8ÉkÚ”šÄ@,8ç²ÎØZ7’|ÓÏ¢''ó¢,¦íòp`ß óÎ|ÒP–” K)H|ÄÓŠ¿<³P} Ÿß5úy;ò™—™ñ‹é»¯êÜŒ÷MúæíWÂío-”œ·å˜Náêæ³  «œvÝ踤—e¦½åÿ}l¾Â±ù­øÏÌ+(GÔ;pÂtÿµ]”g8Œðon5*š±ýNSþ5šc<§›6íÚ¥h³nƒûê¹&ß6¶@ÈŽÚïâ´,¼­î³HI‹GL7ý;¼ñvr#gY#{Û9·,/§‘Ù·.Ù44÷‹™îMÜ wf úº*ôaÓ ‘‡ + ÅmñOÝ`ÜúqI{W,éo7oŒ–\‹– “š™Âƒ@jì ?Ç^¿Å¹Ò˜LZÈBh„â½Öâz!bòÈÄvò0 )QQ¡t?;ÿÔïH;AL¤© ³·—oÉwÅÇȧ÷ÒB¢ì[€œÑ\í^¡ˆÒU”ÛEËmõcðĄ̂$__¨Ç™ ¦:Ïoú×ó ³€(9^Ï E  )×-øÃÞу÷‡/Åjü¬Ô³zSw4¹ÊÕ+i^fÍFÀÓïøBóÅu]鎡Wó­÷Ï`½œC7þÍõ¿]ÞtßÙ*a…¯¬ËB¼•wÍÉCb~ë'_·Õ«Ÿ>‡¢fn6È o}={Ðën ‚´ª.&¤êŽÜ=p8v¡«oòwÑ=¤¡çdjð±¤eÆRëCòÚI·Å6ŽÆÝˆdÆê^Šîx*òŒØÈ†á‚‡§àž¹§ßôæ¬Â]„½,uV7ÏUœÇ Øe2 ø³`~H¼t%"6åAFjJ ßsHYÏL]Iwа±S& ×–CaÁþ ú:=œi9å^›¡8ö<ŸsV|ˆg|HK'm½šã1Fá[/5Ü7!Ûx¥Ò¸z©–l§/i×lø1<8â„ÐÇ!hu€eýz¸rlÙàcË|Z­Û«íX/QRw ÅaÁK{¬S6ÐA¾ar——Há–°wúúf¬–”›\ýtõ Ímu¢(õЩC š‰‘OaÂêïÒ>F2Š©x`KX§Ì“çêKÎmÑ·Jð³ˆm†¡5šr5s…yœ½u1¼üþ·+”oU¥·û ¥ùGZÚüOÙiIý<1ì©ö:Ñ<~$]¢#bз~6=]ëej‚6Õ§Ÿ²îÏs½yë0þ‚̾çßj=•m­tÏJ‰Å¶ˆóþ9¿ç¦”ÕmÜsqg-Ý„7õ.v×øò/µ÷ü¤`Á°]ܰ`@õ탠0æwIVqjRttTô{¯²ß×-÷J•ÑÒÐ101·n¡%ßÌX<ª¤ÙÎ-;î‘ÊJ¹¡Ê-]Hñ¯í(º»JÒÍ+‘Q÷S3 PYÜ;ì-ÝA6cǵФ×çpŸ4Ô‡™zéxð…‹qéå2²hÁ;œ–©Õ\»…õ¹M^Kß;µ+àì[&¡PzÊ*ë™MÆ„aèÝ”tÆï߀Èìò2a ª¤ªö@£‘y²¯á›‚úbWÃ"nÞ¹Ÿ‘U.£B-Èz/£¤5ÐbÌø cuåÉí7£òп·nµ"ñº²VGñµ ´Û¾u‚÷`e­¼ µéé½LfÕŸ^Y‰˜¨Ô©+’>wª¢ŒØË‘ÿaF{ö:«¬”3$–QVÐÖ79ÊÚLOñ«ñ”òRQ·œDÿÏÅæ`ž1$Ôô Œþê'†ÐØ«*±òS‚ƒ‚/ÞŽ+–EÞ 2Z£§Ï[8c˜$',Çy.§wùMf/ L賓¡?µŸ•ÙVvó¯Í^]zhã´Ê»„¡.ÂÌO 
¿q=þAö{eé²Ì÷ÂÊZ‡Œ`›]é«VÅŽÎ,hžöª¤fšU˜²e²‘*…sS°y±×Ýælâ[-:Xú·Ê/»³LV[¡Q©·Ÿ,¶Kì¨>à»ô]×d¶¹|ÓIŠæ.žæßtë¹ØÖî(ºãí°­E19xú®éÍžh0u͹©kZ¦f8Û\~Ôx’Ó= ZL‚霦­ž.g2×äÕðµÌbÛ7ÆŠ¢hèáw¾¥Uk rz 7`[ ‡ÉJÓyùÞr`Ø?À°`ÿ.a@õ …‰ÉìqÎÌÔˆcûö…?lÖWJD¶­˜ÇÞ×uº’´g,¡…ð*q|+U’Œxƒ¨±ªpþóç™oß½ ­úòA¾ _XŸÍZŒ ZŽf£`‘âóA§f>ñ^½œ,!!äÿÑw>±+½Ì$À°‚aÁþ]Âþ€ê:”fVÈdç¼H‹:äíuœ|QjÀ_Ä»Šï:·gq“8Ÿ‘f|-C峜²Æ.B¶=jíŒwùÅ"†ýƒ öïöTðÛa1‹ŠÞde¾z‘ö(ýªÇ¦)껺"wR¥ ǘþ¥pk8”[À@6+ÇI>Ff‰ ¿{.JLÛgPZÁ ÙYYßm’†¢p#‡ª]õñ`Ø?ß°`@õÀ7PcC¶çsضynláASɯ_4…G64r ÓZšoþ]^y]ãCqZ6“ÿîÔ9 5c$Í᫚1«åÈò˾üè‰#<ãZZV tƒ6š¤ñ?o»O´eeezKJˆHê Ÿ¨+/†íÚ†ûw ûªèDÌÃJ"mÄFòՃͤ‚Ûq¦MâýÕ’u?ª*uJ;0SáÀL}=ýäñ]1ÖÒü‡Rwãv"zæº)1Í-žýÄÝÐôÉd#Í1Î+Må[ ¶8ÒYwydXŽozÝqö$¢?Ç^Ÿùüæù˜l®+7i¦›îÅþÊš1 vw¶? ú€Î…èˆ0EJÒüÜÜ\ƒ®·éÝt–ÿκRÞD¦Â2ÄÎñ»%§$#ˆt‰fdd¶úZ%ë-üÇêjÆæÁi¶Ú¯lq «igŇl‹G‘v¦ò­Ëm9±öIC}òö:Ls ª_›-94(™ÿ«•¡ÁÿΖÃþ1†ûw ûªètˆé¸F¸~÷ùtdz¨c Ç$­6£èæ–ÏÕ AÑo¶yÇÀxÇ@0l73,ØP}¨>TªÕ€êÕ&Õ€ê@õÀPÅb•”2KË+ªkYÕµµ‚8/H"EE)" اC¨aUTV2+‹kªY5µ,‚Äñ*…$I¦ÒÁ> вªLffðjVfpÁ …ÈB4*E†H#ªø“*ÀhfþûgÏ^¾ÍÍùøþ-Vñ¥KɈÐ%Ä$¤È"¢T‘ŒI;"B òD çÿR)úŒÔ0Ê*JK°­¬äSÉÇ¢ÒOJ>bgÉöTPUUÑÑP%‰`aŽ•kŠ?=~‘ù0+³èõëJ¼ "/G””"ÈH Òhx:O¥â©„ڜ֨­­-aÔ25>VU¾¯~—ÏPT õQQSR2&‘©`ோuYé³/ïgf½ÏÉ­@P¤gO¢¤$AFF.ŽÇlN¥àÅ„±Îþò¥–Q\ó‰Qó¾ º¨ˆ•—WUYYÛ³¥Š¼ªŠ¡„¤2P}@§¦ŠUð0#=ùþçâòª}å•Ô{È÷î©5Û¾5(¼^XLÛ${*}}ôq^i^öó¼WO?,RÖÐnf")Aë6ªãË‹Wï&¾ÊÏÿ¢£Mé«ERR"H þf ÐűMPIY諃9•Y))•˜2ÑÒêab<–.Þ»{êê¼¼ÿâf¾ªPS#ii“ÔTIzú8=}LÙ‰}k`BBÒ2ØFÐÔhr¤ E#³³YO2*ÓÒ™22ä!F†jêfðTTðû©®A£î=J¼CS` Ý[ͤ—ÚϾ(‘LQê«‹mÜŸÙeQw#³ÓS¥dåÇX–‘ìñç)½´´³ÿÝÊÆ›kjÔÔÅö5Id== ¶q~ùËLJ*“‘¦ZYMèñg·J¡5¯s#"n<úR…Sô§L›Š‰êŸûe‡PRÂ6›1\1ùì鳇wâËÊÊj-‡h대§  ú€_ʧRæ•ðëùÙ/5™*öí?JµßoŒ Qˆ¬9ÐÛ°ý,FÉ¥°cŸ?ZŽ¥×O«K™õ¥ 2êdzZ©™¹°E§ŸôoŒŒ€™™0¶aû¥e7Ξýœûºjôí~#ÿ ±Wy'!8>î}¿”aÄ$ot0ym»/næ;éqqŸû÷“1b¶  Õ?‹*VuDÌÝŒw´,˜[a[g‹!EDÌpô$¶dªªÜw8´ŠY:eòd9©.¥=XÉÉÁ7£òFŒµ±¡b[g‹ ˆ0~ÊöL0,VöÙ3¾ï‹j¦Nž,%­ÖU‹5Šfg_>ñ±Ž6eäHacc™ÎAcc*¶a0)éÑ·ËF4`€¨>è@>””8q‚*J`>ZA{`ç0H2²žŒíyyy 6±°nÞ #YÍÊÙ²5ØÐ€:o®DW4²”4aÕJ™ÒÏï1#›š*ÖÙ‹ ZÃÝ'€CfÌ=ªë-B$ Œ-Œ¢µÿnÿ·‚‰®vs!‘Eáa úà›a”W ¡ËÈÙÌ_ÞÕÓ",&>ÎÁYVâ³m‡éСƃô:‹Þ«* =~PBBpýzÙ®ndQ<–Šòò²]»6iŒï”‚¯úzdÀë×å˜ÀÆ´S—68'à¶JÓ~çÎí¯ª"LŸ¶/BÕí¬£è™°›EïÞ˜MœÕ,ÿ˜tQ„ÅFÛ.ùXðÆÛwÛÂ…ö¿}½‡ø;ûž>)^°@\@àÏ12•Š_¾\ê]AÞ¶7þ=oNI¥Î·Üœ«§N§.Z$ÙÛ÷ZÑ~S¦ÐX,tß~_Cƒþ'€ê€6ÈÿXr(h€…•:gVÌ? 
™^£mÏ\8-''7Áfôo‰Ã—Šì}ûY޵³ëñGYV†à¶Jæäɳ¢"â6cþþ¡•ÇOøÓÅñ«WËü‘'pNN’Éɯ¼ì—Ã`?Õ-›œž5bæ¢?»«'`‹þ¦Ñ{þúä½¹qüT’ó2)Y ;œBps“ÚxÙpÐk]}XÝ@õ'/‡©¬4´šÔ­R-*.5|Ú‚½{÷,^â(.öÓgAŒù_aQéüùÝÊÈtš ‹‹”ŸßaûytñÞ¿òÒ¯sÂÎ_LY¹BêO9Ù&8œÀâE=BCÒX5¬Áƒÿ‚‡€ê6ç#þ++ý<Èr\7L»EØbòÜýÿûŸ³³3•òGC%%|ýöóìYâmø«þœ•Vðú}%ö‹,Dî©ÕËȤ–\{{H2’®Ò ¯!ˆrÃ}¸ÚI`$] ÞÀBŽe¸›~KÈBBNN’G‰ˆHýšœ-,ˆ9u6Ùe¹´€®ì9¶´ƒŸ„Èýú€G€êº;Ñ÷åegšMœÓm-@¡Œž¸/(hårçŸt‰ÌÌ+’ [<¬œ$ ¥ÃYì]ûu–ÖæŠÂÄêìÇ/B§ùÛqëñ![Cæ´¿ôÒqÇÅ‘¾ãLaaüüyA+–{âðøŸ§_*Þ:½t©€ë¶{ÁÚŽ‰’’½då4àA ú€îËDzòøÈ°Qsws;HÈöêÕGóÌ¥kSÇwüÄ÷ÕUŸÎœIuZÚFWzÔc¶ä3™YgÞ0ëÈpí….ócN¿ ÔkxÝçÇÆÚÍ;žÝpº}ÀŠG5¾éJˆiWÃ×ι[ï 8,öÞtSI^]Qbì¼™BP¶²9<ΠÎ+çñŒa{Ï×{P¼ÞVèGì -C02¿xåÀÄ ‹~v†ž9wÜÚF “šÝ¹Tãp¶³%BŽý5JP}@'åÒå«} ÌD˜B} iäñý¹oòzÉulÈ7‚MDDÚR ºãŒÌìÄÆŸ âN H_ûõý¬LT÷—’“$Ê™ q1«÷Xâ=ÀÍ󂘎Ï~a¥È®”œ³ÁÎiGÓàÖ‚ºò:îNgi‘1W}iõŽ Ûn™Í{h%W‚â°Äâé4¶Ìc¤& × 6” ›t`Ó¹…R9gŽ*M»‹¹oÝéaJåˆÀdC¥ 4…±1áÿû››¢  ÷ó²2=í4‚ ýûQ TKI jkÓÿ»}ÒÒr6X@õÝ‘¹ù ô‡Spégdùßvó:²³kIÉ“'OJÛµLœ¤v ˆ •Qig¯¥ß=äÅXù@¢ÓB*#î[°!b¯Z+ rÛå¨ Cÿ]ÊóÇàý›0‰#ùØõ‹‰}‘mYÈ£™%Ö”g±œëiRŽD%p|‰‚ZŠHJr>ø1c¡ÑÉ=lÉg¶u-WòaõãÇPÙ#yä§¼|X\S×üWM¤÷2Õk3•#FHDýwkÁß?Mõ¡è­è—Ó¦Ò¡<ó nIðñÉ27«€EüTÐIH¼«ÒOìPŒ²æ£„èwï‹d¥%;*Ì»‰ÿ}Ó²$-ËAضa×üz§ÔƒôìØº"铉ܗ~U[áT±½´ZÏ×Ö’Ö«ýRÅù¦ô¿eÓÙòNŒX?t¯²º± â*X:Ê7•æ˲*dÀP}½m¶¯©ªâ"#«òó2ääÊ‚r/^\ÅKK MöåÍÒ‘ùˆüøDL·†16iëR4}nHÔƒZ_íz÷¤—üG]ì>Š‚PSBâ³c7ÞJýg.¯–ñÉkñ~u:Õwñ÷Ém-ñôôû?Iõ¥§¿ÒÑiÚc™™“ymC]9¯^»Çè£>\ïG—m,z™y'ñÍÓWÅŒ/˜–&ÐzÓûk+ 1“ý½<~B×®e›™Ác@õññ"t©ºmw_w´Ã”‘ ˜êcäÆÎãÉu4]{+vÓÐNoVèd¢íùÎP±¦¯¼W¾¸¼ŠJ£uo+qßÁÂM~Vu+¼ $ª.Òu½“²cÈO½ãþŽä3+Œ¶–Dº!´AÊ‹WU©4ÊŸw§w=žf½& ‹…Zl zuy‹Ëæ(ìU0ÆÎˆ'µ*©¡'žðŽ/¼T°[¡}³÷‹ ðLxæRM ˆp²žÀé3Øz)à÷ÓäôŸJù^/?è¨Ð>~x(€c/X×¶WÉþ™¬ ÞÓý=ÏŸ”ÂlƃµMú¹qœ›=CÐMÊfÿxmóÃmã÷Ñ7,©µʨâ6ÐU6zÀò; b!xØú8íÞƒÛÝø*V3­MÙkK˜z-?Íð™¶ûžáÞ×~ðÑ­¨(x#²ð'å`n.ÓbXS™vâÊ„5ϸûö§·NíÑäè´5ÏÌÖ¹ÿ€ê«>c¿zÚ¶°T¶ê:½ï`9â—b®%Ø8s< {Κ¦Æ.Õ¡“m/|Ûº¬—ÑDµ“znɾŠß¹ž=Ñ¿TV”“ÈTxô¨>6áî:6¾éØN@r…£ÿ§2ßýRpBEvV^ôÑKÏ Ç«³+ ù÷‚ì Âùƒèo{ñô~îQæ£ÝÔΈ‘O”K™åÔÍõ^f> üë­ñØóYuNCÜŸG{s¦+ö3wCÜN‡“ƒ{…çÖùPq;yÕwº&g¿ØÛLÜ3YóÁˬþÉÎ;stÁûÑM¶p“-éìŽ+[ï–xŠ"HÞÁå ìüoðEWeYàIûA­'å ÎZ¯Ë2À¼÷/ÕÀ â¶¼Úfê¦?£6\¹°^‹]A)ÎŽ˜é¦{±k[¸PuÞÁ•#fâu:`¸–³Ì½ÖT§Ä-ôÀ»9vìQÒ^…ë0ÉPt/pÞŒEá|1Så~"ÄÛ@ª]‘gÜYÛqVúÊj¹×f ãË_…)ëç7[%/»êcoï{ 
Þ£òxŸˆ3«Õ¾¥cNëv¨/„‰[ðK†­J©÷ÔË6öA°©¯%#u§1n'‚hî*~bw¼‰Íåÿ6Ê;|+ºG^†ÎëÃwñ¼ÕJ=·å´§!·áBc#…Ü<¶8Ú”Ö¾›hˆWÈ_©¶î—0·›|-Ûn7h+ám[/ÜEÞfW¾’Ó•¬=cyÙút7±/{^ûÙ'_†NçÙ!ç쥩‡ë‹e[º×‡>x‚Ø$–_3 ðÊöº°8©SŽN¡»ãC—òìÉz³vÁòmù ¤Ýæ€]kFWµP†±SVMŸç›Ãʺ“缦àÚ¦=×mõ®l»àu òß½oG“ÚD×+ÆHm;ˆ_Öí;AFĬ_„=„qŒ#ävßE.¾YdXW8qŒcãåv$òKî4³Úˆ £C2/è4Sí¬º³×Öió•úߦ =¥ùæÛo|zù™Ù’ÞQˆÃñ›â—]½Ï¤Õ{³%jÓßußnpåÑÛ¸losò†P­¬m^!i&›÷,è×¢@—f|(ê(#¿Ÿ-)Õî'š ¬Ç9Ö|Ô‹© ±Zú¼b:E'4öÿµ£¸cÈ6ÇÖ>ÕMõß0տ͘|²rÕEE•?£Ta~`V âôæëEÖÖHdxд5AWíªB¶’1ùI±v†Ç=™uM.ž>Nµ…“*Þ^ãH¾­){=t®n=ÕÄ7¤0tçS!ÍžÂUãîEºÙuΑ,Kš=Ûân!VþóUÓƒýã>ò…(³þ´ý†©òRî=À•;3e›7[ç÷ŸYüМ֎YX}U$¾/|þS'Ѡ먾Ï1¾Éçvõ]cÉWÿš™‰Î¬ûQ¸AWÚ «‰É¹¤¿ÜÉ‘7óÕQSÕù4BL×ÄÇn6®ª*a»Þu·Þ•‡¢ì9Ñ^ŸTQ˜yÌaÀ1ýà4æ9m¬^[¼wò§ó>êhÅ«ë•Á¶iÖËN>AÃ49â‚¥â¤m3úž½y%ë»Úúds4ãÍhµàp¬b]mʉ[ÑíµRöp5g¨}nE<ÔÅÜÖaðn‡¹é¬£Z‚-×Ôuܳ{s?¯e[ =gƒÞÅ«a…¬%‘*罕°Þ¦Ô%½åØuÉÝb‚ïmj詼gÓäÚø6Z¯¾Ü.äúNèÍvy¨¯»ÈR>ÐÊÿIø2™*N¿žmsì°*iùRcNj ½u¥Ùë¹(±xŸÇjŒ‡G‡ëÎ7”ö™øä¼½fÛ‘ï0+5…'°—YÌŽ,ÇrÐPqRs~“ýÃÍêíînaãë®N<{1/q¼\{JuÛv¨/„†ÎîuB.¹Oð 1“¦¤³öy¤¢£}FêyÜä“åÅ_ۼ莬”É–cóUÍŒªíÔyñÅùLÆ$"ë~­í¾»tìB+m"X‡)±ò„Õ”oº‰ÖÛ&ÌÍcšÊµw,~› ×lËz«÷(ïš”àuÉ}ìxyvÔ‚Vìà~lÕŽ¦ïScï>óæäµÛ1Çoi¼,«ÂîÊ®7Ú˜²oöÎ7‚ ú²“Ž-3ADŸ„ÎÕÄJ–r±ˆJPZ…6÷¾/>8WÜn­Õk»Êï6S†ë:‘6ÜPHMšƒj¿3t¯>¸²Ý«¶yݶîJ‰vÜ€]€Ò²R¥-¥¹/ž?éÁªb æ§Ì„“[ްÝ'ž[÷ÝÐ^ PÎb}å&³]ºïqlÇ|Û­SNyY½ÕÊånK§³¸98kÄÌýÉ©þk^žÕƵ5–ˆpʦ©ˆÀó-Òºç0w‹M‘§V°ëÀ5/§ ðâ„É,*k%ŠBT‘ªÊŠ3rég˜1¿)D¢‡V0KÈ”î°[Zž'Lmq¾2i½˜¬Q~¦Î®Çe¦³¦5÷R+Ù0ÀÍ SY £ÒŸò¦Ãa¾¼cª2^-Þl{Œ—R3A“ç®S ݘí©çè‰ z“-fÿ¥eaØ[]‘F!HÍq­û„io–Ž áßp¤o,s¹)'pFÒº!û“Û%ßqœ; ú­ƒê&¯i¯&¯LöUõx8zëN½5ÏͶ®ñèÅd›³°6ýœ!"Tú¹Ê€êã>†yÝïhíèCøÄ®­b•­ ^mƒÒgÞõ[¯0Ù·åXÆcyž³Mì%gžöé=ÚÉqC쎜²ÕæÖ_éóW/t:¿®Iø¦›âýy-{Aaâ¹[k°`³‡&íkО>¬:‰UÅí!T|l=·M`Š+ÞÏïçK%‘Hë‡ i|åÎn-sÑfkêŒìðȰ“V¿Ïg|ªÌ\¹’ôô£µd}“‹yll½ã}.nMöŒËcÚÒÞ­ë–}N[8é½Ç«¡böà‰:Ô+OžAÖ4´B0îr[¬fª?8ì—€ã¾D‰ZJHJ6rá膽&­=‘ï+5•Ê'÷æ¦È£¾F¨01îÞFêàu_ynd7kžÝ’C¯½ߎzs{ì€ÔÂDïú‚j1i4⛀ O?au?ZÝ”_Ér~›KoNô»gèrÓ^cÁŽúe>Ým摈 úa¼Û9Ë{9÷ %<{ËMdU|ô»µ˜pÆË6­·:h½‘¥×]WŸ«ã÷ŒeÜßëñZiÁ±úg-—ìßttEè¼>/Žy±çq±o½¥Äw?x°¢R³#2Cg›")qHîËOìŸTaŽ˜Ì´×!ÛëÚ¸Mž0r˜éäƒèÂà–ît„6h ŠnÉI K8¾zù{ÆÇO8ä£8‚d#© I Ä‚ÖöuÛ¸+÷7µãìÔÖÔ àÛÑ õîàMoꟻ3o¼áî]<“è>yhï}•&bK>dô¡@§ºE¢Åšó+î³› [Aouìªqu9(mneˆq¼xޓɇö¯à5{àÕ.æG(ìNk;Jx|‹E$tÀt 
Õ5µ‚0áZ³®¶–ÕáÁVW×¶¶,{5‚\â‡ìÜi¸â–6áÖô€…Z2šÇ–|˜*{T?*BQ5¾U(ey=v㽌”šý:ÜËõB˜ùy×.&_ åjݨ¸ÚÏ¿hÈ~ ±ªžZœzÍ`"ŠNÌIL »›´Ú¹”Q̬@8O­$%!—¨ÒØ~9‰ª›\‡‘Ôæ,¬–MžB‚‚5µ5PðTR/UEöSÆsË góEÍj«¢ŒÄ·´þºòõ M¿„¹s8,k¨B—ÕWÈê‘Unè.ÔžáT*÷s`a_P$â7¼’y]I†¨¨÷Ó@J¿Ô9+……±[/e{7TeŒ9Ù‚½¾­Â”àCž34¥¨dõóõŒó)m\Àm•(z_ÖV¼ˆHûÍÐ-u yôK]j”ÂØÓ¯e‰ßùï±Ò×å¨Y…¶g5°:»~ê(;Tµ\¿ƒå—ÄSíÎkO^6ráôut»yã‡Gèµç&j2¡Fûi;á­Xo¸ûk¯!á^ÿ­7|¶Â söX5k¸‚²õ’ËÇæïøgºãæ9§D%xÝ™¶•!Bxýs(»Ö•Ÿ‘ø_dÄåË[,×d׊]ÛLk’‘![#“þ ]³d†4J£Q£W= ONCJÚyÝöÞ•­Ý€]‚±¼òsÛþÔ6ý{lE“YðKïû˜ýµÑÕÀ|ÿ³;uµK¢àwÉ$ÞIåYršÐŽ1Mhm Z[Û!’m‚PUŠÍ}›%;~Œ™Q„Õƒ¸ºß ^°ÓvŠ=½ÑEìëì&~õdæÖµÊÊX8D°ÅKPää§:bÒÐy¶"Ƕ¯whЩ„òóú_?-“¶z®Éa?µÖ-\ã4HšF¤Ñ‰Ñ+²ÂS Ò6ÒÊ,¬M¨ªª!… à¨>.Ê»î¿*;+ëæb*îàäë õø†|”$/:Ð?•½Ëî¹gl5 ùç<æ¼9šÓiŠKž÷TN{θÑ:äGF{ÇýãŸên\×7¾8Ћ]ãDFL×§5ˆ“³çR< y—.º½Ã5®™*o)ïK"}ÒÜžq'‘„›UÊÞãëPÏ1¶æëHcm=Gpt,‹™“õ¶Š(¬¤$‡½˜ÙÏÞ²]ÍcŸœ1å=§‹÷Ú¹¤4£9cv…>2]Êí‰0ïäFfüDm©ýê]Çú_sÿòŒ»÷ÏšKó£ë"–·ד=˜Âñ6ÐÿëTÑôÆÏAÖ…"ÈÍûÕ¾¶cÞ~3GŠ.ñð |[ä¿ÑJm@8̉»·Å?uC}æy-rkÎsc»ÝàÙÍZ»=…¤=vhO!$ ± TVÙÖÛ¡,<÷ô>N3èÊlç kº&Þ÷Ú¸h†?÷&úqë!$£mg„/t¥1çÃ÷ñÄßbp«Ö>Ñr½}üÒ¸«ÇY½&4Œ|ò"¹-tç@ϾΩ.­Âýs5ò™—™1B꣭ˆ„ç$s¦Í@–Ýüà¿àD¡ºž¡éª =›öýØêJ‘“$ƽ­JìÁ<¼ù)šs<&-²;cÆ7H`Ò–3;qS]Ï!žFbžˆ×Ë{¼÷Ê…epË»îæ;WöãÅ%œ$¿­ä¥¯ÉU´\Ζ[«d½¥IÄL×Fü·ÉâëÓÙàuBз¶FNá'fÄ.k3¶@iGä Ñßa¥61ÝuºÄpÚ®¯r¿0ðÊòÈ¿©Ý¶Æä»¶sjövØi®6q”ÖdžÔVœâäQˆ®jÆæ<4ß ;)…S ¯|ëRMŠ:»!ë{n¢o¸V+ oõ¬|n¶_x2ˆ³¼e·,_cæq3–óYÁóèïŽU³væwT7Ï$h­ç–a¸-MR9´ˆ]1ÿêN÷Þãï9À9îŸÁ¸ê=O9}yû´q+‘ÄåtÜr·¨Lr[×më®DÚcºÎºR¯è×ZÍ®wì?)nCd›ùjc²äðÖ¦³›EQÚ ßí»ÍV>Ûo­»¿á3"ïÓNôóE†CjîǺ2®ÑË9 {…_8AÄtßÝ£óŒæE­5Ô]Ûô*L®ŸVN¯£‘£°e]˜zõaö>A_InK‚1 ÞJJËu”‘{ö2ÌË‹AÑZNà—ä*÷åêtêÊû»—×tàœ¹En.*/÷³–à•ÊÍe)) 5.ÌŸ8ª©ñ[‰Ü' ݹ×ÊÕ‰£úʸý'Ï¡›6°§HÝÞxŠTdÎ÷W¥f/jíï—=:l¸õW3W׿<¸Þè;UšsQ)m$<7ËF’=›‹s”ßÖÝS=õÎÄznÆyÖ{|úò”iãÎ"ñ'è¸nQ~Κì^%)¼Y[Ç¢cÛœ…µÑ}Ä*+eIJ©@@õ5BkÊvÝÞÊbÐ ­|H3ÞŒ¢››|†c6kÛÂÊ¢¥)šËPtY«q“÷ÍF};Iš¬ÌDW"ßaÊÂsèÂf<¢.õõƒûÜy;yó16ûåqCj#+}^E«f,ÐÒéõ t yÝêôkmGã;¬Ô(SýOõCo+ßGvhöBMåFï@ÑFª«›³É;Çi/’»ú[›š‹ÌwÜDß}­¯Û,Eì¢h`7’y Šþp¬š-ÛMå=ÂPo»Ó›8LEW4ühûº­ß•í4]gG‚&Fýøî„l¯f=è¹ÞOumWP$Õ%©ï–´âAdà†ÔwuFi‘L~¥¢8õø»©ß}zsŽU¯ßâÆyzL¸d‚Q]ƒü—Øeб¿ƒÌ[ëMð6çU?~&ˆäž=I/_V©©‘~8°roSWÏxd}ìÎ ­Iå~¦®®My‡†±ÕMR¹×ÊÙé:²>ÂMòøq§cy 
b&À=Ä‘'f˜/SlGž¯ŸRwèé­Ò¾V§RÁ)Ì…ºäŽé­úìùgUUåŸT°ÕTež>ûØDõ™zmB½šõNuŒtlêØÚ©-¡he“‰Ú´Ã£¸oN`ã§ÖpÞÜS˲Ñ/´É=Òæ,¬ <}ЍªJ €êëTTvÍhWuƒr–´AÛÐ+£-_°rÚ/»Ð7Di×HC—›¼²îÙù¼iQ9E6FŸlÑÖPODiÅí¬†nœß}{üÂkuñ,Úƒ†v¿œ§ZR}]¢¼ÔçÉ[½„×µ“uQ‹ŒZœ• ­b¾Ï{3pÎÌŒŠîõääç¡úï ‚7H wîS/«mΧ7¢¡œv§üd}ù P'DtcÈéœ3G•¦±ç9Ýõ¯Çp¶•r".)Yâœ^VV7ûÈÂb ={V0~œåOÊ{}=›Ýû¬F ÿªöÕ.@jÊ{‹a°F;€êë\Ð=b[ÿ´ß¹  úEÿé&åÌ`CzkÍR]ðBߥå‘èò–*bæÉ诚­áW^«‹gÐ,M }·ý[Q^J¦ŠüaI#©-H}·à[Ïzž|w€þ`<¾#‡ªÐukÓ‡Õ=ztŠ—µÙÖµ»¸] 1ä4f› )ñHî+&‚”ŸÜs—ãÁ+ùvûÕøâAºùÕŒ@l,kàÀ^‚ÒOJ UDJ]–˜Ä42†{#;[¸º¦¸Š!˜@õÝòå!(h`64ý΃FŽk ÌÏÙÏ3ÜV­ìðGŽÔ»~ýñìÙÓÅNˆøc/ýºÙÿ›‘ÊÍ9…:òK³\ôÞ½ç«V®þ©Y9z”­ÿî݃Qñx\·/׸ˆë¹ÖÖ£àP}@÷ÅÒÄpïÓŒ¼Ì§ò*šÝÜq×/Yÿ5®cú¸ôÓ±ÉHžœR®¯÷CkpG@œ»ðÚÀ7·dÑ‘®ñMêÀaJH|v¬ç­ÔU x303žÛ^ë@Sœ9—7vŒÉÏkèãB¡Ò‡×:w>sÚTz7/Õ·£ñ½{õPTЃg€ê€?//¯õë׃à;ø{Î,?¿]tiyаh·5³»Q½ú÷ÕøIáO²tç.ïÞ½ˆ’’ß½ ÕyÿOí³)æ–”áþ¹UĞͥŠÛŽWÙxš\ÞÚÜM~~íhêµü4ÃgÚî{z„{õáO²UCB^ 1|=* ßCBL[gø/ÈÐÁƒ'çæúß¹SnlLí¶¥úÕ+ú“ŒLGGwxʨ> »C&‘-Y²oï^Ëé …(Ýq(TæƒØÚšê 6cÞ%ð‚„¥ŽËwùû-Z$I£}ç[›¢e‰¢­M‚B3€¢8»(¦7<䟈²ÉÏfYE¡[·z–¬sLÚêwûÓ_K_쇺zÆÆŠ?•O™êô˲uÊçààíDbù AÝQø½}-~åÊ3çe«@õ†¸˜èü¿=zxÄL{‘Ô­Òþ6-©„ññïÙ3ö…HdÑÅ‹ì÷ï?à´T’JÅwBS$%¾Äìò<öÙsÍ¡ÆGÇÆŒ !߯úîݧdgÎëú‹S4wîÊ @o2Y@[›Ü­JuaäÉÓÎË\ð‚P}ÀENFréÒ¥†V“èRrÝ$Õ)Q—ÄÅé¿@òq£Éºº®Ú°}üxš²²Pç³iNè¿sB›=ôý’ïÒEXùë%{óç÷ä½ýìØ±oÜx1Õ>f©þô¡ÇCésfO’“ׂG€ê€6¢7@·¯ÆCG${©h6ý“’&PU~ãl¨±É çe¿7&j¦ËU‡ì©ýÃ`–ÓÉÔדpqYÛybEî±rå?·oº™ó÷ßBBN£Š :U,ˆÏ_½z<¾T´2‰´ÌqqAч£‡÷h˜+hôëê)ÂU³.òÓ쯷j…KgyyH ¬ú\R°sçccªA—ïðYSMÚ¼%[G§ÖÉÉ£sÆpèÐYf5§ÎÔÔTÌšEëú>üü H$ÂüyN$r÷]£@õÀ÷##ÙÃ}µÛ'FIPP€jÿÁ*ýwI½WU~ûòi)Y¹µk×â;ßü¢b2®®ÿ0Ë‹ýüöêêR-,ºdݽ²B44ô5]‚ºníÚN>‰½3œÑššS§**™³fÒ»b»Š ]ºTöþ}™½Ýbª°<¬Tüâ41w·UŸKK††Id½ácˆB]cüÒüWñ7# ŒM—/uìäQ¥Pé..k++>9„ÃUOž,&,Ü5Æû½z%qéòƒÁ4;{·®ô-€£ý°[·Ž=|˜=aMII¨KÄüC‘Ä™³/deécÇØ ˆð€P}ÐaˆŠˆ,[â€í¤<ο¦ÖRÿÁxΨLjÊ>&D†‘)”©“&šêu%)B"‹ÎŸ¿’-¥2/î‹Ò(lnFÆãq0ªŸ>ˆ_¾šƒÃ Lš4~åŠI]·`6{Ø0äsIÁáÃÇðøš¿ÆŠÐÅ;cµª²BôÆO¹¯‹§L6[²d¨>øyèõÓÂ6lçÁ£ô7#Å%¥µMF©"¿7V8äcè(Ù^½ÇZ[ésj×¥ŠáªU†ØÎË— áá·%zÆØÓh¿ýuþL4,ü¹œ¼˜Õ(ãùó'ÿ95Ädþþ›­·ÅoOž<_PPjc#¦¦öÛ[ÿpùùW¯¾ÀámlŒÇ‡É9TüZöׯ6lçsiéíøøŒGÉ’²½ûèÒ¥dQ¸š•ÿ*íñƒ$aQQccc3ý~CëþaFVUâì<Û©`ÇD_OºÿR^ŽljFéÝ‹ðk"Àb‘Ò ÄÇç’ÉDc“Ašê¦ê®òÚß4zOnÏO´¦æÉ“¸;ñ÷˜U¦¦4ð+]kk‰ÏŸ“ããr+*XFCúè?ÜÁa" ‹#*BÕÕÑÄ60ÅσDUU‚m`Š_†ŒŒ:¶Õ€ê@õ€ê@õ 
úàG¨AÑÙyÏ_¾*È[\˜¹‹‰‹Š÷ ‹ˆQEÅ„H"‰D Qp8AÁZŒššÚêê/•¬/ÌR³ôsyIñçâ•e¥"ô’²²}”•u4úI$°mhuaÑýÌW9¹Ÿòò+kª‘=¥¥¢4¼8O¥ PȘñ€«©AjjÑêj´¼¬¶¢²–Q\SRR[ü©ú}! Ûé!!(ßSTY 3óYL ª~_ E7mÚ„ýßÄÝËË‹ÿ§œœ\¿~ý ÀbðËxSÄH¸›”ýø<ËÍ‹Ž.z÷ZQ³¿JÿÁêƒÍ°í'¾ðLLb[½KÄÝÇ/&V2Ëõ Œ,ŒáÿDqòñCÒíèØW¯˜ººTccªÅPA‹¡Ò?ïrx<®¯&Ûê2_¾Êˆ++.®12RøaÔÔÔz÷îýþý{èÛ BiÅ—ƒ*hô1ÝþL`]£^šýwøíee­û›Öú«e•Þ«ªFrq‘þ#nÿ·íOºDÞÛÿnFe-_.ÙmܳaÔ(úÑ`ÿ VÂMÐyTóEÒíØ„Ç/ H%‚ÐhZƒ‡[,Gk] Æý5ôÁ[D?¶ø)í»ƒ)öÖ÷|ˆè®‰OÙlÜž0ûit:d Ðy(¯¨Š¼r~Ä »nn‡žý‹Þ½¹y;nÄPÓ­);vü޽]n2`²%´´9Ù¬[·N 6n=€ß¬ú^„.U· àîëŽv˜2RS}ŒÜعc<¹Ž¦koÅnÚém L´=¯Uü`8 JÈŸ“¬WDÕEº®wRv i÷I$ÎÿÂU?teúÊ{å‹Ë«¨4˜í¹ns§@gáÌ¥+šM„þ õâ¾›~æVGv ÔéàÙ€¯ß8l` L‡Ž9ˆÍʶ_éé¾¥Ñ{‚5~›ê w×±ñMÇv’+õH|G<|÷#HÁ ÙYyÑG/=7¯Î/ÈÎÀ!œ?ˆþ¶Oïçe>ÚMàŒùD¹”YNÝ\ïevàÃÀ¿ÞØ=ŸUç4Äýy´·»«±Ÿ™¸kâv:œ¼Ø+<·Î‡ŠÛÉ«¾Ó¹ +{›‰{Æ!ëb>x™IÔyàˆ9ºàýè&[8ŽÉ–tögëÝCQÉ;¸|ÿ ¾èª, <éoßÒºÞ̽ÖT§dÝ¥pRµg}:{.Jy±ã“ÿ,KKõ^ÝN¼ôч§LÞŬš>Ï?6‡ÿBëNžóš> Q°aqR§B7Xfw|èRãö èåÛÑ“ÌØÏá´¯¶¯ÕÒdJJù]NÃlѽÀy3…g5„¢<ÊýDˆ·g6^ ñ ù+ÕÖ·›|-%¾YŒ½‹Y»`ù¶ˆ‡ü±µÛ°kÍèª;k›ÍŽvZÉ-ôÀ»9v¡˜ƒ¬Çiï-œ+¤î4ÆíDÍ]ÅOœÛ­¢Ê®úØÛû¨·„òxŸˆ3«9¥aÜßD¼Ñò*L_Wß«±cqÀpv3Ýt/vm3†•{mÆ0¾"­0eý|þ¨5:½þÖHÜ‚_2lUJ½¯^¶±‚M¥¾!àGxõæ}aÞkÝ¡6` ¼^ÛÀäZÄ™S'v`°eŸ_†½³Ygô§í×£ìWZp¬½H©NÛfª’äÙ*´®_âv!×wBoöù5iªý6ÎнúàvÊv‹†`mLÙi áHÙ‚ ú²“Ž-3ADŸ„ÎÕlG HÎÙJS³EÔÍw–2l—ˆµJVÜ÷haY‚P ½u¥=²ejbñ>Žq×o(í31ðÉy{M^%¬·M˜›Ç4•#_ɱ”³ˆET‚Ò*ì´¹™S|p®¸ÝZ«×v•ßm&;Úo¥msì01\¾Ô˜sÞÖI>#õ JŸy×o½ÂÄUÜ–cŒåyÎ6±—œå¸»½G;™"®qˆÝ‘S¶Ú\AŸ¿z¡ÓùuMÂ7ÝïÏkÙC ÏÝZƒ›}84iÏXƒöTvYuUs^司ØznsÓ=V¼Ÿß-NmžH¤õC4 ¾rg·–¹hKaÃÃŒû™–0rá$;eÑõƒ³¸¡ôŸ³ÓÔKOsÚ‚¢[r’ÂÂޝ^þžññSù(Ž ÙHjB± ñ[ŸFDfèlS$%É}ù©}ŸÜs˜k(®$ÀP´Ú\O¢›üÃË£ûžœ–1«™êû%à8™L$j)!)ÙÈ…£WöuWGÌ£Š~ÿ˜7ª0§},Ó^‡l¯kã6yÂÈa¦“¢ ƒ[ÊŽo±Òš†öOn-I¯¶0­/«kŸ‹[¤=ã’C¯½ß`‡ï£>/îyÔ5>c…6îÞFêàu­žh›è]ûXLø& ÈÓO˜Ü¥µ¿ðƒ (úöå3Ë™-y¨|›”˜Rˆ4^bL•–QPVèýmQ–>Ü`fµ At=»£Gc!·; ÑpŽ:éÞBWm\#?Nû‰6Á àå•Õ“ 56è¨0Ó3ŠçÍ•ø‘˜9™‘÷ØŸÁLttå¾>zíC¢úp½.£-µ´èÅ™˜L†Ûàw¨>R/UEÉA<·œp6_Ô¬¶*ÊH|Kë¯+_ß"ÔôCâüýXÖP-/C¾j—U¦7­Ç·.+¨Ü÷|a_P$â7Ìþ\Éý3DE½ŸRú¥ÎY),ŒÝz)Û»Õ ¾Ö}[Zn)ÉÇÈÐ#Û™ôOèš%3¤éT½êaxrRÒV°ßH³¢‰DôkG-u yôK]¢•ÂØ³³e‰|Æÿ¡¥sñúçPöuó3ÿ‹Œ¸|y‹åšìzõ»Öø‡¬ô³ p XTø©Áí{k“¤f3m³­ ™[£i˜íÈ_ø>22s("bB¤?¤½º¼Åes¦µÆØñnŽJFjè‰'¼ã 
/ìVh_‘à™ðÌ¥š@á~T$pÆÍµþßO“Ó*2½rr2‘R}EE)xD\â‡^Ói'®LXóŒ»ozKàÔMŽN[óÌl{R}**”÷ߘ˜À]ð[T¢¼ëîñ«²³²n.¦âH¾¾PïÕR’¼|è@ÿTö.»›±Õ$äŸóH˜óæhn‡CyÞS9ãFëP™<#îÿTwc]^ÊŠ½ÜØGL×§5H¸³çR< y—.º½£¾µ¿V]ÊâîÐ'ÍáwI¸Y¥ì=¾®ùck¾Ž4ÖÖsGDz˜9Yo«ˆÂJJrß»œPñ­ðDŽÚ¹w®® "ãÑnÿ4öž(©ã¡f„ÄÝmd(,9|K4½ñsu¡ró~µ¯íØÅå7sq¤èJ ’›Y”ý¶˜%,©$Go—^`UÇP«CᛇÎÑ2œãâŶå£MÄëâÂãkiM³ãû­DbWúÊ*K¿=kbv…>2]ÚŸ—ÆûÜ3ÞZ›óíÓz÷lBÑjkÞÀ¾Â}+Öµ/亼ØâŸº¡¾Ðæy-rû; íü€äm~˜D;ºØ©Mtݸ¢áCݶƒHñeݾ3ä`DÌúEfìv¹ r»ï"ß,2¬kÔÂ1Ž—Û‘Ès,}¸ÓÌj#‚Œɼ ÓÌT)UwöÚ:m¾RÿÛt¡§4®ápãÓËÏÌ–ôŽBŽß¿ìê}&­ÞÛ˜-Q›þ®k<Ä•Go_ಽ!ÌÉBµ²¶y…¤™lNܳ _KÉí!‘œØQF.x—)%Ý1ËÓY;X#‘áAÓÖ]µ« ý?{gU÷Æñ3Æ 3Ĩײ¦BÙzQB‹ÊÒBŠTT/Ò¢”JÑF‹¢•êýG½½E{i§…KJ…Š6²U–-ëXî6kb¥<ßÏÔ\çž{î9Ï9gîùݳ ï М¸(GÝ“­¦ÜkŒº|væÔô‹*ïðhžC£f<òlXlÒ<¯XvTÔc;}‘&_tÄå„£ÿË–ŠØå°ý>‡?žIÉúOùåP~•êcÍÜ{y~•ªÕnG-‘¯Wòvô9¸Tñ°º€¥y*xíƒÛÝÊÏœ1Á«èÝ;T*³w©¼¥´`6×[uŠ0½}®©mÌyM®•ÈÙ·™=.~Ê.‰­n­Àüºvû—âVT•Ea™ñf¢ô@–…úٟʤ©«¿ÚB×&QÞ~gd™Kz<ö–£¯;Ò¼Ôd)#¶«k;Žm‹D ¸ì÷óPw‰Þ kˆ7ãì•]ÖSW¡GËpËÝ"ÒH_Ûê^l^èãq¶T×z_KC)XÚé£ ºš¡Ç?4û¨c7ÂùÀÜÖi¿ÏÔ@¦99­ò¨Øo‚<}•÷ZèùÛ„Sç \ï±m,n[›3 ‡Ò ùUvlÿ^+‰ ¡gtânFÖ»çcÞï~ÅJ#zî§Ž[Ö:ë#s\ÃÅ(z®‡-78†°bÈŠ=³L½º‘T²FŸÒº0´¶›~;yÑüꊾ*KíU 6Âl¿ðc”••qóôëÜß—¬”·¯„k©µœDTY”{:`Ûtw‹£s›†4³Û ]Q[û•Gæ.1å“´Cß»gœò’zßÛdÅÃo]^Ëœô0{ü¬Cñ‰~ŒÚùWf«Ù\_g„x¶X Fo·‰i\ ¹ÞrûÌJº©O ˜¡îõùb«#½Aäå¯*/ë.#—–~áçïžßËÅ4#Ó'îÕwq=q˜x"-¹Öº½é襞ên^Ï’™˜üÚR…DϘÊÔúJAæJ1×FzÉQ©•t1çuWMJ® dú¹¼ÆÇÂ7Æ@”È ¶àÎ%Q£›4wÛÛ‚˜S4*‚]vÚ9nós•\k«òcí~~TQÑ€Õ×ãðx¨‰¿Bõ1P™± Ãv±áQÞ3óüöiúª*ØÖÖn{çÞs=˜o…L² Öu7)Ÿ ̧µ“ȨUiXG›Ã~ÏĶ ì4Qdµöãf…­lþ#ñk»µº;Ô&c*NÞ[ùÖÙ6)Ÿ¸Bh_Ê&Z¬þ5©%Aï—ýhu†”{hÇ—|ßg%$i¼Ãvw)rí°¯Ë”Ã¬Ã=ª[eÐWa’­ö¾°ÚûÝ—·ãX[˜ÎFþÀ÷ÓPWÇÉÅÆˆÉÜ#‡·|hò÷9íÖæÑ¥sÖN3àG@_âÎÐ%2þ7À¹±G‘8z]ÈÊ'ô®ÂÐ\µzjã¤\1C]ôêÊI/b„yéãIòï¡•¬¾+¼’Ó¥,âxÿ¤NâC p5Ô×w—‘këêˆÄîÚ£¯!îÑ#÷ìÑ]yW•p÷pò•V}~%QÑtɇ”£ž³$ýçi Þ͈|šŠ‹Úüøå¹ÆÁ.êQ±SšüŒž¦Œ|ÓÊ+ªÀ!þòž7ÎÚÔÔ½{_3 C@+ 4½xõ¡•Š>×&†@@5ÔjnÔD€_¦ú~-ÕŸl>2…å©Q&Á=ÖÛµ>3#êÁšîÕ‘Z¤Ñ ù’[y/½^Áî%ÎSU×ëeg¾~×=Fþ}ü7„H¬¡²Ñm¬´eç‰#<™RíɃ)›]u ½y ÓX‰œß5”‘uQE× ¨…ù•Ð`CbÔSkðœ„î22‘H(-mèÞŒÓqu-Ö»% {ÑQÕ9êìæü_ǶíLm/³P^^Û4v€J3@êðA¬/2h¨*oÒÀB¡aôéŽýq?œ\m-ÉЗUŸ€{TW;šú,dÛ ¶A=ynÃx¬÷.¢ã™ÜQ_3ä/t…Ÿ’‘‘Ö¹?^®¶sm92/lßÌ8’#3SJ„‡&ü=|4ÓKÉ¿ÝìMŽë§ §…P<Š>}á…fãt»‚{>»`n]?Åáa¾Ñ´ÌtLtb'Ìêòbž~|ÝöH£ef~êþ¼Ó™ˆU*˜‘w{l 
n‘‡”‘ª–èZz¶Ü;%Þg`£sÑvëëôïiªCIBŸÇrîp˜§(úu­‚ùTÉFÕZòÖnLiŠ®ûxâÆ¿¨ˆ‹ÆvôiÕü d¥ÇvàZ“KÿJp)ÑÎÒD£õÞ0“>/£Øøìò7Xõæ©Æ¡¦óò̯ÈûoêŽDuÌÞ£ræ´¾ FÇ^~ C€ôÓÿßÃcóFÌ‹X¯«±¾í]*™~:¸¼‘V޼Faj6…Ùœ…–,ŠÏìÄ,¥ŸrDźmGu)IõÐÏ~0j }Øjyuë6’b(¶ç ‰«3Cõ•W×1š²°-ž¼|wá|[y·Ý½6ÈU޵Ž)o[Ï+nå¨äà˜¯4@×ð¢¹TÛ þÞþö²?Üèøð“à‡:ªèYd¥$êêËKŠx)‚ízÐt}’èÊVPÜ'æ.îÀC¿áž‰¹ž¬?0ŠÓíÊ–»d­NæZ}÷åí9RßÄMõpŸ>|±ÅˆÆeEk¢–É^¢}ÿe¨ÚÁ½²³Ò‡ Ö]Fæ@&ásr¨’’ßß?¦ïµój÷ Ï’KÚ:ŠzFx~;4о†Y0­Ö£ˆÁÄ4lb•½wïJ䡀ꀮñ¿ÿýoÒ¤Iýû÷Sû( òþÍ åÆ-pþ ˆR¢eÓ½·#ÄŠÚĨ G|{'µêSöûá¶³º1*C‡J%&üˆêûè¯ãzý:oÊäq` P}Ð5jjjH$غÄÄ1†~ûö ÑÖÇqüi“¬¸•ìsí»zÕ»OT5´ðݺÀÈ3}wúŒŸÐ¯ûóü½y[¯¡!I Â/6¨>Õ=O?ò5­—q‘ª#Æ‚5µüí‹„5n«»ù MàÖ×—»yóÓ”)0“ Q©¼11©«V®S€ê€.S]]ÍÍÍ v€®b9i¢ïî½ÒŠÊñ>nЇ·¯71åäìþGª¾¾Ý?ÿxgdÔÈÉqõq#‡„äONä‚=@õ@×%­¡Ö½ƒ² ï`7ÇöèÿN´[‚ç軕(=!Z@PX[C­‡ÂŸk·`ÿƒ+\Ÿúð8ψDb¦–T:P}Ðejjj¸¸¸Àð}ˆ‹ O›a}ådà„Ù p}Rø}|ù¤¤°ào»Ù=w ^áyv3ýýÎ._.F àú ‘㟒RßÌ›ë 5T|UUU0¼~„Á ²Æ&w/geß×Ò^ð.éCÆ»E÷¸º–deevð`˜‹‹×·zü^½âÏZàäu T|'0©~­¡C„øùNó3²v$’È}$ÕoÝ©©®þ ’‰ŒŒ–­­ OТŢü|}¥[õî]ÎOŸ>äÕ?ôõ@· ;@ÊyÉÒƒÿÔ5™. *ñg'CÑWO)(*M4Ÿô3ï+$$çºbÕƒ{,,(}`q©S¥ââ"66CýÕ?Duu5‘; @7À×Ç}Û™‹W߯ÇêšXþ±?î…^Z°p¡åì¦@äâquÝ~,22wî\?u´gN¶ppp²Ã‚yB‚ f€ê€¥ªª 6ë€ndæ´)ïsrO߯=Á\H¢ÿŸ”4¬þAè9I)i÷µk~mLŒÆÏ+.z¿k÷qssþŠÔh ã>}ºHL¬rÍÚP›@õ@÷óú Û )á¾Æíñ³7Žï5eV?¡ß^ïáÐíÓ‡yxyæÚ„Þ%Á«WmHMõñ‰°³’ üþäðç ÍŸ·”@„—q ú û¨©©áåå;@·£­>ŒöyòìÅ•€ ,lÓÉ~8¬>ñ^Ø—’bçÅ ¹{ßhðG®Y32--ÎËëæÜy‚²2¿åd? 
#^½Z•ýùïùK¸I|Pw@õ@7S]]-,, v€â/õa´ÏË”wWŽù®'¯ªõÛH‘Êâè›×(‚Bs¬¦qröêg¥‚‚ΦM:>¼Ø¹ëʈýFéý6˨–– ^Éäááš>͉“S¬@õ@©>˜×=Š’¢ÊÚ5ÔÚÚ«¡22´'šSDÄ{gTq µ¯Ýÿ˜‘6múôe‹~##÷ï?lõªaõuµ7o'¿Ê¶²п—ê¨ú:Ò»5ÉÉyÓ,‡Ûÿ=*¨>èqÕÇÅÅv€Ÿ‘@°±4§””•‡Ý¾™•–:ä/}YeõÞñ[PöèÎò/eŒm¦Mù}Œç$›üml‚ª*‹ÃB/&%gRtu{Åìå/_„®_ÿð)¯ÜØÄpÂý  N€ê€Ÿ¥ú`5øÉPøxgM· Ô××'$¿~û ººJIM[zð0<ÇÏÛ¼îKaÒã¹ï3‡ S75RÝ~ÞŸddYÀÔÌÞÔŒ~üæmt샸Ғª#)ýEÄãq?-E…Â÷#sRR‹ÔÔú¡kcc åTüljjj`„'ü*ðxü_jª´óÏìܼ¸Ç )o^¸¸û+–’“ì¶-骾ädf¥¦äe¿“쯦>LCUyøÀ™}Á΃éÓ>ÌãÏŸ3c’^dÑ´Ÿ²*ÿ@ü\w¹¢œ÷]ZÃëן32J$$ûi¨«ª(L³„7k úT ¤$ħM1E´O#E¥e©ïÞ}øø!?ïSiq!†02O?^> ‘Dâââæâ&ãð8NøC%'''S úद¦ð@õÀKUU ï@õÀ Lê€^K=†¥½Ï}“úîSö‡ÏŸòêxùù„„I¼|<|.n‰›ÈEÂápxN†5Ô×ÕÑþQ««©5UU_J*¿”U”–”V•—ñ ‰ˆK)(Ȥ@†Õ›Z‚Õ&¤¥%e¾/ÊùX]Cm&ˆŠ(¼€žDâ “9ðxĉÇÑr¤¡ÕÕa• UÕ %Åõ¥¥ ÅÅuŸòkKK…8ûKñÉÉË+Èâ&ñi@õÀ¯!<<<66ökw//¯¦cAAÁ¢¢¢õë×Ãd?øÉä~.}÷8ýu2ž@”(=P¹Ÿ€ðÕ¿hŸ.„ÂßÑÉWŸk?e½ÎI{›ÿ1CTr€†¦Æða}k “ÊŠwOŸF<^ˆ8ÐÁ¤¡ªÜbâa¤£Ë߉íZ ØÁÉõõ§_¿ª~õº:5µZZ𬡡®KKâïܽ÷öm…šYO×Ào`(Ús·ÃãqC”I´O£CFzú?QÑå……uº:Šº:–œ"”|P}ЃX[[?~ü[g¥¥¥ååå%$$ÀPÐsÔcØýGÏžÆÜWÕ7Ühê/ŒŒ¸Œ"íÃR€ž'ÇÞ—–™l:QˆÂÿ{[«{ý&äÖ­‚© ß´i|ý²Q—òò\´ãðËëWÿ»y«L\¼ŸñDKÁþP@õ@÷#+++%%•ýõ)%%¥ÚÚÚQ£F• ‡xžšuûú%!qi5Cã‰rƒ{[ô¤•iÚÁ‹÷9–U4m’1@ø½Œœÿ)òÜùh11´i”!CÄz[ôšº?å]Ý·¯HNVÈÔt>“-@õ@·2wî\ooï6ŽYYY+W®û@·SW¿–™¦kl9ÞÆ©÷GX@Tr¢íÚÁÕ»SŸ?±™m##%ÙÛ#ÕFFJ|V2ËFÐÙY´÷YLœ°|9]”ÆÇºs÷Ël )é¡PY@õ@÷@ ´µµ?~Üä‚Çã«««gΜùÛ½Ô€^Nu]ÃÑãAô'ZÑûÛÅ_Aí/Ú'éÝ«cGÏûÛQ¦¯Ô~Xù™³ÿTTÖͶ4-öÛYK‹‡öII¹çåuqþÜ)d5 â€ê€nÀÄÄäùóç555Ì?åääÄÅÅeeeÁ2Ð]ÔcXð¹KeŸ õ¦Øàó%R˜#?ß|H?y2höl»^¤ý°ºððƒéåóç ‰¿µ‘•”¸7m’Èzã³ãÚ¬ÙæýûƒJª~”3fœ8q‚v0pàÀªªªqãÆM »¸—sô4;"‰üÇ$J¬¿¼é<—øäø‹—..Z¸€›ø‹¢L}{þÊõ×NN"ã'ˆü1F–Àµf­ø³÷.^¼æ´`)ìûª~ ‰ÒÒÒÌÌLWWW0t •Õµ‡—Uœ0{á™@yU­þJ*þþF3ÒÖø5ýQX}ùñàýÒÒÄU+ÅÿH#«#© á>à¯7BCSË ª¨>ø~æÏŸïíímmmÍÍÍ Ö€çuVÎ¥SAc¬íIdÞ?8™"÷„9‹Þý°|…XŸ2²œ×üyB{öî^ù€ê~ ±ñio^XØöÁ´S„Ä´'˜ïó;àº|)gO.Ušòö|LLÖ’%ݰ_efÚíÇå´™QC5$9¾>{ýq‰â qš½e˜®€ 'Møí?à¿bù*N ÈÕü\ž¥d$ÄFM˜³°ÏZ@H¢¿êÀ£Ç–,詮΂‚—¯¾^µ²{ö^O:uÕbÝæñ‚³Û¬„Ûœµ^÷Æ`ãZ¶U_]°å»‹ò%kÇñc=d1q‚¹9åð¿E‹Ö@¥赪¯2%î^Tì‹Ô¼T…¢¢=ÎhŒ¶$Œ|ÅÛ5=ž!u1 [õJž¬ÐöFH+ªø©>ŒÓ+2ÌÑG¨¢Ö†…œ5œ>¯ÛARaHavVhø]³ñ=°=V|w®­GwîÈgêdŠn‡Z¯ ¼æH ÞÁøÔ‚GQóf Ëhv‘71;u|ªŽV}IÀà&Ã-݈²€öåýÈß]‡«'Œ

ëQ#O˜ÀµwÏG 4Q(r½Eõ…­jæ“L;8_µD³å@|wŸCåR˜}ÿØå·ºæƒH4לǎ:Na-ƒP³»töólåsu4bGÄŠr#«­M^æ< ˜òÁNorHz£ÓȵoïoW¢?‹÷ºF#·³a¤ã‹¼Â²}(¸¾æ3sã¸x» G4ÚYèe Ôèu!Íq~¯À¨m Çx#úšiÞKÝuùÊ>²ÜÞÑïV‹è*, 8í·à¯oØ£ò )ó ´ñrw ©GS:¥&¤ì.ò›mä~¹É«Û©TEVû=7rõÌy~Q™-o´ñô¯™ê­‚ =³Ä9øE³eüc‚—ê±# Ô‹vÆ–ÍfætÖGÕÇdiš‘PqNƒÑ1[ð8`žÍ°ôæPä'®=´]‡1숕G#½‚¦$Ú­¥'Ç-¼ÐÇHè;DKô?+ç-Ùßâ>HÞÂó¿›4غ¾ã²Äòè8שeZ÷ß=è<†Ð¹ÅXZ4÷èáö 4d_ñ+Jgöa¶r³ãŒ(y°¾M‰?Kú¿S圯¦ïŸÌ²ïk¢² =ØÓ©Á3Y%-󼽜ÕQ$敟·Q¤âæl½°Î˜Ü"žnÁ‡smƒiô«–~ž;ž¼nÓKµ\ŸIâðkû‡‘]Pü>õ•é<0 Žc˜®áõ°[sgYuc°U•?)\»¶'Vp©Cˆ{EtÀÈ={tWÞU%Ü=œ|ÀA¥•¶,‰‹¢K>„L­EŸþËp$9UdQB& 9þ¢ÄÁˆR[ÇòMEˆñ›“ú¬¸¾±¿¯Ž(Ð__³Û”ðø "·Ã¯Íž½Š@ïP}e‘> ÉGkíµ–|ˆÏJÃf5þ‘ï©!æEkFK®HNÝ£ÂlW¾;¦?p¾Åà ýu1Q[õ¨ÔRºëõF¼û²1L’vüþ´‚̬Nê'œ´Ž'U^P¥=mŠNWwÙ1HRürMS'‡¯µé²Ó¯°PºÌ«Íºh$kék£|>üjú¿ôFpÇÃ)z[±:33ΑaÈ0ªâ¾>SüÜ[/:vSs/Pc Åà†vNÚþNs“kµ÷Æ´†JŸ<6››zGæ`¡´¸œsÒ²<¤I>$g¢»DnN¾³rKÑUhãØEäv1ËÇbýúú$§Ã6Ûh\{z/a×èæ`Íôéi bHÙ¼‹Z–'–B|¯‚ça#Æ?MÓ†çºÑ[ç™7ÖË™0Ûñùå´g99»†]êH/|Tü?†qKž§1_WlÇ´€W! †°ò(v“]ìܨìJ}IÒwFM5tmÆ‹í² §—'ìUm=5/] Íj*Òñå—%šVZFze¼Ù(ËxApÄNÍqéØÃKg$Ôž“KìÄbñŽ šîáú[G­gêüÎíÃFÚ;ÏM6nôU‰Í»yÊ2ý€×嵓ͥèö\¹›y¿«wo˜ù?%úá›íŒàvb º·Ç(ÞŽÇ^ÎeF»òœ“®õz“ë #Šï£°âékëH“£Kõf.n™’Ì«ÎrS¢ß¦?ø"c( vhBzˆú‹G‘%eå¾nëó~w}øpžîÛÙW×b½[ºU£În^ÁßN§¢ªŠØ`Ɇ*k»¹–ͤ}ø‰í„Ws6Âç&ÎËè÷£=?ÔÇhiªw×”UU\øíòÏŸ3…„d¡ÈôÕGd ô¢ôëü§¾äA ½™Nk¤¾d5ÓiçݼûŽ&®¢·xé©'År6‹ºì"É<`ì¬\£‘ãgìT™C`þçmÂ×ßãÇêÙC™i›q48nÿdvDµTÖ]üÐ[·'61ûRfhÖÆìÝ{—~ˆ‰”a%!tüêÃo®.MÓ î̸Lp˜i—,¼yd63"d5Û=ú.´D±4ç_Û0l[f\hhìÉ5Ë?•|.ªÂ¡Ï‚e Äظ4šÒ"ئ4"ñ1sôQB4ÊJ-b/âÓû2 Å”|4dM¶Çp ŒÚÀÊ£'ÌÞ-“YƒžÝËxô‰D9”.»ZÒ¬jhÂàØ̯ i*÷’Œ°Û¡§=×|Ê))ª®B•…ŒSñq¯?›ŠýhYŠóg¤Å,*”)ùè7uzïÔ fg•‰j*ûöaß$ä&[7jSbŧnaäõÐuÇ5óý“Kžt¾ñ^ÎþÄa­óF‹m9¶2xžbÊ ¯@šÿñÿÛdÄág°Y%Ý„V¯ÓätäÙ›ïÆÍdeME·ìd}z™Ö¶„„4ìOÄ4làÏð÷©¯õ-¾¹GyõǸG ù¨µ, ñˆ‰ËÈË îÒ½¾<ó40ñ¥•©ß<Ф”ü7^Ò? 
v‰8½öã«q­ü´º¼‡Ç¢Pô$>qüýî ðåËÏÓ§ ôtnRt&b• fäÁƒÓZ¸kiØ¢ëÁE‹¬z2¶**ÏŸÇŽ ª 7¨>îþi?È™ÈcÛ)Ã…í¶ü ^>úHQÓjêjûr”ÀË!ò¹œÚäVÞ(½š‘o~"RÙˆó±ŸOm7± S&ª™_# Œ¾Ô4:Ë…†Ò{/%tTõWqT–ýÖÀ¸#tÝÑ,7¯[l#&ÀC¡ðÜ_ý,,> •vlWi·Ã“Hlg†ÆP•Aƒ¥°šÆDË¥¿ñ%J[Ÿø#%§:Ò4:Šv 3ãø¿6CDyHž²›/Cn$t!”Ë‘‹U–~Üì]±OWŒÐyAîÒÆ­Ýmê52ì€×MºoVºÑ\ÜWÏ'#oºøÊ‰ù»7Ì\²Õö B Ç÷;’QÙ7nHàgLÛûø®¡Gí¦'0s*ñß9^æã``çŸÊ»÷¹x‘§ß7É»+ÛVl i­IŽ#XR«º$1øÔ+Öy‡Ëyþ2ìÍë§îûfEÌ|‹H`lÖÝñ …–~Ú\Þ£ˆõ—Í|›ŒP÷¨¾Ò’×ÕÕ Ý=£ZC…T^ÝzŸw’b(¶ç ‰«3Cõ•W3ÆmrJaÛuìv8ûïÇù·Åd–©¾ ãBQU–•n&B_ÍÅ%bï¾q=khy¨ƒ½Cõ!ù}O^“˜¾ˆwäpüMÍ/wKã—î—H?ôŽ,t×3±DBP¨ËÖû̇ ²·[1zí¦%³%ç¾Eô¿Äµz¬„xÑ[½hüL-J³„;!Á]—uë‚{»›zÛZPÞø"SÀr®GôiN•ßnÞØ ‰Ê"í 7rO¶óÏᵕ™é©D^99Éï}bß £ký-/°†¢’çþf~Iô#>îî D`øØ(úa+CÑ’£ÝÜkJÑ4·Eƒ Rçc7¹Y íµè6ßbwwò7$weAÆÇâZ^9I¶ÌP™ñæ#ýÛ0êÕ9}VË¡ø ãŠöÄ9E·ó²Df¥%ÒeëÍ„]ÆL9Wÿ–šúMù„–<}Άى\dFÃè ûöéºr£¦K3Üß#6a§DolNÌDû±qûø0÷CƒH‡hh,9`7O+ SæNñˆ¾½Á%Ì©y¡ªbÈõúùš¡ðßɾ*^ÙÉ%á‡öãCn.EH¤sJÓ\7¯l–†¾GPñ e„ŽÜˆÜ´Ð@ˆÞ/g!éÿ9]ú°P·ñ…®ä„¹äîG,Ç/Ïö˜lFÈ8(íâPžvT̃ƒvÎ[¯6B±Цõåçæˆl@N'ï¸n?—ÔämÒ¶ˆ-7vâ*îï²_±«9ÌéžÁ*é¾^AI£¶>Úo?ì›/œ„%Šbît—‘óòRÅźów}¯-˜W»gx–ÜXÒÖQpIï’ ÂôÉ ðù‰eOZªîÓ§ ¨ƒ½Dõ±fî½<¿JÕj·£–ˆãWçýc.Õc<Є.`iž† ^»Çàv·ò3ggLð*ú2*•Ù©QÞRZ0›Ü­úE˜Þ>×Ô¶ æ¼&¡õKÍ;²o3{ \ü<”][Ýšµ8صÛo¼ ô·¢ª, ËŒ7¥²,¼ÐÏþTþ M]ýÕÒ¸6‰òö;Ã4XòØ[޾¨Fó:Š¥ŒØ~¬®í8¶-%à²ßÏCÝ%zƒ6nCÓùg¯ì²žº =Z.€[î‘Fú:ØV÷b#ðBψ³¥ºÖûZJÁÒNÑ[úô¸à‡auìF8˜‚;Ð:Íã÷™È4'§UûM§¯æ¿ò^ Öä!¶î– C" xšmk¹íÜœ•ëä1‚ßyåcK[³å}ñZ–%FZÒäé~LZû1 ÍŽ0åç͆Å\†Ð I"+÷|Ì»sû°Ai'¹É^F|]b„Tì·.p8Èðx|›5³!©³|{x#í~[YXÅþJ†äz9ÓmÌË›éoÿ¯¾í£ª£x–0ZcB“#1Œ±¤Ó&)ܦe³ü˜3$?…²ÒR.ž~ûû’•òö•p-µ–“ˆ*‹ÒbOlûînqtnãZv×}¬¨­ýÊ#s—˜òIzö½{Æ™!/©÷½MV<üÖåµÌY©³ÇÏ:ŸèÇÆœe¶šÍõuFˆ7a‹Õ`Äñv›˜Æšûè-·Ï¬¡E¯>%`†º×C曬ò¢ÈÍÛ¯º²ÛIIiI?><¶¶F&á°\uU7‰¬Ð T³9c†íbã¼g$æùíÓôUU°­­ÝÀ:÷†×ðø–ÌX†aË:Œ›”OÖæE¦È¨UiØ*Ô… x&¶M`§‰"«µ7+leó‰_Û­Õ½Ø ¤¶0SqòÞÊç°Î¶éí}ÜŽ !´/e-ÖŒ4©%Aï;xãËfuªû.`ímuŒ­hñGË~ußNÊR§~Ø2;Úa»Û”“ŽíÃí’ö;½Q;%––ä hÛz2¤i³¯¯—5i§u=žÈtG¶~cÿLêëê 6F.ç9¼åCSôç´[˜G—Î=Z;}Ì€}}‰;C—|ÈøßçÆEâèu!+ŸÐ» ;@sMÔê©3WÅ MtÑ«G('½ˆæ¥ ŒGÇ¿‡’þ¶EÉéRq¼ŒR'ñÁá8ðxÎj*•›Hüq#×ÕÕ‰Pؾ†È…£R«@õô"Õ÷k©†ìe‚°<5ÊÄ#ø£Çz»ÖgfD=XÓ=[¾UGj‘Fw:7Onå½töz«‚؇Óü©?DBÕ—òÎý)mÙybe›É_žì0˜²ÙUÇðЛ:¿/DÎïÊȺ¨ëÝkÌžm?"Oç¡bXCCC}·H>¦‘ë¨ 
Pؾ†Jmàæ†ÍA@õ±pêjGSŸ…lôÁ6¨'ïÀma}Ôº}9íÀŸúóJ¡ä|üع?^®¶S92/lßÌ8’#3ôU-c(qxhÂßÃG3½”<ð븧®‰~ zZÅ£èÓ^h6N·+¸çÃæå퇩8¼1Ì7šöƒ™ŽÉNì„Y[^Lâáë>#‹½LþðËóº$î-„ä£JÖêóýSV±]ÝÕã9ÒÜèï%ûâSþ…ˆÇãˆ\

/£Øøìò7Xõæ©Æ¡¦óò̯ÈûoêŽDuÌqåÌi}ŒŽ½üFg]?ýÿ=<6oļˆõºëÛÞ¥’駃˛„KKG^£Æ05›Âì?ÎBKÅgvb–âOyÂâݶ£º´¤Vnnl/Èmfïç·–ãYõÄQE=å'mÔòá#‡¤D?¨ƒ ú€žE\Dˆ“H()È£ˆ´¿9‡¦ë“DW¶‚â¸81wqú ÷LÌõdýQœnW:µT$²V's­¾ûòö©ï?â¦z¸O¾ØbDã²¢5QËd/Ѿÿ2Tíà^ÙYïÊ+v—‘I<‚Â"Ä̬Y®vNçÄ+H¦#ñËÙ[¦J2‚V±tq¼H;û³VbùËuÂy"ä±×g9'.ÊQ÷dXËp4F]>;sê@º´«L¼Ã£yé›™dÚ1ö¥]åþõPÊœçfRÿБ™”9öìXW×dà½>Ò½?-M\œo¢M7ÜDNžt>‘Ýt‘íµAKäX‚<5Ánb@HFSÆœõó19“€´*4H ŽHIù¢¨«C€ê¶ñòòÚ´iؾÅÁ*ïß&}KõýÎ¥D˦{oGˆ5µ‰Q#ŽøöκÚìŒws¬,»1*ÆÊ$&~l_õIjí^&aáŸëêób*c õÊÄû ÉGãîÖ`£ [ºdM ¾E_¼WÍ|Õ¸:Ou'¯ç4©61ùµ¥ CYU¦>ÐW 2WŠ1ظ6ÒKŽJ­¤_}Ù.zTTö~}IúÅ’¸–wÍ]£áéK׃ê¡ÙKLéj³qR%s‹?TÏ\%ÕËÄ×åìf,XŒ)Pµ¤ƒw ¾ÍA¶b™çŽÉYÓWDõŽØé>ŽnÏÌ—åLÎ0./§/ÇLú¦Að/“ó&¬˜TÐãÕßµs§²îhNN–4n%ûÄ\û®^•™üdª:‘ÐÖøk¸•ïÎm•øÈävó4_?ÝÀ”è•5æJ~ Xv !ñÃq3¢töÛE¬±µVA¶ÚÑõÕžÿÆ¢"è’)G=gI>äz7#òEnFm~ürƒ\ã´Ê%v_Má{µ|꺄È„䃒]mUhÉìh¥ïõû¬»Jž3 %Ä ¬w4UYqzÿC†‡µLɇèë›Gr ^ëÔ ãðC”ÅHd¨€ ú€ž—FDâÐáº/ÝU5¬««IŽ[¹Òµ{ƒÅsrŽ3èæ÷Ó,Û[MYDÕw÷ Ý•o7û½93Ý5¡iS´U‡n¼î®ÿ¹‰îèF0ÍÛGg îŠ(æ5m×%ð2›ååµÍ[t¶»)CòÑH·›1dÇQguýµWrÚs$ru¾äU}÷½û.Ë–ByÕü$¦N·ÛoÿçÜBýû¸)žD\=~‰›»ÛCÖÑyäÈŽ””j%¥v×Yfe»rK°ï.Q_Ú_âA;4éŽ+LL×½ ´^CÛ‰ä/ûhPe¤ª%º‚ž-÷N‰÷Ø@ÑvëëôïiªCIµ£ˆ¨GU.Ö'awÖì0ò½,…»ì¶ËÇä;VUá>VÅdDyÜM\m¯Álƒ”¼µÓ½Þé•—¯”Ž3ŒÌ#UTðó˜?×îÿûgâœ%œBŸ5Âû¤8n÷(íá=þìY‹öùísqkgœ'§ôæ³#‚ÓäМi32úëHƒ7n“ [—‰è)­Xk½pÊ^À¶xlðòÝ…óm†íîµA®ôuV¨Tf]ëÝ YŽeTÆŒ»q>k1—çZRÿøš®ò•™øöqéúéÕõLïmþüÚQßkùÙ’Öþ5 Ï‹[Ú)¡ úݾa„ÇOHTjµ®®T:P}ÀOEÂo3{ÎÙS‡&Ì^„çì‹”¼”ç9ï3ÚÏë¹[Èü æî÷Zî*ÊEl+üd­æaVmï®ãáŽy|’¨gT€ç·oDÑ·À0¦¬Â:r”T‹Çšk&Oë?Ûq¬-øŒ†ŽñÞFrðÐitŒóÞBûRSÑjgK@”ô¢ßógWCÕüäôŸb1-ìüQ£™8GŸJ{ñû7©/_,]´ §o$$"kkk¹ÿÅËEñxÜom4‚ˆPMä>eëþm}F;*r<µU}©)QÑiK–¬…ºªøe¨()RølŽÿ·ŒÕß$ž¾²ƒvz|tIqáO|L$¥Tí);wqZ " ø[?»¹mƒwÚ·{ª­ä‹‰&¦¥ÉªøõH‰‹._îâ¿ÿŸáã' Küá›hsàP\Ø IÉ¿çüÔãø)R+]Ýþùg·‰)_»‹»üYà/\¨äå%Ìë õ TÐ+ “HkÝV†\ K‰afõ§Žö¤çE\9û·½ƒ¸ˆðÏ¿;HrY¾>2òdttÖüù‚¦‘óó„ÿ;–<þLQ1%¨Y ú€Þ…åÓœü‚ãÿ>n’ˆ´ÜŸ”4Ôw㢨¸˜ÇÚ_<àÐÐp¶ºZîž½GÌÌø† &ýIFÆ0â…ó_…*ÝV¯ÃáñP¡@õ½IQ÷5n‰I¯oó1Ùš"$ö»§‡PäÅ ÎÁ~7‘Ø¢ÄO‘XµrCFÆSŸ°Ù³¥ûÿ‚ÃTú¥œú÷ü%Ü$>¨G ú€ÞŽÆÐ!´ÏóWo.øŽš:KH\ú·Ô{XCrô­‚O¹ þ&“z]¯šœÜð5k†¿ÿÜËëòì9‚Š \¿ga!ܼU—ú¶ÀÞ~!™Gê¨>àwBMy°Ú¦M)i™!Ç÷+©ë(¨iÿ6Q¯ùòàÖU2™gŽõtBïÞƒ~ÀšÕrr_îÜuQ[›×@Ÿô»Lª,ÿ"rñžƒÃÚÚÉx" ê ¨>àwEIAÖ}[mmí¥Ð[©oþš`!(&Ù;£ÊÑP—ò4:íí«©Ó—,pøŒ,)¡²z•J}]݈ӉÏ3fÌ•é¥] Ü÷ï×Ç'|´°6wî ¨  ú€?`e> 
¡Ie_*n„‡§¥¼2|”œªVoˆ®¦üÉý[EFƧO1EÈô752ž“Óh¼­ÑxT]UvófÈógFéóëéõŠn´ŠrÁ°°ÜK'L1v츱c¡N€êþPøúñXO›‚íƒ^¼Œ­¬(¨þ×€!êxŽŸ·l#VQôâqìÇÌwƒ”‡ŽÒs˜;çO227‰ÏØx¾±1ý855öÁƒ‡EE•:ºüº:\x<î§E£¸X0*2ÿõ›Â¡C%uGhϰåTзЦBû0sòòãâãS^½Ä‡KË ˆIuÛµšÊÂì̬ô·¹Y™BâjÃÔ‡««j(ô‰á…ޤ}˜ÇŸ‹Þ?KŒJz‘‰Ã!eÊÀœ îÚ÷¯²’7=­áõë¢ÔwÅâ<ÃTTT ¦šóL…‚ªhHŠ‹Z˜™ Ú§‘¢Ò²Ôwï>|ü˜ŸŸWú¹Ã02o?ž~ü\d27‰‹‹¾l '54Ô744ÔÕUWWRkj*¾”V”•UWUyù„…%%¥åeÈËHsÊh¡‘Z}ÜÈB‚Æ›3n\³KEùç¬÷IïßgååTÕ×7ðñ(.2LƓȜ8×Pê°ú:¬¢¢®º¦¾¤¤¦¬¬¦¼¼¾_?¼ˆH?iiñþÒ ÒÒCU‡U‡BYÕì!Èϧ£¥Fû€)z^!eåÑÊÊ` P}¨>TªèÃÔcXFΧ·oßå~|ÿ9?¯®¶†—_OH„ÌËÇÃGá"‘ \$"‰¾š ž“«Çêêêë¨U•TjMÕ—’Ê/e•e%¥E…Uåeý(‚¢ÒòòrÃ+’I$°m3X}qÑ‹Ôôçï³ ?~¬ª¬lá%Pø9)‚¼I{›Œ0$%§$9pEHLJY‹öéB(ü‚œ|ý¹îÓûÄœô·Ÿ>dˆˆK«k¨i UÁãñ}ÇÈ5UYOn={–_W‹ B:”[B‚S[ˆCû/>„øØ D #g×ן{û¶æeruJj•„IKc¨ŠÊ\_22¨>øe‰ÄñãLJ‡‡wàGII‰æ $ü4Òs #ïÝËÉzנТºŽ¼ÆHÚ§çn‡çä””Lû4¹D$¼I}ö¨¢¤XMKÛÈ`$'çøL,+M¾ÿÖ«×å**äQz´ƒ‚‚PÿýÅÒ’”I“æ¹x v€ê€nƒ@ L˜0áÖ­[_5D”h§`l'ôõv)ìNú›d] #k‡ÞaŠˆøÄ9‹h7¢¿};ÃÆFa@ÿÞi¬îÁÃ#q fÚ,Z(Òû,"BX¶”.JŸ=;|ëVÙÌ™“dd4¡²€ê€îAWW7>>¾å8OÛ =µ;vòTMU•®©¥¢–Þo9-Ú'%ãí‰ÿŽÎž;_Qv@¯Ô{•W¯þ“—W3ÛVPo¤Øogduu2퓞þÀËëÚœÙu¡â€ê€n Í8ONNΩS§ÂØNèFê1ììå°‚œ÷#§Ø\¿uZ$äMurËÌ~îÌ+›YŠ2½Fûaõ‘‘ÿ$¿,?_˜LæëÒ¥•ÅùõQa ¹[¢R“YN%’e%¿38yy®M›$²sžúì¸mmm&+§•Tü-Çy*))áñxeee0 tÑñÉîÜ0´œ«~ ‡Ûjñ6gˆ\‚å£Q³*¢ [tºU‡m:d¶ùž™Ü½ÃÉTß ¸¶¤ %½šÊ…'¹¤ä„(í4ê rJò‹©ã’Ü®‡–ÁÕTÖÒ›dr×6ëSQ! 
Ì ¥¡¢3b*T+P}ðý899mÛ¶Í‚@ €5àÇIËÉ?süè˜éóÈýøÿàd\F6 Þ%>àgó:3'æÎ cÛÅ}Ö"†Æÿ <²lÉ¢ºEIqâÙs «W‹ãp8¶.xþòÿø=mTjU^Ö翨ÆÍÓL…ÛtÁq;µýßÅý ÿÃ=úï„¿¤qé±›_¡_2jÖr}.2Œp»cäûÉYuÉ=ï…^Ó%‹ã—[³<G7©>ÁA¢”fè©îîå»ç«š½ÂT²á- ,Äim-à¿t™;T:€^«ú*SâîEžHÍ+AÕQ(*ÚãŒÆhK’ÁøÀïHñv AgHc]LÂÖöw.y²N@Û!­¨â§ú”îhóuw€¿W<^ µ¶îʹÓz“­qø¾l1ÅÏ9Y—ÂnZ˜wèX]ðÉÐY³„ðøÎ%Ÿ¼±¡7N›C5uT–A\IzÍåê#å•DšærvžémJU6ftò«Æc•™i§N??³-ähµŠÄ;=ÈõŽí ¦z<Îg3æSwåáéç›–Ç"ïäKBþ&ËÙZ+?ä€7±ŒOb0kÛAÏg =>~¯üùñû2‹ U(?:ÚSV– 2”/4ôˆ™™T=€Þ¥úR‚—²;À<Ö0vš1A†¦úJ²¢æNò`:꯿µeL¯7Qmðt¢]ˆVDñÓqÐâîGÿŸ·C/¨S/]¼gwØ#ÁRþò¬(^M%òPàNßàbØíƒ”ùEÁÊ#ÆÝ >˜3\SR´›­õ¯¼<×€l-#¢©éÎÖvœÖc4Z;‘eÜ:TTÜ:SÇè´·“ADq‰»bGI]mw]úþ %eÇO!w:@vìÂÙ“¤¤‡B‘è-ª/líP3ŸdÚÁøª%šÜ-θûB(ÄììûÇ.¿Õ5DU˜ó8ÐQÇ)¬ejv—Îbž­|îÏ£î‚FìˆXQndµµÉËœ€gS>ØéMIot¹öíýíJô§Gñ^A×häv6Œt|‘WXV£·Ó×|fao7ôˆF# ½ 𦉳.¤9®Àïµáo$@Ïêý°Ô]—¡ì#Ëíýnµˆ®Â²€Ó~ þú†=*šò8ß@/‡qšz4¥SzaBÊî"¿ÙFî—›¼ºJõ±a=>ks#WÏœç•ÙòFO_ðš©Þ*ØÐhÑ3Kœƒ_4[Æ?&x©{0J½hglÙlÆaNg}T}L–&  ç4ø‚Çól†¥7‡"?qí© í:ŒF+FzMI´[KOŽ[x¡QW'ß³“k¬T»εu¤/.敟·Q¤Ó>ÞÉ£ãF‹aèò:3«--üx^½¸I¥‹B¥üõ§IsC›¬ªà}íŽû$™v=w\¼Y~bç:µŒ¼ãþ»Ç´ßÊ»n&1™ ÄÚìœí’켽Ⱥn3¶EM‘™±i~Û×°:Ò4:ŠV´þK ž×²y—î„SDôÎÏ»Sï hoDÈìQÅu2;‰êj=z¥eï’Ÿ™ýí¦`ýjŽ}ãF¸ýÜÙÝ&M1ÅÄ|Zí&öûš%34bÉá!¡TkÍçJ1ß`sRçýãÇ †GÜž7T@/Q}e‘> Éçv-·µäkD|V6«ñ|O 1¯gI®HNÝÃl|W¾;¦?p¾Åà ýu1Q[õ¨TÆ,ó‡kx÷ec½ûþ´‚̬Nê'œ´Ž'U^P¥µž‹NWwÙ1HRürMS{Ö×ÚtÙéWXèFÃ÷¢‘¬¥¯òùð«éÿNFß‚¢·«33ㆠ£*îë3ÅϽõ¢c·15gð5¦V ^ahç¤íï47¹ö˜J;F­¡–Ó¿6››zGæ`¡´¸œsÒ²<¤I>$g¢»DnN¾³rKÑUhãx9äv1ËÇbýúú$§Ã6Ûh\{z/a×èæ`Íôéi bˆ¢¼‹Z–'–B|¯‚ça#”yÞ^Îê(]Ó†çºÑˆË¼±^Îd)3wÊéËäo×óxF—©Šÿ§Ã0nɳcã4æëŠí˜ð*dÁVÅn²‹•]©/ùC+Úu˜k¬TûÚ:ÒÄmÅR=F¶°C\53†f_V$W`Œ’V{yíh OUžà3Ùo¬%»P/îÓ§Åðzs =&Ëv¾š¾rkïæÈôÊx³Q–ñÎ√ãÒ±‡—ÎH¨='×*À7k4‡øÒ·Ò2 ýxÝTн†WSGº3^pÐâ¬+kÙ2žÐðô W)“='æÏ´1{j*Òô~gj C|^§%äÉ]¦"¦¶,3ßH”tôwÔ# ýH^E‡ãS0‘PTy{ïSág1án[j2îée55€û}Í"knjÎŽG¶†€„»^›óFBr09€^ úˆ¬Ac”~w ”<¤·‰i²ê垦þ²â¼›wßÑ…ÑÛN¼ôÔklÙšE]vauk 0vÖG®ÑÈñ¿3vªL!0ƒsÈÆ6áëo‰ñcõ!‚Ì´ w×Ñ‚Í8·²;Ý;µ“#Xk¢ŸØÄìý›¡Y³w/£™‹‰”a%!tüêÃon+¤¿å±»sâ;a‚à H»dáÍ#³™!«ÙîÑw¡%Š¥9ÿÚ†aÛ2ãBCcO®Yþ©äsQ}D(%ÆÆ• Ñ”Á6¥‰™£¢QVj{ŸÞ”i(¦ä£?§M¶Çp ŒÚÀÊ£'ôæ;B&³==º7–Ñ!‰*r(!]îþiÏ…Lä±í”‹áÂvµUÁËG)jRM-×¶“‹¼Ì§Ùçrj“[yÛ娒h:¦²5æ4ŸÚ"(nbVÜ®f~T4l0úRÓè,Jï½”ÐaPÕ_ÅQYö[³@âvŒÐuD;°Ü¼n±˜…Âsõ³°ø$TÚY°] ¤ÝO"±7¯CU –Âj-w 
t&Ý«±…ñ{b»âvs­]؉a›‚ÆL~Á§ò.òÖ1$0cXYÞþm:,ÞD.Vñî †ä£?wæN£ä’]l›r‚qwÉ€4Ì" |´Ü>À,Ä©Ôñ^`YÈuó¯»;KÔÕ#àWó!¯°¾¶®ƒ}ï®l[±5‚¦µ&9Ž`©–ê’ÄàS¯Xç.çù˰·ÀG?uØ7+êdæ[DcéþŽ_ÙµôÓæòE´¿lJâãî ­ª"ûsQÝ·fô%ºj±î Bò¶.ò‚tßµEŸ‹„$5§^pxS€ƒd7Å…gÕÿEõ½aή¢"1,,ê @ïP}H~ßÓ×$f§‡/âÁ9ÓA³ÅËÝÒøåc†ûч¨1Æ›é™X¢ !(Ôeë}æ€CÙÛ­½S‡’Ù’sß"zƒ_âZ= VB‹¼ÜèßãgjQš›žç/$¸ë²n]powSo[ Ê¿Ô2,çÚxDŸF±áTùíÍݲH;ÃÜ“í<Æ3W4«ÌLÿH%òÊÉI~oëµøn]­éoy|a=kšSÉs3¿$úw÷"0|ìý°•¡hÉÑnî5¥hšÛ¢Á…?©ó±kÁ·wÖ¢Û|‹ÝÝÉßÐT•‹kyEä$ݘk_Ó•Fî ~®ßØ­Wùø3»Í§©þP W1c8‰Ö¦ èv^¼É¬ÈGºl½™°‹µ`ÎÕ¿¥¦þ‡Bhñ»Æ ³¨ŠëúdtgÍH#ßMR¸Mn×r}&±±esSoóKôlŠs¶×B·.Áà)ÿ*Í¿¸ÐX+K ©´%7ý¦‰;G¾™(¶êÐ[ÉÊÎåfcï8¥i®›W6¿™ó=‚Нh(Û täF䦅Bô~9 Iÿ‡ÈéÒ‡…º#q%'Ì%w?b9~y¶ÇÀd3BÆAi‡¶³[õÁA;ç­W›þÖwðk1"²õåçæˆl@N'ï¸n?—ÔämÒ¶ˆ-7V5\Åý]ö+v5‡9Ý3X%Ý×+(iÔÖGûí‡}+¹¼ÂÅ…Ý&Hr>%‹‰wöË©¦µ{ŸQۜȉ7” tô <8)#q²,óÑö(jÞ¬“a-žÓ&f§ŽOÕÁâ¼·ë®ËDhì[ÌZ©E0™çŽÉY?DH9ªÄñ餮1ÈÀ{}¤{ÖM¢¢çµ pÁ•–(5ƸèˆË Gÿ—-Âw9l¿ÏaÀšEJ²>ÿS%ÔA€^¢úX3÷^ž_¥jµÛQKÄñ«óŽþ1—ê1B°4OC¯Ýcp»[ù™³3&x}•Êì hÕRý…ñK'¦·Ï5µ­‚9¯ÙfRĈÙ·™=.~Ê.‰­n­Àüºvû—âVT•Ea™ñf¢ô@–…úٟʤ©«¿ÚBºí\ o¿3ÌÁl%½åèËÀ4¯üYʈíÇêÚŽcÛ"Q.ûý<Ô]¢7hã64ŸqöÊ.멫Уå¸åni¤¯ƒmu/6 /ôñŒ8[ªk½¯¥¡,íôQ]Ñã‚„}Ô±á|` î@ë4ßgj ÓœœVyTì7Až¾ÉÁÊ{-›tkí“6rq™:nY+/Ãs]Ùûż;RHhÃñû˜1lU\ñZoFäÓäé~LZû1 ÍŽ0¥ V€yÌ®Îq>±ØŠëZ“}'KøJ®x›¹G©³–¡~;YÜüFƒÚÎK>rIÕáj}¯®wLl[¤›ì(QˆzôZJKK¹yØXýõKVÊÛWµÔZN"ª,J‹=°í?º»ÅѹMƒÙ]Ò¿¢¶ö+7ŽÌ]bÊ'éÅÊ÷îg†¼¤Þ÷6Yñð[—×2gÌ?ëP|¢cvþ•Ùj6××!Þ„-VƒÇÛmbhÜ>³R„½ú”€ê^™o²:ê“çâ&×××UUW“¸¹ÜÈe%Å|ý:«ÙN]‘ÔŠÌ_¬%úO³ëÇ¢Çzê×mWwóxŽìØGÅ3u›(”$ÆŽÓ<®+jyxË'·@w߬»6kÔš÷U/xnI—|È%ìo}~ì)ëUÇÌV€úæ)&²ôvDÅ[OGçÝÎÚ µöÒ‘—DnÒ\mo bõ7V»ì´sÜæç8*¹ÖöGfíâñ8nGYé'>~1¨‰½@õ1P™± Ãv±áQÞ3óüöiúª*ØÖÖn{çÞPã‚íC² Öu7)Ÿ ̧µ“ȨUiØ*Ô… x&¶M`§‰"«µ7+leó‰_Û­Õ½Ø ¤¶0SqòÞÊç°Î¶éqÜŽ !´/e“Æî5©%Aï—ýh±I‡¹ÖŽ1Ù‰a“êˆ*¾ÿ3¿u÷LÑIñîÔO;¶Ÿuiÿ+²ÕÞV{;ö摊ý̾Þi=z-uµu£¹sÞò¡iPàç´[˜G—Î=Z;}Ì€}}‰;C—|ÈøßçÆEâèu!+ŸÐ» ;@sMÔê©s}Å MtÑ«G('½ˆæ¥ Œ¡1$]g(9]Ê"Ž—ñObã™ÊI¨¡Ö‘ºAô¡ÚÚ:.®ï],‡—§© ¾$.Š®Ð2µ}úö³ø…6²• " S 8e9pàDm¥ŠjpÛV«uÇ[q îº\¸·€2TDP@Ae¯@¸7­ƒVPÆïû¹–ËåòÜÝŸœÉçÆ®È‚2Ñ… Ô‰¸ âèž„Â)«nüpDeWÜêõ+‡m´áåºaÿ‹ãj–ÿŸÎâÜÌÖ¸íÂèËüM¢N»¨ 
jÄšöÇ”°º€½~é9þˆ…);uýúG‚pJ—Vå¾!"æÔM[‘¯©ŒZ]ƒî>€Ö”ú¾¯Jüz›†ÖI“åö"há„÷Ÿu}~óܤ°òš¹h߸/Í¥1÷´WËoïGF½½ÔÐjYò—^ÚênÛØFWÚ:ÚÛÂ&z¯ûË{á)ˆjw·¼|¶eŸ-¯[Ö¿-éÂÿ©{·îEeÿþ_$ö—Úüp*Õ´<\SÍ`4ÏÙÍt‘ªªÚÿø/ÃúÓü[ÅèÛ‰ë&(è)×V±ëz×5þ7‹wÖ.M’·ºÊV—=Öu3ÈêðpÒ³zsXP A¨¹þØESèŸþ§´Ñ‡¯­\×î D)§~²lx„%÷‡R—¯½$)›MÒé,ì†H}ÒQÿ±£©ãaz†>÷ mÉ%0ú4¹oª¥~kR=‘ä¢O=k¹4é‹ýq­P]mhóÿ¼ÊH§¥¦|y>1‘;½„2ެ\.‚ÛrWó#ÀÅð¸{ôÌRx=øó=u ĵz›D,}àH‚YýévùWV5ñåoS»G}›Í&×ež¤­ÓšÒfuy‰0Π7Oê“–V,(|ò¥Õý°Ë5?þÎ$³‚»ƒ®‹›a/I榞ę0‚ˆ¼W»*øÝ Ì·×mŸ~‰õSÀ@Ag¬ºÇøÐÓÉöƸ˜¿ ã2BóÄ!ÊÄGb§T]ƒýV±GPÌ="úQµÖÐ!Êõ-¦Lè!:Ø*Ðñ«êÃfSÙìZqqyì†H}ЂôÔU#ÃO~î«yU.ïGܼ^Jóþù¬Íô+玑RcV­Ù`7÷ñÓ- Ïk ~\»šâmÕ‹¨t• Në+ãwìåUñ;šÄmÿº¹{’õ¤È…V¦ ?\J¹`žÏ¼¼Þ{ÅêÛ4kh³‹½»¹:›ñ…²ççt’Wj®"«([¾zy‰$k?zSDvïxT"f?‹²ÿOꯋëï\À¾pçPr¥å„ß}6l¤lxFç±.¶2 ßdžª|xôÆŠ “Õ¿î{Ç‹,š¢û R´,I 1–¸ä›—Ïe»|t³Ùwãg7©)†ÎôøÜ韙A¼ÇÒøÜ¥uH©iʧ5z–¦î±/×ã?¿ücÙY/(C‚Gô˜în]YѪ¨Yêǹ?{öùÜÅ~s3Ò5´5›íã™FïÜ™‘šÊÖÕýÈi‚¶Ë~!—5½1™¡«¿|Ö®pç#dÈÇž`-½²ô½)òK£>˜ò9»iäÀ–x磻–ikvÆ>€ÔÐTË–-[²d êðèêf=NüTêkËè*òÅ#V¬$ˆºCQØ,Žô¶–øÔËÈZNFê#÷AÎ͸*ÆÆZñ÷Ó?šúÚ®ÛKY-ÿâý-4# x‡§~H(áÁKoï¡Øú Å °ëõûªÕ†½úÓèŒv¶i ÝÉñ¹“ÿí«²Åiê鳘Íy«I3³‘/./*æHJPÛMy-›ÚKù‘SÁccéšš2’RJØú å?<„…Í{Ù&^¿dÖÏÕª­¹+Æ×ϯÙ[vt4<‘:z´4ŠL’ôÈÈ ïi^(R|#Îýlƒ7ÿõêyºBÍ^ŠØ+á½úôg5ÿUFÌ̆%$®NJª04mýu(¼}LÚêçˆ%'}E¶­ŸI½ÜC÷·ë·;,xÐðÂy‡~[åщ?Z½ùÈ$Ÿ+éšÕ>ôÈvS)A<+[o;{v 1ﰯ记egßÖÏ¥8ïÐôU ܱò[çYÖÇÛ¡á>l×QïÚqzꨋ(¯ÂÛg¥­Î„É­òé–¢u«zvžÜ¾}>{³^â¹) t††`¼<5nÂÀ£ÏêŸ3íwh…Â*çƒq„E\ùS~³)Åo\Ϙ>·à@ê€ïAEIÑcô˜£·;Œ›J¢v¨m/ÉMO¸wÓ–OK/HRJiòc7o>àç/Æ.åMY6$xEÔd„7ÑžúÛ¨m—͘—5§z•‘Ó™Ü(‰evxõ¨¢Jë–Ú+Mfqà‰Û³øAêü“ÃvN8av4!áZ20©|+?æÕœ˜¿Ö}õ Cæ­C9K<”î+ÈfsçåÔ­ªój^›a¼6‰œXs•­a>¿ËC=2ïÖu“;yEäö¼ë£fœ=¡á|ßHi)¯"+Söì¹G3}°¯ õÀw£­®6釉۷mê;bK\²ƒluÖƒ›/³³¾Aä“×ñöž¶nív ï¡ÝŠ…¶‚ÛH˜lFlË!ˆþgCzÎ,dšZ­³9<;FðRVàýÀÂüˆ É–õ²âmE5QÎdDúG¥.¶¬†¥Ø­ø“ß³Gðî…h~$ÒIÞá\ú¶[·ƒ »ÖÍ¢uu¸ g»è¡«¼Wܘóòð±—3”?ºæÜUmh“PÖoCÄÅ™OË ¢ìÀÆ›‚… "—ºóЂkÂÒ}N Þ¾-š˜îç·{R|g dçÌ™½aãfc»íþâ.‚ˆ»tRRRb꓾årÅÅæÌñµsïAe͇OËÊôeû0×GqG4,B· ÓMŒÅe&%MŒûÒYu×§)e³?ù-BpãŽü¼ªO¶òÏU­÷Ñ›~ÐEêú¯Gp X•S¼~Æþ€Ô­ƒNŸ7ÇÿäÙÈèØ›6CÆP(Bír3kŠ^_8¶oâ¤I*JŠßá3[˜f£@{B¼Š+àpÔ©TÊ_Ržþêï§~TòÛº>º²Í“ "Ÿý½¯QA—ãžlZ7­,dù1ÞO§æ’d}î{¼7Û¶¾[¯üVŒ GqèÐÿpiMVþD̳÷Z˜2wf WUW»AÝô±g õ@ë2ÄÙ!ÿuÁŽÿ3î㤤¦Ýž6BÖÆ^:%++ø=Ï1+*áG"‚“źwg‚ƒ> 
jªÍVÉŸ­‚MaXÛïuì¡sjóÃWx¯£l™}”²šD¸ç‘¶õÏÜ1£Ýyoy6#³ÏöfDCoßQŸe”÷k]ù‡¿)¿ßN°&D%»Ñª¾¨äüsým—ù*ü} Uêm¯K\zB%”ùú U’ë$0ïç¤ÇONíÞ`í2RZ¾Íß`B7ÎdWVxMþ)ú}ïQ!½4ž\Z7îž•õ`Õª“£F»¤ûû³ñÏå«0åhÈ”4âÿn¼LðÃnqÀµeŸ]ý¨B¿OÝ‹OÊÖ½~M¸3°–ÞYúá,ïM¬ÎCtï·â7Ñ)AråС²¼WF…ûrSŸ¾³¹v&¤>hÅ õt æ?LMû;du/×QrÕÚäfµ)·¯¼ÈL÷úq²„8«µ­ªªñüùÆ9ÙÉË–=ZºkWÆnª²És²›oýir²U×þ Ú[´`ÇûÏŒŒº>¡©Ú}­%K–¤e>?¼g£–QO‹¶r¾¥ªüÆÅ“ÂÂ4ϱ£tzk^Ueƒ%K ò^=Y·î±‰Xÿ~¢ÿ¾È¬ÀèÏßOÊrIDáæZóò2y!§e»ì9£Gy1D%°¿ õ@[¥¥Ö%pþ¼ššš“ç.¤>Lîé8DV©K+ {$'=þfJb¼Û¡?Mù± Y^AwöìE$‡såÊÁ{±i#†Ikj‰´ÎU%IzL åæ­ÌÁƒº7;R´—Ïaáán.„›KIYùùÈKO&u5ë¥eܳ5¬›PuElÔù¼œì~Žî.Žwh›(Tjÿþãú÷'*+Š/^<›Ñ«·„­ £5t±VVHGœ}•–^àè`aÓËÁÖ–Š©Ú'qsÄ7‚;D|ÒÛ×o–è˜X¨u3¥ ÃO¥ò¤{73Ÿ¦hwÓïcÓû‡qcÚS‘¢ŽŽùéõiÚ­ë17òóˬ¬$--Eh4Ê7[¢"™èèüÄÄ|}}Å޽̇ ÓÄû©:SC}î Ï}•;66%9IH˜¦ª¥+×EKZQ¥Ù:©Øor3³ž¦dg¤ËtRènbÒÃØÀÄcXG(²¶–wŒ¼ˆ‹¿–˜A¤¾¾¤ŽMMjž"WV°ÒÒÈ”'…Ož¼•“céÚ¹¹I¸¹á€Ô@J rC]œîP¯°¸ôIZjVÖó¼¼—Eo^“$Ég‰KŠ2Y4CDTTˆ"D¥R¹Ó9µ²†SYYή¬*-)*+-®,+e‰ËÈvRRQVSUÕQW¥©š–¦¼ÈRÒ‡€6(/{›‘™ðâyFNî›üüŠšޤ$]JJ„%Fc2¨¢,•BPi’CÔpH‡;Me%§°¸ª¸¨²¸˜#.F•“SQVPUÕìÒÅÈÀa`ˆ÷2R@#Š„˜…©w@)Z“%£¯ßWwD@ê¤>@ê¤>@ê¤>hƒ8$ù,çÕÓ´Œ—Ù/ ^¿*-*`°ÄEÅĘbR"¢¢4SD„A¡ Q©Âdm-‡Ãáþ¿ª²¼ºª²¢¤¸¢´¤¬¤;YBº“œ¢R—Î] uµ$%ÄPÕ‘œ‚·‰Ï2_¼ÈÏέxóºZLŒ*)É„%$¨ Ád Q„…)$IÖÔµµdY9YYQ[\Ì)*âÖ°«Hyyº²’DgÕ.ZêV,qy©àû(++[³fÍ?§/[¶¬ñCYYÙž={ZZZ¢bß:}dRÚóøûq/ž>fŠK*kê)ªiJÈÈ+ês‡¯oÿi)çí“ǹ™©¹é)T]ÏÀ¨—…™Œ¤DGËxyy7bãc‹ÅŨ¢::"JJ4iYÂÌœ‰¿2¿¬­=–‘YýäqÕÃG‚bbÚÅÄØARJoo¤>€oÅbq³ÜíÛ·?3¶¶vuu5"À7ºû 1ö§¶VÇØBµ«¡‘Í@îÐË¢ Qå:«q£Þu9°°òéÕóâ”UílzëhªµÓ¤W•œ|ôÆÍŒòrN¯^bææ¢ÎNLîЋRWá âtUûôµ«å·ï–ª(‹Ùô²SÓè·=R@ rrrJNN.--ýè³™™™¾¾¾(@KËy]x>òÒËÌ4} ;µnÆý4¿Ï Âé" }K;îÀ/ª©=zúÅ“dsk{+aá6ÿáX^–zåÊé¤äÒ¾ýÄ--X†¾ËjˆÐ…úôãÜq'öb䵨{¥½zéô²&Lc`_@êh~ÞÞÞ=ΓKZZºwïÞ, Uh!%UÇNŸ}™™néänÚ×¥u} ÓŒz;ºÏDÝ}xûš½ƒEÛ똪­y{áRhRb±Ç(W71îÐzÖJ¥8:ˆs‚(MLÜ^ܯ¿‘…ÅPìH}ÍéSÇy Ží´°°@‰ZÂ݇O#Oëjnml7иկ­º¾ w¨®® =rªðUö¤ ã%ÄÅ[‘s²Ïï?pÛÒŠ5pËi ³•¯m÷îLîÀá¼:{nMzÛsüd Iì)H}ÍãŸÇyâØN€B’äù˜»ñ7®Z: wž8³m­<&bdãȉ¸“{Ãö\ qVë¬ò£ÇûNJ÷ðž;W±m™J¥8;ñμykß…ó%þ~ÞÈ~H}Íãƒã€ÿü­‚µdÉÔ Y¤åäܳ³ßˆILqÉŽ°½†½3=Þô?ŸŸ¦Q©Ôo³ÐÒâ¤Mÿ;:yJ'¹N´ŽPd{{VrråÆM+§{Ï¥ Ó°— õÀw“šqüˆÓDªµãlµZ7cq¹5ëƒçøÍú§ù½Î¿±cgäœ9Š4¥ãÙÀ€!/O]»~¥¿ïÏ4º(ö5¤>øR³_ž9rÀq¼w‡Š|2 
Ê&}oþŸŸÏŒíñ+.Jضý¢¯¯B‡Š|rr´±£e6nZë;k>züúà[{S\vxÏNÇ1Óh4‘ŽY….šóÊ­»öü4åÇZDuUÞ_˜>CžÉê˜EîÜ…îæ&³cÇú©Óæa§hµ©¯üÉí+Q7R_•!%e`aïÐÏB™‰â´a$IîÛ·ÏÜ~]´Cÿƒ®¬­ÿ6çyøÅË®Žý[¢ýƒ‡w:¹HHJP;r‘uu…33D/œß=`à$ìz­+õ= ›ÙuÂ&Á¸©Ó´‘Ô¸©¯03j¢[`¢íÂËQ¿ôkõ%ªAŸpÔ<²àž½Þ0u"®^gIH)ªi£¶Žçöl²05•ë$ݼ-ßßK¥ÆFøC)á8@äÏ?_§(*vE5ZKê‹èîº*‰;²)¶b†£Ñ3«¶ÄËýZJ㲯î>‘b5´+ïüìœ;[½,§E4nÂxÂñC[Ï–?ØÀ2ñ%¬ô/uðøµa–ñ!÷C?ŸÐ{ÐÑôúI½R®®Ôåù_°ÞNfv41ïP„螟–EdÖÏ¡5ïÀéU£»ñÇ VÚÉE‹¯½^f'[?CÝ ¹ý©ë¥m~ãOŒuæM±âfQ •Ado÷›ì|¾ÑêjÍ 9<µç'êQ¾Ù…ås–X|"‚±Õ%¨a;;{Ç=Yû6xœCà‰†YçíO]5¦î[TuGO ŽÊh¼ ÅŽ,mò^³áÑògø„%¼«Ì†˜°™½›Ö¿¡Ôcœ†¿+£Ñ´C« W9ÏŒ#FÆ•6åßÈ¿2iŒwDú»V4ì]i)O¼ûõZ:8~Bosæ]|½ÊAö_‡ìÜk 'û­>{¿ñÚzýºéÏNܵ(¿óËrw)á~5®¿4Z“¥§Ž-1`_Þä—Ç´”†§ZÇ_<ªR·õÛG°¼ŽrG¼SÈ¿tëæ<Òm+o+²Tש~±È »ºæþÍh‡±SQ . EȨWß3çÎý0~Ls¶KÖ\¼”>eJ'TX`À€NçÏGLœˆÔÐJR_ñµUüÈ7ïtîû‘¯žâØ4rlýƒ¼¥¦ ˸_ò•ý“R× ¾¸—?Ým«óƒ»^¨í‚˜¨_{³ÙE¼©7ÄþÌ&IeîxÖ-µ±{§™ìf¾'±üˆ!7laâsô÷®&RÉóºåVr™uà!΋yÕ™ÇÔ‡¯£ÿ÷ÅSé;q§0>»R½%k\]…{E}¢Ê®Ú ÂÏ•…òýdΰ©Æ‚ æßgÂ4‹ Ó&&Uï6øHQ«Ø¥¼ˇº¬¸–C†+q×åð4óQ[·˜1·hLÞ[FgÖ§Õcu*¼Zxw´Å ^e®rW彞“8MÇhùÓÓ÷®Ä­éû®YW[Þ6†ò£ìËcæJÃ÷β!$†MìÖ„FˆŒ¿'kxìäeÚ‹¹м)gj8ÏüvJÙÁÌ[iªtŸSoüeÉ/náýÝö¦?X)ü>,äáÑ©Ýê~G7–L¸11*»ÜVYô¿½s”ûFZ[+¼ ¿œ‚íe¼:o;ógÙM_6¥R°×ÿ¤2’ÿn©>Ð×}ÕRCVØÁìÇ£”…¿´ÉÃÖúvwNœýûé¡ñóêf~äãÚòËî9a“x‘ûÉÞ?¸‘0X6×Alïò/ÿŽ@yý¶¢º¶}PuTtn\}SX(+Õl‡…¶ÄìhÂk×Á †‚€!ýÃü)>Gоí/1Áu={MmØ‘Ë ¸Í>Ûv{ã ˦|]©f×ðÂ/„ì]"èýiV³~ýeÞ(I§KD"Aì9u}ƒA‰O5fûË@;%Áº ˜2’ØÊ}‰÷¹íã+Â4ö\gëËݨºÌÙó7’ü-ãvxø}óý^¾y[A!ÞÈÄ3"þÆíB¢¯T£f¶‘Pì7Þ–ˆ‹&2Sß6­‘‚w %ˆ|\êοÄ0¤mÕýŽî†ñûÞœÇv½·sý þ5äètº÷Œ8¶ûTáÔ†äÓ'²`·íþÎÃãßx8mjwÑ©¦®óF¸èo;b;9eÏ»_‚`)QQ ïÚÐ߯¸¡vøhò¨™Æ_Üä¡‹ÖÚˆÚ´ìDÀ ¡*!3ç„Ö¶;¢,\Ã~X0é/"ñWO^MÖ†úʵ_,2@ôôQr·ž6Ÿz¶òÅí[qyÜÏ‚ÆDY Šjšjªÿ®çªäþR;çÕaºãñu3©Â]ŽÊ =ßȽ>þÊ{ó¼÷ò¾• zWƒ[wãšñ쾄„,ómõv…·I['ͨÂ[I²¹š54’¾ÿÆ€H}­!õ1ºè¨DôÛ~ß>ÞÍVùÉ·^H›ª4|ž‰}0MLpE¸7¥ì†i¥õÑë%Íw'Q°›+X‚ý+mu† Ln•O·­Øììûùù8±d—ûòwçuO]çÝóñ)¯­9.›–„Ïà룢ÂÉË«$9 •Š=à»§¾º3÷’ÿžkè±ÖË\ÎëÏ{mˆÙ<³7ÿ[µì2mi­ekûQÖ¾7Ïø?bÂæò¾è³Ù‚Þ¥ÒÆÑ¢²„ÿÿÆ“³½©ª~¯™¿Í>¸­­õïÙ=Ò¾!ÁAú¾ñï-ZKðãô…ÇËìz mCu""#ÖUž×Ȭ‹¯ƒ'ïÏëjfeû³{çï–»"ø :“ ï¬Ðà]æÝ•?‹økû¢²úókÛh£¤}7™øF/² ,jx~ä¡“kF ™KÜò“¦øÍ‹Lýg³ï-« \|½jiä¡"«Q6.”Öð ¶D(/KñÖ…Ú=”|a9ÁÚgÓ`ʦ÷·ÙñO;µw›óÞï¨ x€fÐ}ÂtΕFyþ³_‡L²Ùº0è·þ”ß>xFkÛVon! 
ë›eB™õÞ‹/æÎæ¿Õ¤Mvè¾üðaü³‰Ñ¿éÊÿöÀè³x¥UD  47Dú¢/ Ã)((dI4ápÉ’Ì');U³«…éDùÛ´B~ÛÅ›î¾sbý´hMí*«þçÞ&”±FAw¤ÏêË}øñ’}u…³ÿÍO½¼Zp.tÈ8DZ[bãƒùÇlçg<æÌB,î=B(å7Ó#Üé}¹ppŽwõ8OBFš,»)øKVégV‘FgQ©…Å¥RbÍPäÂWR‚3ú STÖEŠÛ’6M1ü1ªlû„Ù^ þÜzvlYt6…ÿïnô ×’Iå[ù1¯æÄüµî«O2oÊYâ¡T±ÒdvÐns{?HßDrØnà 'ÌŽ&„ç¸Ì¯¹akú†ð“ó†(×r?…¶Î¬FØüK =Fñ§Ì]9ŠW…y;íäˆA%—9¯ö=´œ ãÿ 'Ö\ek˜Ïï„ÄòPφ? °yGÖˆrš0î4ÊRÞ ÕÎ#³#x ¯&wiÿ¥^ü?ê•æÕdB¡IJRßeËʨbOh©Ï`ä’\Ó„5—^#—~úiÞUUÈ_ߟ&O~y6Bp=˜OµÌì6‹$g}vÝTV=#W½?IÎfn9—ø+,½4þà üâF1?¾näœwâÿY·÷–Õ”Fª_'‘ÓVü*1e§\ýÄÛ¿à]âDßÙ¼îð"•¡Y3B¿öwô%*áMyIŸ¨‚«Ÿ:{°Iu#õ‘;HrÇóXÜ$>WÏÏNè(ØÕlªp>>r·oûåyÑ•oÒÎ?Œ?|+`D?Õ¯ý*¹}ù§!>õ=Šô¾ ŽÎ¹Ëë*ü ³ùQ?©?MW¡³ñð‘“þ–ßæñ#üŽ[ø‘÷g/ÝiÇ3éŽj›ð™*L¯âý ¬R»ŠM§óÿ'&Âÿ—ù¥—¡—™É¼ÆúiØ2¥þäú¿µéG]^ß³'öÒc†ràýÀÂüˆ É–õ²âmE5Q.H°éw•ºØšo]¬á°<}öª„!Á†…·¯ùœ+МêµÕø¶ÃŒË¿†9„zÊ> ;Í‹dN–Øs¥e‚EÚ­Xø§G}ÀSÖoCÄÅ™OË?µQŸ™¿0*v«`+8*ü­VZµZÄd/¯6B§SÙ•åØ ZQêû¾*ñëmZ'Mv”sPØ‹ …ÞfdÔõùÍsNIå5sѾq_šKcΕô¦õ â;€ïˆN£×ÔÔ|y>Ý_þØ;çƒØ•ÜýÝnðòÙ–}¶<¾nYÿï ]ø?O]÷¢²ÿ/ûKm~8•ÆjRÃ55lºHóYD„Í„õ#d÷gNRú¥‹É'OD8åׇ¨€kŸ:»R¸î„ç¼*¢âqæú(Þ?²¡ÛŽé&Æbв “’&6ü³l0ÂeùÂ/-R{<÷wJà\ ûÎR.3Â&D.ióë„X‚P ÝÐë½$+kþeå?=¿ùñ­ÿÇ¿ÿl·8Ø ú¤£þmGS‡Åô }îÚ’K`ô‰%›á„~©ž‹Hr~aßùŸWi©GIÅ_žOLäÃS…2ެäß …Ð`òóU5¿7ëbxÜ=ú f)¼üùžºâZ½Í "–ˆ>p$Á¬þt»ü+«šøò·©Ý£¾ÍÇf“õ“¶NkJ›œêjNMM³ÞÉûçNB±¨(™àðî¿ì1hBø2OCMOÞÉÕÕñ§éfg¢"Ò ß¥¾‡Á{³mgÔ]èºüVÌìÞÈСJåé·_ðFõ£’§ØÖu–mž|XùDèüo¢Ú«·õˆðºç Ç?’e¼×DZ‚Ð^ü›^Ä‚Ë]Ey¬6›5ÆS‡ÒBo*)= ;‚ˆâmÅ+Ûu—9»½n·`+¡×HIvÆn€Ô-HGUå웼ÏÌÀ®Êåýˆ›×KiÞ?Ÿµ™¾sŢѼ.Rj̪5ìæ>Þâbº¥áyMÁkWS¼­z5‚ã6J§õ•ñ;öòªøuâ¶ÝÜ=ÉzRäB+Ó….¥\0Ïg^^ibõmš5´ÙÅÞÝ\ˆÍøBYÊßäJËÊ7W‘•Ì_¿>W[[«;ÄÚ&äTÐò5”åβm‹Mãžú,£ø¼7Ç’È?üM¹ÑÛ2pØ~¯cí˜Sž¾Â{eËì£DÕ¬ Â=t2˜2dª×=þa–Dè/f‚^OKg»£ø¡ñÏåÝ¢îïwõç6s/lA·^]¿ê—ç—4Œ|ò£•îΣ>‹¶BË¥§&Õè4¹9 YUÛ@ꀖ$!Γ”zóò¹¬b—Î`6ûnüì&5ÅЙŸ;ý33ˆ÷XŸ»´î)5íBù´FÏÒÔ=öåzüç—l";ëeHPàˆÓÝ­ë/+Z5Ký8÷gÏ>†ŸYÖ«é]ÔÕ›«ÈÜ`Ó¹3#í)[GW&0"¤ ‡®èGú}âÎxŒ)GC¦|dzˆÿ{åCÈfÕ»F6Lkhœµô~ÈÒ[{o¢”­;Iº×¿êËósú“L¡W¸ýÔ»ÿ»új+»2ïrös¨ëÃL}ÊÖÔÃ>€Ô-NGÏàù“äO¥¾¶Œ®"_wo:)ËA$)¸“*Ù¦+Ï4èC’}>3ÃÓ§b""Å*»c@ê€G£ÑŒ,z'ݼdbç„jPâ^ô•ÑcG7s³TjÿþzçÎe.…"sëqáB¦›ÞoH}ð­ vì»væ7¹Ïe•ºtðR¤Þ¹ª­×UUY©Ù[¶°•˜øû“'•ºM>γ½ŠºFíÒ¹“ªª)v=¤>øv&Ž²å¯ž3ÚßqžMW’žõÌ×gz µ?fô´à 
|}˜L¡[ä¬LÉøûO}}`§@ê€oª“ŒÔÈQcŽÜ:`œ7EˆÚ+P]øêzdøœÙþ-·&Kzò£7m>äï§@£Q:`‘ß¾–>pð‘ï¬9Øãúà;ÐÕRàäzùï]ý=~¤P:VgYVpñøA??_Z wuÊ+víá¶ic¸¯ŸœPÇ*rI‰ÔÖífúÌ`ˆ²°» õÀ÷aÚ½›‹y$t³Ã/½£œ~Vö*#æâÙÙsf3èôo°8U5³qãÄþX}Ðg¦Üç/éÙždeÉ:ôØÏ×!* ©¾'MµÓÚ´ys¯A£¥dÚI¼ýòyÆü¹³¿åBåt}}çlܼv”‡Œª*½ÝùÎmfB³Ÿ^ˆý ©Z qñ €€]û’B4³~.íu3…jªÎÚÝ»OŸÁŽ“¾ýÒ¢¬Ÿç.>}:äÆ‚Ñ£¥Ûk‘95¢[B²{öTœâ5{R´.?Œš‘yd÷›!cÅ¥;µ³­ËO}wgÆôiLQÑ︃M{ù2eõ‡Æ—QVjo~‰ Ò—.¥L:É’Á€Ô­‘ŽºZ`ÀüȨë·"ŽÚ¹aеƒzþðNÔÅÑã<m­ZÃú(*v÷óâøØðƒâ&Oî$)ÙÎô{ôHìÔɧcÆZúùÄ~€Ô­ƒ]oîÀÍ~ç®D:Ž›Æ“l£RöòÙµ³§<ÆŽuhmëfjîÊnÞ8zábÒ ¹N²mõ3=ã™ì¡ÃÉ#æŒÅ¾€Ôm/ûEݺ{öhhO‡ÁTÔÚ̪“µÙï%ÅÝ3f\P`@k^Së^ùCRò¥;¯»‘ÒÑmC—Q¾sGôêµ´qc¬çÏŽý©Ú*;«žÜáMaÑÁÃûH‚°pt§‹2[íÚ² ²/Ÿ>®«g0ÈÙÑÕ¾O[)²¡=w(+}ZQU9vŒ”˜Xë=ì3'GîСd§î ì#H}ÐÈJIΘ:…;’š‘yîØ‘ÚZŽ‘Ý@iyåÖ°nBâUjâ½ëWU5´œ:ZÌû¹™%&ë9ÁŸ«r“>[QYåæ&¡¦*ÒJ¾r$&°Î_|¢®&ãè`áïÚ'u5éÓ¸#%eå‘W¯=J¸/¯¢ªmj-%§ø“ÞëÌÇIwo±««-,¬ú[›;ÚX´›"++üø£w¤²¢ìÒ¥÷î¥ué"jg+Ú¹Ë7¾æ'íÑ#æõ˜¬Ò²*kkÃænÝðÅ©: qÓÝÕ‰;pÇ«««oÆÞOLxPôöŠº¶Š¶ž´¢*U¸Y?¤Øe¯2Ó3Ÿ>ÎÏÍգ‡Y Sîо‹ÌeÙÛ³·çsjªïß¿rÿ~âË—eº:†ÝE44„h4J3.®²B,åImròëÌÌbMMi33ÃnÝzuë†7;Rtx4Mpú_Ô’²ò‡Ÿdde¼ÊÉ)*xÃ`‰IËÈ1Å%X’ ¦."ÂdEˆJ#ÈÚÚZAÖTUTVWU”—•—”¾Î¯åpd••U´µ4u5ÔLT-[‹[dª0ÍÄdwhÒŠSSã22Ò³_¼É˯`±„äå™’’t))š„¸0C”ÂBªå&CG¨¦–¨­¡”•“µE…5EÅUoßVå¿.gWÕ*)‰ué"¯®®£®fflL76Æ;©àKÄYLKscî€R´†¨„¾~_î€R õR õRÁ®®IÉÊÉÊÊÊõªèÍëÒ¢7܉LqICTD„A¥P(TaáZÞÅ\8Üÿª**jØ•å¥%Ue ¦˜„´¬´\'µÎ]t5Te¤$QϪªxž™÷üyöËW%ùyì²rR\LHJŠÊd ‰2„D™BBB˜Fpj)55$§†,/¯­¨¨-.æsjªÉNrty9VçÎ ªåõPO¤>€Oz[V~'6!õabñÛ|UM¹ÎŠªÚ",9•nÜá«ZΪ$îßOÍ{‘ñ2#µ¼´XU³«E3]MµWb’,,|pÿÁ„ok8d7}Qmmº¦†ˆnWŠnWî÷é¯hºˆ ¢Þ\NM­zô¨2'›­¥%ajj¦£eC¡RñÞ@ꀊ$ÉG™9·nÜÈÍ|ªÖµ»–‰¥¢®1wh‰eÉ(væz=lË âü¤”Ø›•e¥¦¶V=tz;­rõÓ´ˆ˜ë_¿®¶²·¶böí+Ü·¯|‹YZØÒ‚;°ê'¤dç$^»Zú"›Ý³§šeÏÁL–4ÞöH}Ðþ•W²/Dß|xï¦jWC½žvfýݾËj(¨jqÁøÍÇÏï_=G£ÓÝ\]Ô»¨´ƒ"WUd^‰:þ ¾ØÆV¼W/QmmÙï²*Êô±cek”ÿzÿÁCÅ•¤›«“ššv¤>hoj8dø•˜ä{×m¨w·à­gݤå•ûyüÈyUY~aÏ¾Š’"QJrÚ\‘kkÞF^KH(9RÚi “;´žu“ëDûñG^ø¬bß9~"òEVõHáŠ8©ÚÔçyÇïSPÓ0´²W3ìÑšWU„Á´8Œ;ò87wïÞ}š::ÃÝ\ÚD‘³ŸŸÝ讥¥˜£#sÀÅV]dºûP)îHþëKÿûßQe™Áƒ¦âô?¤>h“®?xtíìIÛÆy·­5—–Wr3µ¦ºú;vSò§É?´Ò%Éû »ÎŸ{1z´ôÏsÛV‘å:ѦO—ãpÈ}ÿ(( }~š‡ì€ÔmÆýÔÌÓÃzâ2É· FÒh½\G‘dí–]{éT¡‰ãFQ[S,IM=|ðàÃÉ“;ÍŸ¯Øv‹L¥RÆãûwäÈÚÊ Ú˜1>Taö ¤>h½rß…íÞÙÕÌzלö±EŠ•óðšêê…hëèº tøî«ôæí;Î 
u—Z´H¹Ý¼sFŒâpÈÝ»×té¬4`à$ìJH}и•‘’d?Æ‹*ÔÞÕ¦ÑúŽøáeæÓU«×þôÓT qñï³$yñâ†ÜÜò¹så¹q´™J¥Lž,›žVòÇš_¼§MWÀ>€Ô­EYû[þ2°°ãF£v¼™ŠjÚ žš»Ãöõ·±þÆK¯ªx¾ù¯ÝÆK9mÇEÖÔùy®ÂŽ»ôõt­{ Ã΀Ôßßóü‚Ðí!vCÇ‹K˶û¥P„ì†Mxpíì£ÇÇ wÿfËÍϋٹóòt9q±qÉ“É?Ê„‡gœ82Ô}v1¤>øžŸf…;ää9ƒ*Ü>SŒû8g>Œß²c—÷7¹¼ç³´“§#çÍo‡Gu~†««Xl\Õž=ë&Nœ ©¾´œ¼Sï0nZ‡Š|jú¦ÕU•»öøaܘ]PNö¥£Çøû+P(”ŽVds3‘Š òàÁ£GÏÄî€ÔßÚ«¢âƒ{v8Œ™J£3:f´M­SîD:~r”ûZDQQBXØõÙs¨TJÇ,²MoÆ¥K•ááÛ]]§`§@ê€o‡$Ƀ{˜Ûa0;rºZØ]?½ÿÎý £¨rMhØ©1ãdh4JG.²½=c×®7É/è÷Ç®€ÔßÈéËÑâÒ²ŠªZ(EûÁm73ÔnîÃ\¯\ݦ¥ÅPíBG‘=<$6nŒÑÓµÅ=Üúà[(¯d'ÜŠ0Þ¥àaŠi˜œ>éîêÔŒÍVUdß¼™7¾"*ÌÅbQ{˜Ë^¹zÐÁÁÕ@ê€w1ú¦Š¶]D¥Ð4±:ú¿Aš±»ïú­Sææ¬{:ß?ÙØP×­{Ö¯oM¼tR|ké’Œû8µªUª|“ý¶¼F\VMü{œfH£3U5nÇ'öîiÚ\m&>x=fœ Þl ¢BêbÉ®uw@5ú å¼.¨¬(“QPþÊv®/Ññٚͳ ¾qÈëßöæßÙwô–ÐàŸÆ(Ó‚RxdŠÎÚ[„Ùü¨~=¾KY”Ôµž>}Ò\©ïu~€Ô-«  %)õ5-T>üsÈŒCÜ‘Å1î2'Æé‰[elöÜ×N¶až’û¿Û9/皺yvËÞ›Á{àðë­_læy®ŒÎâ>ŠÞüÓ¸]%L­kóæhòúú|#w‡ûÉ-;Mf«£Â}Ä-ÔÚ>ˆ;2ûüOãªÃã•WFò¦w±t·ë©”uúÑ™¼‡nÁq¿xèý‡bJH——5_‘‹dešç3ÚeíÒÍÝ.k¸DÙIM]µn©-룳å_:.ïpŽ?ªééÛU¦òypHÿ¡~džŸ½Yxû´´Õ™úÙ‡OëJɺáòåÓý£ãæ­>äüË(þ½%8É¿ñjóë-O#Ѹ5®¼Èg¶úF¸O]ÿÞ‚5ܧ3øŒ¯™®æKÏÿv³(BT!!j%›Í 7ꫪ¢Ís—Ò¢Juçqä3m-Ëìf_Y<÷Ú2d¾NüÈ7/rý*ûºž¹?·¾K\7õñ¢%§¦Y"¯Èta6»²9mê–i¤†Ÿú¢àåk('‡ÜwnüdazVï§¾[VCÁ¹úN²&6'¼É®!Tê¦éiÒæièwãovÙ½Ëü~½˜ý,Êþ®Âõ'¥_™úª«) È€Ô-KB\<ÿÕ«ÿôRvøÌû²Biʦí?J¾‹ Äð SÎø»íKÜ;b|χ¼¸NÏi¤°œPª?Û®&;îZ !¥¨c®/þ¥%‰›ìXxaò¯·fûî92/×?4› †…îâ_3†dÊëv&n½è<úïÓë]›«,ì²"QfóY"+³¨¹uòf„ôõ]ê¾á„4åÉÔ µ OÐÅ)÷mn‡j˜žr%÷ÃXV¥IßDTº‰1¥š^³Ò¶´Ä{¯¨.!Ž»´ õ@ SUQŽ»û_.[’´møÂÃ/ÂvË…–ÈžÕ;u9K£t”¯Ïï;&‡Ì ™¶1ieÙ{aÓÜŒŠ_ëµô!!8[OŸ`SøýƒÑ»6¤»jÊhwýçâÌ|yŸTÛí="š÷pöùmÝëB}À‡CÆizpد'XhK§]ß½hâ™î³Ã<ß«÷ï?KßäÉt’k®"+)iß¹“Þ¿¾¡ÁKŸõ߯1ôÚÖÐw™}"ßtXþl”ÆO—ÖùŽïL/Ìßó˾ 0îï‹Xb+ÅMàMøRá±zêæm뢶m l³r±×c<~ºÊw{X<÷Y‹¸êɦ_÷½#7—WÇ>€Ô-K[M¥º¢¼¢¬D”õo¾×dd’v³‚èºLùgäãaX­½{úÐñ¸jZffu/‹ ;ñ Ê’Î… ?²x/!*k»âð£Ü”ÉMÜö¯ûçw®Ýó"?òä~uß®&?®›Õ¿D½O£øGÊM;z]zw$[„`iº»5ºB§¸íöÜr¢øIxhȵA·”e‰J†Š{Ø‘}ƒµ:ýÇ®¤W/2UÕ5›«È**=‹‹#ÊÊ8,Ö?šQÓ©ÏŠ*}9Û/½£>d,™g±~;¯¯SOIÁDûeä²ÊÛ'=÷§˜Š ¶¬¶e䓹ö:uu“Ò4Ú´‚^,¡¤×èB uEä´Xü‰R]¯‘!Dá˰¿®üýØÅ.,¢œ®9~ZÒyc¹f8,óiZ±¦¦*öA¤>hqª:]Ÿ§>Ô5±ü78ê®Sþü,´ÎöãgÚ7Šm,ÃÞ†?1³‚í´Õ¶ïwñ6üçLâ¦3?}= ]WŸµ®>ÍP Ad¤>r˜2µ‹l` “Pam-öŸ[33 4ûÔsÚþÿ¼MÃrˆ-wøÄKÔfª5i¢”¢gà(Ïæ~ב$õÑ£·NÇc@ê€×϶wØž=ÿ.õµkyiI 
J*r²RÍØ¦µ¥SØÞƒ_“úÚ™»÷„õôD™Ò(R´8ey9%Uͧ÷ïh›X ‚ˆ½~uâÄIÍÛl'yu É›7Kü^GýRd¦7J€ÔßÈèaƒV¯þ£‹®¾³£g’Ç·.w5认 ×ì-»¹z­Y»ÚÈHôkÎîkN,ëÝ[O\\»R|#tmûˆÈ3‡ì=&wä:g§¿|‘éë3½%§ÑE=FÚïÞsuÆôvXo ^êý©¾)c}Ýü7ù7Âÿîå:²cV º8ïzd¸¿¿_Ë-BKÛ¦çÛ¼°°OOÙŽYäŒ éK—SgúÌÀÔ߃mïÊʪ»‘'{: éhÛN–_8ºßÏ×A§·è‚,,†UUíûûð‹‘îB&¯reþþû±¿ß\ª0¾± õÀwâæØÿbÔõèã{mÝ;Ð%õ+ßd_=sÌßÏ)*ú gk;îîÝãÛ¶?š2Y†Bê E~ôHêÂ…¿yÂ4:v4¤>øžízwQQ>¾g£Ã˜©4ºH»ßÞÜG±é)æÓc{ötWVÔúc͉Y>ò Ñöü._~õꥯïì_H}Ð*èiiøùÎÚüWˆ®©•ªžQ{ÝL¡Úš+Ç ŒŒfx{}û¥«t1ò÷ÕÙºm½……hÏž¬öZäªJñ¿¶¤9Ø[÷ïo= ©ZQc®¿odÔõ+ï´sŸÐþNÄ*yñäæÕKÞÓ¦Iˆ·ÄE£‹Î˜t÷î©M›§zÉÒéí­ÓïÎñ;w3§ÿäOaaŸ@ê€ÖÈÁ®wß^–»ÂPh4sûAíã$´Ê7Ù—Oq:lÞÜÙ­a}zöÜÃÌõð‘ÍNÅèÑ’BBí¡ÈÉIbgŸzNèma1ûR´îaa¯<+*+×®]+L8þ§¶›ýjŠó.;àä2((  U­…J5jVM5ûäÉm))¯~þY‘J¥´Ñ"?K“=ôwòÏáóçÅî€Ôm†(ƒ±pÁ‚êêê½‡Ž”•—[ t§‹ˆ¶¡õ/Í~u1ÜmȰ֖÷Þû§ÑÝÝgpjjNŸÞž—ÿfÌ(iq júrçŽèµ¨§ãÆõŽ]©Ú$öÃxÞ{q Ég´º›w5ïݪ׸ªøÚé£,1ñÃÜmÌÚD‘©ÂÂC‡zsGž=»²õl÷­¹5'Gîð¡d--'§¡¢ØMú =032à555®Fß¿{[½›‘®¹0ÖJVý6÷æåsBTaWg_Ÿém´È=æÎíAr8wï†_½v_¯«˜ƒ“Él%ñú(YüÜ…'ŠŠÎN=ýüݱS õ@{üÔvqèǸãÉOž^>s¹²¼\ǤG—®ÆßþšŸœÒ7É÷n=–ªÓ­»ë{‹6ö>@¡R-,sîxNnò±£s_•Xö”´´¢‹ˆ|ã(ô,]2&æù˼2k+=+Ë!Ý p¿u¤>è0 tµ¹ƒ`<ýyvÌÕ›YiOTT•4u4ºÒhÍ|·w AT¾zñ4%=å¡0fØÝØÆº§¹öˆö]de%ƒñž‚ñ‚·Ï/_¾˜˜ôBV†¡§Ç4êNmö“kkEž¦Ò>,HIy£ $ÖÃÌÈ@¿Ÿ†&ïv¤>èè4»¨hŽ~À**+=IKMK™ý¼¸ð­¤´œ¸ŒŒ”ŒS\‚).%,ʤ‰0?8:”$k9ìÊZvEyiqEQQqIAÑë×o^sªÙrŠ*]ÔÔôõtTõÔ-õÔ ·³ÈÒ2]œtv®¯‡“••ðôéìç¯r²K˜Lj§N¢òò I aia1!Ê"™¢µBB¤°0Y[+Tê©.+ª¨¤ÖV½}ËÎË+}óš-!IëÜYF]]CGÛ\·«¬nW¼£ú>K”Áœø™yª¹jx(BTaªÆ Rq_ïB¥ªªšr‡¦ÌL¥ò:Áâ×XM õ@êha4>Ô©©ú©ú©ú©©ú©ú©ú©©ú©ú©ú©©ú©ú©ú©©ú©ú©úú©ú©ú©úú©ú©ú©úú©ú©ú©úú©ú©ú©©%@ê¤>@ê¤>@ê¤>¤>@ê¤>@ê¤>@ê¤>¤>@ê¤>@ê¤>@ê¤>¤>@ê¤>@ê¤>@êøÿFrÿiMAÁ¾IEND®B`‚nova-13.0.0/doc/source/images/Nova_spec_process.svg0000664000567000056710000013200112701407773023412 0ustar jenkinsjenkins00000000000000 launchpad create a bug create a blueprint create a blueprint End states out of scope code merged bug fix? idea REST API change? submit spec for review a feature? spec merged blueprint approved for release spec required? 
add link on nova meeting agenda blueprint hit by feature freeze re-submit for next release blueprint unapproved apply procedural -2 upload code for review remove procedural -2 review blueprint in nova meeting no yes nova-13.0.0/doc/source/api_microversion_history.rst0000664000567000056710000000010312701407773023631 0ustar jenkinsjenkins00000000000000.. include:: ../../nova/api/openstack/rest_api_version_history.rst nova-13.0.0/doc/source/rpc.rst0000664000567000056710000003177212701407773017304 0ustar jenkinsjenkins00000000000000. Copyright (c) 2010 Citrix Systems, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. AMQP and Nova ============= AMQP is the messaging technology chosen by the OpenStack cloud. The AMQP broker, either RabbitMQ or Qpid, sits between any two Nova components and allows them to communicate in a loosely coupled fashion. More precisely, Nova components (the compute fabric of OpenStack) use Remote Procedure Calls (RPC hereinafter) to communicate to one another; however such a paradigm is built atop the publish/subscribe paradigm so that the following benefits can be achieved: * Decoupling between client and servant (such as the client does not need to know where the servant's reference is). * Full a-synchronism between client and servant (such as the client does not need the servant to run at the same time of the remote call). 
* Random balancing of remote calls (such as if more servants are up and running, one-way calls are transparently dispatched to the first available servant). Nova uses direct, fanout, and topic-based exchanges. The architecture looks like the one depicted in the figure below: .. image:: ./images/rpc/arch.png :width: 60% .. Nova implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which take cares of marshaling and unmarshaling of messages into function calls. Each Nova service (for example Compute, Scheduler, etc.) create two queues at the initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Nova-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise it acts as a publisher only. Nova RPC Mappings ----------------- The figure below shows the internals of a message broker node (referred to as a RabbitMQ node in the diagrams) when a single instance is deployed and shared in an OpenStack cloud. Every Nova component connects to the message broker and, depending on its personality (for example a compute node or a network node), may use the queue either as an Invoker (such as API or Scheduler) or a Worker (such as Compute or Network). Invokers and Workers do not actually exist in the Nova object model, but we are going to use them as an abstraction for sake of clarity. 
An Invoker is a component that sends messages in the queuing system via two operations: 1) rpc.call and ii) rpc.cast; a Worker is a component that receives messages from the queuing system and reply accordingly to rpc.call operations. Figure 2 shows the following internal elements: * Topic Publisher: a Topic Publisher comes to life when an rpc.call or an rpc.cast operation is executed; this object is instantiated and used to push a message to the queuing system. Every publisher connects always to the same topic-based exchange; its life-cycle is limited to the message delivery. * Direct Consumer: a Direct Consumer comes to life if (an only if) a rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; Every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshaled in the message sent by the Topic Publisher (only rpc.call operations). * Topic Consumer: a Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue. Every Worker has two topic consumers, one that is addressed only during rpc.cast operations (and it connects to a shared queue whose exchange key is 'topic') and the other that is addressed only during rpc.call operations (and it connects to a unique queue whose exchange key is 'topic.host'). * Direct Publisher: a Direct Publisher comes to life only during rpc.call operations and it is instantiated to return the message required by the request/response operation. 
The object connects to a direct-based exchange whose identity is dictated by the incoming message. * Topic Exchange: The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by Qpid or RabbitMQ); its type (such as topic vs. direct) determines the routing policy; a message broker node will have only one topic-based exchange for every topic in Nova. * Direct Exchange: this is a routing table that is created during rpc.call operations; there are many instances of this kind of exchange throughout the life-cycle of a message broker node, one for each rpc.call invoked. * Queue Element: A Queue is a message bucket. Messages are kept in the queue until a Consumer (either Topic or Direct Consumer) connects to the queue and fetch it. Queues can be shared or can be exclusive. Queues whose routing key is 'topic' are shared amongst Workers of the same personality. .. image:: ./images/rpc/rabt.png :width: 60% .. RPC Calls --------- The diagram below shows the message flow during an rpc.call operation: 1. a Topic Publisher is instantiated to send the message request to the queuing system; immediately before the publishing operation, a Direct Consumer is instantiated to wait for the response message. 2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic.host') and passed to the Worker in charge of the task. 3. once the task is completed, a Direct Publisher is allocated to send the response message to the queuing system. 4. once the message is dispatched by the exchange, it is fetched by the Direct Consumer dictated by the routing key (such as 'msg_id') and passed to the Invoker. .. image:: ./images/rpc/flow1.png :width: 60% .. RPC Casts --------- The diagram below shows the message flow during an rpc.cast operation: 1. A Topic Publisher is instantiated to send the message request to the queuing system. 2. 
Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. .. image:: ./images/rpc/flow2.png :width: 60% .. AMQP Broker Load ---------------- At any given time the load of a message broker node running either Qpid or RabbitMQ is function of the following parameters: * Throughput of API calls: the number of API calls (more precisely rpc.call ops) being served by the OpenStack cloud dictates the number of direct-based exchanges, related queues and direct consumers connected to them. * Number of Workers: there is one queue shared amongst workers with the same personality; however there are as many exclusive queues as the number of workers; the number of workers dictates also the number of routing keys within the topic-based exchange, which is shared amongst all workers. The figure below shows the status of a RabbitMQ node after Nova components' bootstrap in a test environment. Exchanges and queues being created by Nova components are: * Exchanges 1. nova (topic exchange) * Queues 1. compute.phantom (phantom is hostname) 2. compute 3. network.phantom (phantom is hostname) 4. network 5. scheduler.phantom (phantom is hostname) 6. scheduler .. image:: ./images/rpc/state.png :width: 60% .. RabbitMQ Gotchas ---------------- Nova uses Kombu to connect to the RabbitMQ environment. Kombu is a Python library that in turn uses AMQPLib, a library that implements the standard AMQP 0.8 at the time of writing. When using Kombu, Invokers and Workers need the following parameters in order to instantiate a Connection object that connects to the RabbitMQ server (please note that most of the following material can be also found in the Kombu documentation; it has been summarized and revised here for sake of clarity): * Hostname: The hostname to the AMQP server. * Userid: A valid username used to authenticate to the server. 
* Password: The password used to authenticate to the server. * Virtual_host: The name of the virtual host to work with. This virtual host must exist on the server, and the user must have access to it. Default is "/". * Port: The port of the AMQP server. Default is 5672 (amqp). The following parameters are default: * Insist: insist on connecting to a server. In a configuration with multiple load-sharing servers, the Insist option tells the server that the client is insisting on a connection to the specified server. Default is False. * Connect_timeout: the timeout in seconds before the client gives up connecting to the server. The default is no timeout. * SSL: use SSL to connect to the server. The default is False. More precisely Consumers need the following parameters: * Connection: the above mentioned Connection object. * Queue: name of the queue. * Exchange: name of the exchange the queue binds to. * Routing_key: the interpretation of the routing key depends on the value of the exchange_type attribute. * Direct exchange: if the routing key property of the message and the routing_key attribute of the queue are identical, then the message is forwarded to the queue. * Fanout exchange: messages are forwarded to the queues bound the exchange, even if the binding does not have a key. * Topic exchange: if the routing key property of the message matches the routing key of the key according to a primitive pattern matching scheme, then the message is forwarded to the queue. The message routing key then consists of words separated by dots (".", like domain names), and two special characters are available; star ("") and hash ("#"). The star matches any word, and the hash matches zero or more words. For example ".stock.#" matches the routing keys "usd.stock" and "eur.stock.db" but not "stock.nasdaq". * Durable: this flag determines the durability of both exchanges and queues; durable exchanges and queues remain active when a RabbitMQ server restarts. 
Non-durable exchanges/queues (transient exchanges/queues) are purged when a server restarts. It is worth noting that AMQP specifies that durable queues cannot bind to transient exchanges. Default is True. * Auto_delete: if set, the exchange is deleted when all queues have finished using it. Default is False. * Exclusive: exclusive queues (such as non-shared) may only be consumed from by the current connection. When exclusive is on, this also implies auto_delete. Default is False. * Exchange_type: AMQP defines several default exchange types (routing algorithms) that covers most of the common messaging use cases. * Auto_ack: acknowledgment is handled automatically once messages are received. By default auto_ack is set to False, and the receiver is required to manually handle acknowledgment. * No_ack: it disable acknowledgment on the server-side. This is different from auto_ack in that acknowledgment is turned off altogether. This functionality increases performance but at the cost of reliability. Messages can get lost if a client dies before it can deliver them to the application. * Auto_declare: if this is True and the exchange name is set, the exchange will be automatically declared at instantiation. Auto declare is on by default. Publishers specify most the parameters of Consumers (such as they do not specify a queue name), but they can also specify the following: * Delivery_mode: the default delivery mode used for messages. The value is an integer. The following delivery modes are supported by RabbitMQ: * 1 or "transient": the message is transient. Which means it is stored in memory only, and is lost if the server dies or restarts. * 2 or "persistent": the message is persistent. Which means the message is stored both in-memory, and on disk, and therefore preserved if the server dies or restarts. The default value is 2 (persistent). 
During a send operation, Publishers can override the delivery mode of messages so that, for example, transient messages can be sent over a durable queue. nova-13.0.0/doc/source/addmethod.openstackapi.rst0000664000567000056710000000435112701407773023122 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Adding a Method to the OpenStack API ==================================== The interface is a mostly RESTful API. REST stands for Representational State Transfer and provides an architecture "style" for distributed systems using HTTP for transport. Figure out a way to express your request and response in terms of resources that are being created, modified, read, or destroyed. Routing ------- To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ for more information. URLs are mapped to "action" methods on "controller" classes in ``nova/api/openstack/__init__/ApiRouter.__init__`` . See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two: - mapper.connect() lets you map a single URL to a single action on a controller. - mapper.resource() connects many standard URLs to actions on a controller. Controllers and actions ----------------------- Controllers live in ``nova/api/openstack``, and inherit from nova.wsgi.Controller. 
See ``nova/api/openstack/compute/servers.py`` for an example. Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc. Serialization ------------- Actions return a dictionary, and wsgi.Controller serializes that to JSON. Faults ------ If you need to return a non-200, you should return faults.Fault(webob.exc.HTTPNotFound()) replacing the exception as appropriate. nova-13.0.0/doc/source/architecture.rst0000664000567000056710000000567412701407773021204 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Nova System Architecture ======================== Nova is comprised of multiple server processes, each performing different functions. The user-facing interface is a REST API, while internally Nova components communicate via an RPC message passing mechanism. The API servers process REST requests, which typically involve database reads/writes, optionally sending RPC messages to other Nova services, and generating responses to the REST calls. RPC messaging is done via the **oslo.messaging** library, an abstraction on top of message queues. 
Most of the major nova components can be run on multiple servers, and have a `manager` that is listening for `RPC` messages. The one major exception is nova-compute, where a single process runs on the hypervisor it is managing (except when using the VMware or Ironic drivers). The manager also, optionally, has periodic tasks. For more details on our `RPC` system, please see: :doc:`rpc` Nova also uses a central database that is (logically) shared between all components. However, to aid upgrade, the DB is accessed through an object layer that ensures an upgraded control plane can still communicate with a nova-compute running the previous release. To make this possible nova-compute proxies DB requests over `RPC` to a central manager called `nova-conductor` To horizontally expand Nova deployments, we have a deployment sharding concept called cells. For more information please see: :doc:`cells` Components ---------- Below you will find a helpful explanation of the key components of a typical (non-cells v1) Nova deployment. .. image:: ./images/architecture.svg :width: 100% * DB: sql database for data storage. * API: component that receives HTTP requests, converts commands and communicates with other components via the **oslo.messaging** queue or HTTP * Scheduler: decides which host gets each instance * Network: manages ip forwarding, bridges, and vlans * Compute: manages communication with hypervisor and virtual machines. * Conductor: handles requests that need coordination(build/resize), acts as a database proxy, or handles object conversions. While all services are designed to be horizontally scalable, you should have significantly more computes then anything else. nova-13.0.0/doc/source/aggregates.rst0000664000567000056710000001463512701407773020630 0ustar jenkinsjenkins00000000000000.. Copyright 2012 OpenStack Foundation Copyright 2012 Citrix Systems, Inc. Copyright 2012, The Cloudscaling Group, Inc. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Host Aggregates =============== Host aggregates can be regarded as a mechanism to further partition an availability zone; while availability zones are visible to users, host aggregates are only visible to administrators. Host aggregates started out as a way to use Xen hypervisor resource pools, but has been generalized to provide a mechanism to allow administrators to assign key-value pairs to groups of machines. Each node can have multiple aggregates, each aggregate can have multiple key-value pairs, and the same key-value pair can be assigned to multiple aggregate. This information can be used in the scheduler to enable advanced scheduling, to set up xen hypervisor resources pools or to define logical groups for migration. Availability Zones (AZs) ------------------------ Availability Zones are the end-user visible logical abstraction for partitioning a cloud without knowing the physical infrastructure. That abstraction doesn't come up in Nova with an actual database model since the availability zone is actually a specific metadata information attached to an aggregate. Adding that specific metadata to an aggregate makes the aggregate visible from an end-user perspective and consequently allows to schedule upon a specific set of hosts (the ones belonging to the aggregate). 
That said, there are a few rules to know that diverge from an API perspective between aggregates and availability zones: - one host can be in multiple aggregates, but it can only be in one availability zone - by default a host is part of a default availability zone even if it doesn't belong to an aggregate (the configuration option is named ``default_availability_zone``) .. warning:: That last rule can be very error-prone. Since the user can see the list of availability zones, they have no way to know whether the default availability zone name (currently *nova*) is provided because an host belongs to an aggregate whose AZ metadata key is set to *nova*, or because there are at least one host belonging to no aggregate. Consequently, it is highly recommended for users to never ever ask for booting an instance by specifying an explicit AZ named *nova* and for operators to never set the AZ metadata for an aggregate to *nova*. That leads to some problems due to the fact that the instance AZ information is explicitly attached to *nova* which could break further move operations when either the host is moved to another aggregate or when the user would like to migrate the instance. Xen Pool Host Aggregates ------------------------ Originally all aggregates were Xen resource pools, now an aggregate can be set up as a resource pool by giving the aggregate the correct key-value pair. You can use aggregates for XenServer resource pools when you have multiple compute nodes installed (only XenServer/XCP via xenapi driver is currently supported), and you want to leverage the capabilities of the underlying hypervisor resource pools. For example, you want to enable VM live migration (i.e. VM migration within the pool) or enable host maintenance with zero-downtime for guest instances. Please, note that VM migration across pools (i.e. storage migration) is not yet supported in XenServer/XCP, but will be added when available. 
Bear in mind that the two migration techniques are not mutually exclusive and can be used in combination for a higher level of flexibility in your cloud management. Design ------ The OSAPI Admin API is extended to support the following operations: * Aggregates * list aggregates: returns a list of all the host-aggregates (optionally filtered by availability zone) * create aggregate: creates an aggregate, takes a friendly name, etc. returns an id * show aggregate: shows the details of an aggregate (id, name, availability_zone, hosts and metadata) * update aggregate: updates the name and availability zone of an aggregate * set metadata: sets the metadata on an aggregate to the values supplied * delete aggregate: deletes an aggregate, it fails if the aggregate is not empty * add host: adds a host to the aggregate * remove host: removes a host from the aggregate * Hosts * start host maintenance (or evacuate-host): disallow a host to serve API requests and migrate instances to other hosts of the aggregate * stop host maintenance: (or rebalance-host): put the host back into operational mode, migrating instances back onto that host Using the Nova CLI ------------------ Using the nova command you can create, delete and manage aggregates. The following section outlines the list of available commands. Usage ~~~~~ :: * aggregate-list Print a list of all aggregates. * aggregate-create Create a new aggregate with the specified details. * aggregate-delete Delete the aggregate by its id. * aggregate-details Show details of the specified aggregate. * aggregate-add-host Add the host to the specified aggregate. * aggregate-remove-host Remove the specified host from the specified aggregate. * aggregate-set-metadata [ ...] Update the metadata associated with the aggregate. * aggregate-update [] Update the aggregate's name and optionally availability zone. * host-list List all hosts by service * host-update --maintenance [enable | disable] Put/resume host into/from maintenance. 
nova-13.0.0/doc/source/api_microversion_dev.rst0000664000567000056710000003277012701407773022725 0ustar jenkinsjenkins00000000000000API Microversions ================= Background ---------- Nova uses a framework we call 'API Microversions' for allowing changes to the API while preserving backward compatibility. The basic idea is that a user has to explicitly ask for their request to be treated with a particular version of the API. So breaking changes can be added to the API without breaking users who don't specifically ask for it. This is done with an HTTP header ``X-OpenStack-Nova-API-Version`` which is a monotonically increasing semantic version number starting from ``2.1``. If a user makes a request without specifying a version, they will get the ``DEFAULT_API_VERSION`` as defined in ``nova/api/openstack/wsgi.py``. This value is currently ``2.1`` and is expected to remain so for quite a long time. There is a special value ``latest`` which can be specified, which will allow a client to always receive the most recent version of API responses from the server. .. warning:: The ``latest`` value is mostly meant for integration testing and would be dangerous to rely on in client code since Nova microversions are not following semver and therefore backward compatibility is not guaranteed. Clients, like python-novaclient, should always require a specific microversion but limit what is acceptable to the version range that it understands at the time. For full details please read the `Kilo spec for microversions `_ When do I need a new Microversion? ---------------------------------- A microversion is needed when the contract to the user is changed. 
The user contract covers many kinds of information such as: - the Request - the list of resource urls which exist on the server Example: adding a new servers/{ID}/foo which didn't exist in a previous version of the code - the list of query parameters that are valid on urls Example: adding a new parameter ``is_yellow`` servers/{ID}?is_yellow=True - the list of query parameter values for non free form fields Example: parameter filter_by takes a small set of constants/enums "A", "B", "C". Adding support for new enum "D". - new headers accepted on a request - the list of attributes and data structures accepted. Example: adding a new attribute 'locked': True/False to the request body However, the attribute ``os.scheduler_hints`` of the "create a server" API is an exception to this. A new scheduler which adds a new attribute to ``os:scheduler_hints`` doesn't require a new microversion, because available schedulers depend on cloud environments, and we accept customized schedulers as a rule. - the Response - the list of attributes and data structures returned Example: adding a new attribute 'locked': True/False to the output of servers/{ID} - the allowed values of non free form fields Example: adding a new allowed ``status`` to servers/{ID} - the list of status codes allowed for a particular request Example: an API previously could return 200, 400, 403, 404 and the change would make the API now also be allowed to return 409. See [#f2]_ for the 400, 403 and 404 cases. - changing a status code on a particular response Example: changing the return code of an API from 501 to 400. .. note:: Fixing a bug so that a 400+ code is returned rather than a 500 or 503 does not require a microversion change. It's assumed that clients are not expected to handle a 500 or 503 response and therefore should not need to opt-in to microversion changes that fixes a 500 or 503 response from happening. 
According to the OpenStack API Working Group, a **500 Internal Server Error** should **not** be returned to the user for failures due to user error that can be fixed by changing the request on the client side. See [#f1]_. - new headers returned on a response The following flow chart attempts to walk through the process of "do we need a microversion". .. graphviz:: digraph states { label="Do I need a microversion?" silent_fail[shape="diamond", style="", group=g1, label="Did we silently fail to do what is asked?"]; ret_500[shape="diamond", style="", group=g1, label="Did we return a 500 before?"]; new_error[shape="diamond", style="", group=g1, label="Are we changing what status code is returned?"]; new_attr[shape="diamond", style="", group=g1, label="Did we add or remove an attribute to a payload?"]; new_param[shape="diamond", style="", group=g1, label="Did we add or remove an accepted query string parameter or value?"]; new_resource[shape="diamond", style="", group=g1, label="Did we add or remove a resource url?"]; no[shape="box", style=rounded, label="No microversion needed"]; yes[shape="box", style=rounded, label="Yes, you need a microversion"]; no2[shape="box", style=rounded, label="No microversion needed, it's a bug"]; silent_fail -> ret_500[label=" no"]; silent_fail -> no2[label="yes"]; ret_500 -> no2[label="yes [1]"]; ret_500 -> new_error[label=" no"]; new_error -> new_attr[label=" no"]; new_error -> yes[label="yes"]; new_attr -> new_param[label=" no"]; new_attr -> yes[label="yes"]; new_param -> new_resource[label=" no"]; new_param -> yes[label="yes"]; new_resource -> no[label=" no"]; new_resource -> yes[label="yes"]; {rank=same; yes new_attr} {rank=same; no2 ret_500} {rank=min; silent_fail} } **Footnotes** .. [#f1] When fixing 500 errors that previously caused stack traces, try to map the new error into the existing set of errors that API call could previously return (400 if nothing else is appropriate). 
Changing the set of allowed status codes from a request is changing the contract, and should be part of a microversion (except in [#f2]_). The reason why we are so strict on contract is that we'd like application writers to be able to know, for sure, what the contract is at every microversion in Nova. If they do not, they will need to write conditional code in their application to handle ambiguities. When in doubt, consider application authors. If it would work with no client side changes on both Nova versions, you probably don't need a microversion. If, on the other hand, there is any ambiguity, a microversion is probably needed. .. [#f2] The exception to not needing a microversion when returning a previously unspecified error code is the 400, 403 and 404 cases. This is considered OK to return even if previously unspecified in the code since it's implied given keystone authentication can fail with a 403 and API validation can fail with a 400 for invalid json request body. Request to url/resource that does not exist always fails with 404. When a microversion is not needed --------------------------------- A microversion is not needed in the following situation: - the response - Changing the error message without changing the response code does not require a new microversion. - Removing an inapplicable HTTP header, for example, suppose the Retry-After HTTP header is being returned with a 4xx code. This header should only be returned with a 503 or 3xx response, so it may be removed without bumping the microversion. In Code ------- In ``nova/api/openstack/wsgi.py`` we define an ``@api_version`` decorator which is intended to be used on top-level Controller methods. It is not appropriate for lower-level methods. Some examples: Adding a new API method ~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("2.4") def my_api_method(self, req, id): .... 
This method would only be available if the caller had specified an ``X-OpenStack-Nova-API-Version`` of >= ``2.4``. If they had specified a lower version (or not specified it and received the default of ``2.1``) the server would respond with ``HTTP/404``. Removing an API method ~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("2.1", "2.4") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``X-OpenStack-Nova-API-Version`` of <= ``2.4``. If ``2.5`` or later is specified the server will respond with ``HTTP/404``. Changing a method's behavior ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("2.1", "2.3") def my_api_method(self, req, id): .... method_1 ... @wsgi.Controller.api_version("2.4") # noqa def my_api_method(self, req, id): .... method_2 ... If a caller specified ``2.1``, ``2.2`` or ``2.3`` (or received the default of ``2.1``) they would see the result from ``method_1``, ``2.4`` or later ``method_2``. It is vital that the two methods have the same name, so the second of them will need ``# noqa`` to avoid failing flake8's ``F811`` rule. The two methods may be different in any kind of semantics (schema validation, return values, response codes, etc) A change in schema only ~~~~~~~~~~~~~~~~~~~~~~~ If there is no change to the method, only to the schema that is used for validation, you can add a version range to the ``validation.schema`` decorator:: @wsgi.Controller.api_version("2.1") @validation.schema(dummy_schema.dummy, "2.3", "2.8") @validation.schema(dummy_schema.dummy2, "2.9") def update(self, req, id, body): .... This method will be available from version ``2.1``, validated according to ``dummy_schema.dummy`` from ``2.3`` to ``2.8``, and validated according to ``dummy_schema.dummy2`` from ``2.9`` onward. 
When not using decorators ~~~~~~~~~~~~~~~~~~~~~~~~~ When you don't want to use the ``@api_version`` decorator on a method or you want to change behavior within a method (say it leads to simpler or simply a lot less code) you can directly test for the requested version with a method as long as you have access to the api request object (commonly called ``req``). Every API method has an api_version_request object attached to the req object and that can be used to modify behavior based on its value:: def index(self, req): req_version = req.api_version_request req1_min = api_version_request.APIVersionRequest("2.1") req1_max = api_version_request.APIVersionRequest("2.5") req2_min = api_version_request.APIVersionRequest("2.6") req2_max = api_version_request.APIVersionRequest("2.10") if req_version.matches(req1_min, req1_max): ....stuff.... elif req_version.matches(req2_min, req2_max): ....other stuff.... elif req_version > api_version_request.APIVersionRequest("2.10"): ....more stuff..... The first argument to the matches method is the minimum acceptable version and the second is the maximum acceptable version. A specified version can be null:: null_version = APIVersionRequest() If the minimum version specified is null then there is no restriction on the minimum version, and likewise if the maximum version is null there is no restriction on the maximum version. Alternatively a one sided comparison can be used as in the example above. Other necessary changes ----------------------- If you are adding a patch which adds a new microversion, it is necessary to add changes to other places which describe your change: * Update ``REST_API_VERSION_HISTORY`` in ``nova/api/openstack/api_version_request.py`` * Update ``_MAX_API_VERSION`` in ``nova/api/openstack/api_version_request.py`` * Add a verbose description to ``nova/api/openstack/rest_api_version_history.rst``. There should be enough information that it could be used by the docs team for release notes. 
* Update the expected versions in affected tests, for example in ``nova/tests/unit/api/openstack/compute/test_versions.py``. * Update the get versions api sample files: ``doc/api_samples/versions/versions-get-resp.json`` and ``nova/tests/functional/api_samples/versions/versions-get-resp.json.tpl``. * Make a new commit to python-novaclient and update corresponding files to enable the newly added microversion API. Allocating a microversion ------------------------- If you are adding a patch which adds a new microversion, it is necessary to allocate the next microversion number. Except under extremely unusual circumstances (which would have been mentioned in the nova spec for the change), the minor number of ``_MAX_API_VERSION`` will be incremented. This will also be the new microversion number for the API change. It is possible that multiple microversion patches would be proposed in parallel and the microversions would conflict between patches. This will cause a merge conflict. We don't reserve a microversion for each patch in advance as we don't know the final merge order. Developers may need over time to rebase their patch calculating a new version number as above based on the updated value of ``_MAX_API_VERSION``. Testing Microversioned API Methods ---------------------------------- Testing a microversioned API method is very similar to a normal controller method test; you just need to add the ``X-OpenStack-Nova-API-Version`` header, for example:: req = fakes.HTTPRequest.blank('/testable/url/endpoint') req.headers = {'X-OpenStack-Nova-API-Version': '2.2'} req.api_version_request = api_version.APIVersionRequest('2.2') controller = controller.TestableController() res = controller.index(req) ... assertions about the response ... For many examples of testing, the canonical examples are in ``nova/tests/unit/api/openstack/compute/test_microversions.py``. 
nova-13.0.0/doc/source/image_src/0000775000567000056710000000000012701410205017665 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/source/image_src/PowerStates.odp0000664000567000056710000005263412701407773022703 0ustar jenkinsjenkins00000000000000PK¤@3&¬¨//mimetypeapplication/vnd.oasis.opendocument.presentationPK¤@Configurations2/statusbar/PK¤@'Configurations2/accelerator/current.xmlPKPK¤@Configurations2/floater/PK¤@Configurations2/popupmenu/PK¤@Configurations2/progressbar/PK¤@Configurations2/toolpanel/PK¤@Configurations2/menubar/PK¤@Configurations2/toolbar/PK¤@Configurations2/images/Bitmaps/PK¤@ content.xmlí]ërä6vþŸ§`õ:©¤Ê¤ˆ oÊ̸Öv¶v«f¶œñÆÎŸ)ª›-q‡Mö’liä—Èϼ_ž$ï—&¨fKl-]e š@ðǃsÀ7ß}Ýʽ'~¾]!M_)^¸Ž6~xûvõó§?¨öê»wÿô&Únýµw½‰Ö‡¦ê: SøWÒar-î¾]âð:r?¹Ý—\§ëëhï…y©ëjîkþ,q%IéâÈfyÔjñm$[øk¨ÛPßíÝÔo´âkà‡_Þ®îÒt}uõðð =-Šo¯ã8WünÑàu‘oˆžk³¾ò=,¹BºÊóî¼Ô•mË[mRxØÝx±44nê¶zu{ d×eÄ”«¨Z¦Æ¯û[ivÝßöÀ¼¾scižñÌuª-ž›_VÝÄwþ›Ì—x_³Ûòï×’ÞÏ÷~Å5VâíÊéPŽ4‰×æÍîö®ÃÒ.^ÜÏ*®%Ÿ8Œ«À=dÃé& 6½¸‰"•›¢T~»»lK_é"//ZKÀ> ÝбáÔy³ä 5œZÞŽó¾_þ”q÷Qòïïý/æÒ\ù膉¸X{7‘_½õB/ö×oWq´sÃZ޽Ÿ®a ºwcŸ…¬šPÚ%Z—B#U˜¼Bz!fæöÓ2 EsþË ÿóà†¿úÊGÊ=°y Éc’z»vsóûe£eeHöœ¢·3£;?Uþ臉¶e{[WäèoßS¥wË]ô,ÃÉÕÆ–©ÝÆ¡§z^Ë3öyÏ7û~ji5¬eókfK ’jæó 'º§ù§O-Õr4¹/ZCüÔÒG¼ÿ¥+zŸZŠÞ˜Î6}íSK_›©d[Ô®9I¶–Ú5†ÜÚò-WjãkHe†À»÷‚L8Ý ƒ"n²ëPÿJü·TÕùvõÿû?Eó*•TÉË0MàÞÀÂe /Üxø˜ºñÇÇˆŠ CâA]á&zPyÎÌ1T8ñ*ÀPãŸ9.ÇÞtø’½»†J½m{9¯!2Bbñëõ!D'DÈ~•“!„5ú*2'Cˆ¼J|¬éðy¥rÚž !úJå´3!B¯SN³ž‰ 2.KPWngºöUï^ƒìÆM´y,~TIß½áKœ,^],vfK«ð­òµôÇbá”oQÈÖxf³‰@÷ê¢ê*«r}H 1jrçî½vE<|½X®-Îþ”ßñ7oWþg¿÷‘ÓÃ?Ñ!kúYa±¸Ì®˱šÅãøÅ¯|éœæ?YŸbÉŒnûŒõF¼ûmÓ ìýñœ:Æ+óò{áÛK´/ÚyiüÈŸÎ6Å|A“tEW02õìoŽÆãꌽuꆷE\QÕžoBû °Âï³âµj”ò_]ùoåÏœN­þì"ÜÛE¸ÖEH¢‹p³‹*—¸“C3ÍJ™š¡“Áz“æ°}ÿÜÿý:õïó~aÙÞMÑG·ÁÁS÷‘¦ c–ÍÑ&È$⮈kü²)±² ¢¦ì ÿ›]áwÅV¨Š: ”€§”µWŠ×(ã¿OzósþŒÖ_h¦yN¢ÔÇ2™€(T£†Ó`ÊðX>Ê cýDçv»Pe.T¡§SÅž\¤Qþ#Ž£x¡ÉÉ4‰Â&¹(îâäHƒ•V¯ñ}ò÷ñ¸,«$*Y r`>óF‘.à"R-].Í®NsârÙgÜ«’´)³cQ%°y‡°˜˜ìɺJˆeª#]E"EáVAÌ·¤çÍHœ&jøi¨‘§£f@+j8GêºÃPËR€žjÎtLëC¦¸X…±‚˜Ó ™‚,GÐL¤$i6‘7&˜ík N%ÿ?ø;[eãs™í­Ó©‚Á œž*ñ’õÁÛ,Ly9¦ÔmM{¡bihz[ócí÷ SÎÏYÇ‘)Áª£A×-ÝKÒ®£/Ô!ôLªÖ5{@WTJU2j—ÕK³T–°n7”rè)”ržš™R>lÉÈb6Ò©`Ö2dlÑLÃä©ya6¬’ËBF{!Ó{ 
­«’ÆüÄD£Aifß”˜ÐÆH¶œ|$óÔ¼°”1¯eÁi]K€Ù¶ºÌæ6æfq#[ÑÒ~*Z6nPIHA=ž:·É]׎nÇhǦ¦Oí_¯¯¥5TãSj­©Y‹Êýâ*7Ò%øg kHç¶4Ãn(݃ KÆÓ¢s÷ K0z&[³)ªJK¤9¢[º$¦3næá'°dBÓBµÆVic>LjÈLzºÌ”&­Ìò5-=}¤ÍmFPöQ¼ö^w„ÁlÈ ÓšnO@îNu¿ ÐM¡IzໄbÅSÏLúá°¤ÙzKW謩 jf¤òãV"kÒhy\€žöB߈–™" ¨‚nóà¦ÓT)˜>ýßzT©Sêý«ûÛÇEE»Øs[³Z˘X¯ë ù¨émê4ºPß:gEn(›5 ÐÓ-¢"f´" S–¢ºcÎÈNmü Ôæzú(Pûíþ§ƒÊ ›5,•Ãf " Zóµå‘:™¨ýŽ©'c*Êß);NI&DwpWö¤ÑG`qÂÄ뉺 ± qÿÞq#ª=ì˜ÕëøÃÝK¨}£«ºõGoL¦Š,ƒÀ4€E¦{>"k¸‘áp†W´ E`Çgáöó‡W`™ðló‡jÈiŪ/°ü…[?Þ-öÏ3éê†FöŒý„°ŒÎ£Ñ363€œÂræg •#1©ZJaÚ2*á ŸqT‘¤ºíÌÕ! ¨)“ÔýQ°cÏA’ØRЄSâXÌO( ©%ôõm uÁiôÂ9òF5]oˉ5£ŠL®o0ùš¬Qü™7Ñ×ôJÊäñßŇ6Ê_?LÜNyü]ê%Ýõ^5^ãªÄú¼¸W|¬ý¸£ îÐ \ç˜ù?%Iï¹9§ÔûC´Û¹aOÅ3Ç\G õ#‹ïX`"ì†1RÄ€7¦çzEÊ|r“/s“3Cš¸ÙÛ2Œ-Å»q,©m‹Dwž‡±z÷3ó(;Ï {09¥ò0RRèD%a˜O_½Ÿ( i°/=»¢N©}ãmýÐÛhS»Rª>æŽàn½ø[·Y¢¨,™årÜX]»APª[»hão}/†ªT ÿQÍ$T7lRØ`Y„­ÀŠL—€†ÝC ‡µôæaÉ÷Êw[ ÿ3·ŽíXìÂ&Ñá…ÿ ö”!]ùUä¯ÈJ™ð¿%Êdn^N¤¿ÛÚð¿“•×~)D쫱Œu”çÕÂ=Ö4D²‹¼¿få³X#‘‘•-^®±æ!‹[pyþý >Íʼn¾ßŸ2â`þvõ®¨ÜuÄ ¿þr¨UI•ÃÍrþö_¡¡¶ò-<ø[ýߎ—&ý¥‘ò­‰í¡ hg`JH=Þè/ GÁC5˜Ý5˜âù/`õWMà<¨Áî®ÁMy §¿É6 íx‹FHUÑb`Y…dg n2:¢ÝŒäUÈ1uS’˜òýºyI2R ï&%±¥I‰ºYIlÉtQ’=ZEßÂèv”¡òÝ„DYU€[tto¨A,Ø"¡(ˆ ¶¨íÉ ‘2P´‹rZ˜>º›p€‡ ªª E7&ûŬ¼`ô3ø£-™Gw­xy{ðå[T㈫ý¢=b·žè­'ÝROWDÓ%*hϺºò¤GÒñN'DæÑ=S¯4kHœM0djèsòÜtñ<±Ë÷²O¶ÿ:]Ñ€AAGº)(OÚ¥î éÒôŠ" ]o‚Ì8iìYŸÕÂëþ•ë-¢{a¡ú•¡ë0ûµm{nö8×jù‡„Ò»Ãî&tý ËL©GDsÌöB®ÑÒAþ•}UÍ®:b°†p $>hÍ`ÏÌj¯±Ü$_G* ®Z¶j»÷ñðú Œ´ÚÑbͰj¦tíþºY±®FrÈ·ö»öî¢`ÃÂ?@Õðƒ\5üWí^Ìs0*}W%ûKªzwOöõ(Ùñð°i8ÜhÕ¨UKC§y0–=ÛÁuv/MÌ:Md¾26D³æU?™&æòU¢gã‰#Ë“ >TÀvÈNÌ“å+3ûJ™àÛ'xR–,_(x>–Èž‡§ø˜…Ñ\_>~o9 î•Xà)¾ÐdkΤü[¢_þTg«N™+;÷ÿÕ¦³<̤b¾Z§~wÇâ[ÝÙç¼&_ñ|•ñ—VEõäßÇhn>#eDiËû(oX=ú×® Vxè ´0€æÒ¦¢…"Ò±HQ:«0²mOS!Úÿ‘§#JÍ¢¦•#ÊSóBëµ°K«œ^z¥šsБßê´ ¯3Il9"ìSìÐ(Õ¢ŽH`Gê Ì©Äó“j|WYw¬éÅóOî!™ÜÑ5«}ÜÒ®È >ÎÁŽ‚³kf¡îØ'ëû¼»p6 Þ£G4¬·BNQóC.õÓ´ÓGóÏáþòÇó<”-0ÜuΨlõ›uLÙRUˆ«"57Õ Ì;¢#OÑA뤨rDyjnˆS¶:VÙ’ù¶[KÙR)Û’Ê–HH*[Ï·mșґçäÊ@ŠL§ )²2Hyb^:®è<¨Tz P-ͬoÅÂZï Xxäw‹ˆ> ¨ T,Ö(]%”Ú*ÌEìÜaC)j82{§Z{˜b-Sלš5`eÖÁ‰+‡dÏpœÞµ¼¨¯3R_evl;Cê+HR:¹þšð²õשºIÆ­K§ÝM¨¼(d…NQ½£Ð‰G‚p¿nrØ]¸¥1UOÉ1Éô”¥ÕÎ#ÅÄ;j}¸ðŽš‡·x´3Ö[,c¶¼Åsk2g1KÌË|iâi•XOP¾©´bØf·b(³AŸƒi!®hj0D™¶ò»?#8‹“àÄO;%ª Y9fÖü Ã5·9*5å6Ðkh5Ôö`¿›§›† C“˜bqD‰­ÚC”š»ˆðyŒ¨áS³îçËçéÅøÞ½¿öÎPñÎÝï6Ÿu~èºjÌÇÝ£ê)FÇæ‡Z8®^œu4ѱ½ýçÎõ gæåÎ5 Ø–¤?²·L¯jŠ˜špî<„ 
=Î*sû‰Ñ¹Ã<á2×’‘¹¤Î•\Ÿ˜ÊóÄhs.9{~owí¨mQ=ż¸»ç» l“ÁxO2^°leròÐߢ/¾°¥ÉóG»êdà±x’Å'å&ˈ¡õÛÂú‡åðŽÀ+[8L­&ˆ§:>Upr/\7<ýg§uÂCûÎNÓ°™Ð$•mË5œSO™¹*l, ÜhЪ¾±0P;±+°ÅU1‚QgÃó47ˆj qp\yÔt7N¹RÙ‰®áéЀ{dÀô"æÉòy¸øú{{i kRè.:$žzï'þM€lÝ áè]EÛ­¿ö®«™Ë«7Ñæ±üµ‰Ö‡dR`)üûîÿPKˆ!ˆ,QÒPK¤@ styles.xmlí]K㸾çWZìÞ$ëá÷L÷b²‹`˜^L0“9ÒmsFJjwÏ)ÿ ‡òÿòK‡$SÖÃ’­¶ÝnÍ‹6Y«>‹Åª2ýþç'Ï‚·Ê€Z€“çÏøéNÑúÀÔ–.Úéö Ú¦¬mcêß•áý{a»¸±›ìŒѶTUžï”5ÁÙJJ›|VB% ¢;){|ü RÕr15ž?ÌtöŸ"x\!×Íz윮ÆÊ`…[ú(Šƒ@ÈÇ*û¼c*ðÉrSñ.¶ª Žp¶y#bA Ü`”„,ˆ};ŠùàϾSBä.LûéöÕ%€nA”dGi[»tÇS=ìÐÇ»D–i× SùdŠÊÜ þÆw;VÀ aÝv)¨8©TÃj±2r&WAÚ8„*ݼUùä ’‰!‡‘7¦¯ë'àðÝGD÷0.øà3$h%Zs"ˆêú”€š&‚=àç(ÙÔx?‚˜÷°›+Dß)*æ(ˆx› üu Ö´ ú¼ÁƱÊÌ/ʦ¤êü”Ó_áWð÷xð™.—ƒL¦#Ãç0‚^‘×´DZDÁ˜N ë‰$ܧ}ß7iO"FÚñËe¬1[ê§£ÄÉÆV ”QT‹”‘” •õnоXY×ï(;õÌ™‚T/e» NßÄQÚ[ n„¹ ÏÁ.rÙ‚l‘Ã\Ýö”ƒ†EBÊ+õX²šÉ†ûmºX™c'VhŽ€®äšá¬·l03k™Õ†Ž÷°• ¨¥Q7˜ ïôå—Ú*´¦zõ5#´zæ+';½¨Ôd0V s̘‘:–8Š˜sPÖçÂUÄØï h½‘zÄ Ø‡;0Èqز•Uºç„0RŸòhä;ŸK;÷ÞÒý{~hqQ˜ß[ötH&s©oã bu».Œ¢“µÓ-VE—Ê|Õ;åÿýw¦pÒC$ãc<ä«.XÒžì]O¨Ã*ë»oZ?Ó7øùÙ[bWic™…µ䫬NÒ8˜'àÀ7Mu ©{Sn!«3„ ¡ç7‡Ð¨C„f7‰Ð¸3„Lmt“M:CȺI|¦Ýás£vzÖB£µÓóºM;mèA4~]†ZêNNlêƒWm\…ÎMO;k&°8E$Gˆ¤1=Aä[Å)%ß–P’fÙ@1ÞÐõy+Gñø‰ ­Ä´nÒ@GlÂìœ&uqF¢ Áñz£&YûB(MNóGDPŒYAm)OÄî¥ô &x•Š)|DŸÊ¤Ù&˜§ÏKÖŒè9V¥ÇZŸ‡«d+c4ÿ€þ_càÿ Mðà7ˆÎ« É|$½yé$ÁkÊ#7ñEƒß¨’5®ÛÈM…ˆYµ$\? 
l€P4Ywøp]W%J…ËtªŒ Y‚»å(‘ðò+´£-Ѝ™qëÒRÅ$ËÚª…ÈÁ©a&ÍWDštþ¯$’”…ÛkbPÓú”Xv•!(KXĶh Sñ’p&¶è…ˆ«ô‹DŽœÚ}{{Ñ醈ò•Ò˜u¶–Π÷bùžÄå;Ï—ä´ÞÕJ6Ò 5 ­¥bׂ•1ìóIWå’‰LÜZLáf]¯-üμۘ:ã á@‘´ _©"F­™‰d\B¦Ê¤‡.§gζ]4_Ebnú:Ìö¯Ã|ñ=(óô]6'÷V+۞ϯë4Yäb–ýã¥fT0³Ž’÷ÛéV«ÍdÃüáË[–Ð'hŽÌ*8M£9FíFŠÑELÕå@ÊïÓóø»N{è^Ȭ\-t£’ÈŠè"·OEÔƒ ŒÉ9ÖkñÐÙÉѲ¬>¡t\EUƒônS“N zì£(9—6}GfðkRW²ö·5Á±_p»å Ø—ò¶DkiìIÄ94Žài uzòùµfgõ@&@´ÞfÔÚÔû¨õá Cýæ£Ö"ÞlÔ:5[Éj-8³Z.ÑÀ œUºÃl³üYZ\\WUW`óLÕuûUeµõó+K£žXw÷¯ÿS35¾„¦×ZwgÖW+Ì^«•×Ý£CWSñÒ±uWw÷ÊÊ9.Pw7¹Ñº²î*ï¦7Z5Õ]åÝü&ñé®îÎЯÅP_oáa¼.SÝ}å]m9÷ßd¼Á2ûxÃÁxƒé¦ã "¾•xC!”‹7T§-JâX£Q‹\a¡ºQŽ“•vîbL³cr ‰¨V7ˆ™Ý fµÌ®V"VÚ™ÿ𱈺AÌê1³b…Xdm,¶Ç<±q7ˆºA¬MÿRˆMºAlüv›vƒØäí 6ë±éÛAlÞ b³7€X/KkI;Ïÿd%l¤½+ðã¸Mó@»/žº¦Óù‰ þ"…þ"…þ"…kLèô)ô)ô)ô)ô)\y>ç•eÞ/s‘ü”¯®4@CØ'~úÄOŸøy-‰Ÿ×eèC }ˆ¡1ô!†>Äp½õ!†>ÄЇúCbxõ!†’¸Á› ŒÊî èCùÁ¨P1vs!‚ o$D ŸùÕ€ý¬‡ žéBÍÄ?|Ô¿˜¬Æ'÷KbK æ»TѹqgÁ²Ô&Ä ¼Çõ‚ÕDhúx–ÞˆøÌŽ Ó‘•~NL«¡kãñ$mL¿œªêši%Uާñ`PÃnŒ.ÌÄ>Ô§3ÌKq &ö <ÍŒKqÃúõD ö~n+ùȾ‰Í~ÉËVÓŽt{._‹Ÿt¥„æˆR¨|s“”Ÿ/AÄ4´ñ\nOA1§Ú\À™‚¨ Âí~µýTH®Ê!uã:˜•JoÔŠîRå mÀzÙ‹ÁÐ'(Ü üv&W³ ¹±´ÝÝ+“89KL–:úOÊWáÛ­øÊ­Q…tý>«ìjªÝ]¶¥$+Œ£$|ªô77Ÿ³”Ðõ>¿h¯Õu3kr¹‹ÇJ¯çcÿÜñÀÕ›êvvsºfͧ9#vXêËÝvHêÊœƒXö/ŽMPÔˆ–õ®ÅËŸ®EOF£yK,NýfÑõbÑ^/NýÎй±¸ôJúT\HiÈB9z‘¯ú«8º–wî.Îoõe§‡OEM/0Û$â}§ÝE†ç‘®¨»GI×îŠÜ3 ø¥ kŒµ‹©Y]ZüáãÙî]zÏ>SÞgÊûLùÕ ÔgÊûLyŸ)ï3å—E¨Ï”÷™òæ™òzgÛì+O{º÷§{º÷§¯¡ÞŸîýéÞŸîýéÞŸ¾zXYÙ’tx Œøo‰VžFrÁ3kbOÜ}&ቋ ÆPy¿ôã? 
hÒ Z%©Mq Ø­¡H~쉕Ûqªá¾ð¤ÜHBç몊…Ô\Ù]–Üïg¥@i¢ï9KòŠŒ„i>&ÚÄÞÒÈì8,ÃB®wškæ<«‰JSxgð²Â)©fŠzVr‹ìy'6tm4³.2óL[ÆÙf¦g€ùøbˆïÏ~fØ Óïc¿"T÷KÖ¯Ïɲٹ´:Ï/·áÓšÌ÷ùdIí‰Ä¨.ñÈÿÎ-wÛa(~w’t‰r¾–ø)Ùl‚dKØc4é¥;…_BðÅØ¯ÍsdÖ›KÿRSµ›y¸Ãð<ÍÎñ4LÍ4›`šUpµÕl k6ÍK k^JS͉65ÐeЊB»—UW1Ç™A}yu­V*Oì\kÅðÝ÷?¹Ñ;ñçOëè]Š­DpôÃ2'õ{'MQö­þ:¯Æ(÷j %÷¶sʤ{w4ÇæX3æ…—lic=÷’5¹X\×fÖ´ê 'TÕ§'n÷Þ»V+øA‘w¿ÖBê‘6±ŠªmjÆtV)öH›‹šù2±.Î!¸(·ìÄlÒµjZÅ>U£`Ì5ƒ»!ÝíIímgg{Ò‘ [m@ži³éøÈsm<4†¹ýþd]h:àVûT#-6uMŸŽ#üê7ªœPü§akÏÔåGšî÷ÃÒæ“YáeÑ£ÕTÞp,m–?jf­O±w(:Èwò[¹-øžjæÔ(Ù1ÆSSÖ2ÍÏe%³Òaeœ'\œe˸˜§}gÂ#qíφ lõgÄ‹€ÛŸO=+÷àì)wÙƒ½$AÚì`;ö²oÈ„÷ÿPKéÿ^Ûà ©­PK¤@~UüÄ££meta.xml 2012-05-02T10:10:082012-05-03T21:24:43.07PT26H29M35S13OpenOffice.org/3.2$Win32 OpenOffice.org_project/320m18$Build-95022012-05-02T11:37:56PK¤@Thumbnails/thumbnail.pngÍxõ_T_ÔîÌ0È0ÔÐÒ)ÒJ7(]Ò(]"- "ˆ€t  t‚4*èÀHw# Ò5 å;¾ßû˽Áýå|öÚg×Y{­çyΊÓÕV#ƒ2B™†º²>Ä7]þi÷î®0@ÿ^CYÁ0 g7W!vÅ8ë§Ü÷œz쀴­ˆÛ³8¶TäwRLu¼ìcÆ,4*xs¨L—‹DÄGÞßzîDôÐþGÌçOèð৤2Rç÷ükÇGµ6ô§b`÷)g~qq!ùFZï}WLŒÇ¶ó>M³4÷Óì½?;Ê7œ¸üÓ^2‚Äò¤³©›áì÷á<|®ñ@’ah øüz 2C ¶Ñ‘ªòäÓYÁÔÃ%Ñ:9gM ‡çLRG_ŠŸèÐv?8ž°<}nõoBs!©Å¼nöÈ—I`s¿¹Ken•@U—(X`.5?Îç…{¶mø8–ÇsThÊæL´ÂLTʇWUÙR;àcÛWcšózQ†ŒƒË¨ùvýþ®!_mKÿBèçßÜ)Ð]סñîm."{˜Jm Ýõ ’âd|„ºøv׳àô3Ç#‡Ò—b#F“¾:úð¢azÍNç1;£¼”ÁŸSûÛ œÔf$ŸÐ)B3¥#zt9A‚»O7J"9[-Ïò*,ÿÈ›[ÒÙŸBBô¾sÉ_¸!Í£Óô]’­T à¯cÒ^›6+¦ªÍ€ë8kz⹜vt¿Qv›Þóçý}{Ç5fЍðO|šZ£ê&þÐÞxè<]Óa"O |ât½ Ã*:6°˜~¾îCÂ0éßÍÞÓž’v´_%ÚÖÛœöHÇá¹d_Ý=A™ ­Ôˆ>´If鼆ÃZÐE'µÒ²ÿÊ ü~Öè©Ø¡øZ–Bä²cŠ^kp’MòÁ¡3Ÿéëà¨þG½† K1o)% Ÿ˜–)ŸÆ‚‡£Ù f@ø÷;¤«1úYÜ™ˆ—HVðÍNž“¡ÚaÔVÂŒîûã;ÍývËLVÁDo}Ï8ê{çÂT±Ñ%Hó, `†‘ø½þ;}nQ`8­yVg3EÛÓ}òä@þÉ„_íÄÉ/Ô#€×5ÍÒ<β¦r$*GŠç~A7€}bv"'ÉÉ:Õkcãþ¦sjótéîJáo±Ÿ:·H”®¾m^4d™ \k|ÝEgäóCøÏõÍpÊhp‘¸ÌRh@{,Ž=Otç®7Ò!û˜l-ùÛQõÓ–ðE§¡ã£Ö,± ˆ=å#9žˆl§ªƒÍŒ£“R‹ÒLY?çæ`ûmnŒ8-TÖñ{!j²"QÆuíÕ-Ì…3³ßšwÝxZ:Qal¦èÁ>`XQèJb§£‡vÓ^M|/·“¥ “û-‹»•¾ÄÕ0²'èZ-P9ßd$L°c^¨TÖÒ®zÃP x¥4(Ð Eí(–Zçfþ ×륥ºUÙY]ëû‹í2ÍI¿4•Îã;¥”ÐSÃê èÞt»¤Zûá‰P‡Ö2˜ø³ó¯ëŽ(–άv¡-›ïŸv£b{ØUŠl˜€ãÇj†âzdÕnTÐÜŒ4òÝ9­òµ'ˆòjóúšÖÛµ'-Š|“ûæÉ @¶²[„ˆbF@²‹.àáÚB{ÐM“ÿþÂü¤“°–3gï”b­§8UÛ„3³Ï—Íš¼_éÔ 
"„IiµvD˜¨oƒw+ü¶Ô¬pªÃK“r¬dg&·øNìíŽÎ“Öˆ1âW““¹bí/Ã6ÔzÜ$4ò‘àkrW1Xm§tåx—êp8ø1âv"ÁèKÚƒŸC/¤ý×n_pj·Ëþ¥73˜¬)ݦZdäC”Nmž°ÂŠ®ž:€¯‡3"8÷»ådeêØ_6Bß‘A¹°B?¨„ð¢;Ü}.ü§´T€äȱ6³Tóƒg /ƒ¼ŠüjE›¥Gž´2€¶®ŠsáØ¼M"é#"qr¶ )!@7%ºÊ™l–‡fØïщ¸‡kêÞl÷Fz¿åï VÛmzìïþÅ“à~8kƒìt)ëhûû·Þö3jePÂBšäH{J—ñŠì‘b ƇMhÀùå[<@ø];:âõJÄzÁÖÜ2ê$gWÙ»DçgnëÞåÚÆ'ÓéN=€*2“WõçG©Am¹]—Xß<rYÖ7Ùëê¡ý°¿(Žž»/fæî2e·ÎTXªÐÛÊí£ºÌë§´Hšú fx¶v6S#îzeÇ èý0Ù#äϹNQ+䑞;ï‹õE¥MÁÈEFƒy²½ýÞìÉ9¯ñÄæ=ãm\ÍjŽ©UáÄІÇÓuéµÊǽBèkªŒ>G1nÌV|öì¼Á¢°ÁçÃ>{ŧ#9GïCOóá€ß«+ØÎLàËóÍõô†Õ~ 3£µ´‡ÅÞí,þUe ( mJÚ7­AÜå0ZÒ‹Í~6/4©-ß6á’+¾ã¿´™Èù´>1ý` ÏAÅL*|Ø™ýv·¤šR;’y«Ðv½ô“}ø(á‡A‘úg¾N·TŸÔ¨» °‰ô6‰’•§l«³‹Î™ÖP+ù†tmŠë $\ÕŽFl¿ö6{e˜ÕåõN4u–º°®Îå<ìŽ@gI¼äcßxå“0 ɇï¢âÙjs¨¹ò%àÿ˜¸àܵ=o€úr¼ÔÝ vÇp¼ 0],äÓ\¬¯Ñïæ·kòRG$k ËëãµçLVToû­òv·L¦ø£I­±Ußʆ¬þ Ñz>`ý@®5oéù­£ÂF¼§p±ö˜¬ö»K5$Ùõ#8¦¾-˜–Î+ª\ÈX"XD%+ÏkýñyŽy™Ñ Z£ .—0Ío}:ïªýI–ùz[c#WÒÑD?…F^ÿÚÐ:°}[œk”£N4CʾÞe4hr¼ÙËèYÕkótM®€¯îkš¶g•0g9úÙ[ë7ó¬0+å—ËígÇÁÆ»Ÿ@}Uù ÜQ+ MÀ£{̯1Æ] LÛ¾j«gs”â= /í¡XßõWƒ4¶åÍ6¨aÅ8UÌ‘dݺ,b½Ü¥"»†ø&5É~Y5]A¦ìÊ·¥œmù…kÔßo ZúEÔ½~I¦¼÷û¿iC¿a&Ôf#?©ŸˆFìMˆ)éöÏëØËÍ®è6aKùñ3S™L äzÿ„(¬¤»äÍ©›dgq‹UP÷lëê§m¼`kúµ˜.€²~˜­¶{YD8Ö5³ÐYVxf¤æ— IKuø7U»Ÿ+š^T$;!pô„¤¥c©z'v’øöîòÛZ¼ðk³ðåüé<9|[§ƒ6‘Ð7ÄŸí:ƒx 4]ÚÄMZ=Rfúòxá‚<£×¯ÏÍo3fv'€!];Z‚/U4ÃJ^.9îzIÝ6d÷4aX1¾ñSd ‘/VIœDš*t¸°A(yzbϤ_a¨@[Eu¿ŠÞ%[.@߸=ÉÐ8Œ6A|gݾ@EžD—:óô·Õ?håË‚›ª[©½º£Ü@9‡´sCP ¯$ XJ©Î³TÅÏ6ççü¾Î&ÍyX•s4zFA¾F“£Å#3it¼MŸPJ Sä.|q©³;ŽÄ„K¨k˜£=EgSE‘ù?f×{ÊÙ *qN½˜Þù;Ÿp<>=’›µ¬Åa×:#jD<ÝõÌÝv u#t=ö Ca™ö9Ð<™´ÁV £‚+±0ÇVqÉ#Ÿhѯ9ë{׿Ói9ò?ŸÍr C7Ï鿦΢¹ÄH”ÐJu™~&8·xs¦Cß±eÂÇRb[Ú¹Gk(²LÝ·7$ÊÕ°ãhr‹õ/r¯Q?g´ö£7!øzYxÅŒ„Z™s^{a«ô&ÐÓÖ™UB†MS«üÇ1>‹«^ý.“˜Dÿ»¿¸«‚(^z{ÏöZ=– É.iÚ†*2H'ñÓn4ýTg¿*>7êOç-¬é×Y” “™y‹íR^…Ö ÙjUŽ¥¼Åɽ?Õ œæï…É/OIsÊ&Á5mÎÄÞ +ëÔËñº§'å»òÀnHƒÊ´ ‡‚C—ñÒ%Mr¶ ÿEÀLJ>´8ðà|‹°Ö`ÍåÁ%J¢Ü«†½ä‚äMb?"–ìߨ€ uéM(¥#^#…WÄy@ê›uÊ&±ç)¤ÀâÈ¥j˜ÂH¸‚é]F"ú@J6’ÙÏq€rxX"VÑŽ àBpƒLmI‘¾ò o>– )mþ€Ý{˜lì=ºPFUUCÊÒlª¹K._™ƒý‹~‘‚JëÀ«1Ju/H8Íáá@ªIN§õ€&CªMž¾Ú¢ª÷à-Ü¡¦ï®î[q!ž%œ¡÷»"—ž˜Šƒ,ÜH‘¤R­K8‘H7R[€XDAÕæÃ +z4¹gf–Œ3ö„åRNbÎg Ä G’Ôd6Ç*Ú5$ú›¨ ÇSÿ$‰ÛU‘)t9%¼Œˆ”…sÝ;¢ôGF/éÒþ÷KÈgœ¬@ëX‡0ø~‹¹Af,Ñ’H޲\–Ç0k·X„'ê›9àMOS”Ù1žý[z¥_#‡jqÓI~¢æ±hÚåð;’´=“Òí„× ¿ÞÝJÊÆ‰¤!-ŠÝ¬4ò Ê¿ð&9Þd éJÄGi@8hŠŸTlt,ßõå=ˆŸ7¢‹¨†Ù&€\¹0Köœ’2þkgåýëœKõГ 
K¬ÇP{§ï]|Ò SU t«Ywzè„8üy§ ˜ ¸Ð5¹â†2¶øš"³A^AßXí]|µZKPRòý»^¦ÈN¨y3¹L‹SÔg;YÒ¬l±[˜@ = Û Ì¬EŠÕ‹þáÊ•ÉË Ú¯“ìGœ¾è¢kM“ƒ:Ø#Z)¸ãWDƒÖ¸Û6âÓ^Á¤XÇ‘w!EÎ…ÁÅÊ¿¡¶›õ2Í阵÷n.v¸ 2W©ÚtÝ~ë÷âKÈþ¥Rw„Ê›«ö\™ÊCµ ™ý™S„ʼnñKÜñè“ þc«8àãÖãñ¹>‚4q¸Nûh>6ÞX,IËÇÔ0¹ŸÏ!d+’,ÿfšðPÏ cÎøÂÝS±Ý|ží&Zí|„ü¢hz§eŠûyîý›ÌušÒÆiž„QbnCÈ_h@Uo´˜^<–ØAÊ;¦¹g裧dgï žëw®•ÅÝŸ*l- /äñ|)©èp¹Ë*çý—ùr&ÌÿÖÕ÷Ç¿l×w/-ë¶ †0Ý¢$!_7›;¯Cò%óXhäÏuø¦§=öUie'ÙgÑù>:S„ËŸ×Hó'žðžÌˆš¸C lwoƒ¹T^_îý¤ˆœ¬éÚüUžÛðˆhü° 08šß)’re[ùjê¾i{u÷’ʦ¨è£úÞð4%(*øžœût(s™éÂy—O@bSÇo7`€qS-æ ÈŠÊæ…~%{ ÏÂIc³; ÃÓ(ןú(fã¾, /OõÃLœº4(ö À }Û+ɲéK|jÒg÷/ntï?©º ³ åüGxü*úˆOAm|¾…äõŸ€ÀµDi0§x*|*Tž2B‡è¸IñiYL]ïkX?r-°Â-,>¼Íßx’Ž»@Fš'œð?B!§ã>'Ñ%jäÁûXPtöEN¥alݺ¨»5œåikç«î¶v*÷Õ?H'¨.w/œ¬è³À¢Ý4ÛÜôÜ\3è+†>3³Ò,E©²47MÞWØÀ‚}=ýË5ˆZw½pÄ`übªétá[µ1ÒÒû\WÐi¨D¹³²9®ÍùÐiL¤¯Â´»ZñŠñÛg½ô!Ç¡'æ+݇hjõY¡¢œ•ìôáüQ§" øR]x5œò—2˜J\ë¼Åìáyåû¿…£­úQƒÓ¨•|ÂãWú²#ùÿØè›ê.;ÄS ?ûi€?áßKbRóŸ§ú)ol®v=–ßVSíË>dYÝq³É µºòn½ ¥ÖÄKUžÔ$7=¯Pì»NÑ)n¶çÊ);ÞÒlÞ[‚•æžP/º£Ÿ­ ÏäÍS¡¦ˆ,jdPþ£ñÄ]¼4P‡ÌÓÙÇwdïKþ«:rX˜§Âo] aìì3…l”a ØœM7=cDÉ_~¡»Þ3r¨ }\ÓM¸ƒ—q¼|ˆÜðF¾—ÿÔ-‡GÀŒijùUÈø¨)˜O­$7 ñÓÕ²ÞñtäÖ>(³Y4ë¨Ò&—ÉåV¤Üt­b&bXtŽ.aÒ—¬©_^Ù\Ù†.®«äÞçÃwå_ñ;)+4âû,2Ÿy}À0cy÷ßùìûd(Œ’i´S­ 5˜à}±>È{p¿³oÕ†=–{Ôa4¸xÜ´xÝÿ±YÚø¾ÝéÙ‡9ˆ×‘xw÷NZK§ÈïHL’Ädw7­ØåG¤ÊæbÕRÆL€Rj.kÛHk?Cg4lõ„ü¾V¿ ð¬~5»î¾ÇžµqÏ1I쿲p,Ø'Jú. dÉŽw!›"Þï7"Köe<8˜áÍuü5ñ~§û¸ò?¡ø¡þÿéÀ…*´Oªpék ü+Õk¨h+W+Ú„ÿPKêfÊPK¤@ settings.xmlí[[s£:~ß_‘ríÃ9µ•ñ-™‰S“œ6Ø$16ðåM€‚‰òr v~ý¶°“É88ãÁáì9»£ª€Ñ¥¿–ºÕÝjÈ×?V>=y$aä±àªRÿT«œÀfޏWC—N/*\ÿã+»¿÷lré0;ñIŸF$Ž¡KtÃèrÓ|UIÂà’áÈ‹.ì“è2¶/Ù’ÏÃ._÷¾ÌÀ65+ê‹«Ê<Ž——Õjš¦ŸÒæ'ºÕz«Õªf­Ï]—!‰€Ž3†|=æ5¬Í‚{Ï=”ʦ÷ëñŒ±¦ù€ÍÄ2ƵÚYuóüÜ;ò=z(ï{j3 <[ô»µÂçJ†÷}3ë WïóÞzá}+¬W*Ò¨\?ëó\Ý.ÎævêÅÄç:r²­æ<^UòòÑ#é‹öTòÆ}?Æô"¾($XgËÊsc¼^B£Ä•ëÓfýË×ê[:?EûŽÜǹÄŸkõ£©='žç‘o6gÍ£É÷ˆçÎsÙoÔ?Ÿ5¥êãå©8dEœ],’æ +ú®ᘤ²³Ãf‡  •k®ºzŽm×c‡¼Å%8¨\ßc‘cèK! 
v×ø#¨ËÑ(ÀKq”}äã09’ú»¤C× ¢ò@øõÎ H‰ëرBeÙ C[hïTŽ’ø&Q%¢)^—¨¼\qáG'Äî6-C8"õ|/À12ºÎ–í*bÄáò¬Rý¼V+:›·Xeˆh+;¼··KGäó™L„ëÊuõàrqUŒ—!ÈÿU¸¹cö‚8?f¥úcR C1X« *¬Í({ËâñÊ®%”„eºnªoÁ ï:Ê9 aïÜ8#BÁ¤‡Ï'|p˜°»#3]è3§ë%xîÖR–B¾Ãb›zöB'«Xt¼½>þ¿’´ç8p‰Æ6ç‡QÏ!Ñ„Kóe|Vоêpbw53³éõò™´Ââ7þº\wX’ÖÿH jÉÁ’‚ ·#²—{8¸Ô—oÄßáý(êÆï1~åw¹.NšÇÃÓ%ñIˆc–ˆÑ!Ë‚»|”#އ/Ó?aÓræ!GY@ÍaĀǃ{‹GxDN½„¸}Ƙ?r‚Ü^Nåë¬Ã¾æm"#¿CDâÃSV›Š$ÌÜöÏä®ÐrI×FDÂŽñÇû{Á‹a¢:WC»Û“Wùço 0ðû m]ø§ˆYÅP!Š 1„’a›ù<¯Ê³‚:@~h¸šï…gfíàh^Ó)ˆIîqBc€ÅùÑD½q~ð"îÏàI,´<Ç!Á‹øŽÏçÝA8›äœ'¶kE‚¢²O8þªÆ¨`t‚C¿Iàm©pXˆë<¹U˜‚8ïŽÿûʪþOeõCŸô#ünˆˆ6Gn±•ß—¢ ½'Ę~¼ÛêáØ.ngçEƒ§[(½&'ïžÁKîò|ÀØ‚’rÞ¼°½(#añãÝ7Gƒ@lTNæhC=Ä)ר²¸—¼ø½¨öø ä'0> ø$Jym“aäew>p ƒ$¦o­õðÕQÞyåt<‚îQR®"é^)üs7 Þ²°=/ ˰£ì°€îF•ƒWß"!Î>ÎM¬L“ !þvC¼œß/Ë eWL½xwyŽÉªfdI(Cÿ5å¯ßZ§g©S–žÂ ‘Ñ„Oó(ÈœÝ÷ŒÒž ²¯Qàð> ÙÉÅy­v‚ZõÚÉo ‰S.~? {Dâd÷höýk·šþ«Z›«²Øòg>}˜—Ôi ªí_¸ƒŽ‘öÛ‚0ÐÅTnϳ‰Ö´š7á­Šþke$!WoÌ|ºã"EEéÆNê,PŸ?CÏSg¢µñø )©0ÒLEs‹ÌñùÒöÍ'M\õ‘¸zœ6¤l’dùN„' ›ÔÍO4:“ZºÓ•j³Iá·Û4qºæÚèÝ<:ÝÖƒÕH‘ÝU¢~éÌi<;K #ZÝ”[Z§Š.&³î*BbkÇfÂiâq+A’04¨6R j Qy´º­µ>n-Àέ•îMjƒý4Dg=P}Ã#Ì«;O¦™ÚÐ×é)|Nêˆ`»c6™×Ìî|=ƒ9XÕuäçªÖ¼™ÛÝùrZן7î¶¢Bë¾ ôÁÆCj3ªw¥s«iÖ8=­g¦V×<».7jMêg´¶õúXZÏ4AµÙ>§ã•k’À:ʦÔRUSÓGæRšÔ4I[H¦.©œGe6V÷1v#Ãy‚±°nš:2h2ó/P?›s«1›Ü<Ù½›9QÑy¿çò¿¿+ª0~­ž¹YÃÎ4ÍÚ:ÂÂî.8ÆFN’Tž–NôAºàígʬÌÁðW5®/з >wXÐ|ÚÖ© Cj×µGóÒ T¤H-Áê®6õ)_3’žvc-n¢éX QG¸ŸuéÓt|ÃõHNjSeI|é¢i˜¢9BbBŸ¹Ù£)—ã݃øWqN§cMži€A¿³5‘”t66úØ|À=ý™›tƸR ÑÍö«¡>XSI” ðµß—ý~Drasÿ˃yû¼ñ‡_À£¾ *ØžÃvD÷wÛ'wÇÛ‡üÒɶðÅËó+¼Îš»=ñ~æß.‚çN›öìyx“þõž•_åWù;ÁÀ=Ã_w.wГܑWƒy=x@5¸×ûmð¡q]"~3q[îˆÛŒÌ>¾¶·êËÍù†StUðÓÍ~$ææß8A›²¨Œ|ÐËGhù‰ËíkáÏňKÇD ™¯IßÉÿÜçÙçÕ7ÿÓRÝ÷_O×ÿPKh&­H 75PK¤@META-INF/manifest.xmlµ•KjÃ0@÷9…ÑÞR›U1q-ôéyìôC3 Éí+’¸m(M±V–Ìè½ñH­6GkªDÔÞµì™?± œòvCË>¶ïõ Û¬++ ª¼ÎáuÚ²]ã%jlœ´€ ©ÆpWÉ‚£æk|3šÖ‹êîµ:ÆSu“A§eM§-“!­$å<ÅÁuüìâS0?Ï1ìF™|ÝròºOÆÔAÒ¾e‚‰‡r¹Oyó®×CŠg?.æLîd,ƒ—J<õQ¨ãX\äâ®"‚ÞxIP|H!ŸTýO^¹&ïMLüX™¢¹ƒk+@ñªÉÊ€E²¿w+Lnü9yÒ\MËáA9Á‘ÄØ þ×+~ç" àìX $gëkÛ}²;'µAA—!n˜>oa(_Ç×Ò®ÄÛxý PK’ð§»6ëé«ÿÌç“Wù¢W/¾úâììÓ\L6ùìæÓÉf³š½¹ÞLÏ“«é—/ÞLÎÿu¹Z^/.^4WÝ\w¾œ/Wgï'ó/_üá—òzñêæ6¯îÜç‘{¿›\N߬¦“m¿µÍ¯”ºÜúÝtuÿ¶Wï–ëY¾dóá݃K¶ÜGþÿÉ57W­óE‹Ë¯þð÷éf3]ý¡ùZ7~¼_Û—Ý*hs5Y]Îeåõ™7‹Æ»$/†èÅàn×eqoŽ+n~\q«ãŠ›­_¿[®6«ÉlóPä›år>,©›Õõ´»œõùdžUì±Ç‚îwÿe¶Ù,Ÿøþ¿Læë] 
ùøw‹Û×z/W³‹Ç÷Î[îòëìbóöõo•–«¹û‡Jw?[ÏÞ̧mß~¶ØôvûýÜþþ¯Süã??Pš»mÝ".âÔOýþ[Äåõìbº~BÍî^³åNoo.{õԪ߿n×…i>º·aóɇéêæöß|ܪÏn~àÄ9›œofïoß}²&Ë7ÿ5=ßÜ<ì÷ÓåÕ4K=?{yöãtõ‹üÍOÿ}=Yå|ä$/Îf_¾ø{wáî?j¾qÆÄ+÷nù»zYc bJ#ÑòýUyˆ)Ûä¼ys_Ì*õÉâr>mD½Ì²l " ‚å±7li„h3u“œ—jòâ ;¸˜oºÝ}:Ÿ^½>_®)JïkXd8{¸Ž·Pæ´‰C HÎSÀd½Î.ßnÆÉG÷û  ›°¼YNŸz0kÀ6/ dða‚K׫‡jq—ÐZÛMÄ/³ù|wéø~"Öo—¿¾náüO“™½×j½ù0ŸÞ0]\_Ýü0à´ucÝkýæ³w¯ß.W³ÿY.6“ù£wéì-&æfv^OÈúúÐÁé‹Î‹Ô|ØìÛ¶„ogëóeÞþ9O6ÿó½¿ËÛϛɺmO€C÷´&² #éâ–ÐHÂFRHct&€ó#p"9 wKèe wÙÐDJè ’ODÎUÜÀ¤»¯¨;‚îº#Gø)£èÅdu‘7…Ÿ§¿mZv8}»\\\Ÿo–+…¨êµE'?„‚Ï ¡ö:œæƒÁ Œ‰GÉ@t©*Z$"I.KË µ‡0ʪ™°«äS8ˆèe õhúÎ ô B"ô bGÊz ¬Ž|hüZH5w[I`ÈqÒê|D‘ ‘H=j¥«é£zÔGå«¿/Ôb‘Í8cÎú•ÝþÍÙÛŒR7*ºYjzöñoóOʳAïݵwvÆÅ¬–0S;ö.úºì8ùF’Í’„{‡-ãcJaÀì¸%Tv¬i:ÊŽ•wbÇ©v2* ñb]vŒž@Øq²…#3‹döä”+;~L5š;vŸŸƒí!x¬E)ʱܕíJÙßH¡ÇÈ褰)ZNƒ¾„J•+=Vz܉ôÀÉ3Êár²©nÂCH1ˆ 46.ŒÐ vjƒrä§uR)ò`(ræ§ÏÏ‘±2{áwÞv­Ü-Aä†Ì`„Î %vùÐT‚¬Y ²änÙõ@#Qu¾«õíZ¯ì¸ägú#™Bd‘œÑ3(EVŠü´V*GGÆ*yP<¸ˆ Ñ8ëgž°r ” !Öh’(Ù%$L‰PÄõÓùÛéÅõ|ª˜X·*¥bâ° %À÷вÉŠ£L©Se¼²x# (Vö'9ì `qÐ-›_BõåÕ—W_^}ùn´•{ ­ÑÙ$ !Ö è"”n–„ÜÉ£âÛgôŒÊ[•·>­•Ê[‡ãËû8ï =ÐäRÕ Åäë²dâFK~Œœ¹ejλœwƒ&É/ rdåÈÊ‘•#wãÈ—Í ל,¸@u92s*}M! GŽ£HÎ話]åÈ;h¥räápäPû¼ëïy#ûí-(öWEæ£êV‘HÕ˜/í2¥Š ¨©1ðì;±¬ëëéâ¢Y–ÃÊ]‹çX¨O°Á×¶òmÙêÐG,—¡Zùçeå÷-Õïeæá™òWz8ÀKi6r]'ÜöÂË!0Q)þ ! èå»oÔµ9Bø¿MÕ±Žcsã±ØöÀv(H}foC]¶CH!Yº“‚'yŸ «÷°áiÎG&“aæH]»vT!<»DiI¹Ôѹ<3 ¸Èšþ ëÒ¡d~¡‡PèE×ôO Ú±­Ù¾%ÇH7sIA@A ‡<€½!Ž!Ô‘`ˆî¶€µrØ$ïåbšD„ã )c±jTŸG´?f „äï¶ŒjÍß‹BBz( z&²WúZS¤8B2ÁG®ÛšÊò$YJ.9B9)¹ìÊHN¤5ÕáK¨™Hš‰¤™Hš‰Ô-HÏ=L ŒAÅ2ë²ì+ã‚FR¶t_¸æ}¤Àoó}óõ«?´Àv•Ôhý`¢õx©úú˜ýe9ŒÐˆ¶rûVò7’œ-í[­L.Ì9¡vûÖ×P9²rdåÈÊ‘»qäØCb*2fBÅÊój£ÅÂ|È5ój±‘›±Ó+EVŠü„F*CC'ÀS 9&WècÕb‘D$°´o%Ÿ½DÆq„A3ä×P²2deÈÊ;1dg{`ÈL)¡`k÷o")H°ôoÛHÎè”#+G~J'•$‡$§g*rsÐ[Kë X)aåjV‹$ \ªYÑS!”‘}ýjÖöçl¯f%— ßµ?—Ö¹ %§j¯”ªú–þõê¼ÍбC÷Ìñ8ÃO=ÜVXÈ z2˜<ËçÑÁÐÛŸ³<¿^½Ÿ¾¾˜­óœ?‘_fa. 
s"rÖaR 8™Šv8 ø4$öÃô·ÉårÑæ\âñŒƒäÓ(åý|ÍJ/R¢,ʆ,jÌ&ë¾ ~Ë{~è:éí¢aý,â.á°`8Y°LÑzN>ÄT1–Ÿ”R°É»($Í‘†Ã4¦á°“ ‡>¹Ã„à¡ÞzHUOEŒ³œ]*“ܘLþL¥Ð=ÀZ ÿmúa½Y.4V¿Â6¥ÔxØ`âa™¤ÞaÈ’Zñ°>ê’ÏÎjq}Í!ÈlŠ› ^¦ £±ed Ê‘Am/yÛc>¼”s.ùÆ V½èÔ˦—] CöJž‘<›¥^wí ¼kæÎĺ†n¥Û|dmLÅÐYʬ «ú–Çlµs Í΋:]G;¹ž„¡c¥÷D8B–éQ\×ÒÑÙlÙhØ8–M¤Ž‡³—©þ–Þþœ[LÝúòE;7LQS?US¬>àÙL}·ê:|€D4Iú_›ÀT3§4KŠÑ¹" ÈNb€Ù‡òàø>–PK 4çAs4ç¡Sl›.-pÎØÌ¹Ä›®YUîÀdà" m{cGfD.”¸Ól?—²Ñ³?N®Þ¿8ûv¶¸ÐòGsoQOr&ÊMÏUc@¾ÁÒL©Œ ¡ò`é̺Êä!™²–LD‰z wîJŠöš-Ýúœ-œ7™€’­MŽ´È@½ícПD¸y­žQ:Œ¡ÅÊ "Q40Ä)»ÙÖXB.r9aÊZûS¶BH"ä‹zVQ9JÀîù0äàÌô—Î$›FN"ÑUADDùNDY¿”‘–ˆä“@”*#Éögmíîa¢o®uN#ô§gð=ÙPìÁ†"±ÏºâDW ® ÅÄ\DÙÌæÅ„˜I>d¹¾ myÔv Ѻrm:ôT­³ƒu¦>¬3ˆJóIŠ•78´ŽDTÞÖÒ8[XçË£?‚q¶>i«m"xç›kƒSª¬¶¹÷?8Ù3o_6+‰è™gùº“Ë>&Ec2‘1’òéŸ,×󥹚®×“Ëü^O jŸ<¦ ŸÃIÎIAO˜=`äÝ¢ðZ=Ö…¬[YhR²8Î6 Ý<°ðý̪O²¾ûFaê0µM'¥†ˆRس²VÜnBòÁFW™[}—uœßÿÈ6jѵ¢Õ㺩€5DÀr½Ð*t)ŠFøP›V5²œIY Ç`¼óPâ4èàþðóÏ?*P…Vµë¤¢ÔÉ¡Ô^}=õW¶èå4ÑJÎÕ ,Ûp+Jæ-¿#B dqè:Lî$¦(ô²Š»”:€± “GË™²…Š¥`“‹.$¦(^Zê ¥Zêp’¥Þ÷´¶Ìx„P=$XdYã‘ØJ"{ÙÐÊgù~r¶ž®ÞÏÎušÂ‘bƒ­Ê©$ö¤Iìö"]Ï=°WÄÌF˜ $›ê²WŒŽŠ$²%+‚°P>×µöáD¸kk¸wõ„d}H}òà]]îêdRµ–§hÃ1¸ëí ”»*wUîº+w ½p×€ÅBô\›»ÞÊʈÉco,…àå}v—áô¹ë¿ý¶™®ùGVþzLþÚ® Ê_Où¨hKF¯=̱ý™f:»«;ÇÚEéÝæ2'òTÖÛX$3v%aû̱nÎöK}JeØOçÁ5Z­òy–§=Öx®§~RßÍ&WËLX[Jê£#¬'ðo¡jû8ià&%èÒ~É£KÙk„Rì" x„V/K¸Û -²d£%`Œ‰²YÑQ%ÃéÎËé!‹:ªê¨ž¢£Ê¶ÖÇÖ9‰b»º]Deü*‡Ÿ Áñdþi*‡íaNêùnÙ;]/µTu÷t«Zj#©Á4’ò÷èu<ÒzòGlØaÕ$‘ä"¸0Èì GC‰B~rèüòfˆŒ–穦FíN.j÷íl}¾Ì8õÏé|"(—Õæ»Éfòf²nË䃫S0ë£ #Ê{¦åš%ùEûInŒÎäM:K–)GaÈ¢{XÁ]úL…)I‚&DòI’O«ž<ßõçST‡^zuèOÒ¡?¸âGz‚zˆ‚až¡æ.ð»¤Ò-)ŽÅgcr"9tžpLòúõUÂZ°nÓG%¬¬Hdê!àhSâ%ƒŽ©®oM’IÑ0A9f>€Hv±ëHŽcâÓ·ËÅÅõùFãG‰7¶k¥Æ‡]‘Ⱦ‡ƒrϘF.J[+ª{PîáF ÉA9`ö²%qÐç䇯ànçäyýlâ’óSvvr«½áä£ûý  ÝêV«[}’n5÷@[ÄÔ8·¶vvä$ï„¶fp!yŸÑ3ª[­„õ1}T·z0ä™›ÖÎ }š‡>I¥ØÄe·Ú¡«Ë6’06‰¤^2ˆÈKKtv"éÁK¨YÏ” +AîF{)rÎ D‹u 2b(çLÒ”Uré&–%gô J• ?¦ÑA'@û¨´â šÈY#“¯KI“‚œßòÈ£¡ØµYÚ‰ä×P ²d%ÈJ;äÐG¥ÉTJ™¶†Ê‰! ä=¤$Ljll "7c§¦=(IÞA'•$‡$§ç'É¡’+ëbvêCvת¶ú”`k¼‘ä IæljB’v§y$¹%T’¬$YI²’än$ûˆ"'r#oÅÄu«ÀqêCŽJ;’<õ,"‚Òd¥ÉOk¥ÒäÁÐäLQkÐä@ñà’.Dƒ! 
RY´•ëz9ø9 ôQêzcÄ:€àPÑõÓùÛéÅõ|ª˜X·)¥Bâ° &õІ@¦¦–S(ª¨É¤ÔHò 3^eg,Š+ïb ƒîCpøª+¯®¼ºòêÊwc­¾Ö*K‹ƒl͈.‚A‰àfÚ“c$C¶á&L•¶*m}Z+•·Ç•§8ñâhrÖÀ渆CªëÖÝH‚ÆÈÆzlN¼ÀÅAÓä×Pi²Òd¥ÉJ“»ÑäƒËçÊlnÎÀǺ4™ KB…&Go¡œxqÔè®Òä´Riòph2×>ñÚ2‹*ôWLæÅ®•½»Ï¢JIšìH n™EŽ„R"uåÆ{Í¢j}Îm³¨ I[ÓQTŸÙ(ªû•N{M¢ª´½ÍЯ Æ{I¾¬Ÿï“%Yñ“aF7ξ£÷®ìp.úTÛз=g»SKà›K‘ÕÒ?/K¿o¬~/KçgJb‰¶$®TiÛªã%³ŸbiNlʆŽ&@(…Àäyý“¾ûFý›cd¯´j£ú7Ã)|ÁÊó…¶°ž=°ŠÒ¸¨~¯Œ,©Ô„€‰1Ea=‰bé•Á݈}XOûsn›¼r)rð'Äzv‰Ö’ª“qö"Tφ!ØŸò^,[z=TܘùTòT¦û@þƒªl¡M3†ÎÉi;cÈÖçlßÔ¸CÔ•é)†ütÊèIà÷AŽ"®ðK‹ija+‡_Ц"ˆ<ΠB MœÕví ¾Wø¥õ9[@L&LQ¾(Z " "=T=Ù+§)R­®lr0ò”ÝŽÊEüÙm/‚\ R¼ÉÞãˆKñft£«ƒPš4¡Iš4¡©[ ß÷0€0RÊ–™]þCÝ>W.ФÒî0•I ¶‘œÁ“†PÂõ._¯áþ# !l×I ÷&ÜO ë?r ™¢Ð»ècåV°Ò°"˱±4‚µ2 1óc¶4ìF°‡.ŸÒc¥ÇJ•w£Ç¡‡ÔVt,L$X®Þ¶h¦ßr*o3t&åÆÊŸPH¥ÆÃ¡Æ|Ô8ö@#‡‘/ÞVG(‚ÐÛÒ–0;‡,1eæA“ãƒPÙ±²ceÇÊŽ»±ãÔ;ft# P¬ÝþUê¥.Ãq3#¡Èu@Ù±²ã'RÙñpØq|¦ò¸d{k‡&†À•ë`ƒ/=a"²D lŠ\¸$S:Blëc¶_ ò~RéYšCU!‡j¯ªú†þõê¼ÍÎaPÃS}–ÎY¦8†” ܇²Á¥t¬r÷§ëAú ‚ç׫÷Ó׳u^€óGÈñË,ËdNDÎ:LЧSΧŸFÂ~˜þ6¹\.ZÜŸtpQÇK0ŽØÁÈES³P–C–9ˆœàÇœßIÉ9åÝÞ7þÕÃêíþ †“Ë­çäCLÃ_ù§I)›¼‹@iøKÃ_þ:ÅðW:|Ò‡  AJ¡êˆLoÈa‹aL³[Ux¡óv=“ÿ6ý°Þ,«ß+a›Rj l01°t¯ºè¡R+ÖGMQ˜§±jã.õÍM³„âgÉ¡öÞU/QÜö˜/eãl@×$­W‚Sçz ÎõóˆïD‚“6Ùi«™xí ¹Ò„ ÅD³ §ðõ®l{̇—fWßÉ•ÑcP Q 9 †€õÏ"}ÔnDà$õfȱ*†€¥” Ù8Cb”l£=i}Ì6 ÉA ò=ãIõ¤U 9m y,Lÿ|²c”ÿð–ÿÑ$ µ°!ªƒ±¢‰^¸GÆÌ ÆŽ sh¿$ò|ÄVPCýê×P¿†ú»…ú®vpÎd»+³µ#Ùš…`Œ÷$$OcG†$Ôï­ P öý\ò7Îþ8¹z7þâìÛÙâBGÀÔûoUP û'ìžËÙîe2„‹Xšyz_y2•”8g0YcVx+Ù01ãf×™{µ&l}ÎÖ›2MÌÎCT[ýíãÄýŸ E²ÒG«uL^‚Y1ÚÊNË@0Ù{ËÎvVÂh‰8¡Õzës¶ÂH’ïÙµïªbˆÆì†ƒ!}dçs¤›š“ª;©ÂñRvƒ&‚‹¥ 'sƒRÑݵè>Ùùí¹-‘K ½Î¨RS¯?ŽîÓèüw³ÉÕrqј‹}ób©=áT¹EH‘Š ×4¢pžâˆ!îzžv*(]ÁÝQ%-cLd¹fx>£cºórž×ð¼†çO1<Öõ@ú|`nº…º}Ú"Y'½9É£t¢p€QäBJþô£ó?ä»­ÞÏÖKÉ£E«RjH~0!y!¨uGŽmÅÄÃSñ³»ç¬9Á([IÒþK²h½ñH(’½µ|ú¨øéõfµ\($V‡Äm:ù9`¢&ö—`'é}}ÔÑ»eFaªZ4TJé ¹‡H®Ó³÷#ïòwè*údÊé^DM³S?^ýxõã;rVޗ3+Å’U+ê‰åL„’ñI ê­‹®ÈÍÛ€WƪŒõ T'~@Nüá(ÍûùäÃtõÕÍ›üßåjrõÕÿz);þnova-13.0.0/doc/source/image_src/create_vm_states.diag0000664000567000056710000000177312701407773024073 0ustar jenkinsjenkins00000000000000seqdiag { edge_length = 250; span_height = 40; node_width=200; default_note_color = lightblue; // Use note (put note on rightside) api [label="Compute.api"]; manager [label="Compute.manager"]; api -> 
manager [label = "create_db_entry_for_new_instance", note = "VM: Building Task: Scheduling Power: No State"]; manager -> manager [label="_start_building", note ="VM: Building Task: None"]; manager -> manager [label="_allocate_network", note ="VM: Building Task: Networking"]; manager -> manager [label="_prep_block_device", note ="VM: Building Task: Block_Device_Mapping"]; manager -> manager [label="_spawn", note ="VM: Building Task: Spawning"]; api <-- manager [note ="VM: Active Task: None"]; } nova-13.0.0/doc/source/image_src/Nova_spec_process.graphml0000664000567000056710000012701512701407773024742 0ustar jenkinsjenkins00000000000000 API Cell Folder 1 nova-cells nova-cells rabbit cell slots cell slots nova-api child cell Folder 2 rabbit nova-scheduler mysql - hoststate hoststate nova-cells nova-compute <?xml version="1.0" encoding="utf-8"?> <svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="40px" height="48px" viewBox="0 0 40 48" enable-background="new 0 0 40 48" xml:space="preserve"> <defs> </defs> <linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="655.0938" x2="409.4502" y2="655.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_1_)" d="M19.625,37.613C8.787,37.613,0,35.738,0,33.425v10c0,2.313,8.787,4.188,19.625,4.188 
c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,35.738,30.464,37.613,19.625,37.613z"/> <linearGradient id="SVGID_2_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="649.0938" x2="409.4502" y2="649.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_2_)" d="M19.625,37.613c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.927-18.396,3.927 c-9.481,0-17.396-1.959-18.396-3.927l-1.229,2C0,35.738,8.787,37.613,19.625,37.613z"/> <linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="646" x2="408.2217" y2="646" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_3_)" cx="19.625" cy="31.425" rx="18.396" ry="3.926"/> <linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="641.0938" x2="409.4502" y2="641.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" 
style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_4_)" d="M19.625,23.613C8.787,23.613,0,21.738,0,19.425v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,21.738,30.464,23.613,19.625,23.613z"/> <linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="635.0938" x2="409.4502" y2="635.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_5_)" d="M19.625,23.613c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926l-1.229,2C0,21.738,8.787,23.613,19.625,23.613z"/> <linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="632" x2="408.2217" y2="632" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_6_)" cx="19.625" cy="17.426" rx="18.396" ry="3.926"/> <linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="627.5938" x2="409.4502" y2="627.5938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" 
style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_7_)" d="M19.625,10.113C8.787,10.113,0,8.238,0,5.925v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,8.238,30.464,10.113,19.625,10.113z"/> <linearGradient id="SVGID_8_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="621.5938" x2="409.4502" y2="621.5938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_8_)" d="M19.625,10.113c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926L0,5.925C0,8.238,8.787,10.113,19.625,10.113z"/> <linearGradient id="SVGID_9_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="618.5" x2="408.2217" y2="618.5" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_9_)" cx="19.625" cy="3.926" rx="18.396" ry="3.926"/> <path opacity="0.24" fill="#FFFFFF" enable-background="new " d="M31.291,46.792c0,0-4.313,0.578-7.249,0.694 
C20.917,47.613,15,47.613,15,47.613l-2.443-10.279l-0.119-2.283l-1.231-1.842L9.789,23.024l-0.082-0.119L9.3,20.715l-1.45-1.44 L5.329,8.793c0,0,5.296,0.882,7.234,1.07s8.375,0.25,8.375,0.25l3,9.875l-0.25,1.313l1.063,2.168l2.312,9.644l-0.375,1.875 l1.627,2.193L31.291,46.792z"/> </svg> <?xml version="1.0" encoding="utf-8"?> <svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="41px" height="48px" viewBox="-0.875 -0.887 41 48" enable-background="new -0.875 -0.887 41 48" xml:space="preserve"> <defs> </defs> <linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-979.1445" x2="682.0508" y2="-979.1445" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_1_)" d="M19.625,36.763C8.787,36.763,0,34.888,0,32.575v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,34.888,30.464,36.763,19.625,36.763z"/> <linearGradient id="SVGID_2_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-973.1445" x2="682.0508" y2="-973.1445" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop 
offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_2_)" d="M19.625,36.763c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.927-18.396,3.927 c-9.481,0-17.396-1.959-18.396-3.927l-1.229,2C0,34.888,8.787,36.763,19.625,36.763z"/> <path fill="#3C89C9" d="M19.625,26.468c10.16,0,19.625,2.775,19.625,2.775c-0.375,2.721-5.367,5.438-19.554,5.438 c-12.125,0-18.467-2.484-19.541-4.918C-0.127,29.125,9.465,26.468,19.625,26.468z"/> <linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-965.6948" x2="682.0508" y2="-965.6948" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_3_)" d="M19.625,23.313C8.787,23.313,0,21.438,0,19.125v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,21.438,30.464,23.313,19.625,23.313z"/> <linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-959.6948" x2="682.0508" y2="-959.6948" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop 
offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_4_)" d="M19.625,23.313c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926l-1.229,2C0,21.438,8.787,23.313,19.625,23.313z"/> <path fill="#3C89C9" d="M19.476,13.019c10.161,0,19.625,2.775,19.625,2.775c-0.375,2.721-5.367,5.438-19.555,5.438 c-12.125,0-18.467-2.485-19.541-4.918C-0.277,15.674,9.316,13.019,19.476,13.019z"/> <linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-952.4946" x2="682.0508" y2="-952.4946" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_5_)" d="M19.625,10.113C8.787,10.113,0,8.238,0,5.925v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,8.238,30.464,10.113,19.625,10.113z"/> <linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-946.4946" x2="682.0508" y2="-946.4946" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop 
offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_6_)" d="M19.625,10.113c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926L0,5.925C0,8.238,8.787,10.113,19.625,10.113z"/> <linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="644.0293" y1="-943.4014" x2="680.8223" y2="-943.4014" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <ellipse fill="url(#SVGID_7_)" cx="19.625" cy="3.926" rx="18.396" ry="3.926"/> <path opacity="0.24" fill="#FFFFFF" enable-background="new " d="M31.04,45.982c0,0-4.354,0.664-7.29,0.781 c-3.125,0.125-8.952,0-8.952,0l-2.384-10.292l0.044-2.108l-1.251-1.154L9.789,23.024l-0.082-0.119L9.5,20.529l-1.65-1.254 L5.329,8.793c0,0,4.213,0.903,7.234,1.07s8.375,0.25,8.375,0.25l3,9.875l-0.25,1.313l1.063,2.168l2.312,9.645l-0.521,1.416 l1.46,1.834L31.04,45.982z"/> </svg> nova-13.0.0/doc/source/i18n.rst0000664000567000056710000000342512701410011017244 0ustar jenkinsjenkins00000000000000Internationalization ==================== Nova uses the `oslo.i18n library `_ to support internationalization. The oslo.i18n library is built on top of `gettext `_ and provides functions that are used to enable user-facing strings such as log messages to appear in the appropriate language in different locales. Nova exposes the oslo.i18n library support via the ``nova/i18n.py`` integration module. 
This module provides the functions needed to wrap translatable strings. It provides the ``_()`` wrapper for general user-facing messages and specific wrappers for messages used only for logging. DEBUG level messages do not need translation but CRITICAL, ERROR, WARNING and INFO messages should be wrapped with ``_LC()``, ``_LE()``, ``_LW()`` or ``_LI()`` respectively. For example:: LOG.debug("block_device_mapping %(mapping)s", {'mapping': block_device_mapping}) or:: LOG.warn(_LW('Unknown base file %(img)s'), {'img': img}) You should use the basic wrapper ``_()`` for strings which are not log messages:: raise nova.SomeException(_('Invalid service catalogue')) Do not use ``locals()`` for formatting messages because: 1. It is not as clear as using explicit dicts. 2. It could produce hidden errors during refactoring. 3. Changing the name of a variable causes a change in the message. 4. It creates a lot of otherwise unused variables. If you do not follow the project conventions, your code may cause hacking checks to fail. The ``_()``, ``_LC()``, ``_LE()``, ``_LW()`` and ``_LI()`` functions can be imported with:: from nova.i18n import _ from nova.i18n import _LC from nova.i18n import _LE from nova.i18n import _LW from nova.i18n import _LI nova-13.0.0/doc/source/api_plugins.rst0000664000567000056710000001566112701407773021031 0ustar jenkinsjenkins00000000000000API Plugins =========== Background ---------- Nova has two API plugin frameworks, one for the original V2 API and one for what we call V2.1 which also supports V2.1 microversions. The V2.1 API acts from a REST API user point of view in an identical way to the original V2 API. V2.1 is implemented in the same framework as microversions, with the version requested being 2.1. The V2 API is now frozen and with the exception of significant bugs no change should be made to the V2 API code. API changes should only be made through V2.1 microversions. This document covers how to write plugins for the v2.1 framework. 
A `microversions specific document `_ covers the details around what is required for the microversions part. It does not cover V2 plugins which should no longer be developed. There may still be references to a v3 API both in comments and in the directory path of relevant files. This is because v2.1 first started out being called v3 rather than v2.1. Where you see references to v3 you can treat it as a reference to v2.1 with or without microversions support. The original V2 API plugins live in ``nova/api/openstack/compute/legacy_v2`` and the V2.1 plugins live in ``nova/api/openstack/compute``. Note that any change to the Nova API to be merged will first require a spec be approved first. See `here `_ for the appropriate repository. For guidance on the design of the API please refer to the `OpenStack API WG `_ Basic plugin structure ---------------------- A very basic skeleton of a v2.1 plugin can be seen `here in the unittests `_. An annotated version below:: """Basic Test Extension""" from nova.api.openstack import extensions from nova.api.openstack import wsgi ALIAS = 'test-basic' # ALIAS needs to be unique and should be of the format # ^[a-z]+[a-z\-]*[a-z]$ class BasicController(wsgi.Controller): # Define support for GET on a collection def index(self, req): data = {'param': 'val'} return data # Defining a method implements the following API responses: # delete -> DELETE # update -> PUT # create -> POST # show -> GET # If a method is not definied a request to it will be a 404 response # It is also possible to define support for further responses # See `servers.py `_. 
class Basic(extensions.V3APIExtensionBase): """Basic Test Extension.""" name = "BasicTest" alias = ALIAS version = 1 # Both get_resources and get_controller_extensions must always # be definied by can return an empty array def get_resources(self): resource = extensions.ResourceExtension('test', BasicController()) return [resource] def get_controller_extensions(self): return [] All of these plugin files should live in the ``nova/api/openstack/compute`` directory. Policy ~~~~~~ Policy (permission) is defined ``etc/nova/policy.json``. Implementation of policy is changing a bit at the moment. Will add more to this document or reference another one in the future. Note that a 'discoverable' policy needs to be added for each plugin that you wish to appear in the ``/extension`` output. Also look at the authorize call in plugins currently merged. Modularity ~~~~~~~~~~ The Nova REST API is separated into different plugins in the directory 'nova/api/openstack/compute/' Because microversions are supported in the Nova REST API, the API can be extended without any new plugin. But for code readability, the Nova REST API code still needs modularity. Here are rules for how to separate modules: * You are adding a new resource The new resource should be in standalone module. There isn't any reason to put different resources in a single module. * Add sub-resource for existing resource To prevent an existing resource module becoming over-inflated, the sub-resource should be implemented in a separate module. * Add extended attributes for existing resource In normally, the extended attributes is part of existing resource's data model too. So this can be added into existing resource module directly and lightly. To avoid namespace complexity, we should avoid to add extended attributes in existing extended models. New extended attributes needn't any namespace prefix anymore. JSON-Schema ~~~~~~~~~~~ The v2.1 API validates a REST request body with JSON-Schema library. 
Valid body formats are defined with JSON-Schema in the directory 'nova/api/openstack/compute/schemas'. Each definition is used at the corresponding method with the ``validation.schema`` decorator like:: @validation.schema(schema.update_something) def update(self, req, id, body): .... Nova supports the extension of JSON-Schema definitions based on the loaded API extensions for some APIs. Stevedore library tries to find specific name methods which return additional parameters and extends them to the original JSON-Schema definitions. The following are the combinations of extensible API and method name which returns additional parameters: * Create a server API - get_server_create_schema() * Update a server API - get_server_update_schema() * Rebuild a server API - get_server_rebuild_schema() * Resize a server API - get_server_resize_schema() For example, keypairs extension(Keypairs class) contains the method get_server_create_schema() which returns:: { 'key_name': parameter_types.name, } then the parameter key_name is allowed on Create a server API. Support files ------------- At least one entry needs to made in ``setup.cfg`` for each plugin. An entry point for the plugin must be added to nova.api.v21.extensions even if no resource or controller is added. Other entry points available are * Modify create behaviour (nova.api.v21.extensions.server.create) * Modify rebuild behaviour (nova.api.v21.extensions.server.rebuild) * Modify update behaviour (nova.api.v21.extensions.server.update) * Modify resize behaviour (nova.api.v21.extensions.server.resize) These are essentially hooks into the servers plugin which allow other plugins to modify behaviour without having to modify servers.py. In the past not having this capability led to very large chunks of unrelated code being added to servers.py which was difficult to maintain. Unit Tests ---------- Should write something more here. But you need to have both unit and functional tests. 
Functional tests and API Samples -------------------------------- Should write something here Commit message tags ------------------- Please ensure you add the ``DocImpact`` tag along with a short description for any API change. nova-13.0.0/doc/source/policy_enforcement.rst0000664000567000056710000001623212701410011022351 0ustar jenkinsjenkins00000000000000.. Copyright 2014 Intel All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Rest API Policy Enforcement =========================== Here is a vision of how we want policy to be enforced in nova. Problems with current system ---------------------------- There are several problems for current API policy. * The permission checking is spread through the various levels of the nova code, also there are some hard-coded permission checks that make some policies not enforceable. * API policy rules need better granularity. Some of extensions just use one rule for all the APIs. Deployer can't get better granularity control for the APIs. * More easy way to override default policy settings for deployer. And Currently all the API(EC2, V2, V2.1) rules mix in one policy.conf file. These are the kinds of things we need to make easier: 1. Operator wants to enable a specific role to access the service API which is not possible because there is currently a hard coded admin check. 2. One policy rule per API action. Having a check in the REST API and a redundant check in the compute API can confuse developers and deployers. 3. 
Operator can specify different rules for APIs that in same extension. 4. Operator can override the default policy rule easily without mixing his own config and default config in one policy.conf file. Future of policy enforcement ---------------------------- The generic rule for all the improvement is keep V2 API back-compatible. Because V2 API may be deprecated after V2.1 parity with V2. This can reduce the risk we take. The improvement just for EC2 and V2.1 API. There isn't any user for V2.1, as it isn't ready yet. We have to do change for EC2 API. EC2 API won't be removed like v2 API. If we keep back-compatible for EC2 API also, the old compute api layer checks won't be removed forever. EC2 API is really small than Nova API. It's about 29 APIs without volume and image related(those policy check done by cinder and glance). So it will affect user less. Enforcement policy at REST API layer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The policy should be only enforced at REST API layer. This is clear for user to know where the policy will be enforced. If the policy spread into multiple layer of nova code, user won't know when and where the policy will be enforced if they didn't have knowledge about nova code. Remove all the permission checking under REST API layer. Policy will only be enforced at REST API layer. This will affect the EC2 API and V2.1 API, there are some API just have policy enforcement at Compute/Network API layer, those policy will be move to API layer and renamed. Removes hard-code permission checks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hard-coded permission checks make it impossible to supply a configurable policy. They should be removed in order to make nova auth completely configurable. This will affect EC2 API and Nova V2.1 API. User need update their policy rule to match the old hard-code permission. For Nova V2 API, the hard-code permission checks will be moved to REST API layer to guarantee it won't break the back-compatibility. 
That may ugly some hard-code permission check in API layer, but V2 API will be removed once V2.1 API ready, so our choice will reduce the risk. Port policy.d from oslo-incubator into nova ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This feature make deployer can override default policy rule easily. And When nova default policy config changed, deployer only need replace default policy config files with new one. It won't affect his own policy config in other files. Use different prefix in policy rule name for EC2/V2/V2.1 API ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Currently all the APIs(Nova v2/v2.1 API, EC2 API) use same set of policy rules. Especially there isn't obvious mapping between those policy rules and EC2 API. User can know clearly which policy should be configured for specific API. Nova should provide different prefix for policy rule name that used to group them, and put them in different policy configure file in policy.d * EC2 API: Use prefix "ec2_api". The rule looks like "ec2_api:[action]" * Nova V2 API: After we move to V2.1, we needn't spend time to change V2 api rule, and needn't to bother deployer upgrade their policy config. So just keep V2 API policy rule named as before. * Nova V2.1 API: We name the policy rule as "os_compute_api:[extension]:[action]". The core API may be changed in the future, so we needn't name them as "compute" or "compute_extension" to distinguish the core or extension API. This will affect EC2 API and V2.1 API. For EC2 API, it need deployer update their policy config. For V2.1 API, there isn't any user yet, so there won't any effect. Group the policy rules into different policy files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ After group the policy rules for different API, we can separate them into different files. Then deployer will more clear for which rule he can set for specific API. 
The rules can be grouped as below: * policy.conf: It only contains the generic rule, like: :: "context_is_admin": "role:admin", "admin_or_owner": "is_admin:True or project_id:%(project_id)s", "default": "rule:admin_or_owner", * policy.d/00-ec2-api.conf: It contains all the policy rules for EC2 API. * policy.d/00-v2-api.conf: It contains all the policy rules for nova V2 API. * policy.d/00-v2.1-api.conf: It contains all the policy rules for nova v2.1 API. The prefix '00-' is used to order the configure file. All the files in policy.d will be loaded by alphabetical order. '00-' means those files will be loaded very early. Add separated rule for each API in extension ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is for provider better granularity for policy rules. Not just provide policy rule for extension as unit. This need user to move the policy rule into separated rule for each API. Enable action level rule override extension level rule ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ After separated rule for each API in extension, that will increase the work for deployer. So enable extension level rule as default for each API in that extension will ease that a lot. Deployer also can specify one rule for each API to override the extension level rule. Existed Nova API being restricted --------------------------------- Nova provide default policy rules for all the APIs. Operator should only make the policy rule more permissive. If the Operator make the API to be restricted that make break the existed API user or application. That's kind of back-incompatible. SO Operator can free to add additional permission to the existed API. nova-13.0.0/doc/source/stable_api.rst0000664000567000056710000000743012701407773020615 0ustar jenkinsjenkins00000000000000.. Copyright 2015 Intel All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Nova Stable REST API ==================== This document describes both the current state of the Nova REST API -- as of the Kilo release -- and also attempts to describe how the Nova team intends to evolve the REST API's implementation over time and remove some of the cruft that has crept in over the years. Background ---------- Nova currently includes two distinct frameworks for exposing REST API functionality. Older code is called the "V2 API" and exists in the /nova/api/openstack/compute/contrib/ directory. Newer code is called the "v2.1 API" and exists in the /nova/api/openstack/compute/plugins directory. The V2 API is the old Nova REST API. It will be replaced by V2.1 API totally. The code tree of V2 API will be removed in the future also. The V2.1 API is the new Nova REST API with a set of improvements which includes Microversion and standardized validation of inputs using JSON-Schema. Also the V2.1 API is totally backwards compatible with the V2 API (That is the reason we call it as V2.1 API). Stable API ---------- In the V2 API, there is a concept called 'extension'. An operator can use it to enable/disable part of Nova REST API based on requirements. An end user may query the '/extensions' API to discover what *API functionality* is supported by the Nova deployment. Unfortunately, because V2 API extensions could be enabled or disabled from one deployment to another -- as well as custom API extensions added to one deployment and not another -- it was impossible for an end user to know what the OpenStack Compute API actually included. 
No two OpenStack deployments were consistent, which made cloud interoperability impossible. API extensions, while not (yet) removed from the V2.1 API, are no longer needed to evolve the REST API, and no new API functionality should use the API extension classes to implement new functionality. Instead, new API functionality should use the microversioning decorators to add or change the REST API. The extension is considered as two things in the Nova V2.1 API: * The '/extensions' API In the V2 API the user can query it to determine what APIs are supported by the current Nova deployment. In V2.1 API, microversions enable us to add new features in backwards- compatible ways. And microversions not only enable us to add new futures by backwards-compatible method, also can be added by appropriate backwards- incompatible method. The '/extensions' API is frozen in Nova V2.1 API and will be deprecated in the future. * The plugin framework One of the improvements in the V2.1 API was using stevedore to load Nova REST API extensions instead of old V2 handcrafted extension load mechanism. There was an argument that the plugin framework supported extensibility in the Nova API to allow deployers to publish custom API resources. We will keep the existing plugin mechanisms in place within Nova but only to enable modularity in the codebase, not to allow extending of the Nova REST API. As the extension will be removed from Nove V2.1 REST API. So the concept of core API and extension API is eliminated also. There is no difference between Nova V2.1 REST API, all of them are part of Nova stable REST API. nova-13.0.0/doc/source/code-review.rst0000664000567000056710000002700712701407773020725 0ustar jenkinsjenkins00000000000000.. _code-review: ========================== Code Review Guide for Nova ========================== This is a very terse set of points for reviewers to consider when looking at nova code. 
These are things that are important for the continued smooth operation of Nova, but that tend to be carried as "tribal knowledge" instead of being written down. It is an attempt to boil down some of those things into nearly checklist format. Further explanation about why some of these things are important belongs elsewhere and should be linked from here. Upgrade-Related Concerns ======================== RPC API Versions ---------------- * If an RPC method is modified, the following needs to happen: * The manager-side (example: compute/manager) needs a version bump * The manager-side method needs to tolerate older calls as well as newer calls * Arguments can be added as long as they are optional. Arguments cannot be removed or changed in an incompatible way. * The RPC client code (example: compute/rpcapi.py) needs to be able to honor a pin for the older version (see self.client.can_send_version() calls). If we are pinned at 1.5, but the version requirement for a method is 1.7, we need to be able to formulate the call at version 1.5. * Methods can drop compatibility with older versions when we bump a major version. * RPC methods can be deprecated by removing the client (example: compute/rpcapi.py) implementation. However, the manager method must continue to exist until the major version of the API is bumped. Object Versions --------------- * If a tracked attribute (i.e. listed in fields) or remotable method is added, or a method is changed, the object version must be bumped. Changes for methods follow the same rules as above for regular RPC methods. We have tests to try to catch these changes, which remind you to bump the version and then correct the version-hash in the tests. * Field types cannot be changed. If absolutely required, create a new attribute and deprecate the old one. Ideally, support converting the old attribute to the new one with an obj_load_attr() handler. 
There are some exceptional cases where changing the type can be allowed, but care must be taken to ensure it does not affect the wireline API. * New attributes should be removed from the primitive in obj_make_compatible() if the attribute was added after the target version. * Remotable methods should not return unversioned structures wherever possible. They should return objects or simple values as the return types are not (and cannot) be checked by the hash tests. * Remotable methods should not take complex structures as arguments. These cannot be verified by the hash tests, and thus are subject to drift. Either construct an object and pass that, or pass all the simple values required to make the call. * Changes to an object as described above will cause a hash to change in TestObjectVersions. This is a reminder to the developer and the reviewer that the version needs to be bumped. There are times when we need to make a change to an object without bumping its version, but those cases are only where the hash logic detects a change that is not actually a compatibility issue and must be handled carefully. Database Schema --------------- * Changes to the database schema must generally be additive-only. This means you can add columns, but you can't drop or alter a column. We have some hacky tests to try to catch these things, but they are fragile. Extreme reviewer attention to non-online alterations to the DB schema will help us avoid disaster. * Dropping things from the schema is a thing we need to be extremely careful about, making sure that the column has not been used (even present in one of our models) for at least a release. * Data migrations must not be present in schema migrations. If data needs to be converted to another format, or moved from one place to another, then that must be done while the database server remains online. 
Generally, this can and should be hidden within the object layer so that an object can load from either the old or new location, and save to the new one. REST API ========= When making a change to the nova API, we should always follow `the API WG guidelines `_ rather than going for "local" consistency. Developers and reviewers should read all of the guidelines, but they are very long. So here are some key points: * `Terms `_ * ``project`` should be used in the REST API instead of ``tenant``. * ``server`` should be used in the REST API instead of ``instance``. * ``compute`` should be used in the REST API instead of ``nova``. * `Naming Conventions `_ * URL should not include underscores; use hyphens ('-') instead. * The field names contained in a request/response body should use snake_case style, not CamelCase or Mixed_Case style. * `HTTP Response Codes `_ * Synchronous resource creation: ``201 Created`` * Asynchronous resource creation: ``202 Accepted`` * Synchronous resource deletion: ``204 No Content`` * For all other successful operations: ``200 OK`` Config Options ============== Location -------- The central place where all config options should reside is the ``/nova/conf/`` package. Options that are in named sections of ``nova.conf``, such as ``[serial_console]``, should be in their own module. Options that are in the ``[DEFAULT]`` section should be placed in modules that represent a natural grouping. For example, all of the options that affect the scheduler would be in the ``scheduler.py`` file, and all the networking options would be moved to ``network.py``. Implementation -------------- A config option should be checked for: * A short description which explains what it does. If it is a unit (e.g. timeouts or so) describe the unit which is used (seconds, megabyte, mebibyte, ...). * A long description which shows the impact and scope. The operators should know the expected change in the behavior of Nova if they tweak this. 
* Hints which services will consume this config option. Operators/Deployers should not be forced to read the code to know which one of the services will change its behavior nor should they set this in every ``nova.conf`` file to be sure. * Descriptions/Validations for the possible values. * If this is an option with numeric values (int, float), describe the edge cases (like the min value, max value, 0, -1). * If this is a DictOpt, describe the allowed keys. * If this is a StrOpt, list any possible regex validations, or provide a list of acceptable and/or prohibited values. * Interdependencies to other options. If other config options have to be considered when this config option gets changed, is this described? Third Party Tests ================= Any change that is not tested well by the Jenkins check jobs must have a recent +1 vote from an appropriate third party test (or tests) on the latest patchset, before a core reviewer is allowed to make a +2 vote. Virt drivers ------------ At a minimum, we must ensure that any technology specific code has a +1 from the relevant third party test, on the latest patchset, before a +2 vote can be applied. Specifically, changes to nova/virt/driver/ need a +1 vote from the respective third party CI. For example, if you change something in the XenAPI virt driver, you must wait for a +1 from the XenServer CI on the latest patchset, before you can give that patch set a +2 vote. This is important to ensure: * We keep those drivers stable * We don't break that third party CI Notes ----- Please note: * Long term, we should ensure that any patch a third party CI is allowed to vote on, can be blocked from merging by that third party CI. But we need a lot more work to make something like that feasible, hence the proposed compromise. * While its possible to break a virt driver CI system by changing code that is outside the virt drivers, this policy is not focusing on fixing that. 
A third party test failure should always be investigated, but the failure of a third party test to report in a timely manner should not block others. * We are only talking about the testing of in-tree code. Please note the only public API is our REST API, see: :doc:`policies` Microversion API ================ * If an new microversion API is added, the following needs to happen: * A new patch for the microversion API change in python-novaclient side should be submitted. Release Notes ============= What is reno ? -------------- Nova uses `reno `_ for providing release notes in-tree. That means that a patch can include a *reno file* or a series can have a follow-on change containing that file explaining what the impact is. A *reno file* is a YAML file written in the releasenotes/notes tree which is generated using the reno tool this way: .. code-block:: bash $ tox -e venv -- reno new where usually ```` can be ``bp-`` for a blueprint or ``bug-XXXXXX`` for a bugfix. Refer to the `reno documentation `_ for the full list of sections. When a release note is needed ----------------------------- A release note is required anytime a reno section is needed. Below are some examples for each section. Any sections that would be blank should be left out of the note file entirely. 
If no section is needed, then you know you don't need to provide a release note :-) * ``upgrade`` * The patch has an `UpgradeImpact `_ tag * A DB change needs some deployer modification (like a migration) * A configuration option change (deprecation, removal or modified default) * some specific changes that have a `DocImpact `_ tag but require further action from an deployer perspective * any patch that requires an action from the deployer in general * ``security`` * If the patch fixes a known vulnerability * ``features`` * If the patch has an `APIImpact `_ tag * For nova-manage and python-novaclient changes, if it adds or changes a new command, including adding new options to existing commands * not all blueprints in general, just the ones impacting a `contractual API `_ * a new virt driver is provided or an existing driver impacts the `HypervisorSupportMatrix `_ * ``critical`` * Bugfixes categorized as Critical in Launchpad *impacting users* * ``fixes`` * No clear definition of such bugfixes. Hairy long-standing bugs with high importance that have been fixed are good candidates though. Three sections are left intentionally unexplained (``prelude``, ``issues`` and ``other``). Those are targeted to be filled in close to the release time for providing details about the soon-ish release. Don't use them unless you know exactly what you are doing. 
Notifications ============= * Every new notification type shall use the new versioned notification infrastructure documented in :doc:`notifications` nova-13.0.0/doc/api_samples/0000775000567000056710000000000012701410205016731 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-hypervisors/0000775000567000056710000000000012701410205021745 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-hypervisors/hypervisors-uptime-resp.json0000664000567000056710000000035212701407773027525 0ustar jenkinsjenkins00000000000000{ "hypervisor": { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled", "uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14" } } nova-13.0.0/doc/api_samples/os-hypervisors/hypervisors-search-resp.json0000664000567000056710000000026312701407773027470 0ustar jenkinsjenkins00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled" } ] } nova-13.0.0/doc/api_samples/os-hypervisors/hypervisors-with-servers-resp.json0000664000567000056710000000100212701407773030655 0ustar jenkinsjenkins00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled", "servers": [ { "name": "test_server1", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" }, { "name": "test_server2", "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" } ] } ] } nova-13.0.0/doc/api_samples/os-hypervisors/hypervisors-statistics-resp.json0000664000567000056710000000055712701407773030423 0ustar jenkinsjenkins00000000000000{ "hypervisor_statistics": { "count": 1, "current_workload": 0, "disk_available_least": 0, "free_disk_gb": 1028, "free_ram_mb": 7680, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "vcpus": 1, "vcpus_used": 0 } }nova-13.0.0/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json0000664000567000056710000000175612701407773027475 0ustar 
jenkinsjenkins00000000000000{ "hypervisors": [ { "cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}", "current_workload": 0, "status": "enabled", "state": "up", "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": "e6a37ee802d74863ab8b91ade8f12a67", "id": 2, "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 } ] } nova-13.0.0/doc/api_samples/os-hypervisors/hypervisors-list-resp.json0000664000567000056710000000026312701407773027176 0ustar jenkinsjenkins00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled" } ] } nova-13.0.0/doc/api_samples/os-hypervisors/hypervisors-without-servers-resp.json0000664000567000056710000000026312701407773031415 0ustar jenkinsjenkins00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled" } ] } nova-13.0.0/doc/api_samples/os-hypervisors/hypervisors-show-resp.json0000664000567000056710000000157112701407773027206 0ustar jenkinsjenkins00000000000000{ "hypervisor": { "cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}", "state": "up", "status": "enabled", "current_workload": 0, "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": 
"043b3cacf6f34c90a7245151fc8ebcda", "id": 2, "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 } } nova-13.0.0/doc/api_samples/servers/0000775000567000056710000000000012701410205020422 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/servers/server-action-create-image.json0000664000567000056710000000020012701407773026427 0ustar jenkinsjenkins00000000000000{ "createImage" : { "name" : "foo-image", "metadata": { "meta_var": "meta_val" } } }nova-13.0.0/doc/api_samples/servers/server-action-stop.json0000664000567000056710000000003012701407773025072 0ustar jenkinsjenkins00000000000000{ "os-stop" : null }nova-13.0.0/doc/api_samples/servers/server-action-reboot.json0000664000567000056710000000006212701407773025404 0ustar jenkinsjenkins00000000000000{ "reboot" : { "type" : "HARD" } }nova-13.0.0/doc/api_samples/servers/server-post-req.json0000664000567000056710000000044212701407773024413 0ustar jenkinsjenkins00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" } } } nova-13.0.0/doc/api_samples/servers/server-action-rebuild.json0000664000567000056710000000043512701407773025544 0ustar jenkinsjenkins00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" } } } nova-13.0.0/doc/api_samples/servers/servers-list-resp.json0000664000567000056710000000113412701407773024745 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "22c91117-08de-4894-9aa9-6ef382400985", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": 
"bookmark" } ], "name": "new-server-test" } ] }nova-13.0.0/doc/api_samples/servers/v2.19/0000775000567000056710000000000012701410205021201 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/servers/v2.19/server-post-req.json0000664000567000056710000000064312701407773025175 0ustar jenkinsjenkins00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "description" : "new-server-description", "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } } }nova-13.0.0/doc/api_samples/servers/v2.19/server-action-rebuild.json0000664000567000056710000000051612701407773026323 0ustar jenkinsjenkins00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "description" : "description of foobar", "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" } } } nova-13.0.0/doc/api_samples/servers/v2.19/servers-list-resp.json0000664000567000056710000000113412701407773025524 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "78d95942-8805-4597-b1af-3d0e38330758", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/78d95942-8805-4597-b1af-3d0e38330758", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/78d95942-8805-4597-b1af-3d0e38330758", "rel": "bookmark" } ], "name": "new-server-test" } ] }nova-13.0.0/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json0000664000567000056710000000347612701407773027302 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2013-11-14T06:29:00Z", "flavor": { "id": "1", "links": [ { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66", "id": "a0a80a94-3d81-4a10-822a-daa0cf9e870b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "bookmark" } ], "locked": false, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "description" : "description of foobar", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-11-14T06:29:02Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/servers/v2.19/server-put-resp.json0000664000567000056710000000346312701407773025205 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "version": 4 } ] }, "created": "2015-12-07T19:19:36Z", "description": "updated-server-description", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "4e17a358ca9bbc8ac6e215837b6410c0baa21b2463fefe3e8f712b31", "id": "c509708e-f0c6-461f-b2b3-507547959eb2", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/c509708e-f0c6-461f-b2b3-507547959eb2", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/c509708e-f0c6-461f-b2b3-507547959eb2", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "updated-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2015-12-07T19:19:36Z", "user_id": "fake" } }nova-13.0.0/doc/api_samples/servers/v2.19/servers-details-resp.json0000664000567000056710000000425312701407773026203 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "addr": "192.168.0.3", "version": 4 } ] }, "created": "2015-12-07T19:54:48Z", "description": "new-server-description", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "a672ab12738567bfcb852c846d66a6ce5c3555b42d73db80bdc6f1a4", "id": "91965362-fd86-4543-8ce1-c17074d2984d", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/91965362-fd86-4543-8ce1-c17074d2984d", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/91965362-fd86-4543-8ce1-c17074d2984d", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2015-12-07T19:54:49Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/servers/v2.19/server-post-resp.json0000664000567000056710000000103112701407773025347 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": 
"rySfUy7xL4C5", "id": "19923676-e78b-46fb-af62-a5942aece2ac", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/19923676-e78b-46fb-af62-a5942aece2ac", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/19923676-e78b-46fb-af62-a5942aece2ac", "rel": "bookmark" } ] } }nova-13.0.0/doc/api_samples/servers/v2.19/server-get-resp.json0000664000567000056710000000367212701407773025156 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "addr": "192.168.0.3", "version": 4 } ] }, "created": "2015-12-07T17:24:14Z", "description": "new-server-description", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "c656e68b04b483cfc87cdbaa2346557b174ec1cb6be6afbd2a0133a0", "id": "ddb205dc-717e-496e-8e96-88a3b31b075d", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/ddb205dc-717e-496e-8e96-88a3b31b075d", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/ddb205dc-717e-496e-8e96-88a3b31b075d", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2015-12-07T17:24:15Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/servers/v2.19/server-put-req.json0000664000567000056710000000017012701407773025013 0ustar jenkinsjenkins00000000000000{ "server" : { "name" : "updated-server-test", 
"description" : "updated-server-description" } } nova-13.0.0/doc/api_samples/servers/server-action-rebuild-resp.json0000664000567000056710000000336312701407773026516 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2013-11-14T06:29:00Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66", "id": "a0a80a94-3d81-4a10-822a-daa0cf9e870b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "bookmark" } ], "metadata": { "meta_var": "meta_val" }, "name": "foobar", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-11-14T06:29:02Z", "user_id": "fake" } }nova-13.0.0/doc/api_samples/servers/server-action-start.json0000664000567000056710000000003112701407773025243 0ustar jenkinsjenkins00000000000000{ "os-start" : null }nova-13.0.0/doc/api_samples/servers/servers-details-resp.json0000664000567000056710000000413112701407773025417 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", "links": [ { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:32Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/servers/server-post-resp.json0000664000567000056710000000103212701407773024571 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "6NpUwoz2QDRN", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ] } } nova-13.0.0/doc/api_samples/servers/server-get-resp.json0000664000567000056710000000356012701407773024373 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": 
"bookmark" } ] }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:33Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/servers/server-create-resp.json0000664000567000056710000000124512701407773025055 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "6NpUwoz2QDRN", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } } nova-13.0.0/doc/api_samples/servers/v2.9/0000775000567000056710000000000012701410205021120 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/servers/v2.9/servers-list-resp.json0000664000567000056710000000113412701407773025443 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "22c91117-08de-4894-9aa9-6ef382400985", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "bookmark" } ], "name": "new-server-test" } ] }nova-13.0.0/doc/api_samples/servers/v2.9/servers-details-resp.json0000664000567000056710000000416612701407773026125 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:32Z", "user_id": "fake", "locked": false } ] } nova-13.0.0/doc/api_samples/servers/v2.9/server-get-resp.json0000664000567000056710000000361112701407773025066 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", 
"links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:33Z", "user_id": "fake", "locked": false } } nova-13.0.0/doc/api_samples/servers/server-create-req.json0000664000567000056710000000044212701407773024671 0ustar jenkinsjenkins00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" } } } nova-13.0.0/doc/api_samples/servers/server-update-resp.json0000664000567000056710000000340612701407773025075 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "version": 4 } ] }, "created": "2012-12-02T02:11:57Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "6e84af987b4e7ec1c039b16d21f508f4a505672bd94fb0218b668d07", "id": "324dfb7d-f4a9-419a-9a19-237df04b443b", "image": { "id": 
"70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2012-12-02T02:11:58Z", "user_id": "fake" } }nova-13.0.0/doc/api_samples/servers/server-update-req.json0000664000567000056710000000024212701407773024706 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "4.3.2.1", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "name" : "new-server-test" } }nova-13.0.0/doc/api_samples/servers/server-action-revert-resize.json0000664000567000056710000000003512701407773026720 0ustar jenkinsjenkins00000000000000{ "revertResize" : null }nova-13.0.0/doc/api_samples/servers/server-action-resize.json0000664000567000056710000000006412701407773025415 0ustar jenkinsjenkins00000000000000{ "resize" : { "flavorRef" : "2" } }nova-13.0.0/doc/api_samples/servers/server-action-confirm-resize.json0000664000567000056710000000003612701407773027047 0ustar jenkinsjenkins00000000000000{ "confirmResize" : null }nova-13.0.0/doc/api_samples/servers/server-action-rebuild-preserve-ephemeral.json0000664000567000056710000000146312701407773031337 0ustar jenkinsjenkins00000000000000{ "rebuild" : { "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" }, "personality" : [ { "path" : "/etc/banner.txt", "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k 
IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ], "preserve_ephemeral": true } } nova-13.0.0/doc/api_samples/servers/v2.17/0000775000567000056710000000000012701410205021177 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/servers/v2.17/server-action-trigger-crash-dump.json0000664000567000056710000000004312701407773030372 0ustar jenkinsjenkins00000000000000{ "trigger_crash_dump": null } nova-13.0.0/doc/api_samples/servers/v2.16/0000775000567000056710000000000012701410205021176 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/servers/v2.16/servers-details-resp.json0000664000567000056710000000417212701407773026200 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], 
"metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "host_status": "UP", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:32Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/servers/v2.16/server-get-resp.json0000664000567000056710000000361512701407773025150 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "host_status": "UP", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:33Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-migrate-server/0000775000567000056710000000000012701410205022304 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-migrate-server/v2.25/0000775000567000056710000000000012701410205023060 5ustar 
jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-migrate-server/v2.25/live-migrate-server.json0000664000567000056710000000017012701407773027662 0ustar jenkinsjenkins00000000000000{ "os-migrateLive": { "host": "01c0cadef72d47e28a672a76060d492c", "block_migration": "auto" } } nova-13.0.0/doc/api_samples/os-migrate-server/live-migrate-server.json0000664000567000056710000000023212701407773027105 0ustar jenkinsjenkins00000000000000{ "os-migrateLive": { "host": "01c0cadef72d47e28a672a76060d492c", "block_migration": false, "disk_over_commit": false } } nova-13.0.0/doc/api_samples/os-migrate-server/migrate-server.json0000664000567000056710000000002712701407773026152 0ustar jenkinsjenkins00000000000000{ "migrate": null }nova-13.0.0/doc/api_samples/os-user-data/0000775000567000056710000000000012701410205021235 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-user-data/userdata-post-req.json0000664000567000056710000000046312701407773025533 0ustar jenkinsjenkins00000000000000{ "server" : { "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" }, "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==" } } nova-13.0.0/doc/api_samples/os-user-data/userdata-post-resp.json0000664000567000056710000000103212701407773025706 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "2xHoDU7Gd7vw", "id": "976a62bb-0d4a-4e17-9044-1864e888a557", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/976a62bb-0d4a-4e17-9044-1864e888a557", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/976a62bb-0d4a-4e17-9044-1864e888a557", "rel": "bookmark" } ] } } nova-13.0.0/doc/api_samples/os-admin-password/0000775000567000056710000000000012701410205022300 5ustar 
jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-admin-password/admin-password-change-password.json0000664000567000056710000000007712701407773031232 0ustar jenkinsjenkins00000000000000{ "changePassword" : { "adminPass" : "foo" } } nova-13.0.0/doc/api_samples/os-server-diagnostics/0000775000567000056710000000000012701410205023163 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json0000664000567000056710000000060312701407773031434 0ustar jenkinsjenkins00000000000000{ "cpu0_time": 17300000000, "memory": 524288, "vda_errors": -1, "vda_read": 262144, "vda_read_req": 112, "vda_write": 5778432, "vda_write_req": 488, "vnet1_rx": 2070139, "vnet1_rx_drop": 0, "vnet1_rx_errors": 0, "vnet1_rx_packets": 26701, "vnet1_tx": 140208, "vnet1_tx_drop": 0, "vnet1_tx_errors": 0, "vnet1_tx_packets": 662 } nova-13.0.0/doc/api_samples/os-quota-class-sets/0000775000567000056710000000000012701410205022560 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json0000664000567000056710000000056212701407773031312 0ustar jenkinsjenkins00000000000000{ "quota_class_set": { "instances": 50, "cores": 50, "ram": 51200, "floating_ips": 10, "metadata_items": 128, "injected_files": 5, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "security_groups": 10, "security_group_rules": 20, "key_pairs": 100 } } nova-13.0.0/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json0000664000567000056710000000061312701407773031471 0ustar jenkinsjenkins00000000000000{ "quota_class_set": { "cores": 50, "fixed_ips": -1, "floating_ips": 10, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 50, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10 } } 
nova-13.0.0/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json0000664000567000056710000000064712701407773030770 0ustar jenkinsjenkins00000000000000{ "quota_class_set": { "cores": 20, "fixed_ips": -1, "floating_ips": 10, "id": "test_class", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10 } } nova-13.0.0/doc/api_samples/versions/0000775000567000056710000000000012701410205020601 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/versions/v21-version-get-resp.json0000664000567000056710000000123512701407773025334 0ustar jenkinsjenkins00000000000000{ "version": { "id": "v2.1", "links": [ { "href": "http://openstack.example.com/v2.1/", "rel": "self" }, { "href": "http://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2.1" } ], "status": "CURRENT", "version": "2.25", "min_version": "2.1", "updated": "2013-07-23T11:33:21Z" } } nova-13.0.0/doc/api_samples/versions/versions-get-resp.json0000664000567000056710000000135612701407773025115 0ustar jenkinsjenkins00000000000000{ "versions": [ { "id": "v2.0", "links": [ { "href": "http://openstack.example.com/v2/", "rel": "self" } ], "status": "SUPPORTED", "version": "", "min_version": "", "updated": "2011-01-21T11:33:21Z" }, { "id": "v2.1", "links": [ { "href": "http://openstack.example.com/v2.1/", "rel": "self" } ], "status": "CURRENT", "version": "2.25", "min_version": "2.1", "updated": "2013-07-23T11:33:21Z" } ] } nova-13.0.0/doc/api_samples/versions/v2-version-get-resp.json0000664000567000056710000000122412701407773025251 0ustar jenkinsjenkins00000000000000{ "version": { "id": "v2.0", "links": [ { "href": "http://openstack.example.com/v2/", "rel": "self" }, { "href": "http://docs.openstack.org/", "rel": 
"describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2" } ], "status": "SUPPORTED", "version": "", "min_version": "", "updated": "2011-01-21T11:33:21Z" } } nova-13.0.0/doc/api_samples/os-floating-ips-bulk/0000775000567000056710000000000012701410205022677 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json0000664000567000056710000000037112701407773032445 0ustar jenkinsjenkins00000000000000{ "floating_ip_info": [ { "address": "10.10.10.3", "instance_uuid": null, "fixed_ip": null, "interface": "eth0", "pool": "nova", "project_id": null } ] }nova-13.0.0/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json0000664000567000056710000000006412701407773031350 0ustar jenkinsjenkins00000000000000{ "floating_ips_bulk_delete": "192.168.1.0/24" }nova-13.0.0/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json0000664000567000056710000000020512701407773031346 0ustar jenkinsjenkins00000000000000{ "floating_ips_bulk_create": { "interface": "eth0", "ip_range": "192.168.1.0/24", "pool": "nova" } }nova-13.0.0/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json0000664000567000056710000000004412701407773031164 0ustar jenkinsjenkins00000000000000{ "ip_range": "192.168.1.0/24" }nova-13.0.0/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json0000664000567000056710000000020612701407773031165 0ustar jenkinsjenkins00000000000000{ "floating_ips_bulk_create": { "ip_range": "192.168.1.0/24", "pool": "nova", "interface": "eth0" } } nova-13.0.0/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json0000664000567000056710000000124712701407773031065 0ustar jenkinsjenkins00000000000000{ "floating_ip_info": [ { "address": "10.10.10.1", "instance_uuid": null, "fixed_ip": null, "interface": "eth0", "pool": "nova", "project_id": null }, { "address": 
"10.10.10.2", "instance_uuid": null, "fixed_ip": null, "interface": "eth0", "pool": "nova", "project_id": null }, { "address": "10.10.10.3", "instance_uuid": null, "fixed_ip": null, "interface": "eth0", "pool": "nova", "project_id": null } ] }nova-13.0.0/doc/api_samples/server-migrations/0000775000567000056710000000000012701410205022411 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/server-migrations/live-migrate-server.json0000664000567000056710000000023212701407773027212 0ustar jenkinsjenkins00000000000000{ "os-migrateLive": { "host": "01c0cadef72d47e28a672a76060d492c", "block_migration": false, "disk_over_commit": false } } nova-13.0.0/doc/api_samples/server-migrations/force_complete.json0000664000567000056710000000003712701407773026312 0ustar jenkinsjenkins00000000000000{ "force_complete": null } nova-13.0.0/doc/api_samples/server-migrations/v2.23/0000775000567000056710000000000012701410205023163 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/server-migrations/v2.23/migrations-get.json0000664000567000056710000000120512701407773027025 0ustar jenkinsjenkins00000000000000{ "migration": { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "server_uuid": "4cfba335-03d8-49b2-8c52-e69043d1e8fe", "source_compute": "compute1", "source_node": "node1", "status": "running", "memory_total_bytes": 123456, "memory_processed_bytes": 12345, "memory_remaining_bytes": 120000, "disk_total_bytes": 234567, "disk_processed_bytes": 23456, "disk_remaining_bytes": 230000, "updated_at": "2016-01-29T13:42:02.000000" } } nova-13.0.0/doc/api_samples/server-migrations/v2.23/migrations-index.json0000664000567000056710000000133212701407773027356 0ustar jenkinsjenkins00000000000000{ "migrations": [ { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "server_uuid": "4cfba335-03d8-49b2-8c52-e69043d1e8fe", 
"source_compute": "compute1", "source_node": "node1", "status": "running", "memory_total_bytes": 123456, "memory_processed_bytes": 12345, "memory_remaining_bytes": 120000, "disk_total_bytes": 234567, "disk_processed_bytes": 23456, "disk_remaining_bytes": 230000, "updated_at": "2016-01-29T13:42:02.000000" } ] } nova-13.0.0/doc/api_samples/os-rescue/0000775000567000056710000000000012701410205020636 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-rescue/server-get-resp-rescue.json0000664000567000056710000000353112701407773026071 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-18T07:22:09Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "f04994c5b4aac1cacbb83b09c2506e457d97dd54f620961624574690", "id": "2fd0c66b-50af-41d2-9253-9fa41e7e8dd8", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/2fd0c66b-50af-41d2-9253-9fa41e7e8dd8", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/2fd0c66b-50af-41d2-9253-9fa41e7e8dd8", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "status": "RESCUE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-18T07:22:11Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-rescue/server-unrescue-req.json0000664000567000056710000000003012701407773025464 0ustar jenkinsjenkins00000000000000{ "unrescue": null 
}nova-13.0.0/doc/api_samples/os-rescue/server-rescue-req.json0000664000567000056710000000007612701407773025133 0ustar jenkinsjenkins00000000000000{ "rescue": { "adminPass": "MySecretPass" } } nova-13.0.0/doc/api_samples/os-rescue/server-rescue.json0000664000567000056710000000004412701407773024341 0ustar jenkinsjenkins00000000000000{ "adminPass": "MySecretPass" } nova-13.0.0/doc/api_samples/os-rescue/server-get-resp-unrescue.json0000664000567000056710000000356012701407773026436 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-18T07:22:09Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "53cd4520a6cc639eeabcae4a0512b93e4675d431002e0b60e2dcfc04", "id": "edfc3905-1f3c-4819-8fc3-a7d8131cfa22", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/edfc3905-1f3c-4819-8fc3-a7d8131cfa22", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/edfc3905-1f3c-4819-8fc3-a7d8131cfa22", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-18T07:22:12Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-rescue/server-rescue-req-with-image-ref.json0000664000567000056710000000020212701407773027725 0ustar jenkinsjenkins00000000000000{ "rescue": { "adminPass": "MySecretPass", "rescue_image_ref": 
"70a599e0-31e7-49b7-b260-868f441e862b" } } nova-13.0.0/doc/api_samples/os-pci/0000775000567000056710000000000012701410205020123 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-pci/hypervisors-pci-show-resp.json0000664000567000056710000000221012701407773026124 0ustar jenkinsjenkins00000000000000{ "hypervisor": { "cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}", "current_workload": 0, "disk_available_least": 0, "host_ip": "1.1.1.1", "state": "up", "status": "enabled", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "os-pci:pci_stats": [ { "count": 5, "key1": "value1", "keya": "valuea", "product_id": "1520", "vendor_id": "8086", "numa_node": 1 } ], "running_vms": 0, "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", "id": 2, "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 } } nova-13.0.0/doc/api_samples/os-pci/hypervisors-pci-detail-resp.json0000664000567000056710000000244512701407773026420 0ustar jenkinsjenkins00000000000000{ "hypervisors": [ { "cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}", "state": "up", "status": "enabled", "current_workload": 0, "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "os-pci:pci_stats": [ { "count": 5, "key1": "value1", "keya": "valuea", "product_id": "1520", "vendor_id": "8086", "numa_node": 1 } ], "running_vms": 0, "service": { "host": 
"043b3cacf6f34c90a7245151fc8ebcda", "id": 2, "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 } ] } nova-13.0.0/doc/api_samples/os-pci/pci-show-resp.json0000664000567000056710000000071712701407773023543 0ustar jenkinsjenkins00000000000000{ "pci_device": { "address": "0000:04:10.0", "compute_node_id": 1, "dev_id": "pci_0000_04_10_0", "dev_type": "type-VF", "extra_info": { "key1": "value1", "key2": "value2" }, "id": 1, "server_uuid": "69ba1044-0766-4ec0-b60d-09595de034a1", "label": "label_8086_1520", "product_id": "1520", "status": "available", "vendor_id": "8086" } } nova-13.0.0/doc/api_samples/os-pci/server-get-resp.json0000664000567000056710000000341112701407773024067 0ustar jenkinsjenkins00000000000000{ "server": { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-11-25T03:45:54Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/flavors/1", "rel": "bookmark" } ] }, "hostId": "b7e88944272df30c113572778bcf5527f02e9c2a745221214536c1a2", "id": "9dafa6bc-7a9f-45b2-8177-11800ceb7224", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v3/servers/9dafa6bc-7a9f-45b2-8177-11800ceb7224", "rel": "self" }, { "href": "http://openstack.example.com/servers/9dafa6bc-7a9f-45b2-8177-11800ceb7224", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-pci:pci_devices": [ { "id": 1 } ], "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-11-25T03:45:54Z", "user_id": "fake" } }nova-13.0.0/doc/api_samples/os-pci/servers-detail-resp.json0000664000567000056710000000377612701407773024753 0ustar jenkinsjenkins00000000000000{ "servers": [ { "addresses": { 
"private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-11-25T03:45:54Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/flavors/1", "rel": "bookmark" } ] }, "hostId": "416f83c758ea0f9271018b278a9dcedb91b1190deaa598704b87219b", "id": "ef440f98-04e8-46ea-ae74-e24d437040ea", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v3/servers/ef440f98-04e8-46ea-ae74-e24d437040ea", "rel": "self" }, { "href": "http://openstack.example.com/servers/ef440f98-04e8-46ea-ae74-e24d437040ea", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-pci:pci_devices": [ { "id": 1 } ], "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-11-25T03:45:54Z", "user_id": "fake" } ] }nova-13.0.0/doc/api_samples/os-pci/pci-detail-resp.json0000664000567000056710000000203212701407773024015 0ustar jenkinsjenkins00000000000000{ "pci_devices": [ { "address": "0000:04:10.0", "compute_node_id": 1, "dev_id": "pci_0000_04_10_0", "dev_type": "type-VF", "extra_info": { "key1": "value1", "key2": "value2" }, "id": 1, "server_uuid": "69ba1044-0766-4ec0-b60d-09595de034a1", "label": "label_8086_1520", "product_id": "1520", "status": "available", "vendor_id": "8086" }, { "address": "0000:04:10.1", "compute_node_id": 1, "dev_id": "pci_0000_04_10_1", "dev_type": "type-VF", "extra_info": { "key3": "value3", "key4": "value4" }, "id": 2, "server_uuid": "d5b446a6-a1b4-4d01-b4f0-eac37b3a62fc", "label": "label_8086_1520", "product_id": "1520", "status": "available", "vendor_id": "8086" } ] } nova-13.0.0/doc/api_samples/os-pci/pci-index-resp.json0000664000567000056710000000071512701407773023670 0ustar 
jenkinsjenkins00000000000000{ "pci_devices": [ { "address": "0000:04:10.0", "compute_node_id": 1, "id": 1, "product_id": "1520", "status": "available", "vendor_id": "8086" }, { "address": "0000:04:10.1", "compute_node_id": 1, "id": 2, "product_id": "1520", "status": "available", "vendor_id": "8086" } ] }nova-13.0.0/doc/api_samples/os-pause-server/0000775000567000056710000000000012701410205021771 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-pause-server/unpause-server.json0000664000567000056710000000002712701407773025667 0ustar jenkinsjenkins00000000000000{ "unpause": null }nova-13.0.0/doc/api_samples/os-pause-server/pause-server.json0000664000567000056710000000002512701407773025322 0ustar jenkinsjenkins00000000000000{ "pause": null }nova-13.0.0/doc/api_samples/os-server-password/0000775000567000056710000000000012701410205022516 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-server-password/get-password-resp.json0000664000567000056710000000055612701407773027025 0ustar jenkinsjenkins00000000000000{ "password": "xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtVVzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNXJjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrjQskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+AcX//PXk3uJ5kC7d67fPXaVz4WaQRYMg==" }nova-13.0.0/doc/api_samples/os-hide-server-addresses/0000775000567000056710000000000012701410205023540 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-hide-server-addresses/servers-list-resp.json0000664000567000056710000000113412701407773030063 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "b2a7068b-8aed-41a4-aa74-af8feb984bae", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/b2a7068b-8aed-41a4-aa74-af8feb984bae", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/b2a7068b-8aed-41a4-aa74-af8feb984bae", "rel": "bookmark" } ], "name": "new-server-test" } ] }nova-13.0.0/doc/api_samples/os-hide-server-addresses/servers-details-resp.json0000664000567000056710000000344112701407773030540 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": {}, "created": "2013-09-24T14:44:01Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "a4fa72ae8741e5e18fb062c15657b8f689b8da2837b734c61fc9eedd", "id": "a747eac1-e3ed-446c-935a-c2a2853f919c", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a747eac1-e3ed-446c-935a-c2a2853f919c", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a747eac1-e3ed-446c-935a-c2a2853f919c", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-24T14:44:01Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/os-hide-server-addresses/server-get-resp.json0000664000567000056710000000313412701407773027506 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": {}, "created": "2013-09-24T14:39:00Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "d0635823e9162b22b90ff103f0c30f129bacf6ffb72f4d6fde87e738", "id": "4bdee8c7-507f-40f2-8429-d301edd3791b", "image": { "id": 
"70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/4bdee8c7-507f-40f2-8429-d301edd3791b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/4bdee8c7-507f-40f2-8429-d301edd3791b", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-24T14:39:01Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-attach-interfaces/0000775000567000056710000000000012701410205022735 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json0000664000567000056710000000062212701407773031162 0ustar jenkinsjenkins00000000000000{ "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE" } }nova-13.0.0/doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json0000664000567000056710000000062212701407773031445 0ustar jenkinsjenkins00000000000000{ "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE" } }nova-13.0.0/doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json0000664000567000056710000000071712701407773031162 0ustar jenkinsjenkins00000000000000{ "interfaceAttachments": [ { "fixed_ips": [ { "ip_address": 
"192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE" } ] }nova-13.0.0/doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.json0000664000567000056710000000014012701407773031256 0ustar jenkinsjenkins00000000000000{ "interfaceAttachment": { "port_id": "ce531f90-199f-48c0-816c-13e38010b442" } }nova-13.0.0/doc/api_samples/os-remote-consoles/0000775000567000056710000000000012701410205022466 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-remote-consoles/get-serial-console-post-req.json0000664000567000056710000000010012701407773030634 0ustar jenkinsjenkins00000000000000{ "os-getSerialConsole": { "type": "serial" } } nova-13.0.0/doc/api_samples/os-remote-consoles/get-rdp-console-post-resp.json0000664000567000056710000000021312701407773030331 0ustar jenkinsjenkins00000000000000{ "console": { "type": "rdp-html5", "url": "http://127.0.0.1:6083/?token=191996c3-7b0f-42f3-95a7-f1839f2da6ed" } } nova-13.0.0/doc/api_samples/os-remote-consoles/v2.8/0000775000567000056710000000000012701410205023163 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-remote-consoles/v2.8/create-mks-console-req.json0000664000567000056710000000012612701407773030355 0ustar jenkinsjenkins00000000000000{ "remote_console": { "protocol": "mks", "type": "webmks" } } nova-13.0.0/doc/api_samples/os-remote-consoles/v2.8/create-mks-console-resp.json0000664000567000056710000000026412701407773030542 0ustar jenkinsjenkins00000000000000{ "remote_console": { "protocol": "mks", "type": "webmks", "url": "http://example.com:6090/mks.html?token=b60bcfc3-5fd4-4d21-986c-e83379107819" } } nova-13.0.0/doc/api_samples/os-remote-consoles/get-rdp-console-post-req.json0000664000567000056710000000010012701407773030142 0ustar jenkinsjenkins00000000000000{ "os-getRDPConsole": { "type": "rdp-html5" } } 
nova-13.0.0/doc/api_samples/os-remote-consoles/get-vnc-console-post-req.json0000664000567000056710000000007312701407773030154 0ustar jenkinsjenkins00000000000000{ "os-getVNCConsole": { "type": "novnc" } }nova-13.0.0/doc/api_samples/os-remote-consoles/get-spice-console-post-resp.json0000664000567000056710000000023312701407773030651 0ustar jenkinsjenkins00000000000000{ "console": { "type": "spice-html5", "url": "http://127.0.0.1:6082/spice_auto.html?token=a30e5d08-6a20-4043-958f-0852440c6af4" } }nova-13.0.0/doc/api_samples/os-remote-consoles/get-spice-console-post-req.json0000664000567000056710000000010312701407773030463 0ustar jenkinsjenkins00000000000000{ "os-getSPICEConsole": { "type": "spice-html5" } }nova-13.0.0/doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json0000664000567000056710000000022312701407773030333 0ustar jenkinsjenkins00000000000000{ "console": { "type": "novnc", "url": "http://127.0.0.1:6080/vnc_auto.html?token=191996c3-7b0f-42f3-95a7-f1839f2da6ed" } }nova-13.0.0/doc/api_samples/os-remote-consoles/v2.6/0000775000567000056710000000000012701410205023161 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-req.json0000664000567000056710000000012512701407773030346 0ustar jenkinsjenkins00000000000000{ "remote_console": { "protocol": "vnc", "type": "novnc" } } nova-13.0.0/doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json0000664000567000056710000000027012701407773030531 0ustar jenkinsjenkins00000000000000{ "remote_console": { "protocol": "vnc", "type": "novnc", "url": "http://example.com:6080/vnc_auto.html?token=b60bcfc3-5fd4-4d21-986c-e83379107819" } } nova-13.0.0/doc/api_samples/os-remote-consoles/get-serial-console-post-resp.json0000664000567000056710000000020512701407773031024 0ustar jenkinsjenkins00000000000000{ "console": { "type": "serial", "url":"ws://127.0.0.1:6083/?token=f9906a48-b71e-4f18-baca-c987da3ebdb3" } } 
nova-13.0.0/doc/api_samples/os-fping/0000775000567000056710000000000012701410205020453 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-fping/fping-get-details-resp.json0000664000567000056710000000024012701407773025634 0ustar jenkinsjenkins00000000000000{ "server": { "alive": false, "id": "f5e6fd6d-c0a3-4f9e-aabf-d69196b6d11a", "project_id": "6f70656e737461636b20342065766572" } }nova-13.0.0/doc/api_samples/os-fping/fping-get-resp.json0000664000567000056710000000030112701407773024207 0ustar jenkinsjenkins00000000000000{ "servers": [ { "alive": false, "id": "1d1aea35-472b-40cf-9337-8eb68480aaa1", "project_id": "6f70656e737461636b20342065766572" } ] }nova-13.0.0/doc/api_samples/os-floating-ips/0000775000567000056710000000000012701410205021744 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-floating-ips/floating-ips-list-empty-resp.json0000664000567000056710000000003212701407773030322 0ustar jenkinsjenkins00000000000000{ "floating_ips": [] }nova-13.0.0/doc/api_samples/os-floating-ips/floating-ips-get-resp.json0000664000567000056710000000023112701407773026773 0ustar jenkinsjenkins00000000000000{ "floating_ip": { "fixed_ip": null, "id": 1, "instance_id": null, "ip": "10.10.10.1", "pool": "nova" } }nova-13.0.0/doc/api_samples/os-floating-ips/floating-ips-list-resp.json0000664000567000056710000000054612701407773027200 0ustar jenkinsjenkins00000000000000{ "floating_ips": [ { "fixed_ip": null, "id": 1, "instance_id": null, "ip": "10.10.10.1", "pool": "nova" }, { "fixed_ip": null, "id": 2, "instance_id": null, "ip": "10.10.10.2", "pool": "nova" } ] }nova-13.0.0/doc/api_samples/os-floating-ips/floating-ips-create-req.json0000664000567000056710000000002612701407773027277 0ustar jenkinsjenkins00000000000000{ "pool": "nova" }nova-13.0.0/doc/api_samples/os-floating-ips/floating-ips-create-resp.json0000664000567000056710000000023112701407773027457 0ustar jenkinsjenkins00000000000000{ "floating_ip": { "fixed_ip": null, "id": 1, "instance_id": null, 
"ip": "10.10.10.1", "pool": "nova" } }nova-13.0.0/doc/api_samples/consoles/0000775000567000056710000000000012701410205020556 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/consoles/consoles-get-resp.json0000664000567000056710000000030612701407773025041 0ustar jenkinsjenkins00000000000000{ "console": { "console_type": "fake", "host": "fake", "id": 1, "instance_name": "instance-00000001", "password": "C4jBpJ6x", "port": 5999 } }nova-13.0.0/doc/api_samples/consoles/consoles-list-get-resp.json0000664000567000056710000000022612701407773026013 0ustar jenkinsjenkins00000000000000{ "consoles": [ { "console": { "console_type": "fake", "id": 1 } } ] }nova-13.0.0/doc/api_samples/os-floating-ip-pools/0000775000567000056710000000000012701410205022713 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.json0000664000567000056710000000020512701407773030754 0ustar jenkinsjenkins00000000000000{ "floating_ip_pools": [ { "name": "pool1" }, { "name": "pool2" } ] }nova-13.0.0/doc/api_samples/os-extended-volumes/0000775000567000056710000000000012701410205022640 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-extended-volumes/v2.3/0000775000567000056710000000000012701410205023330 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-extended-volumes/v2.3/server-get-resp.json0000664000567000056710000000401412701407773027274 0ustar jenkinsjenkins00000000000000{ "server": { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T03:22:28Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/flavors/1", "rel": "bookmark" } ] }, "hostId": "8feef92e2152b9970b51dbdade024afbec7f8f03daf7cb335a3c1cb9", "id": "7d62983e-23df-4320-bc89-bbc77f2a2e40", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": 
"http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/7d62983e-23df-4320-bc89-bbc77f2a2e40", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/7d62983e-23df-4320-bc89-bbc77f2a2e40", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [ { "id": "volume_id1", "delete_on_termination": "True" }, { "id": "volume_id2", "delete_on_termination": "False" } ], "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T03:22:29Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-extended-volumes/v2.3/servers-detail-resp.json0000664000567000056710000000442512701407773030150 0ustar jenkinsjenkins00000000000000{ "servers": [ { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T03:22:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/flavors/1", "rel": "bookmark" } ] }, "hostId": "f9622ec1b5ab6e3785661ea1c1e0294f95aecbcf27ac4cb60b06bd02", "id": "8e479732-7701-48cd-af7a-04d84f51b742", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/8e479732-7701-48cd-af7a-04d84f51b742", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8e479732-7701-48cd-af7a-04d84f51b742", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [ { "id": 
"volume_id1", "delete_on_termination": "True" }, { "id": "volume_id2", "delete_on_termination": "False" } ], "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T03:22:34Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/os-extended-volumes/server-get-resp.json0000664000567000056710000000405412701407773026610 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T03:22:28Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "8feef92e2152b9970b51dbdade024afbec7f8f03daf7cb335a3c1cb9", "id": "7d62983e-23df-4320-bc89-bbc77f2a2e40", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/7d62983e-23df-4320-bc89-bbc77f2a2e40", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/7d62983e-23df-4320-bc89-bbc77f2a2e40", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [ { "id": "volume_id1" }, { "id": "volume_id2" } ], "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T03:22:29Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-extended-volumes/servers-detail-resp.json0000664000567000056710000000446512701407773027464 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { 
"addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T03:22:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "f9622ec1b5ab6e3785661ea1c1e0294f95aecbcf27ac4cb60b06bd02", "id": "8e479732-7701-48cd-af7a-04d84f51b742", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/8e479732-7701-48cd-af7a-04d84f51b742", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8e479732-7701-48cd-af7a-04d84f51b742", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [ { "id": "volume_id1" }, { "id": "volume_id2" } ], "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T03:22:34Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/os-fixed-ips/0000775000567000056710000000000012701410205021240 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-fixed-ips/v2.4/0000775000567000056710000000000012701410205021731 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-fixed-ips/v2.4/fixedips-get-resp.json0000664000567000056710000000027212701407773026204 0ustar jenkinsjenkins00000000000000{ "fixed_ip": { "address": "192.168.1.1", "cidr": "192.168.1.0/24", "host": "host", "hostname": "compute.host.pvt", "reserved": false } } nova-13.0.0/doc/api_samples/os-fixed-ips/v2.4/fixedip-post-req.json0000664000567000056710000000002712701407773026043 0ustar jenkinsjenkins00000000000000{ "reserve": null 
}nova-13.0.0/doc/api_samples/os-fixed-ips/fixedips-get-resp.json0000664000567000056710000000023712701407773025514 0ustar jenkinsjenkins00000000000000{ "fixed_ip": { "address": "192.168.1.1", "cidr": "192.168.1.0/24", "host": "host", "hostname": "compute.host.pvt" } } nova-13.0.0/doc/api_samples/os-fixed-ips/fixedip-post-req.json0000664000567000056710000000002712701407773025352 0ustar jenkinsjenkins00000000000000{ "reserve": null }nova-13.0.0/doc/api_samples/os-scheduler-hints/0000775000567000056710000000000012701410205022451 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-scheduler-hints/scheduler-hints-post-req.json0000664000567000056710000000037512701407773030242 0ustar jenkinsjenkins00000000000000{ "server" : { "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1" }, "OS-SCH-HNT:scheduler_hints": { "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e" } } nova-13.0.0/doc/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json0000664000567000056710000000103212701407773030413 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "erQXgJ8NBDD4", "id": "4c8b1df3-46f7-4555-98d8-cdb869aaf9ad", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/4c8b1df3-46f7-4555-98d8-cdb869aaf9ad", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/4c8b1df3-46f7-4555-98d8-cdb869aaf9ad", "rel": "bookmark" } ] } } nova-13.0.0/doc/api_samples/os-create-backup/0000775000567000056710000000000012701410205022056 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-create-backup/create-backup-req.json0000664000567000056710000000016212701407773026263 0ustar jenkinsjenkins00000000000000{ "createBackup": { "name": "Backup 1", "backup_type": "daily", "rotation": 1 } } nova-13.0.0/doc/api_samples/os-floating-ip-dns/0000775000567000056710000000000012701410205022343 5ustar 
jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json0000664000567000056710000000031212701407773030165 0ustar jenkinsjenkins00000000000000{ "domain_entries": [ { "availability_zone": null, "domain": "domain1.example.org", "project": "project1", "scope": "public" } ] }nova-13.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json0000664000567000056710000000024712701407773033521 0ustar jenkinsjenkins00000000000000{ "dns_entry": { "domain": "domain1.example.org", "id": null, "ip": "192.168.1.1", "name": "instance1", "type": "A" } }nova-13.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json0000664000567000056710000000025012701407773031131 0ustar jenkinsjenkins00000000000000{ "dns_entry": { "domain": "domain1.example.org", "id": null, "ip": "192.168.1.1", "name": "instance1", "type": null } }nova-13.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json0000664000567000056710000000013112701407773032170 0ustar jenkinsjenkins00000000000000{ "domain_entry": { "scope": "public", "project": "project1" } } nova-13.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json0000664000567000056710000000032212701407773031325 0ustar jenkinsjenkins00000000000000{ "dns_entries": [ { "domain": "domain1.example.org", "id": null, "ip": "192.168.1.1", "name": "instance1", "type": null } ] }nova-13.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json0000664000567000056710000000012412701407773033331 0ustar jenkinsjenkins00000000000000{ "dns_entry": { "ip": "192.168.53.11", "dns_type": "A" } } nova-13.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json0000664000567000056710000000024412701407773032357 0ustar jenkinsjenkins00000000000000{ "domain_entry": { "availability_zone": null, "domain": "domain1.example.org", "project": "project1", "scope": "public" } 
}nova-13.0.0/doc/api_samples/os-hosts/0000775000567000056710000000000012701410205020510 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-hosts/hosts-list-resp.json0000664000567000056710000000137312701407773024507 0ustar jenkinsjenkins00000000000000{ "hosts": [ { "host_name": "b6e4adbc193d428ea923899d07fb001e", "service": "conductor", "zone": "internal" }, { "host_name": "09c025b0efc64211bd23fc50fa974cdf", "service": "compute", "zone": "nova" }, { "host_name": "e73ec0bd35c64de4a1adfa8b8969a1f6", "service": "consoleauth", "zone": "internal" }, { "host_name": "396a8a0a234f476eb05fb9fbc5802ba7", "service": "network", "zone": "internal" }, { "host_name": "abffda96592c4eacaf4111c28fddee17", "service": "scheduler", "zone": "internal" } ] } nova-13.0.0/doc/api_samples/os-hosts/host-get-resp.json0000664000567000056710000000140612701407773024125 0ustar jenkinsjenkins00000000000000{ "host": [ { "resource": { "cpu": 1, "disk_gb": 1028, "host": "c1a7de0ac9d94e4baceae031d05caae3", "memory_mb": 8192, "project": "(total)" } }, { "resource": { "cpu": 0, "disk_gb": 0, "host": "c1a7de0ac9d94e4baceae031d05caae3", "memory_mb": 512, "project": "(used_now)" } }, { "resource": { "cpu": 0, "disk_gb": 0, "host": "c1a7de0ac9d94e4baceae031d05caae3", "memory_mb": 0, "project": "(used_max)" } } ] }nova-13.0.0/doc/api_samples/os-hosts/host-get-shutdown.json0000664000567000056710000000012312701407773025022 0ustar jenkinsjenkins00000000000000{ "host": "77cfa0002e4d45fe97f185968111b27b", "power_action": "shutdown" } nova-13.0.0/doc/api_samples/os-hosts/host-get-reboot.json0000664000567000056710000000012112701407773024437 0ustar jenkinsjenkins00000000000000{ "host": "9557750dbc464741a89c907921c1cb31", "power_action": "reboot" } nova-13.0.0/doc/api_samples/os-hosts/host-put-maintenance-req.json0000664000567000056710000000007612701407773026256 0ustar jenkinsjenkins00000000000000{ "status": "enable", "maintenance_mode": "disable" } 
nova-13.0.0/doc/api_samples/os-hosts/host-put-maintenance-resp.json0000664000567000056710000000016712701407773026441 0ustar jenkinsjenkins00000000000000{ "host": "65c5d5b7e3bd44308e67fc50f362aee6", "maintenance_mode": "off_maintenance", "status": "enabled" } nova-13.0.0/doc/api_samples/os-hosts/host-get-startup.json0000664000567000056710000000012212701407773024650 0ustar jenkinsjenkins00000000000000{ "host": "4b392b27930343bbaa27fd5d8328a564", "power_action": "startup" } nova-13.0.0/doc/api_samples/os-volumes/0000775000567000056710000000000012701410205021042 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-volumes/volume-attachment-detail-resp.json0000664000567000056710000000035612701407773027625 0ustar jenkinsjenkins00000000000000{ "volumeAttachment": { "device": "/dev/sdd", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "2390fb4d-1693-45d7-b309-e29c4af16538", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" } }nova-13.0.0/doc/api_samples/os-volumes/os-volumes-detail-resp.json0000664000567000056710000000141512701407773026276 0ustar jenkinsjenkins00000000000000{ "volumes": [ { "attachments": [ { "device": "/", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "3912f2b4-c5ba-4aec-9165-872876fe202e", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" } ], "availabilityZone": "zone1:host1", "createdAt": "1999-01-01T01:01:01.000000", "displayDescription": "Volume Description", "displayName": "Volume Name", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "metadata": {}, "size": 100, "snapshotId": null, "status": "in-use", "volumeType": "Backup" } ] } nova-13.0.0/doc/api_samples/os-volumes/snapshots-list-resp.json0000664000567000056710000000156512701407773025726 0ustar jenkinsjenkins00000000000000{ "snapshots": [ { "createdAt": "2013-02-25T16:27:54.684999", "displayDescription": "Default description", "displayName": "Default name", "id": 100, "size": 100, "status": "available", "volumeId": 12 }, { "createdAt": 
"2013-02-25T16:27:54.685005", "displayDescription": "Default description", "displayName": "Default name", "id": 101, "size": 100, "status": "available", "volumeId": 12 }, { "createdAt": "2013-02-25T16:27:54.685008", "displayDescription": "Default description", "displayName": "Default name", "id": 102, "size": 100, "status": "available", "volumeId": 12 } ] }nova-13.0.0/doc/api_samples/os-volumes/snapshots-show-resp.json0000664000567000056710000000041212701407773025721 0ustar jenkinsjenkins00000000000000{ "snapshot": { "createdAt": "2013-02-25T16:27:54.724209", "displayDescription": "Default description", "displayName": "Default name", "id": "100", "size": 100, "status": "available", "volumeId": 12 } }nova-13.0.0/doc/api_samples/os-volumes/os-volumes-post-req.json0000664000567000056710000000026612701407773025642 0ustar jenkinsjenkins00000000000000{ "volume": { "availability_zone": "zone1:host1", "display_name": "Volume Name", "display_description": "Volume Description", "size": 100 } } nova-13.0.0/doc/api_samples/os-volumes/os-volumes-post-resp.json0000664000567000056710000000125712701407773026025 0ustar jenkinsjenkins00000000000000{ "volume": { "attachments": [ { "device": "/", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "3912f2b4-c5ba-4aec-9165-872876fe202e", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" } ], "availabilityZone": "zone1:host1", "createdAt": "2013-02-18T14:51:17.970024", "displayDescription": "Volume Description", "displayName": "Volume Name", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "metadata": {}, "size": 100, "snapshotId": null, "status": "in-use", "volumeType": "Backup" } }nova-13.0.0/doc/api_samples/os-volumes/snapshot-create-resp.json0000664000567000056710000000044112701407773026023 0ustar jenkinsjenkins00000000000000{ "snapshot": { "createdAt": "2013-02-25T16:27:54.680544", "displayDescription": "Daily backup", "displayName": "snap-001", "id": 100, "size": 100, "status": "available", "volumeId": 
"521752a6-acf6-4b2d-bc7a-119f9148cd8c" } }nova-13.0.0/doc/api_samples/os-volumes/attach-volume-to-server-req.json0000664000567000056710000000017412701407773027241 0ustar jenkinsjenkins00000000000000{ "volumeAttachment": { "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "device": "/dev/vdd" } }nova-13.0.0/doc/api_samples/os-volumes/snapshot-create-req.json0000664000567000056710000000030312701407773025636 0ustar jenkinsjenkins00000000000000{ "snapshot": { "display_name": "snap-001", "display_description": "Daily backup", "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", "force": false } }nova-13.0.0/doc/api_samples/os-volumes/update-volume-req.json0000664000567000056710000000013612701407773025331 0ustar jenkinsjenkins00000000000000{ "volumeAttachment": { "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f805" } }nova-13.0.0/doc/api_samples/os-volumes/list-volume-attachments-resp.json0000664000567000056710000000100312701407773027507 0ustar jenkinsjenkins00000000000000{ "volumeAttachments": [ { "device": "/dev/sdd", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" }, { "device": "/dev/sdc", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f804", "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804" } ] }nova-13.0.0/doc/api_samples/os-volumes/os-volumes-index-resp.json0000664000567000056710000000141412701407773026142 0ustar jenkinsjenkins00000000000000{ "volumes": [ { "attachments": [ { "device": "/", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "3912f2b4-c5ba-4aec-9165-872876fe202e", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" } ], "availabilityZone": "zone1:host1", "createdAt": "2013-02-19T20:01:40.274897", "displayDescription": "Volume Description", "displayName": "Volume Name", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "metadata": {}, "size": 100, "snapshotId": null, "status": "in-use", "volumeType": 
"Backup" } ] }nova-13.0.0/doc/api_samples/os-volumes/snapshots-detail-resp.json0000664000567000056710000000156512701407773026215 0ustar jenkinsjenkins00000000000000{ "snapshots": [ { "createdAt": "2013-02-25T16:27:54.671372", "displayDescription": "Default description", "displayName": "Default name", "id": 100, "size": 100, "status": "available", "volumeId": 12 }, { "createdAt": "2013-02-25T16:27:54.671378", "displayDescription": "Default description", "displayName": "Default name", "id": 101, "size": 100, "status": "available", "volumeId": 12 }, { "createdAt": "2013-02-25T16:27:54.671381", "displayDescription": "Default description", "displayName": "Default name", "id": 102, "size": 100, "status": "available", "volumeId": 12 } ] }nova-13.0.0/doc/api_samples/os-volumes/attach-volume-to-server-resp.json0000664000567000056710000000035612701407773027425 0ustar jenkinsjenkins00000000000000{ "volumeAttachment": { "device": "/dev/vdd", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "0c92f3f6-c253-4c9b-bd43-e880a8d2eb0a", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" } }nova-13.0.0/doc/api_samples/os-volumes/os-volumes-get-resp.json0000664000567000056710000000125712701407773025617 0ustar jenkinsjenkins00000000000000{ "volume": { "attachments": [ { "device": "/", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "3912f2b4-c5ba-4aec-9165-872876fe202e", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" } ], "availabilityZone": "zone1:host1", "createdAt": "2013-02-18T14:51:18.528085", "displayDescription": "Volume Description", "displayName": "Volume Name", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "metadata": {}, "size": 100, "snapshotId": null, "status": "in-use", "volumeType": "Backup" } }nova-13.0.0/doc/api_samples/os-aggregates/0000775000567000056710000000000012701410205021461 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-aggregates/aggregate-add-host-post-req.json0000664000567000056710000000011712701407773027572 0ustar 
jenkinsjenkins00000000000000{ "add_host": { "host": "21549b2f665945baaa7101926a00143c" } } nova-13.0.0/doc/api_samples/os-aggregates/aggregates-list-get-resp.json0000664000567000056710000000061712701407773027206 0ustar jenkinsjenkins00000000000000{ "aggregates": [ { "availability_zone": "nova", "created_at": "2013-08-18T12:17:56.856455", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "nova" }, "name": "name", "updated_at": null } ] } nova-13.0.0/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json0000664000567000056710000000060612701407773030142 0ustar jenkinsjenkins00000000000000{ "aggregate": { "availability_zone": "nova", "created_at": "2013-08-18T12:17:56.297823", "deleted": false, "deleted_at": null, "hosts": [ "21549b2f665945baaa7101926a00143c" ], "id": 1, "metadata": { "availability_zone": "nova" }, "name": "name", "updated_at": null } } nova-13.0.0/doc/api_samples/os-aggregates/aggregate-remove-host-post-req.json0000664000567000056710000000012212701407773030333 0ustar jenkinsjenkins00000000000000{ "remove_host": { "host": "bf1454b3d71145d49fca2101c56c728d" } } nova-13.0.0/doc/api_samples/os-aggregates/aggregate-update-post-resp.json0000664000567000056710000000055312701407773027537 0ustar jenkinsjenkins00000000000000{ "aggregate": { "availability_zone": "nova2", "created_at": "2013-08-18T12:17:56.259751", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "nova2" }, "name": "newname", "updated_at": "2013-08-18T12:17:56.286720" } } nova-13.0.0/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json0000664000567000056710000000051612701407773030707 0ustar jenkinsjenkins00000000000000{ "aggregate": { "availability_zone": "nova", "created_at": "2013-08-18T12:17:56.990581", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "nova" }, "name": "name", "updated_at": null } } 
nova-13.0.0/doc/api_samples/os-aggregates/aggregate-metadata-post-req.json0000664000567000056710000000021212701407773027643 0ustar jenkinsjenkins00000000000000{ "set_metadata": { "metadata": { "key": "value" } } }nova-13.0.0/doc/api_samples/os-aggregates/aggregate-post-req.json0000664000567000056710000000013412701407773026070 0ustar jenkinsjenkins00000000000000{ "aggregate": { "name": "name", "availability_zone": "nova" } }nova-13.0.0/doc/api_samples/os-aggregates/aggregates-get-resp.json0000664000567000056710000000051612701407773026233 0ustar jenkinsjenkins00000000000000{ "aggregate": { "availability_zone": "nova", "created_at": "2013-08-18T12:17:56.380226", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "nova" }, "name": "name", "updated_at": null } } nova-13.0.0/doc/api_samples/os-aggregates/aggregate-post-resp.json0000664000567000056710000000036012701407773026253 0ustar jenkinsjenkins00000000000000{ "aggregate": { "availability_zone": "nova", "created_at": "2013-08-18T12:17:55.751757", "deleted": false, "deleted_at": null, "id": 1, "name": "name", "updated_at": null } } nova-13.0.0/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json0000664000567000056710000000060212701407773030213 0ustar jenkinsjenkins00000000000000{ "aggregate": { "availability_zone": "nova", "created_at": "2013-08-18T12:17:55.959571", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "nova", "key": "value" }, "name": "name", "updated_at": "2013-08-18T12:17:55.986540" } } nova-13.0.0/doc/api_samples/os-aggregates/aggregate-update-post-req.json0000664000567000056710000000014012701407773027345 0ustar jenkinsjenkins00000000000000{ "aggregate": { "name": "newname", "availability_zone": "nova2" } }nova-13.0.0/doc/api_samples/os-server-usage/0000775000567000056710000000000012701410205021760 5ustar 
jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-server-usage/server-get-resp.json0000664000567000056710000000373212701407773025732 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-08-15T08:12:40Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "73cf3a40601b63f5992894be2daa3712dd599d1c919984951e21edda", "id": "cee6d136-e378-4cfc-9eec-71797f025991", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/cee6d136-e378-4cfc-9eec-71797f025991", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/cee6d136-e378-4cfc-9eec-71797f025991", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "OS-SRV-USG:launched_at": "2013-08-15T08:12:40.108903", "OS-SRV-USG:terminated_at": null, "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-08-15T08:12:40Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-server-usage/servers-detail-resp.json0000664000567000056710000000431312701407773026574 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-08-15T12:04:04Z", "flavor": { "id": "1", "links": [ { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "117535ce0eda7ee02ebffe2c976173629385481ae3f2bded5e14a66b", "id": "ae114799-9164-48f5-a036-6ef9310acbc4", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/ae114799-9164-48f5-a036-6ef9310acbc4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/ae114799-9164-48f5-a036-6ef9310acbc4", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "OS-SRV-USG:launched_at": "2013-08-15T12:04:05.368766", "OS-SRV-USG:terminated_at": null, "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-08-15T12:04:05Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/os-flavor-rxtx/0000775000567000056710000000000012701410205021644 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json0000664000567000056710000000122112701407773026573 0ustar jenkinsjenkins00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "rxtx_factor": 1.0, "swap": "", "vcpus": 1 } } nova-13.0.0/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json0000664000567000056710000000024512701407773026624 0ustar jenkinsjenkins00000000000000{ "flavor": { "name": "flavortest", "ram": 1024, "vcpus": 2, "disk": 10, "id": 
"100", "rxtx_factor": 2.0 } } nova-13.0.0/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json0000664000567000056710000000716612701407773027005 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "rxtx_factor": 1.0, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "ram": 2048, "rxtx_factor": 1.0, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 40, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "ram": 4096, "rxtx_factor": 1.0, "swap": "", "vcpus": 2 }, { "OS-FLV-DISABLED:disabled": false, "disk": 80, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "ram": 8192, "rxtx_factor": 1.0, "swap": "", "vcpus": 4 }, { "OS-FLV-DISABLED:disabled": false, "disk": 160, "OS-FLV-EXT-DATA:ephemeral": 
0, "os-flavor-access:is_public": true, "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "ram": 16384, "rxtx_factor": 1.0, "swap": "", "vcpus": 8 } ] } nova-13.0.0/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json0000664000567000056710000000123412701407773027005 0ustar jenkinsjenkins00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 10, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "100", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/100", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/100", "rel": "bookmark" } ], "name": "flavortest", "ram": 1024, "rxtx_factor": 2.0, "swap": "", "vcpus": 2 } } nova-13.0.0/doc/api_samples/os-quota-sets/0000775000567000056710000000000012701410205021455 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-quota-sets/quotas-update-force-post-resp.json0000664000567000056710000000070612701407773030215 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": 10, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 45, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10, "server_groups": 10, "server_group_members": 10 } } nova-13.0.0/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json0000664000567000056710000000074312701407773030217 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": 10, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, 
"security_group_rules": 20, "security_groups": 10, "server_groups": 10, "server_group_members": 10 } } nova-13.0.0/doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json0000664000567000056710000000074312701407773027366 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": 10, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10, "server_groups": 10, "server_group_members": 10 } } nova-13.0.0/doc/api_samples/os-quota-sets/quotas-update-post-req.json0000664000567000056710000000007212701407773026733 0ustar jenkinsjenkins00000000000000{ "quota_set": { "security_groups": 45 } }nova-13.0.0/doc/api_samples/os-quota-sets/user-quotas-update-post-resp.json0000664000567000056710000000070512701407773030074 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": 10, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 9, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10, "server_groups": 10, "server_group_members": 10 } } nova-13.0.0/doc/api_samples/os-quota-sets/user-quotas-update-post-req.json0000664000567000056710000000011412701407773027704 0ustar jenkinsjenkins00000000000000{ "quota_set": { "force": "True", "instances": 9 } }nova-13.0.0/doc/api_samples/os-quota-sets/quotas-update-post-resp.json0000664000567000056710000000070612701407773027121 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": 10, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 45, "server_groups": 10, "server_group_members": 10 
} } nova-13.0.0/doc/api_samples/os-quota-sets/quotas-show-get-resp.json0000664000567000056710000000074312701407773026412 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": 10, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10, "server_groups": 10, "server_group_members": 10 } } nova-13.0.0/doc/api_samples/os-quota-sets/quotas-update-force-post-req.json0000664000567000056710000000011512701407773030025 0ustar jenkinsjenkins00000000000000{ "quota_set": { "force": "True", "instances": 45 } }nova-13.0.0/doc/api_samples/os-suspend-server/0000775000567000056710000000000012701410205022335 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-suspend-server/server-resume.json0000664000567000056710000000002612701407773026052 0ustar jenkinsjenkins00000000000000{ "resume": null }nova-13.0.0/doc/api_samples/os-suspend-server/server-suspend.json0000664000567000056710000000002712701407773026234 0ustar jenkinsjenkins00000000000000{ "suspend": null }nova-13.0.0/doc/api_samples/server-metadata/0000775000567000056710000000000012701410205022015 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/server-metadata/server-metadata-all-req.json0000664000567000056710000000006712701407773027352 0ustar jenkinsjenkins00000000000000{ "metadata": { "foo": "Foo Value" } } nova-13.0.0/doc/api_samples/server-metadata/server-metadata-req.json0000664000567000056710000000006312701407773026600 0ustar jenkinsjenkins00000000000000{ "meta": { "foo": "Bar Value" } } nova-13.0.0/doc/api_samples/server-metadata/server-metadata-all-resp.json0000664000567000056710000000006612701407773027533 0ustar jenkinsjenkins00000000000000{ "metadata": { "foo": "Foo Value" } 
}nova-13.0.0/doc/api_samples/server-metadata/server-metadata-resp.json0000664000567000056710000000006312701407773026762 0ustar jenkinsjenkins00000000000000{ "meta": { "foo": "Foo Value" } } nova-13.0.0/doc/api_samples/extension-info/0000775000567000056710000000000012701410205021676 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/extension-info/extensions-get-resp-v2.json0000664000567000056710000000041112701407773027055 0ustar jenkinsjenkins00000000000000{ "extension": { "alias": "os-agents", "description": "Agents support.", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/agents/api/v2", "updated": "2012-10-28T00:00:00Z" } }nova-13.0.0/doc/api_samples/extension-info/extensions-get-resp.json0000664000567000056710000000040412701407773026532 0ustar jenkinsjenkins00000000000000{ "extension": { "alias": "os-agents", "description": "Agents support.", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" } }nova-13.0.0/doc/api_samples/os-security-groups/0000775000567000056710000000000012701410205022534 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-security-groups/server-post-req.json0000664000567000056710000000052112701407773026523 0ustar jenkinsjenkins00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [{"name": "test"}] } } nova-13.0.0/doc/api_samples/os-security-groups/server-security-groups-list-resp.json0000664000567000056710000000034512701407773032061 0ustar jenkinsjenkins00000000000000{ "security_groups": [ { "description": "default", "id": 1, "name": "default", "rules": [], "tenant_id": "6f70656e737461636b20342065766572" } ] 
}nova-13.0.0/doc/api_samples/os-security-groups/security-groups-create-resp.json0000664000567000056710000000027412701407773031046 0ustar jenkinsjenkins00000000000000{ "security_group": { "description": "default", "id": 1, "name": "default", "rules": [], "tenant_id": "6f70656e737461636b20342065766572" } }nova-13.0.0/doc/api_samples/os-security-groups/security-group-add-post-req.json0000664000567000056710000000007212701407773030745 0ustar jenkinsjenkins00000000000000{ "addSecurityGroup": { "name": "test" } }nova-13.0.0/doc/api_samples/os-security-groups/security-groups-list-get-resp.json0000664000567000056710000000034512701407773031332 0ustar jenkinsjenkins00000000000000{ "security_groups": [ { "description": "default", "id": 1, "name": "default", "rules": [], "tenant_id": "6f70656e737461636b20342065766572" } ] }nova-13.0.0/doc/api_samples/os-security-groups/server-post-resp.json0000664000567000056710000000117412701407773026712 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "xhS2khTdkRkT", "id": "60874907-c72b-4a01-805d-54c992510e47", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/60874907-c72b-4a01-805d-54c992510e47", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/60874907-c72b-4a01-805d-54c992510e47", "rel": "bookmark" } ], "security_groups": [ { "name": "test" } ] } }nova-13.0.0/doc/api_samples/os-security-groups/server-get-resp.json0000664000567000056710000000372312701407773026506 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2014-09-18T10:13:33Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": 
"24451d49cba30e60300a5b928ebc93a2d0b43c084a677b0a14fd678b", "id": "b08eb8d8-db43-44fb-bd89-dfe3302b84ef", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/b08eb8d8-db43-44fb-bd89-dfe3302b84ef", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/b08eb8d8-db43-44fb-bd89-dfe3302b84ef", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "security_groups": [ { "name": "test" } ], "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2014-09-18T10:13:34Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-security-groups/servers-detail-resp.json0000664000567000056710000000432012701407773027346 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2014-09-18T10:13:33Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "2ab794bccd321fe64f9f8b679266aa2c96825f467434bbdd71b09b1d", "id": "d182742c-6f20-479c-8e32-f79f9c9df6e3", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/d182742c-6f20-479c-8e32-f79f9c9df6e3", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/d182742c-6f20-479c-8e32-f79f9c9df6e3", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "security_groups": [ { "name": "test" } ], "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2014-09-18T10:13:34Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/os-security-groups/security-group-post-req.json0000664000567000056710000000013612701407773030220 0ustar jenkinsjenkins00000000000000{ "security_group": { "name": "test", "description": "description" } }nova-13.0.0/doc/api_samples/os-security-groups/security-group-remove-post-req.json0000664000567000056710000000007512701407773031515 0ustar jenkinsjenkins00000000000000{ "removeSecurityGroup": { "name": "test" } }nova-13.0.0/doc/api_samples/os-security-groups/security-groups-get-resp.json0000664000567000056710000000027412701407773030362 0ustar jenkinsjenkins00000000000000{ "security_group": { "description": "default", "id": 1, "name": "default", "rules": [], "tenant_id": "6f70656e737461636b20342065766572" } }nova-13.0.0/doc/api_samples/os-preserve-ephemeral-rebuild/0000775000567000056710000000000012701410205024567 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.0.0/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.jsonnova-13.0.0/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-r0000664000567000056710000000336312701407773034774 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2013-12-30T12:28:14Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, 
"hostId": "ee8ea077f8548ce25c59c2d5020d0f82810c815c210fd68194a5c0f8", "id": "810e78d5-47fe-48bf-9559-bfe5dc918685", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/810e78d5-47fe-48bf-9559-bfe5dc918685", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/810e78d5-47fe-48bf-9559-bfe5dc918685", "rel": "bookmark" } ], "metadata": { "meta_var": "meta_val" }, "name": "foobar", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-12-30T12:28:15Z", "user_id": "fake" } }././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.0.0/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral.jsonnova-13.0.0/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral.j0000664000567000056710000000037112701407773034761 0ustar jenkinsjenkins00000000000000{ "rebuild": { "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b", "name": "foobar", "adminPass": "seekr3t", "metadata": { "meta_var": "meta_val" }, "preserve_ephemeral": false } } nova-13.0.0/doc/api_samples/os-networks/0000775000567000056710000000000012701410205021224 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-networks/networks-disassociate-req.json0000664000567000056710000000003512701407773027247 0ustar jenkinsjenkins00000000000000{ "disassociate": null } nova-13.0.0/doc/api_samples/os-networks/network-create-resp.json0000664000567000056710000000173312701407773026044 0ustar jenkinsjenkins00000000000000{ "network": { "bridge": null, "bridge_interface": null, "broadcast": "10.20.105.255", "cidr": "10.20.105.0/24", "cidr_v6": null, "created_at": null, "deleted": null, 
"deleted_at": null, "dhcp_server": "10.20.105.2", "dhcp_start": "10.20.105.2", "dns1": null, "dns2": null, "enable_dhcp": false, "gateway": "10.20.105.1", "gateway_v6": null, "host": null, "id": "d7a17c0c-457e-4ab4-a99c-4fa1762f5359", "injected": null, "label": "new net 111", "mtu": 9000, "multi_host": null, "netmask": "255.255.255.0", "netmask_v6": null, "priority": null, "project_id": null, "rxtx_base": null, "share_address": true, "updated_at": null, "vlan": null, "vpn_private_address": null, "vpn_public_address": null, "vpn_public_port": null } }nova-13.0.0/doc/api_samples/os-networks/networks-list-resp.json0000664000567000056710000000443512701407773025741 0ustar jenkinsjenkins00000000000000{ "networks": [ { "bridge": "br100", "bridge_interface": "eth0", "broadcast": "10.0.0.7", "cidr": "10.0.0.0/29", "cidr_v6": null, "created_at": "2011-08-15T06:19:19.387525", "deleted": false, "deleted_at": null, "dhcp_server": "10.0.0.1", "dhcp_start": "10.0.0.3", "dns1": null, "dns2": null, "enable_dhcp": true, "gateway": "10.0.0.1", "gateway_v6": null, "host": "nsokolov-desktop", "id": "20c8acc0-f747-4d71-a389-46d078ebf047", "injected": false, "label": "mynet_0", "mtu": null, "multi_host": false, "netmask": "255.255.255.248", "netmask_v6": null, "priority": null, "project_id": "1234", "rxtx_base": null, "share_address": false, "updated_at": "2011-08-16T09:26:13.048257", "vlan": 100, "vpn_private_address": "10.0.0.2", "vpn_public_address": "127.0.0.1", "vpn_public_port": 1000 }, { "bridge": "br101", "bridge_interface": "eth0", "broadcast": "10.0.0.15", "cidr": "10.0.0.10/29", "cidr_v6": null, "created_at": "2011-08-15T06:19:19.885495", "deleted": false, "deleted_at": null, "dhcp_server": "10.0.0.9", "dhcp_start": "10.0.0.11", "dns1": null, "dns2": null, "enable_dhcp": true, "gateway": "10.0.0.9", "gateway_v6": null, "host": null, "id": "20c8acc0-f747-4d71-a389-46d078ebf000", "injected": false, "label": "mynet_1", "mtu": null, "multi_host": false, "netmask": 
"255.255.255.248", "netmask_v6": null, "priority": null, "project_id": null, "rxtx_base": null, "share_address": false, "updated_at": null, "vlan": 101, "vpn_private_address": "10.0.0.10", "vpn_public_address": null, "vpn_public_port": 1001 } ] }nova-13.0.0/doc/api_samples/os-networks/network-show-resp.json0000664000567000056710000000203412701407773025554 0ustar jenkinsjenkins00000000000000{ "network": { "bridge": "br100", "bridge_interface": "eth0", "broadcast": "10.0.0.7", "cidr": "10.0.0.0/29", "cidr_v6": null, "created_at": "2011-08-15T06:19:19.387525", "deleted": false, "deleted_at": null, "dhcp_server": "10.0.0.1", "dhcp_start": "10.0.0.3", "dns1": null, "dns2": null, "enable_dhcp": true, "gateway": "10.0.0.1", "gateway_v6": null, "host": "nsokolov-desktop", "id": "20c8acc0-f747-4d71-a389-46d078ebf047", "injected": false, "label": "mynet_0", "mtu": null, "multi_host": false, "netmask": "255.255.255.248", "netmask_v6": null, "priority": null, "project_id": "1234", "rxtx_base": null, "share_address": false, "updated_at": "2011-08-16T09:26:13.048257", "vlan": 100, "vpn_private_address": "10.0.0.2", "vpn_public_address": "127.0.0.1", "vpn_public_port": 1000 } }nova-13.0.0/doc/api_samples/os-networks/network-add-req.json0000664000567000056710000000002212701407773025135 0ustar jenkinsjenkins00000000000000{ "id": "1" } nova-13.0.0/doc/api_samples/os-networks/network-create-req.json0000664000567000056710000000044512701407773025661 0ustar jenkinsjenkins00000000000000{ "network": { "label": "new net 111", "cidr": "10.20.105.0/24", "mtu": 9000, "dhcp_server": "10.20.105.2", "enable_dhcp": false, "share_address": true, "allowed_start": "10.20.105.10", "allowed_end": "10.20.105.200" } } nova-13.0.0/doc/api_samples/os-server-external-events/0000775000567000056710000000000012701410205024000 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-server-external-events/event-create-resp.json0000664000567000056710000000035412701407773030246 0ustar 
jenkinsjenkins00000000000000{ "events": [ { "code": 200, "name": "network-changed", "server_uuid": "ff1df7b2-6772-45fd-9326-c0a3b05591c2", "status": "completed", "tag": "foo" } ] }nova-13.0.0/doc/api_samples/os-server-external-events/event-create-req.json0000664000567000056710000000031612701407773030062 0ustar jenkinsjenkins00000000000000{ "events": [ { "name": "test-event", "tag": "foo", "status": "completed", "server_uuid": "3df201cf-2451-44f2-8d25-a4ca826fc1f3" } ] }nova-13.0.0/doc/api_samples/os-baremetal-nodes/0000775000567000056710000000000012701410205022412 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-baremetal-nodes/baremetal-node-get-resp.json0000664000567000056710000000046412701407773027734 0ustar jenkinsjenkins00000000000000{ "node": { "cpus": "2", "disk_gb": "10", "host": "IRONIC MANAGED", "id": "058d27fa-241b-445a-a386-08c04f96db43", "instance_uuid": "1ea4e53e-149a-4f02-9515-590c9fb2315a", "interfaces": [], "memory_mb": "1024", "task_state": "active" } }nova-13.0.0/doc/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json0000664000567000056710000000106112701407773030122 0ustar jenkinsjenkins00000000000000{ "nodes": [ { "cpus": "2", "disk_gb": "10", "host": "IRONIC MANAGED", "id": "058d27fa-241b-445a-a386-08c04f96db43", "interfaces": [], "memory_mb": "1024", "task_state": "active" }, { "cpus": "2", "disk_gb": "10", "host": "IRONIC MANAGED", "id": "e2025409-f3ce-4d6a-9788-c565cf3b1b1c", "interfaces": [], "memory_mb": "1024", "task_state": "active" } ] }nova-13.0.0/doc/api_samples/servers-sort/0000775000567000056710000000000012701410205021407 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/servers-sort/server-sort-keys-list-resp.json0000664000567000056710000000113512701407773027506 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "e08e6d34-fcc1-480e-b11e-24a675b479f8", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/e08e6d34-fcc1-480e-b11e-24a675b479f8", 
"rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/e08e6d34-fcc1-480e-b11e-24a675b479f8", "rel": "bookmark" } ], "name": "new-server-test" } ] } nova-13.0.0/doc/api_samples/os-extended-availability-zone/0000775000567000056710000000000012701410205024571 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-extended-availability-zone/server-get-resp.json0000664000567000056710000000363712701407773030547 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T02:54:56Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "b75d6736650f9b272223ceb48f4cde001de1856e381613a922117ab7", "id": "f22e4521-d03a-4e9f-9fd3-016b9e227219", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f22e4521-d03a-4e9f-9fd3-016b9e227219", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f22e4521-d03a-4e9f-9fd3-016b9e227219", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "OS-EXT-AZ:availability_zone": "nova", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T02:54:57Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-extended-availability-zone/servers-detail-resp.json0000664000567000056710000000421412701407773031405 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", 
"accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T02:54:56Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "cf5540800371e53064a60b36ff9d6d1d6a8719ffc870c63a270c6bee", "id": "55f43fa2-dc7c-4c0b-b21a-76f9abe516f9", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/55f43fa2-dc7c-4c0b-b21a-76f9abe516f9", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/55f43fa2-dc7c-4c0b-b21a-76f9abe516f9", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "OS-EXT-AZ:availability_zone": "nova", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T02:54:58Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/os-console-output/0000775000567000056710000000000012701410205022350 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-console-output/console-output-post-req.json0000664000567000056710000000007412701407773030034 0ustar jenkinsjenkins00000000000000{ "os-getConsoleOutput": { "length": 50 } } nova-13.0.0/doc/api_samples/os-console-output/console-output-post-resp.json0000664000567000056710000000007312701407773030215 0ustar jenkinsjenkins00000000000000{ "output": "FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE" }nova-13.0.0/doc/api_samples/flavor-access/0000775000567000056710000000000012701410205021461 5ustar 
jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/flavor-access/flavor-access-show-resp.json0000664000567000056710000000116512701407773027054 0ustar jenkinsjenkins00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "swap": "", "vcpus": 1 } } nova-13.0.0/doc/api_samples/flavor-access/flavor-access-add-tenant-resp.json0000664000567000056710000000017212701407773030110 0ustar jenkinsjenkins00000000000000{ "flavor_access": [ { "flavor_id": "10", "tenant_id": "fake_tenant" } ] }nova-13.0.0/doc/api_samples/flavor-access/flavor-access-remove-tenant-req.json0000664000567000056710000000010612701407773030470 0ustar jenkinsjenkins00000000000000{ "removeTenantAccess": { "tenant": "fake_tenant" } } nova-13.0.0/doc/api_samples/flavor-access/flavor-access-create-resp.json0000664000567000056710000000117712701407773027342 0ustar jenkinsjenkins00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 10, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": false, "id": "10", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/10", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/10", "rel": "bookmark" } ], "name": "test_flavor", "ram": 1024, "swap": "", "vcpus": 2 } } nova-13.0.0/doc/api_samples/flavor-access/flavor-access-detail-resp.json0000664000567000056710000000672612701407773027346 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": 
"http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "ram": 2048, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 40, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "ram": 4096, "swap": "", "vcpus": 2 }, { "OS-FLV-DISABLED:disabled": false, "disk": 80, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "ram": 8192, "swap": "", "vcpus": 4 }, { "OS-FLV-DISABLED:disabled": false, "disk": 160, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "ram": 16384, "swap": "", "vcpus": 8 } ] } 
nova-13.0.0/doc/api_samples/flavor-access/flavor-access-add-tenant-req.json0000664000567000056710000000010312701407773027720 0ustar jenkinsjenkins00000000000000{ "addTenantAccess": { "tenant": "fake_tenant" } } nova-13.0.0/doc/api_samples/flavor-access/flavor-access-create-req.json0000664000567000056710000000026612701407773027156 0ustar jenkinsjenkins00000000000000{ "flavor": { "name": "test_flavor", "ram": 1024, "vcpus": 2, "disk": 10, "id": "10", "os-flavor-access:is_public": false } } nova-13.0.0/doc/api_samples/flavor-access/flavor-access-remove-tenant-resp.json0000664000567000056710000000004012701407773030647 0ustar jenkinsjenkins00000000000000{ "flavor_access": [ ] }nova-13.0.0/doc/api_samples/flavor-access/flavor-access-list-resp.json0000664000567000056710000000017212701407773027044 0ustar jenkinsjenkins00000000000000{ "flavor_access": [ { "flavor_id": "10", "tenant_id": "fake_tenant" } ] }nova-13.0.0/doc/api_samples/os-extended-status/0000775000567000056710000000000012701410205022471 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-extended-status/server-get-resp.json0000664000567000056710000000374512701407773026447 0ustar jenkinsjenkins00000000000000{ "server": { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T03:07:06Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "46d2aa2d637bd55606304b611a1928627ee1278c149aef2206268d6e", "id": "a868cb5e-c794-47bf-9cd8-e302b72bb94b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": 
"http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a868cb5e-c794-47bf-9cd8-e302b72bb94b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a868cb5e-c794-47bf-9cd8-e302b72bb94b", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T03:07:07Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-extended-status/servers-detail-resp.json0000664000567000056710000000433212701407773027306 0ustar jenkinsjenkins00000000000000{ "servers": [ { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T03:07:09Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "a275e77473e464558c4aba0d68e1914d1164e7ee2f69affde7aaae2b", "id": "6c8b5385-e74c-4fd5-add6-2fcf42d74a98", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/6c8b5385-e74c-4fd5-add6-2fcf42d74a98", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/6c8b5385-e74c-4fd5-add6-2fcf42d74a98", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "progress": 0, 
"status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T03:07:10Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/os-cells/0000775000567000056710000000000012701410205020452 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-cells/cells-list-empty-resp.json0000664000567000056710000000002312701407773025536 0ustar jenkinsjenkins00000000000000{ "cells": [] }nova-13.0.0/doc/api_samples/os-cells/cells-get-resp.json0000664000567000056710000000023512701407773024213 0ustar jenkinsjenkins00000000000000{ "cell": { "name": "cell3", "rpc_host": null, "rpc_port": null, "type": "child", "username": "username3" } }nova-13.0.0/doc/api_samples/os-cells/cells-capacities-resp.json0000664000567000056710000000116212701407773025541 0ustar jenkinsjenkins00000000000000{ "cell": { "capacities": { "disk_free": { "total_mb": 1052672, "units_by_mb": { "0": 0, "163840": 5, "20480": 46, "40960": 23, "81920": 11 } }, "ram_free": { "total_mb": 7680, "units_by_mb": { "16384": 0, "2048": 3, "4096": 1, "512": 13, "8192": 0 } } } } }nova-13.0.0/doc/api_samples/os-cells/cells-list-resp.json0000664000567000056710000000160412701407773024410 0ustar jenkinsjenkins00000000000000{ "cells": [ { "name": "cell1", "rpc_host": null, "rpc_port": null, "type": "child", "username": "username1" }, { "name": "cell3", "rpc_host": null, "rpc_port": null, "type": "child", "username": "username3" }, { "name": "cell5", "rpc_host": null, "rpc_port": null, "type": "child", "username": "username5" }, { "name": "cell2", "rpc_host": null, "rpc_port": null, "type": "parent", "username": "username2" }, { "name": "cell4", "rpc_host": null, "rpc_port": null, "type": "parent", "username": "username4" } ] }nova-13.0.0/doc/api_samples/os-multinic/0000775000567000056710000000000012701410205021174 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-multinic/multinic-add-fixed-ip-req.json0000664000567000056710000000006512701407773026752 0ustar 
jenkinsjenkins00000000000000{ "addFixedIp": { "networkId": 1 } } nova-13.0.0/doc/api_samples/os-multinic/multinic-remove-fixed-ip-req.json0000664000567000056710000000010212701407773027507 0ustar jenkinsjenkins00000000000000{ "removeFixedIp":{ "address": "10.0.0.4" } } nova-13.0.0/doc/api_samples/os-disk-config/0000775000567000056710000000000012701410205021545 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-disk-config/server-post-req.json0000664000567000056710000000034112701407773025534 0ustar jenkinsjenkins00000000000000{ "server" : { "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" } } } nova-13.0.0/doc/api_samples/os-disk-config/server-action-rebuild-resp.json0000664000567000056710000000343612701407773027642 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "", "accessIPv6": "", "addresses": { "private": [ { "addr": "192.168.0.3", "version": 4 } ] }, "adminPass": "NBjMaJoFL4EF", "created": "2012-12-02T02:11:56Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "c076393ad900d62c4805a42df10d9b364f629842681c00cce035487f", "id": "63a8aa13-60fe-41c4-b079-77f6fdf3c841", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/63a8aa13-60fe-41c4-b079-77f6fdf3c841", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/63a8aa13-60fe-41c4-b079-77f6fdf3c841", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": 
"6f70656e737461636b20342065766572", "updated": "2012-12-02T02:11:56Z", "user_id": "fake" } }nova-13.0.0/doc/api_samples/os-disk-config/list-servers-detail-get.json0000664000567000056710000000416412701407773027144 0ustar jenkinsjenkins00000000000000{ "servers": [ { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "", "accessIPv6": "", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2012-12-02T02:11:55Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "99428f32351a5d89d0f7727c6eec68c1777c545a0972aaac645508dc", "id": "05372e62-05b9-4ee2-9343-9a1fdf2a5fda", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/05372e62-05b9-4ee2-9343-9a1fdf2a5fda", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/05372e62-05b9-4ee2-9343-9a1fdf2a5fda", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2012-12-02T02:11:56Z", "key_name": null, "user_id": "fake" } ] }nova-13.0.0/doc/api_samples/os-disk-config/server-post-resp.json0000664000567000056710000000107612701407773025724 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "CQH9gWzgkVno", "id": "324dfb7d-f4a9-419a-9a19-237df04b443b", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", "rel": "bookmark" } ] } }nova-13.0.0/doc/api_samples/os-disk-config/server-resize-post-req.json0000664000567000056710000000012712701407773027035 0ustar jenkinsjenkins00000000000000{ "resize": { "flavorRef": "3", "OS-DCF:diskConfig": "AUTO" } }nova-13.0.0/doc/api_samples/os-disk-config/server-get-resp.json0000664000567000056710000000360712701407773025520 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "", "accessIPv6": "", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2012-12-02T02:11:55Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "c949ab4256cea23b6089b710aa2df48bf6577ed915278b62e33ad8bb", "id": "5046e2f2-3b33-4041-b3cf-e085f73e78e7", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/5046e2f2-3b33-4041-b3cf-e085f73e78e7", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/5046e2f2-3b33-4041-b3cf-e085f73e78e7", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2012-12-02T02:11:55Z", "key_name": null, "user_id": "fake" } }nova-13.0.0/doc/api_samples/os-disk-config/server-action-rebuild-req.json0000664000567000056710000000017412701407773027454 0ustar jenkinsjenkins00000000000000{ "rebuild": { "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", 
"OS-DCF:diskConfig": "AUTO" } } nova-13.0.0/doc/api_samples/os-services/0000775000567000056710000000000012701410205021173 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-services/service-disable-log-put-req.json0000664000567000056710000000012512701407773027317 0ustar jenkinsjenkins00000000000000{ "host": "host1", "binary": "nova-compute", "disabled_reason": "test2" }nova-13.0.0/doc/api_samples/os-services/services-list-get-resp.json0000664000567000056710000000227312701407773026432 0ustar jenkinsjenkins00000000000000{ "services": [ { "id": 1, "binary": "nova-scheduler", "disabled_reason": "test1", "host": "host1", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:02.000000", "zone": "internal" }, { "id": 2, "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:05.000000", "zone": "nova" }, { "id": 3, "binary": "nova-scheduler", "disabled_reason": null, "host": "host2", "state": "down", "status": "enabled", "updated_at": "2012-09-19T06:55:34.000000", "zone": "internal" }, { "id": 4, "binary": "nova-compute", "disabled_reason": "test4", "host": "host2", "state": "down", "status": "disabled", "updated_at": "2012-09-18T08:03:38.000000", "zone": "nova" } ] } nova-13.0.0/doc/api_samples/os-services/service-enable-put-resp.json0000664000567000056710000000016112701407773026545 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "status": "enabled" } }nova-13.0.0/doc/api_samples/os-services/service-disable-put-resp.json0000664000567000056710000000016212701407773026723 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "status": "disabled" } }nova-13.0.0/doc/api_samples/os-services/service-disable-log-put-resp.json0000664000567000056710000000022612701407773027503 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "disabled_reason": "test2", "host": 
"host1", "status": "disabled" } }nova-13.0.0/doc/api_samples/os-services/service-disable-put-req.json0000664000567000056710000000006512701407773026543 0ustar jenkinsjenkins00000000000000{ "host": "host1", "binary": "nova-compute" }nova-13.0.0/doc/api_samples/os-services/service-enable-put-req.json0000664000567000056710000000006512701407773026366 0ustar jenkinsjenkins00000000000000{ "host": "host1", "binary": "nova-compute" }nova-13.0.0/doc/api_samples/os-services/v2.11/0000775000567000056710000000000012701410205021742 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-services/v2.11/service-force-down-put-req.json0000664000567000056710000000011712701407773027750 0ustar jenkinsjenkins00000000000000{ "host": "host1", "binary": "nova-compute", "forced_down": true } nova-13.0.0/doc/api_samples/os-services/v2.11/service-disable-log-put-req.json0000664000567000056710000000012512701407773030066 0ustar jenkinsjenkins00000000000000{ "host": "host1", "binary": "nova-compute", "disabled_reason": "test2" }nova-13.0.0/doc/api_samples/os-services/v2.11/services-list-get-resp.json0000664000567000056710000000250312701407773027175 0ustar jenkinsjenkins00000000000000{ "services": [ { "id": 1, "binary": "nova-scheduler", "disabled_reason": "test1", "host": "host1", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:02.000000", "forced_down": false, "zone": "internal" }, { "id": 2, "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:05.000000", "forced_down": false, "zone": "nova" }, { "id": 3, "binary": "nova-scheduler", "disabled_reason": null, "host": "host2", "state": "down", "status": "enabled", "updated_at": "2012-09-19T06:55:34.000000", "forced_down": false, "zone": "internal" }, { "id": 4, "binary": "nova-compute", "disabled_reason": "test4", "host": "host2", "state": "down", "status": "disabled", "updated_at": "2012-09-18T08:03:38.000000", "forced_down": 
false, "zone": "nova" } ] } nova-13.0.0/doc/api_samples/os-services/v2.11/service-enable-put-resp.json0000664000567000056710000000016112701407773027314 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "status": "enabled" } }nova-13.0.0/doc/api_samples/os-services/v2.11/service-force-down-put-resp.json0000664000567000056710000000016212701407773030132 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "forced_down": true } } nova-13.0.0/doc/api_samples/os-services/v2.11/service-disable-put-resp.json0000664000567000056710000000016212701407773027472 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "status": "disabled" } }nova-13.0.0/doc/api_samples/os-services/v2.11/service-disable-log-put-resp.json0000664000567000056710000000022612701407773030252 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "status": "disabled" } }nova-13.0.0/doc/api_samples/os-services/v2.11/service-disable-put-req.json0000664000567000056710000000006512701407773027312 0ustar jenkinsjenkins00000000000000{ "host": "host1", "binary": "nova-compute" }nova-13.0.0/doc/api_samples/os-services/v2.11/service-enable-put-req.json0000664000567000056710000000006512701407773027135 0ustar jenkinsjenkins00000000000000{ "host": "host1", "binary": "nova-compute" }nova-13.0.0/doc/api_samples/all_extensions/0000775000567000056710000000000012701410205021760 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/all_extensions/flavors-detail-resp.json0000664000567000056710000000716612701407773026570 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "rxtx_factor": 1.0, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "ram": 2048, "rxtx_factor": 1.0, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 40, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "ram": 4096, "rxtx_factor": 1.0, "swap": "", "vcpus": 2 }, { "OS-FLV-DISABLED:disabled": false, "disk": 80, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "ram": 8192, "rxtx_factor": 1.0, "swap": "", "vcpus": 4 }, { "OS-FLV-DISABLED:disabled": false, "disk": 160, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "ram": 16384, "rxtx_factor": 1.0, "swap": "", "vcpus": 8 } ] } 
nova-13.0.0/doc/api_samples/all_extensions/flavors-list-resp.json0000664000567000056710000000447112701407773026275 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny" }, { "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small" }, { "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium" }, { "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large" }, { "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge" } ] }nova-13.0.0/doc/api_samples/all_extensions/server-action-create-image.json0000664000567000056710000000020012701407773027765 0ustar jenkinsjenkins00000000000000{ "createImage" : { "name" : "foo-image", "metadata": { "meta_var": "meta_val" } } }nova-13.0.0/doc/api_samples/all_extensions/server-action-reboot.json0000664000567000056710000000006312701407773026743 0ustar jenkinsjenkins00000000000000{ "reboot" : { "type" : "SOFT" } 
}nova-13.0.0/doc/api_samples/all_extensions/server-action-rebuild.json0000664000567000056710000000152212701407773027100 0ustar jenkinsjenkins00000000000000{ "rebuild" : { "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "metadata" : { "meta var" : "meta val" }, "personality" : [ { "path" : "/etc/banner.txt", "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ] } } nova-13.0.0/doc/api_samples/all_extensions/servers-list-resp.json0000664000567000056710000000113412701407773026303 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "a291599e-6de2-41a6-88df-c443ddcef70d", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a291599e-6de2-41a6-88df-c443ddcef70d", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a291599e-6de2-41a6-88df-c443ddcef70d", "rel": "bookmark" } ], "name": "new-server-test" } ] }nova-13.0.0/doc/api_samples/all_extensions/server-action-rebuild-resp.json0000664000567000056710000000343012701407773030047 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2012-09-25T13:36:08Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "935dcd1019fd43814a1d2a6e9b320dcac352d3a02c69f8be7ba41002", "id": 
"27568e59-cfb7-4283-a00e-4af933f2d539", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/27568e59-cfb7-4283-a00e-4af933f2d539", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/27568e59-cfb7-4283-a00e-4af933f2d539", "rel": "bookmark" } ], "metadata": { "meta var": "meta val" }, "name": "foobar", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2012-09-25T13:36:09Z", "user_id": "fake" } }nova-13.0.0/doc/api_samples/all_extensions/extensions-list-resp-v21-compatible.json0000664000567000056710000007374612701407773031556 0ustar jenkinsjenkins00000000000000{ "extensions": [ { "alias": "NMN", "description": "Multiple network support.", "links": [], "name": "Multinic", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-DCF", "description": "Disk Management Extension.", "links": [], "name": "DiskConfig", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-AZ", "description": "Extended Availability Zone support.", "links": [], "name": "ExtendedAvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IMG-SIZE", "description": "Adds image size to image listings.", "links": [], "name": "ImageSize", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS", "description": "", "links": [], "name": "ExtendedIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS-MAC", "description": "", 
"links": [], "name": "ExtendedIpsMac", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-SRV-ATTR", "description": "Extended Server Attributes support.", "links": [], "name": "ExtendedServerAttributes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-STS", "description": "", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-VIF-NET", "description": "", "links": [], "name": "ExtendedVIFNet", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-DISABLED", "description": "", "links": [], "name": "FlavorDisabled", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-EXT-DATA", "description": "", "links": [], "name": "FlavorExtraData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SCH-HNT", "description": "Pass arbitrary key/value pairs to the scheduler.", "links": [], "name": "SchedulerHints", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SRV-USG", "description": "Adds launched_at and terminated_at on Servers.", "links": [], "name": "ServerUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-access-ips", "description": "Access IPs support.", "links": [], "name": "AccessIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-actions", "description": "Enable admin-only server actions\n\n Actions include: resetNetwork, injectNetworkInfo, os-resetState\n ", "links": [], "name": "AdminActions", "namespace": 
"http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-password", "description": "Admin password management support.", "links": [], "name": "AdminPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-agents", "description": "Agents support.", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-aggregates", "description": "Admin-only aggregate administration.", "links": [], "name": "Aggregates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-assisted-volume-snapshots", "description": "Assisted volume snapshots.", "links": [], "name": "AssistedVolumeSnapshots", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-attach-interfaces", "description": "Attach interface support.", "links": [], "name": "AttachInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-availability-zone", "description": "1. Add availability_zone to the Create Server API.\n 2. 
Add availability zones describing.\n ", "links": [], "name": "AvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-ext-status", "description": "", "links": [], "name": "BareMetalExtStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-nodes", "description": "Admin-only bare-metal node administration.", "links": [], "name": "BareMetalNodes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping", "description": "Block device mapping boot support.", "links": [], "name": "BlockDeviceMapping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping-v2-boot", "description": "", "links": [], "name": "BlockDeviceMappingV2Boot", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cell-capacities", "description": "", "links": [], "name": "CellCapacities", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cells", "description": "Enables cells-related functionality such as adding neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n ", "links": [], "name": "Cells", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-certificates", "description": "Certificates support.", "links": [], "name": "Certificates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe", "description": "Adds actions to create cloudpipe instances.\n\n When running with the Vlan network mode, you need a mechanism to route\n from the public Internet to your vlans. 
This mechanism is known as a\n cloudpipe.\n\n At the time of creating this class, only OpenVPN is supported. Support for\n a SSH Bastion host is forthcoming.\n ", "links": [], "name": "Cloudpipe", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe-update", "description": "", "links": [], "name": "CloudpipeUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-config-drive", "description": "Config Drive Extension.", "links": [], "name": "ConfigDrive", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-auth-tokens", "description": "Console token authentication support.", "links": [], "name": "ConsoleAuthTokens", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-output", "description": "Console log output support, with tailing ability.", "links": [], "name": "ConsoleOutput", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-consoles", "description": "Interactive Console support.", "links": [], "name": "Consoles", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-backup", "description": "Create a backup of a server.", "links": [], "name": "CreateBackup", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-server-ext", "description": "", "links": [], "name": "Createserverext", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-deferred-delete", "description": "Instance deferred delete.", "links": [], "name": "DeferredDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" 
}, { "alias": "os-evacuate", "description": "Enables server evacuation.", "links": [], "name": "Evacuate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-evacuate-find-host", "description": "", "links": [], "name": "ExtendedEvacuateFindHost", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-floating-ips", "description": "", "links": [], "name": "ExtendedFloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-hypervisors", "description": "", "links": [], "name": "ExtendedHypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-networks", "description": "", "links": [], "name": "ExtendedNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-quotas", "description": "", "links": [], "name": "ExtendedQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-rescue-with-image", "description": "", "links": [], "name": "ExtendedRescueWithImage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services", "description": "", "links": [], "name": "ExtendedServices", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services-delete", "description": "", "links": [], "name": "ExtendedServicesDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-status", "description": "Extended Status support.", "links": [], "name": "ExtendedStatus", "namespace": 
"http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-volumes", "description": "Extended Volumes support.", "links": [], "name": "ExtendedVolumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fixed-ips", "description": "Fixed IPs support.", "links": [], "name": "FixedIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-access", "description": "Flavor access support.", "links": [], "name": "FlavorAccess", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-extra-specs", "description": "Flavors extra specs support.", "links": [], "name": "FlavorExtraSpecs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-manage", "description": "Flavor create/delete API support.", "links": [], "name": "FlavorManage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-rxtx", "description": "Support to show the rxtx status of a flavor.", "links": [], "name": "FlavorRxtx", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-swap", "description": "", "links": [], "name": "FlavorSwap", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-dns", "description": "Floating IP DNS support.", "links": [], "name": "FloatingIpDns", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-pools", "description": "Floating IPs support.", "links": [], "name": "FloatingIpPools", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { 
"alias": "os-floating-ips", "description": "Floating IPs support.", "links": [], "name": "FloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips-bulk", "description": "Bulk handling of Floating IPs.", "links": [], "name": "FloatingIpsBulk", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fping", "description": "Fping Management Extension.", "links": [], "name": "Fping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hide-server-addresses", "description": "Support hiding server addresses in certain states.", "links": [], "name": "HideServerAddresses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hosts", "description": "Admin-only host administration.", "links": [], "name": "Hosts", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hypervisor-status", "description": "", "links": [], "name": "HypervisorStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hypervisors", "description": "Admin-only hypervisor administration.", "links": [], "name": "Hypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance-actions", "description": "View a log of actions and events taken on an instance.", "links": [], "name": "InstanceActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance_usage_audit_log", "description": "Admin-only Task Log Monitoring.", "links": [], "name": "OSInstanceUsageAuditLog", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": 
"os-keypairs", "description": "Keypair Support.", "links": [], "name": "Keypairs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-lock-server", "description": "Enable lock/unlock server actions.", "links": [], "name": "LockServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrate-server", "description": "Enable migrate and live-migrate server actions.", "links": [], "name": "MigrateServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrations", "description": "Provide data on migrations.", "links": [], "name": "Migrations", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-multiple-create", "description": "Allow multiple create in the Create Server v2.1 API.", "links": [], "name": "MultipleCreate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks", "description": "Admin-only Network Management Extension.", "links": [], "name": "Networks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks-associate", "description": "Network association support.", "links": [], "name": "NetworkAssociationSupport", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-pause-server", "description": "Enable pause/unpause server actions.", "links": [], "name": "PauseServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-personality", "description": "Personality support.", "links": [], "name": "Personality", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": 
"os-preserve-ephemeral-rebuild", "description": "Allow preservation of the ephemeral partition on rebuild.", "links": [], "name": "PreserveEphemeralOnRebuild", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-class-sets", "description": "Quota classes management support.", "links": [], "name": "QuotaClasses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-sets", "description": "Quotas management support.", "links": [], "name": "Quotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-rescue", "description": "Instance rescue mode.", "links": [], "name": "Rescue", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-group-default-rules", "description": "Default rules for security group support.", "links": [], "name": "SecurityGroupDefaultRules", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-groups", "description": "Security group support.", "links": [], "name": "SecurityGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-diagnostics", "description": "Allow Admins to view server diagnostics through server action.", "links": [], "name": "ServerDiagnostics", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-external-events", "description": "Server External Event Triggers.", "links": [], "name": "ServerExternalEvents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-group-quotas", "description": "", "links": [], "name": "ServerGroupQuotas", "namespace": 
"http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-groups", "description": "Server group support.", "links": [], "name": "ServerGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-list-multi-status", "description": "", "links": [], "name": "ServerListMultiStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-password", "description": "Server password support.", "links": [], "name": "ServerPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-sort-keys", "description": "", "links": [], "name": "ServerSortKeys", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-start-stop", "description": "", "links": [], "name": "ServerStartStop", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-services", "description": "Services support.", "links": [], "name": "Services", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-shelve", "description": "Instance shelve mode.", "links": [], "name": "Shelve", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-simple-tenant-usage", "description": "Simple tenant usage extension.", "links": [], "name": "SimpleTenantUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-suspend-server", "description": "Enable suspend/resume server actions.", "links": [], "name": "SuspendServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-tenant-networks", "description": 
"Tenant-based Network Management Extension.", "links": [], "name": "OSTenantNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-used-limits", "description": "Provide data on limited resources that are being used.", "links": [], "name": "UsedLimits", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-used-limits-for-admin", "description": "", "links": [], "name": "UsedLimitsForAdmin", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-data", "description": "Add user_data to the Create Server API.", "links": [], "name": "UserData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-quotas", "description": "", "links": [], "name": "UserQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-virtual-interfaces", "description": "Virtual interface support.", "links": [], "name": "VirtualInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volume-attachment-update", "description": "", "links": [], "name": "VolumeAttachmentUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volumes", "description": "Volumes support.", "links": [], "name": "Volumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" } ] } nova-13.0.0/doc/api_samples/all_extensions/flavor-get-resp.json0000664000567000056710000000122112701407773025704 0ustar jenkinsjenkins00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "OS-FLV-EXT-DATA:ephemeral": 0, "disk": 1, "id": "1", "links": [ { "href": 
"http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "os-flavor-access:is_public": true, "ram": 512, "rxtx_factor": 1.0, "swap": "", "vcpus": 1 } } nova-13.0.0/doc/api_samples/all_extensions/servers-details-resp.json0000664000567000056710000000546712701407773026772 0ustar jenkinsjenkins00000000000000{ "servers": [ { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-23T13:53:12Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "f1e160ad2bf07084f3d3e0dfdd0795d80da18a60825322c15775c0dd", "id": "9cbefc35-d372-40c5-88e2-9fda1b6ea12c", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/9cbefc35-d372-40c5-88e2-9fda1b6ea12c", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/9cbefc35-d372-40c5-88e2-9fda1b6ea12c", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "accessIPv4": "", "accessIPv6": "", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "OS-SRV-USG:launched_at": 
"2013-09-23T13:53:12.774549", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-10-31T06:32:32Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/all_extensions/server-get-resp.json0000664000567000056710000000501212701407773025723 0ustar jenkinsjenkins00000000000000{ "server": { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-23T13:37:00Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "9cc36101a27c2a69c1a18241f6228454d9d7f466bd90c62db8e8b856", "id": "f474386b-4fb6-4e1f-b1d5-d6bf4437f7d5", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f474386b-4fb6-4e1f-b1d5-d6bf4437f7d5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f474386b-4fb6-4e1f-b1d5-d6bf4437f7d5", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "accessIPv4": "", "accessIPv6": "", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "progress": 0, 
"security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-10-31T07:31:30Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/all_extensions/server-create-resp.json0000664000567000056710000000124512701407773026413 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "zPnp2GseTqG4", "id": "8195065c-fea4-4d57-b93f-5c5c63fe90e8", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/8195065c-fea4-4d57-b93f-5c5c63fe90e8", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8195065c-fea4-4d57-b93f-5c5c63fe90e8", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } } nova-13.0.0/doc/api_samples/all_extensions/extensions-list-resp-v2.json0000664000567000056710000007321712701407773027351 0ustar jenkinsjenkins00000000000000{ "extensions": [ { "alias": "NMN", "description": "Multiple network support.", "links": [], "name": "Multinic", "namespace": "http://docs.openstack.org/compute/ext/multinic/api/v1.1", "updated": "2011-06-09T00:00:00Z" }, { "alias": "OS-DCF", "description": "Disk Management Extension.", "links": [], "name": "DiskConfig", "namespace": "http://docs.openstack.org/compute/ext/disk_config/api/v1.1", "updated": "2011-09-27T00:00:00Z" }, { "alias": "OS-EXT-AZ", "description": "Extended Availability Zone support.", "links": [], "name": "ExtendedAvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2", "updated": "2013-01-30T00:00:00Z" }, { "alias": "OS-EXT-IMG-SIZE", "description": "Adds image size to image listings.", "links": [], "name": "ImageSize", "namespace": "http://docs.openstack.org/compute/ext/image_size/api/v1.1", "updated": "2013-02-19T00:00:00Z" }, { "alias": "OS-EXT-IPS", "description": "Adds type parameter to the ip list.", "links": [], "name": "ExtendedIps", "namespace": 
"http://docs.openstack.org/compute/ext/extended_ips/api/v1.1", "updated": "2013-01-06T00:00:00Z" }, { "alias": "OS-EXT-IPS-MAC", "description": "Adds mac address parameter to the ip list.", "links": [], "name": "ExtendedIpsMac", "namespace": "http://docs.openstack.org/compute/ext/extended_ips_mac/api/v1.1", "updated": "2013-03-07T00:00:00Z" }, { "alias": "OS-EXT-SRV-ATTR", "description": "Extended Server Attributes support.", "links": [], "name": "ExtendedServerAttributes", "namespace": "http://docs.openstack.org/compute/ext/extended_status/api/v1.1", "updated": "2011-11-03T00:00:00Z" }, { "alias": "OS-EXT-STS", "description": "Extended Status support.", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/extended_status/api/v1.1", "updated": "2011-11-03T00:00:00Z" }, { "alias": "OS-EXT-VIF-NET", "description": "Adds network id parameter to the virtual interface list.", "links": [], "name": "ExtendedVIFNet", "namespace": "http://docs.openstack.org/compute/ext/extended-virtual-interfaces-net/api/v1.1", "updated": "2013-03-07T00:00:00Z" }, { "alias": "OS-FLV-DISABLED", "description": "Support to show the disabled status of a flavor.", "links": [], "name": "FlavorDisabled", "namespace": "http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1", "updated": "2012-08-29T00:00:00Z" }, { "alias": "OS-FLV-EXT-DATA", "description": "Provide additional data for flavors.", "links": [], "name": "FlavorExtraData", "namespace": "http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1", "updated": "2011-09-14T00:00:00Z" }, { "alias": "OS-SCH-HNT", "description": "Pass arbitrary key/value pairs to the scheduler.", "links": [], "name": "SchedulerHints", "namespace": "http://docs.openstack.org/compute/ext/scheduler-hints/api/v2", "updated": "2011-07-19T00:00:00Z" }, { "alias": "OS-SRV-USG", "description": "Adds launched_at and terminated_at on Servers.", "links": [], "name": "ServerUsage", "namespace": 
"http://docs.openstack.org/compute/ext/server_usage/api/v1.1", "updated": "2013-04-29T00:00:00Z" }, { "alias": "os-admin-actions", "description": "Enable admin-only server actions\n\n Actions include: pause, unpause, suspend, resume, migrate,\n resetNetwork, injectNetworkInfo, lock, unlock, createBackup\n ", "links": [], "name": "AdminActions", "namespace": "http://docs.openstack.org/compute/ext/admin-actions/api/v1.1", "updated": "2011-09-20T00:00:00Z" }, { "alias": "os-agents", "description": "Agents support.", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/agents/api/v2", "updated": "2012-10-28T00:00:00Z" }, { "alias": "os-aggregates", "description": "Admin-only aggregate administration.", "links": [], "name": "Aggregates", "namespace": "http://docs.openstack.org/compute/ext/aggregates/api/v1.1", "updated": "2012-01-12T00:00:00Z" }, { "alias": "os-assisted-volume-snapshots", "description": "Assisted volume snapshots.", "links": [], "name": "AssistedVolumeSnapshots", "namespace": "http://docs.openstack.org/compute/ext/assisted-volume-snapshots/api/v2", "updated": "2013-08-29T00:00:00Z" }, { "alias": "os-attach-interfaces", "description": "Attach interface support.", "links": [], "name": "AttachInterfaces", "namespace": "http://docs.openstack.org/compute/ext/interfaces/api/v1.1", "updated": "2012-07-22T00:00:00Z" }, { "alias": "os-availability-zone", "description": "1. Add availability_zone to the Create Server v1.1 API.\n 2. 
Add availability zones describing.\n ", "links": [], "name": "AvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1", "updated": "2012-12-21T00:00:00Z" }, { "alias": "os-baremetal-ext-status", "description": "Add extended status in Baremetal Nodes v2 API.", "links": [], "name": "BareMetalExtStatus", "namespace": "http://docs.openstack.org/compute/ext/baremetal_ext_status/api/v2", "updated": "2013-08-27T00:00:00Z" }, { "alias": "os-baremetal-nodes", "description": "Admin-only bare-metal node administration.", "links": [], "name": "BareMetalNodes", "namespace": "http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2", "updated": "2013-01-04T00:00:00Z" }, { "alias": "os-block-device-mapping-v2-boot", "description": "Allow boot with the new BDM data format.", "links": [], "name": "BlockDeviceMappingV2Boot", "namespace": "http://docs.openstack.org/compute/ext/block_device_mapping_v2_boot/api/v2", "updated": "2013-07-08T00:00:00Z" }, { "alias": "os-cell-capacities", "description": "Adding functionality to get cell capacities.", "links": [], "name": "CellCapacities", "namespace": "http://docs.openstack.org/compute/ext/cell_capacities/api/v1.1", "updated": "2013-05-27T00:00:00Z" }, { "alias": "os-cells", "description": "Enables cells-related functionality such as adding neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n ", "links": [], "name": "Cells", "namespace": "http://docs.openstack.org/compute/ext/cells/api/v1.1", "updated": "2013-05-14T00:00:00Z" }, { "alias": "os-certificates", "description": "Certificates support.", "links": [], "name": "Certificates", "namespace": "http://docs.openstack.org/compute/ext/certificates/api/v1.1", "updated": "2012-01-19T00:00:00Z" }, { "alias": "os-cloudpipe", "description": "Adds actions to create cloudpipe instances.\n\n When running with the Vlan network mode, you need a mechanism to route\n from the public Internet to your vlans. 
This mechanism is known as a\n cloudpipe.\n\n At the time of creating this class, only OpenVPN is supported. Support for\n a SSH Bastion host is forthcoming.\n ", "links": [], "name": "Cloudpipe", "namespace": "http://docs.openstack.org/compute/ext/cloudpipe/api/v1.1", "updated": "2011-12-16T00:00:00Z" }, { "alias": "os-cloudpipe-update", "description": "Adds the ability to set the vpn ip/port for cloudpipe instances.", "links": [], "name": "CloudpipeUpdate", "namespace": "http://docs.openstack.org/compute/ext/cloudpipe-update/api/v2", "updated": "2012-11-14T00:00:00Z" }, { "alias": "os-config-drive", "description": "Config Drive Extension.", "links": [], "name": "ConfigDrive", "namespace": "http://docs.openstack.org/compute/ext/config_drive/api/v1.1", "updated": "2012-07-16T00:00:00Z" }, { "alias": "os-console-auth-tokens", "description": "Console token authentication support.", "links": [], "name": "ConsoleAuthTokens", "namespace": "http://docs.openstack.org/compute/ext/consoles-auth-tokens/api/v2", "updated": "2013-08-13T00:00:00Z" }, { "alias": "os-console-output", "description": "Console log output support, with tailing ability.", "links": [], "name": "ConsoleOutput", "namespace": "http://docs.openstack.org/compute/ext/os-console-output/api/v2", "updated": "2011-12-08T00:00:00Z" }, { "alias": "os-consoles", "description": "Interactive Console support.", "links": [], "name": "Consoles", "namespace": "http://docs.openstack.org/compute/ext/os-consoles/api/v2", "updated": "2011-12-23T00:00:00Z" }, { "alias": "os-create-server-ext", "description": "Extended support to the Create Server v1.1 API.", "links": [], "name": "Createserverext", "namespace": "http://docs.openstack.org/compute/ext/createserverext/api/v1.1", "updated": "2011-07-19T00:00:00Z" }, { "alias": "os-deferred-delete", "description": "Instance deferred delete.", "links": [], "name": "DeferredDelete", "namespace": "http://docs.openstack.org/compute/ext/deferred-delete/api/v1.1", "updated": 
"2011-09-01T00:00:00Z" }, { "alias": "os-evacuate", "description": "Enables server evacuation.", "links": [], "name": "Evacuate", "namespace": "http://docs.openstack.org/compute/ext/evacuate/api/v2", "updated": "2013-01-06T00:00:00Z" }, { "alias": "os-extended-evacuate-find-host", "description": "Enables server evacuation without target host. Scheduler will select\n one to target.\n ", "links": [], "name": "ExtendedEvacuateFindHost", "namespace": "http://docs.openstack.org/compute/ext/extended_evacuate_find_host/api/v2", "updated": "2014-02-12T00:00:00Z" }, { "alias": "os-extended-floating-ips", "description": "Adds optional fixed_address to the add floating IP command.", "links": [], "name": "ExtendedFloatingIps", "namespace": "http://docs.openstack.org/compute/ext/extended_floating_ips/api/v2", "updated": "2013-04-19T00:00:00Z" }, { "alias": "os-extended-hypervisors", "description": "Extended hypervisors support.", "links": [], "name": "ExtendedHypervisors", "namespace": "http://docs.openstack.org/compute/ext/extended_hypervisors/api/v1.1", "updated": "2014-01-04T00:00:00Z" }, { "alias": "os-extended-networks", "description": "Adds additional fields to networks", "links": [], "name": "ExtendedNetworks", "namespace": "http://docs.openstack.org/compute/ext/extended_networks/api/v2", "updated": "2014-05-09T00:00:00Z" }, { "alias": "os-extended-quotas", "description": "Adds ability for admins to delete quota\n and optionally force the update Quota command.\n ", "links": [], "name": "ExtendedQuotas", "namespace": "http://docs.openstack.org/compute/ext/extended_quotas/api/v1.1", "updated": "2013-06-09T00:00:00Z" }, { "alias": "os-extended-rescue-with-image", "description": "Allow the user to specify the image to use for rescue.", "links": [], "name": "ExtendedRescueWithImage", "namespace": "http://docs.openstack.org/compute/ext/extended_rescue_with_image/api/v2", "updated": "2014-01-04T00:00:00Z" }, { "alias": "os-hypervisor-status", "description": "Show hypervisor 
status.", "links": [], "name": "HypervisorStatus", "namespace": "http://docs.openstack.org/compute/ext/hypervisor_status/api/v1.1", "updated": "2014-04-17T00:00:00Z" }, { "alias": "os-extended-services", "description": "Extended services support.", "links": [], "name": "ExtendedServices", "namespace": "http://docs.openstack.org/compute/ext/extended_services/api/v2", "updated": "2013-05-17T00:00:00Z" }, { "alias": "os-extended-services-delete", "description": "Extended services deletion support.", "links": [], "name": "ExtendedServicesDelete", "namespace": "http://docs.openstack.org/compute/ext/extended_services_delete/api/v2", "updated": "2013-12-10T00:00:00Z" }, { "alias": "os-extended-volumes", "description": "Extended Volumes support.", "links": [], "name": "ExtendedVolumes", "namespace": "http://docs.openstack.org/compute/ext/extended_volumes/api/v1.1", "updated": "2013-06-07T00:00:00Z" }, { "alias": "os-fixed-ips", "description": "Fixed IPs support.", "links": [], "name": "FixedIPs", "namespace": "http://docs.openstack.org/compute/ext/fixed_ips/api/v2", "updated": "2012-10-18T19:25:27Z" }, { "alias": "os-flavor-access", "description": "Flavor access support.", "links": [], "name": "FlavorAccess", "namespace": "http://docs.openstack.org/compute/ext/flavor_access/api/v2", "updated": "2012-08-01T00:00:00Z" }, { "alias": "os-flavor-extra-specs", "description": "Instance type (flavor) extra specs.", "links": [], "name": "FlavorExtraSpecs", "namespace": "http://docs.openstack.org/compute/ext/flavor_extra_specs/api/v1.1", "updated": "2011-06-23T00:00:00Z" }, { "alias": "os-flavor-manage", "description": "Flavor create/delete API support.", "links": [], "name": "FlavorManage", "namespace": "http://docs.openstack.org/compute/ext/flavor_manage/api/v1.1", "updated": "2012-01-19T00:00:00Z" }, { "alias": "os-flavor-rxtx", "description": "Support to show the rxtx status of a flavor.", "links": [], "name": "FlavorRxtx", "namespace": 
"http://docs.openstack.org/compute/ext/flavor_rxtx/api/v1.1", "updated": "2012-08-29T00:00:00Z" }, { "alias": "os-flavor-swap", "description": "Support to show the swap status of a flavor.", "links": [], "name": "FlavorSwap", "namespace": "http://docs.openstack.org/compute/ext/flavor_swap/api/v1.1", "updated": "2012-08-29T00:00:00Z" }, { "alias": "os-floating-ip-dns", "description": "Floating IP DNS support.", "links": [], "name": "FloatingIpDns", "namespace": "http://docs.openstack.org/ext/floating_ip_dns/api/v1.1", "updated": "2011-12-23T00:00:00Z" }, { "alias": "os-floating-ip-pools", "description": "Floating IPs support.", "links": [], "name": "FloatingIpPools", "namespace": "http://docs.openstack.org/compute/ext/floating_ip_pools/api/v1.1", "updated": "2012-01-04T00:00:00Z" }, { "alias": "os-floating-ips", "description": "Floating IPs support.", "links": [], "name": "FloatingIps", "namespace": "http://docs.openstack.org/compute/ext/floating_ips/api/v1.1", "updated": "2011-06-16T00:00:00Z" }, { "alias": "os-floating-ips-bulk", "description": "Bulk handling of Floating IPs.", "links": [], "name": "FloatingIpsBulk", "namespace": "http://docs.openstack.org/compute/ext/floating_ips_bulk/api/v2", "updated": "2012-10-29T19:25:27Z" }, { "alias": "os-fping", "description": "Fping Management Extension.", "links": [], "name": "Fping", "namespace": "http://docs.openstack.org/compute/ext/fping/api/v1.1", "updated": "2012-07-06T00:00:00Z" }, { "alias": "os-hide-server-addresses", "description": "Support hiding server addresses in certain states.", "links": [], "name": "HideServerAddresses", "namespace": "http://docs.openstack.org/compute/ext/hide_server_addresses/api/v1.1", "updated": "2012-12-11T00:00:00Z" }, { "alias": "os-hosts", "description": "Admin-only host administration.", "links": [], "name": "Hosts", "namespace": "http://docs.openstack.org/compute/ext/hosts/api/v1.1", "updated": "2011-06-29T00:00:00Z" }, { "alias": "os-hypervisors", "description": "Admin-only 
hypervisor administration.", "links": [], "name": "Hypervisors", "namespace": "http://docs.openstack.org/compute/ext/hypervisors/api/v1.1", "updated": "2012-06-21T00:00:00Z" }, { "alias": "os-instance-actions", "description": "View a log of actions and events taken on an instance.", "links": [], "name": "InstanceActions", "namespace": "http://docs.openstack.org/compute/ext/instance-actions/api/v1.1", "updated": "2013-02-08T00:00:00Z" }, { "alias": "os-instance_usage_audit_log", "description": "Admin-only Task Log Monitoring.", "links": [], "name": "OSInstanceUsageAuditLog", "namespace": "http://docs.openstack.org/ext/services/api/v1.1", "updated": "2012-07-06T01:00:00Z" }, { "alias": "os-keypairs", "description": "Keypair Support.", "links": [], "name": "Keypairs", "namespace": "http://docs.openstack.org/compute/ext/keypairs/api/v1.1", "updated": "2011-08-08T00:00:00Z" }, { "alias": "os-migrations", "description": "Provide data on migrations.", "links": [], "name": "Migrations", "namespace": "http://docs.openstack.org/compute/ext/migrations/api/v2.0", "updated": "2013-05-30T00:00:00Z" }, { "alias": "os-multiple-create", "description": "Allow multiple create in the Create Server v1.1 API.", "links": [], "name": "MultipleCreate", "namespace": "http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1", "updated": "2012-08-07T00:00:00Z" }, { "alias": "os-networks", "description": "Admin-only Network Management Extension.", "links": [], "name": "Networks", "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1", "updated": "2011-12-23T00:00:00Z" }, { "alias": "os-networks-associate", "description": "Network association support.", "links": [], "name": "NetworkAssociationSupport", "namespace": "http://docs.openstack.org/compute/ext/networks_associate/api/v2", "updated": "2012-11-19T00:00:00Z" }, { "alias": "os-preserve-ephemeral-rebuild", "description": "Allow preservation of the ephemeral partition on rebuild.", "links": [], "name": 
"PreserveEphemeralOnRebuild", "namespace": "http://docs.openstack.org/compute/ext/preserve_ephemeral_rebuild/api/v2", "updated": "2013-12-17T00:00:00Z" }, { "alias": "os-quota-class-sets", "description": "Quota classes management support.", "links": [], "name": "QuotaClasses", "namespace": "http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1", "updated": "2012-03-12T00:00:00Z" }, { "alias": "os-quota-sets", "description": "Quotas management support.", "links": [], "name": "Quotas", "namespace": "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1", "updated": "2011-08-08T00:00:00Z" }, { "alias": "os-rescue", "description": "Instance rescue mode.", "links": [], "name": "Rescue", "namespace": "http://docs.openstack.org/compute/ext/rescue/api/v1.1", "updated": "2011-08-18T00:00:00Z" }, { "alias": "os-security-group-default-rules", "description": "Default rules for security group support.", "links": [], "name": "SecurityGroupDefaultRules", "namespace": "http://docs.openstack.org/compute/ext/securitygroupdefaultrules/api/v1.1", "updated": "2013-02-05T00:00:00Z" }, { "alias": "os-security-groups", "description": "Security group support.", "links": [], "name": "SecurityGroups", "namespace": "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1", "updated": "2013-05-28T00:00:00Z" }, { "alias": "os-server-diagnostics", "description": "Allow Admins to view server diagnostics through server action.", "links": [], "name": "ServerDiagnostics", "namespace": "http://docs.openstack.org/compute/ext/server-diagnostics/api/v1.1", "updated": "2011-12-21T00:00:00Z" }, { "alias": "os-server-external-events", "description": "Server External Event Triggers.", "links": [], "name": "ServerExternalEvents", "namespace": "http://docs.openstack.org/compute/ext/server-external-events/api/v2", "updated": "2014-02-18T00:00:00Z" }, { "alias": "os-server-groups", "description": "Server group support.", "links": [], "name": "ServerGroups", "namespace": 
"http://docs.openstack.org/compute/ext/servergroups/api/v2", "updated": "2013-06-20T00:00:00Z" }, { "alias": "os-server-group-quotas", "description": "Adds quota support to server groups.", "links": [], "name": "ServerGroupQuotas", "namespace": "http://docs.openstack.org/compute/ext/server-group-quotas/api/v2", "updated": "2014-07-25T00:00:00Z" }, { "alias": "os-server-list-multi-status", "description": "Allow to filter the servers by a set of status values.", "links": [], "name": "ServerListMultiStatus", "namespace": "http://docs.openstack.org/compute/ext/os-server-list-multi-status/api/v2", "updated": "2014-05-11T00:00:00Z" }, { "alias": "os-server-password", "description": "Server password support.", "links": [], "name": "ServerPassword", "namespace": "http://docs.openstack.org/compute/ext/server-password/api/v2", "updated": "2012-11-29T00:00:00Z" }, { "alias": "os-server-start-stop", "description": "Start/Stop instance compute API support.", "links": [], "name": "ServerStartStop", "namespace": "http://docs.openstack.org/compute/ext/servers/api/v1.1", "updated": "2012-01-23T00:00:00Z" }, { "alias": "os-services", "description": "Services support.", "links": [], "name": "Services", "namespace": "http://docs.openstack.org/compute/ext/services/api/v2", "updated": "2012-10-28T00:00:00Z" }, { "alias": "os-shelve", "description": "Instance shelve mode.", "links": [], "name": "Shelve", "namespace": "http://docs.openstack.org/compute/ext/shelve/api/v1.1", "updated": "2013-04-06T00:00:00Z" }, { "alias": "os-simple-tenant-usage", "description": "Simple tenant usage extension.", "links": [], "name": "SimpleTenantUsage", "namespace": "http://docs.openstack.org/compute/ext/os-simple-tenant-usage/api/v1.1", "updated": "2011-08-19T00:00:00Z" }, { "alias": "os-tenant-networks", "description": "Tenant-based Network Management Extension.", "links": [], "name": "OSTenantNetworks", "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2", "updated": 
"2012-03-07T14:46:43Z" }, { "alias": "os-used-limits", "description": "Provide data on limited resources that are being used.", "links": [], "name": "UsedLimits", "namespace": "http://docs.openstack.org/compute/ext/used_limits/api/v1.1", "updated": "2012-07-13T00:00:00Z" }, { "alias": "os-used-limits-for-admin", "description": "Provide data to admin on limited resources used by other tenants.", "links": [], "name": "UsedLimitsForAdmin", "namespace": "http://docs.openstack.org/compute/ext/used_limits_for_admin/api/v1.1", "updated": "2013-05-02T00:00:00Z" }, { "alias": "os-user-data", "description": "Add user_data to the Create Server v1.1 API.", "links": [], "name": "UserData", "namespace": "http://docs.openstack.org/compute/ext/userdata/api/v1.1", "updated": "2012-08-07T00:00:00Z" }, { "alias": "os-user-quotas", "description": "Project user quota support.", "links": [], "name": "UserQuotas", "namespace": "http://docs.openstack.org/compute/ext/user_quotas/api/v1.1", "updated": "2013-07-18T00:00:00Z" }, { "alias": "os-virtual-interfaces", "description": "Virtual interface support.", "links": [], "name": "VirtualInterfaces", "namespace": "http://docs.openstack.org/compute/ext/virtual_interfaces/api/v1.1", "updated": "2011-08-17T00:00:00Z" }, { "alias": "os-volume-attachment-update", "description": "Support for updating a volume attachment.", "links": [], "name": "VolumeAttachmentUpdate", "namespace": "http://docs.openstack.org/compute/ext/os-volume-attachment-update/api/v2", "updated": "2013-06-20T00:00:00Z" }, { "alias": "os-volumes", "description": "Volumes support.", "links": [], "name": "Volumes", "namespace": "http://docs.openstack.org/compute/ext/volumes/api/v1.1", "updated": "2011-03-25T00:00:00Z" }, { "alias": "os-server-sort-keys", "description": "Add sorting support in get Server v2 API.", "links": [], "name": "ServerSortKeys", "namespace": "http://docs.openstack.org/compute/ext/server_sort_keys/api/v2", "updated": "2014-05-22T00:00:00Z" } ] } 
nova-13.0.0/doc/api_samples/all_extensions/server-create-req.json0000664000567000056710000000034112701407773026225 0ustar jenkinsjenkins00000000000000{ "server" : { "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" } } } nova-13.0.0/doc/api_samples/all_extensions/extensions-list-resp.json0000664000567000056710000007332312701407773027022 0ustar jenkinsjenkins00000000000000{ "extensions": [ { "alias": "NMN", "description": "Multiple network support.", "links": [], "name": "Multinic", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-DCF", "description": "Disk Management Extension.", "links": [], "name": "DiskConfig", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-AZ", "description": "Extended Availability Zone support.", "links": [], "name": "ExtendedAvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IMG-SIZE", "description": "Adds image size to image listings.", "links": [], "name": "ImageSize", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS", "description": "", "links": [], "name": "ExtendedIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS-MAC", "description": "", "links": [], "name": "ExtendedIpsMac", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-SRV-ATTR", "description": "Extended Server Attributes support.", "links": [], "name": "ExtendedServerAttributes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-STS", "description": "", "links": [], "name": 
"ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-DISABLED", "description": "", "links": [], "name": "FlavorDisabled", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-EXT-DATA", "description": "", "links": [], "name": "FlavorExtraData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SCH-HNT", "description": "Pass arbitrary key/value pairs to the scheduler.", "links": [], "name": "SchedulerHints", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SRV-USG", "description": "Adds launched_at and terminated_at on Servers.", "links": [], "name": "ServerUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-access-ips", "description": "Access IPs support.", "links": [], "name": "AccessIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-actions", "description": "Enable admin-only server actions\n\n Actions include: resetNetwork, injectNetworkInfo, os-resetState\n ", "links": [], "name": "AdminActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-password", "description": "Admin password management support.", "links": [], "name": "AdminPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-agents", "description": "Agents support.", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-aggregates", "description": "Admin-only aggregate administration.", "links": [], "name": "Aggregates", "namespace": 
"http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-assisted-volume-snapshots", "description": "Assisted volume snapshots.", "links": [], "name": "AssistedVolumeSnapshots", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-attach-interfaces", "description": "Attach interface support.", "links": [], "name": "AttachInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-availability-zone", "description": "1. Add availability_zone to the Create Server API.\n 2. Add availability zones describing.\n ", "links": [], "name": "AvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-ext-status", "description": "", "links": [], "name": "BareMetalExtStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-nodes", "description": "Admin-only bare-metal node administration.", "links": [], "name": "BareMetalNodes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping", "description": "Block device mapping boot support.", "links": [], "name": "BlockDeviceMapping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping-v2-boot", "description": "", "links": [], "name": "BlockDeviceMappingV2Boot", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cell-capacities", "description": "", "links": [], "name": "CellCapacities", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cells", "description": "Enables cells-related functionality such as adding 
neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n ", "links": [], "name": "Cells", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-certificates", "description": "Certificates support.", "links": [], "name": "Certificates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe", "description": "Adds actions to create cloudpipe instances.\n\n When running with the Vlan network mode, you need a mechanism to route\n from the public Internet to your vlans. This mechanism is known as a\n cloudpipe.\n\n At the time of creating this class, only OpenVPN is supported. Support for\n a SSH Bastion host is forthcoming.\n ", "links": [], "name": "Cloudpipe", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe-update", "description": "", "links": [], "name": "CloudpipeUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-config-drive", "description": "Config Drive Extension.", "links": [], "name": "ConfigDrive", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-auth-tokens", "description": "Console token authentication support.", "links": [], "name": "ConsoleAuthTokens", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-output", "description": "Console log output support, with tailing ability.", "links": [], "name": "ConsoleOutput", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-consoles", "description": "Interactive Console support.", "links": [], "name": "Consoles", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": 
"2014-12-03T00:00:00Z" }, { "alias": "os-create-backup", "description": "Create a backup of a server.", "links": [], "name": "CreateBackup", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-server-ext", "description": "", "links": [], "name": "Createserverext", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-deferred-delete", "description": "Instance deferred delete.", "links": [], "name": "DeferredDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-evacuate", "description": "Enables server evacuation.", "links": [], "name": "Evacuate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-evacuate-find-host", "description": "", "links": [], "name": "ExtendedEvacuateFindHost", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-floating-ips", "description": "", "links": [], "name": "ExtendedFloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-hypervisors", "description": "", "links": [], "name": "ExtendedHypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-networks", "description": "", "links": [], "name": "ExtendedNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-quotas", "description": "", "links": [], "name": "ExtendedQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-rescue-with-image", "description": "", "links": [], "name": "ExtendedRescueWithImage", "namespace": 
"http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services", "description": "", "links": [], "name": "ExtendedServices", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services-delete", "description": "", "links": [], "name": "ExtendedServicesDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-status", "description": "Extended Status support.", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-volumes", "description": "Extended Volumes support.", "links": [], "name": "ExtendedVolumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fixed-ips", "description": "Fixed IPs support.", "links": [], "name": "FixedIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-access", "description": "Flavor access support.", "links": [], "name": "FlavorAccess", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-extra-specs", "description": "Flavors extra specs support.", "links": [], "name": "FlavorExtraSpecs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-manage", "description": "Flavor create/delete API support.", "links": [], "name": "FlavorManage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-rxtx", "description": "Support to show the rxtx status of a flavor.", "links": [], "name": "FlavorRxtx", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, 
{ "alias": "os-flavor-swap", "description": "", "links": [], "name": "FlavorSwap", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-dns", "description": "Floating IP DNS support.", "links": [], "name": "FloatingIpDns", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-pools", "description": "Floating IPs support.", "links": [], "name": "FloatingIpPools", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips", "description": "Floating IPs support.", "links": [], "name": "FloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips-bulk", "description": "Bulk handling of Floating IPs.", "links": [], "name": "FloatingIpsBulk", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fping", "description": "Fping Management Extension.", "links": [], "name": "Fping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hide-server-addresses", "description": "Support hiding server addresses in certain states.", "links": [], "name": "HideServerAddresses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hosts", "description": "Admin-only host administration.", "links": [], "name": "Hosts", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hypervisor-status", "description": "", "links": [], "name": "HypervisorStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hypervisors", "description": "Admin-only hypervisor administration.", "links": [], "name": 
"Hypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance-actions", "description": "View a log of actions and events taken on an instance.", "links": [], "name": "InstanceActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance_usage_audit_log", "description": "Admin-only Task Log Monitoring.", "links": [], "name": "OSInstanceUsageAuditLog", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-keypairs", "description": "Keypair Support.", "links": [], "name": "Keypairs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-lock-server", "description": "Enable lock/unlock server actions.", "links": [], "name": "LockServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrate-server", "description": "Enable migrate and live-migrate server actions.", "links": [], "name": "MigrateServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrations", "description": "Provide data on migrations.", "links": [], "name": "Migrations", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-multiple-create", "description": "Allow multiple create in the Create Server v2.1 API.", "links": [], "name": "MultipleCreate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks", "description": "Admin-only Network Management Extension.", "links": [], "name": "Networks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks-associate", "description": "Network association 
support.", "links": [], "name": "NetworkAssociationSupport", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-pause-server", "description": "Enable pause/unpause server actions.", "links": [], "name": "PauseServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-personality", "description": "Personality support.", "links": [], "name": "Personality", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-preserve-ephemeral-rebuild", "description": "Allow preservation of the ephemeral partition on rebuild.", "links": [], "name": "PreserveEphemeralOnRebuild", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-class-sets", "description": "Quota classes management support.", "links": [], "name": "QuotaClasses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-sets", "description": "Quotas management support.", "links": [], "name": "Quotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-rescue", "description": "Instance rescue mode.", "links": [], "name": "Rescue", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-group-default-rules", "description": "Default rules for security group support.", "links": [], "name": "SecurityGroupDefaultRules", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-groups", "description": "Security group support.", "links": [], "name": "SecurityGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-diagnostics", "description": 
"Allow Admins to view server diagnostics through server action.", "links": [], "name": "ServerDiagnostics", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-external-events", "description": "Server External Event Triggers.", "links": [], "name": "ServerExternalEvents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-group-quotas", "description": "", "links": [], "name": "ServerGroupQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-groups", "description": "Server group support.", "links": [], "name": "ServerGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-list-multi-status", "description": "", "links": [], "name": "ServerListMultiStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-password", "description": "Server password support.", "links": [], "name": "ServerPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-sort-keys", "description": "", "links": [], "name": "ServerSortKeys", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-start-stop", "description": "", "links": [], "name": "ServerStartStop", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-services", "description": "Services support.", "links": [], "name": "Services", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-shelve", "description": "Instance shelve mode.", "links": [], "name": "Shelve", "namespace": 
"http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-simple-tenant-usage", "description": "Simple tenant usage extension.", "links": [], "name": "SimpleTenantUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-suspend-server", "description": "Enable suspend/resume server actions.", "links": [], "name": "SuspendServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-tenant-networks", "description": "Tenant-based Network Management Extension.", "links": [], "name": "OSTenantNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-used-limits", "description": "Provide data on limited resources that are being used.", "links": [], "name": "UsedLimits", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-used-limits-for-admin", "description": "", "links": [], "name": "UsedLimitsForAdmin", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-data", "description": "Add user_data to the Create Server API.", "links": [], "name": "UserData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-quotas", "description": "", "links": [], "name": "UserQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-virtual-interfaces", "description": "Virtual interface support.", "links": [], "name": "VirtualInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volume-attachment-update", "description": "", "links": [], "name": "VolumeAttachmentUpdate", "namespace": 
"http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volumes", "description": "Volumes support.", "links": [], "name": "Volumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" } ] } nova-13.0.0/doc/api_samples/all_extensions/server-action-revert-resize.json0000664000567000056710000000003512701407773030256 0ustar jenkinsjenkins00000000000000{ "revertResize" : null }nova-13.0.0/doc/api_samples/all_extensions/server-action-resize.json0000664000567000056710000000006412701407773026753 0ustar jenkinsjenkins00000000000000{ "resize" : { "flavorRef" : "2" } }nova-13.0.0/doc/api_samples/all_extensions/server-action-confirm-resize.json0000664000567000056710000000003612701407773030405 0ustar jenkinsjenkins00000000000000{ "confirmResize" : null }nova-13.0.0/doc/api_samples/os-used-limits/0000775000567000056710000000000012701410205021607 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-used-limits/usedlimits-get-resp.json0000664000567000056710000000141212701407773026426 0ustar jenkinsjenkins00000000000000{ "limits": { "absolute": { "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, "maxSecurityGroupRules": 20, "maxSecurityGroups": 10, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalFloatingIps": 10, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10, "totalCoresUsed": 0, "totalInstancesUsed": 0, "totalRAMUsed": 0, "totalSecurityGroupsUsed": 0, "totalFloatingIpsUsed": 0, "totalServerGroupsUsed": 0 }, "rate": [] } } nova-13.0.0/doc/api_samples/os-used-limits/v2-usedlimits-get-resp.json0000664000567000056710000000567412701407773026771 0ustar jenkinsjenkins00000000000000{ "limits": { "absolute": { "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, "maxSecurityGroupRules": 20, "maxSecurityGroups": 10, "maxServerMeta": 128, "maxTotalCores": 20, 
"maxTotalFloatingIps": 10, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10, "totalCoresUsed": 0, "totalInstancesUsed": 0, "totalRAMUsed": 0, "totalSecurityGroupsUsed": 0, "totalFloatingIpsUsed": 0, "totalServerGroupsUsed": 0 }, "rate": [ { "limit": [ { "next-available": "2012-11-27T17:24:52Z", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "POST" }, { "next-available": "2012-11-27T17:24:52Z", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "PUT" }, { "next-available": "2012-11-27T17:24:52Z", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "DELETE" } ], "regex": ".*", "uri": "*" }, { "limit": [ { "next-available": "2012-11-27T17:24:52Z", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "POST" } ], "regex": "^/servers", "uri": "*/servers" }, { "limit": [ { "next-available": "2012-11-27T17:24:52Z", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "GET" } ], "regex": ".*changes-since.*", "uri": "*changes-since*" }, { "limit": [ { "next-available": "2012-11-27T17:24:52Z", "remaining": 12, "unit": "MINUTE", "value": 12, "verb": "GET" } ], "regex": "^/os-fping", "uri": "*/os-fping" } ] } } nova-13.0.0/doc/api_samples/images/0000775000567000056710000000000012701410205020176 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/images/image-metadata-put-req.json0000664000567000056710000000013212701407773025340 0ustar jenkinsjenkins00000000000000{ "metadata": { "auto_disk_config": "True", "Label": "Changed" } }nova-13.0.0/doc/api_samples/images/image-metadata-post-resp.json0000664000567000056710000000030112701407773025675 0ustar jenkinsjenkins00000000000000{ "metadata": { "Label": "UpdatedImage", "architecture": "x86_64", "auto_disk_config": "True", "kernel_id": "False", "ramdisk_id": "nokernel" } }nova-13.0.0/doc/api_samples/images/images-details-get-resp.json0000664000567000056710000002107412701407773025531 0ustar 
jenkinsjenkins00000000000000{ "images": [ { "OS-DCF:diskConfig": "AUTO", "OS-EXT-IMG-SIZE:size": "74185822", "created": "2011-01-01T01:02:03Z", "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "auto_disk_config": "True", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage7", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": "74185822", "created": "2011-01-01T01:02:03Z", "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": "74185822", "created": "2011-01-01T01:02:03Z", "id": "a2459075-d96c-40d5-893e-577ff92e721c", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-DCF:diskConfig": "MANUAL", "OS-EXT-IMG-SIZE:size": "74185822", "created": "2011-01-01T01:02:03Z", "id": "a440c04b-79fa-479c-bed1-0b816eaec379", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "auto_disk_config": "False", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage6", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": "74185822", "created": "2011-01-01T01:02:03Z", "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ramdisk_id": null }, "minDisk": 
0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": "74185822", "created": "2011-01-01T01:02:03Z", "id": "cedef40a-ed67-4d10-800e-17455edce175", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": "74185822", "created": "2011-01-01T01:02:03Z", "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" } ] } nova-13.0.0/doc/api_samples/images/image-get-resp.json0000664000567000056710000000226612701407773023725 0ustar jenkinsjenkins00000000000000{ "image": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-IMG-SIZE:size": "74185822", "created": "2011-01-01T01:02:03Z", "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": 
"http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "auto_disk_config": "True", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage7", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" } } nova-13.0.0/doc/api_samples/images/images-list-get-resp.json0000664000567000056710000001325412701407773025060 0ustar jenkinsjenkins00000000000000{ "images": [ { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage7" }, { "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "a2459075-d96c-40d5-893e-577ff92e721c", "links": [ { "href": 
"http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "a440c04b-79fa-479c-bed1-0b816eaec379", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage6" }, { "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "cedef40a-ed67-4d10-800e-17455edce175", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "alternate", "type": "application/vnd.openstack.image" 
} ], "name": "fakeimage123456" }, { "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" } ] } nova-13.0.0/doc/api_samples/images/image-meta-key-put-req.json0000664000567000056710000000007412701407773025301 0ustar jenkinsjenkins00000000000000{ "meta": { "auto_disk_config": "False" } } nova-13.0.0/doc/api_samples/images/image-meta-key-put-resp.json0000664000567000056710000000007312701407773025462 0ustar jenkinsjenkins00000000000000{ "meta": { "auto_disk_config": "False" } }nova-13.0.0/doc/api_samples/images/image-meta-key-get.json0000664000567000056710000000006712701407773024465 0ustar jenkinsjenkins00000000000000{ "meta": { "kernel_id": "nokernel" } }nova-13.0.0/doc/api_samples/images/image-metadata-get-resp.json0000664000567000056710000000024312701407773025474 0ustar jenkinsjenkins00000000000000{ "metadata": { "architecture": "x86_64", "auto_disk_config": "True", "kernel_id": "nokernel", "ramdisk_id": "nokernel" } }nova-13.0.0/doc/api_samples/images/image-metadata-put-resp.json0000664000567000056710000000013212701407773025522 0ustar jenkinsjenkins00000000000000{ "metadata": { "Label": "Changed", "auto_disk_config": "True" } }nova-13.0.0/doc/api_samples/images/image-metadata-post-req.json0000664000567000056710000000013112701407773025514 0ustar jenkinsjenkins00000000000000{ "metadata": { "kernel_id": "False", "Label": "UpdatedImage" } }nova-13.0.0/doc/api_samples/os-instance-usage-audit-log/0000775000567000056710000000000012701410205024141 5ustar 
jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-show-get-resp.json0000664000567000056710000000073512701407773033525 0ustar jenkinsjenkins00000000000000{ "instance_usage_audit_log": { "hosts_not_run": [ "8e33da2b48684ef3ab165444d6a7384c" ], "log": {}, "num_hosts": 1, "num_hosts_done": 0, "num_hosts_not_run": 1, "num_hosts_running": 0, "overall_status": "0 of 1 hosts done. 0 errors.", "period_beginning": "2012-06-01 00:00:00", "period_ending": "2012-07-01 00:00:00", "total_errors": 0, "total_instances": 0 } }nova-13.0.0/doc/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-index-get-resp.json0000664000567000056710000000073612701407773033655 0ustar jenkinsjenkins00000000000000{ "instance_usage_audit_logs": { "hosts_not_run": [ "f4eb7cfd155f4574967f8b55a7faed75" ], "log": {}, "num_hosts": 1, "num_hosts_done": 0, "num_hosts_not_run": 1, "num_hosts_running": 0, "overall_status": "0 of 1 hosts done. 0 errors.", "period_beginning": "2012-12-01 00:00:00", "period_ending": "2013-01-01 00:00:00", "total_errors": 0, "total_instances": 0 } }nova-13.0.0/doc/api_samples/os-migrations/0000775000567000056710000000000012701410205021524 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-migrations/migrations-get.json0000664000567000056710000000201012701407773025361 0ustar jenkinsjenkins00000000000000{ "migrations": [ { "created_at": "2012-10-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1234, "instance_uuid": "instance_id_123", "new_instance_type_id": 2, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "Done", "updated_at": "2012-10-29T13:42:02.000000" }, { "created_at": "2013-10-22T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 5678, "instance_uuid": "instance_id_456", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", 
"source_node": "node10", "status": "Done", "updated_at": "2013-10-22T13:42:02.000000" } ] }nova-13.0.0/doc/api_samples/os-migrations/v2.23/0000775000567000056710000000000012701410205022276 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-migrations/v2.23/migrations-get.json0000664000567000056710000000534112701407773026145 0ustar jenkinsjenkins00000000000000{ "migrations": [ { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "bookmark" } ], "new_instance_type_id": 2, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "running", "migration_type": "live-migration", "updated_at": "2016-01-29T13:42:02.000000" }, { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 2, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "new_instance_type_id": 2, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "error", "migration_type": "live-migration", "updated_at": "2016-01-29T13:42:02.000000" }, { "created_at": "2016-01-22T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 3, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "error", "migration_type": "resize", "updated_at": "2016-01-22T13:42:02.000000" }, { "created_at": "2016-01-22T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", 
"dest_node": "node20", "id": 4, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "migrating", "migration_type": "resize", "updated_at": "2016-01-22T13:42:02.000000" } ] } nova-13.0.0/doc/api_samples/os-multiple-create/0000775000567000056710000000000012701410205022444 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-multiple-create/multiple-create-post-resp.json0000664000567000056710000000004612701407773030405 0ustar jenkinsjenkins00000000000000{ "reservation_id": "r-3fhpjulh" }nova-13.0.0/doc/api_samples/os-multiple-create/multiple-create-post-req.json0000664000567000056710000000047012701407773030224 0ustar jenkinsjenkins00000000000000{ "server": { "name": "new-server-test", "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef": "1", "metadata": { "My Server Name": "Apache1" }, "return_reservation_id": "True", "min_count": "2", "max_count": "3" } } nova-13.0.0/doc/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json0000664000567000056710000000041712701407773031614 0ustar jenkinsjenkins00000000000000{ "server": { "name": "new-server-test", "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef": "1", "metadata": { "My Server Name": "Apache1" }, "min_count": "2", "max_count": "3" } } nova-13.0.0/doc/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json0000664000567000056710000000103212701407773031770 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "wfksH3GTTseP", "id": "440cf918-3ee0-4143-b289-f63e1d2000e6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/440cf918-3ee0-4143-b289-f63e1d2000e6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/440cf918-3ee0-4143-b289-f63e1d2000e6", "rel": "bookmark" } ] } } 
nova-13.0.0/doc/api_samples/os-instance-actions/0000775000567000056710000000000012701410205022612 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-instance-actions/v2.21/0000775000567000056710000000000012701410205023362 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json0000664000567000056710000000154612701407773031106 0ustar jenkinsjenkins00000000000000{ "instanceAction": { "action": "reboot", "events": [ { "event": "schedule", "finish_time": "2012-12-05T01:02:00.000000", "result": "Success", "start_time": "2012-12-05T01:00:02.000000", "traceback": "" }, { "event": "compute_create", "finish_time": "2012-12-05T01:04:00.000000", "result": "Success", "start_time": "2012-12-05T01:03:00.000000", "traceback": "" } ], "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13", "message": "", "project_id": "147", "request_id": "req-3293a3f1-b44c-4609-b8d2-d81b105636b8", "start_time": "2012-12-05T00:00:00.000000", "user_id": "789" } } nova-13.0.0/doc/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json0000664000567000056710000000130412701407773031455 0ustar jenkinsjenkins00000000000000{ "instanceActions": [ { "action": "resize", "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13", "message": "", "project_id": "842", "request_id": "req-25517360-b757-47d3-be45-0e8d2a01b36a", "start_time": "2012-12-05T01:00:00.000000", "user_id": "789" }, { "action": "reboot", "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13", "message": "", "project_id": "147", "request_id": "req-3293a3f1-b44c-4609-b8d2-d81b105636b8", "start_time": "2012-12-05T00:00:00.000000", "user_id": "789" } ] } nova-13.0.0/doc/api_samples/os-instance-actions/instance-action-get-resp.json0000664000567000056710000000154612701407773030336 0ustar jenkinsjenkins00000000000000{ "instanceAction": { "action": "reboot", "events": [ { "event": "schedule", "finish_time": "2012-12-05T01:02:00.000000", "result": "Success", 
"start_time": "2012-12-05T01:00:02.000000", "traceback": "" }, { "event": "compute_create", "finish_time": "2012-12-05T01:04:00.000000", "result": "Success", "start_time": "2012-12-05T01:03:00.000000", "traceback": "" } ], "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13", "message": "", "project_id": "147", "request_id": "req-3293a3f1-b44c-4609-b8d2-d81b105636b8", "start_time": "2012-12-05T00:00:00.000000", "user_id": "789" } } nova-13.0.0/doc/api_samples/os-instance-actions/instance-actions-list-resp.json0000664000567000056710000000130412701407773030705 0ustar jenkinsjenkins00000000000000{ "instanceActions": [ { "action": "resize", "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13", "message": "", "project_id": "842", "request_id": "req-25517360-b757-47d3-be45-0e8d2a01b36a", "start_time": "2012-12-05T01:00:00.000000", "user_id": "789" }, { "action": "reboot", "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13", "message": "", "project_id": "147", "request_id": "req-3293a3f1-b44c-4609-b8d2-d81b105636b8", "start_time": "2012-12-05T00:00:00.000000", "user_id": "789" } ] } nova-13.0.0/doc/api_samples/os-virtual-interfaces/0000775000567000056710000000000012701410205023157 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-virtual-interfaces/vifs-list-resp-v2.json0000664000567000056710000000036012701407773027305 0ustar jenkinsjenkins00000000000000{ "virtual_interfaces": [ { "id": "cec8b9bb-5d22-4104-b3c8-4c35db3210a6", "mac_address": "fa:16:3e:3c:ce:6f", "OS-EXT-VIF-NET:net_id": "cec8b9bb-5d22-4104-b3c8-4c35db3210a7" } ] } nova-13.0.0/doc/api_samples/os-virtual-interfaces/v2.12/0000775000567000056710000000000012701410205023727 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-virtual-interfaces/v2.12/vifs-list-resp.json0000664000567000056710000000034112701407773027527 0ustar jenkinsjenkins00000000000000{ "virtual_interfaces": [ { "id": "cec8b9bb-5d22-4104-b3c8-4c35db3210a6", "mac_address": "fa:16:3e:3c:ce:6f", "net_id": 
"cec8b9bb-5d22-4104-b3c8-4c35db3210a7" } ] } nova-13.0.0/doc/api_samples/os-virtual-interfaces/vifs-list-resp.json0000664000567000056710000000024212701407773026757 0ustar jenkinsjenkins00000000000000{ "virtual_interfaces": [ { "id": "cec8b9bb-5d22-4104-b3c8-4c35db3210a6", "mac_address": "fa:16:3e:3c:ce:6f" } ] }nova-13.0.0/doc/api_samples/os-access-ips/0000775000567000056710000000000012701410205021402 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-access-ips/server-post-req.json0000664000567000056710000000044212701407773025373 0ustar jenkinsjenkins00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "fe80::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" } } } nova-13.0.0/doc/api_samples/os-access-ips/server-action-rebuild.json0000664000567000056710000000040312701407773026517 0ustar jenkinsjenkins00000000000000{ "rebuild" : { "accessIPv4": "4.3.2.1", "accessIPv6": "80fe::", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "new-server-test", "metadata" : { "meta_var" : "meta_val" } } } nova-13.0.0/doc/api_samples/os-access-ips/server-action-rebuild-resp.json0000664000567000056710000000340212701407773027470 0ustar jenkinsjenkins00000000000000{ "server": { "addresses": { "private": [ { "addr": "192.168.0.3", "version": 4 } ] }, "adminPass": "99WHAxN8gpvg", "created": "2013-11-06T07:51:09Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "5c8072dbcda8ce3f26deb6662bd7718e1a6d349bdf2296911d1be4ac", "id": "53a63a19-c145-47f8-9ae5-b39d6bff33ec", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": 
"http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/53a63a19-c145-47f8-9ae5-b39d6bff33ec", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/53a63a19-c145-47f8-9ae5-b39d6bff33ec", "rel": "bookmark" } ], "metadata": { "meta_var": "meta_val" }, "name": "new-server-test", "accessIPv4": "4.3.2.1", "accessIPv6": "80fe::", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-11-06T07:51:11Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-access-ips/servers-details-resp.json0000664000567000056710000000413112701407773026377 0ustar jenkinsjenkins00000000000000{ "servers": [ { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-11T03:23:11Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "9896cb12c9845becf1b9b06c8ff5b131d20300f83e2cdffc92e3f4a4", "id": "934760e1-2b0b-4f9e-a916-eac1e69839dc", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/934760e1-2b0b-4f9e-a916-eac1e69839dc", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/934760e1-2b0b-4f9e-a916-eac1e69839dc", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "accessIPv4": "1.2.3.4", "accessIPv6": "fe80::", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-11T03:23:12Z", "user_id": "fake", "key_name": null } ] } 
nova-13.0.0/doc/api_samples/os-access-ips/server-post-resp.json0000664000567000056710000000103112701407773025550 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "n7JGBda664QG", "id": "934760e1-2b0b-4f9e-a916-eac1e69839dc", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/934760e1-2b0b-4f9e-a916-eac1e69839dc", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/934760e1-2b0b-4f9e-a916-eac1e69839dc", "rel": "bookmark" } ] } }nova-13.0.0/doc/api_samples/os-access-ips/server-get-resp.json0000664000567000056710000000356012701407773025353 0ustar jenkinsjenkins00000000000000{ "server": { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-11T03:23:12Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "b3a6fd97c027e18d6d9c7506eea8a236cf2ceca420cfdfe0239a64a8", "id": "5eedbf0c-c303-4ed3-933a-a4d3732cfa0a", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/5eedbf0c-c303-4ed3-933a-a4d3732cfa0a", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/5eedbf0c-c303-4ed3-933a-a4d3732cfa0a", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "accessIPv4": "1.2.3.4", "accessIPv6": "fe80::", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-11T03:23:13Z", "user_id": "fake", "key_name": null } } 
nova-13.0.0/doc/api_samples/os-config-drive/0000775000567000056710000000000012701410205021724 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-config-drive/server-post-req.json0000664000567000056710000000044212701407773025715 0ustar jenkinsjenkins00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" } } } nova-13.0.0/doc/api_samples/os-config-drive/server-config-drive-get-resp.json0000664000567000056710000000361412701407773030247 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-22T02:33:23Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "1642bbdbd61a0f1c513b4bb6e418326103172698104bfa278eca106b", "id": "7838ff1b-b71f-48b9-91e9-7c08de20b249", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/7838ff1b-b71f-48b9-91e9-7c08de20b249", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/7838ff1b-b71f-48b9-91e9-7c08de20b249", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-22T02:33:25Z", "user_id": "fake" } } 
nova-13.0.0/doc/api_samples/os-config-drive/servers-config-drive-details-resp.json0000664000567000056710000000417112701407773031277 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-22T02:33:17Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "1ed067c90341cd9d94bbe5da960922b56f107262cdc75719a0d97b78", "id": "f0318e69-11eb-4aed-9840-59b6c72beee8", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f0318e69-11eb-4aed-9840-59b6c72beee8", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f0318e69-11eb-4aed-9840-59b6c72beee8", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-22T02:33:19Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/os-config-drive/server-post-resp.json0000664000567000056710000000103212701407773026073 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "h2cx3Lm47BJc", "id": "f0318e69-11eb-4aed-9840-59b6c72beee8", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f0318e69-11eb-4aed-9840-59b6c72beee8", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f0318e69-11eb-4aed-9840-59b6c72beee8", "rel": "bookmark" } ] } } 
nova-13.0.0/doc/api_samples/server-ips/0000775000567000056710000000000012701410205021030 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/server-ips/server-ips-network-resp.json0000664000567000056710000000015212701407773026476 0ustar jenkinsjenkins00000000000000{ "private": [ { "addr": "192.168.0.3", "version": 4 } ] }nova-13.0.0/doc/api_samples/server-ips/server-ips-resp.json0000664000567000056710000000023312701407773025007 0ustar jenkinsjenkins00000000000000{ "addresses": { "private": [ { "addr": "192.168.0.3", "version": 4 } ] } }nova-13.0.0/doc/api_samples/os-shelve/0000775000567000056710000000000012701410205020636 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-shelve/os-shelve.json0000664000567000056710000000002612701407773023454 0ustar jenkinsjenkins00000000000000{ "shelve": null }nova-13.0.0/doc/api_samples/os-shelve/os-unshelve.json0000664000567000056710000000003112701407773024013 0ustar jenkinsjenkins00000000000000{ "unshelve": null } nova-13.0.0/doc/api_samples/os-shelve/os-shelve-offload.json0000664000567000056710000000003612701407773025065 0ustar jenkinsjenkins00000000000000{ "shelveOffload": null } nova-13.0.0/doc/api_samples/os-personality/0000775000567000056710000000000012701410205021721 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-personality/server-post-req.json0000664000567000056710000000140512701407773025712 0ustar jenkinsjenkins00000000000000{ "server": { "name": "new-server-test", "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef": "1", "metadata": { "My Server Name": "Apache1" }, "personality": [ { "path": "/etc/banner.txt", "contents": 
"ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ] } } nova-13.0.0/doc/api_samples/os-personality/server-action-rebuild-resp.json0000664000567000056710000000340412701407773030011 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "addresses": { "private": [ { "addr": "192.168.0.3", "version": 4 } ] }, "adminPass": "99WHAxN8gpvg", "created": "2013-11-06T07:51:09Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "5c8072dbcda8ce3f26deb6662bd7718e1a6d349bdf2296911d1be4ac", "id": "53a63a19-c145-47f8-9ae5-b39d6bff33ec", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/53a63a19-c145-47f8-9ae5-b39d6bff33ec", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/53a63a19-c145-47f8-9ae5-b39d6bff33ec", "rel": "bookmark" } ], "metadata": { "meta_var": "meta_val" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-11-06T07:51:11Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-personality/server-post-resp.json0000664000567000056710000000103112701407773026067 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "n7JGBda664QG", "id": "934760e1-2b0b-4f9e-a916-eac1e69839dc", 
"links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/934760e1-2b0b-4f9e-a916-eac1e69839dc", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/934760e1-2b0b-4f9e-a916-eac1e69839dc", "rel": "bookmark" } ] } }nova-13.0.0/doc/api_samples/os-personality/server-action-rebuild-req.json0000664000567000056710000000146212701407773027631 0ustar jenkinsjenkins00000000000000{ "rebuild": { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b", "name": "new-server-test", "metadata": { "meta_var": "meta_val" }, "personality": [ { "path": "/etc/banner.txt", "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ] } } nova-13.0.0/doc/api_samples/os-console-auth-tokens/0000775000567000056710000000000012701410205023252 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json0000664000567000056710000000032612701407773032511 0ustar jenkinsjenkins00000000000000{ "console": { "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13", "host": "localhost", "port": 5900, "internal_access_path": "51af38c3-555e-4884-a314-6c8cdde37444" } } nova-13.0.0/doc/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json0000664000567000056710000000010012701407773030726 0ustar jenkinsjenkins00000000000000{ "os-getRDPConsole": { "type": "rdp-html5" } } nova-13.0.0/doc/api_samples/os-admin-actions/0000775000567000056710000000000012701410205022076 5ustar 
jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-admin-actions/admin-actions-reset-server-state.json0000664000567000056710000000007312701407773031301 0ustar jenkinsjenkins00000000000000{ "os-resetState": { "state": "active" } } nova-13.0.0/doc/api_samples/os-admin-actions/admin-actions-inject-network-info.json0000664000567000056710000000004212701407773031425 0ustar jenkinsjenkins00000000000000{ "injectNetworkInfo": null } nova-13.0.0/doc/api_samples/os-admin-actions/admin-actions-reset-network.json0000664000567000056710000000003512701407773030344 0ustar jenkinsjenkins00000000000000{ "resetNetwork": null } nova-13.0.0/doc/api_samples/limits/0000775000567000056710000000000012701410205020232 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/limits/limit-get-resp.json0000664000567000056710000000105412701407773024007 0ustar jenkinsjenkins00000000000000{ "limits": { "absolute": { "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, "maxSecurityGroupRules": 20, "maxSecurityGroups": 10, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalFloatingIps": 10, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10 }, "rate": [] } } nova-13.0.0/doc/api_samples/limits/v2-limit-get-resp.json0000664000567000056710000000533612701407773024343 0ustar jenkinsjenkins00000000000000{ "limits": { "absolute": { "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, "maxSecurityGroupRules": 20, "maxSecurityGroups": 10, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalFloatingIps": 10, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10 }, "rate": [ { "limit": [ { "next-available": "2012-11-27T17:22:18Z", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "POST" }, { "next-available": "2012-11-27T17:22:18Z", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "PUT" }, { 
"next-available": "2012-11-27T17:22:18Z", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "DELETE" } ], "regex": ".*", "uri": "*" }, { "limit": [ { "next-available": "2012-11-27T17:22:18Z", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "POST" } ], "regex": "^/servers", "uri": "*/servers" }, { "limit": [ { "next-available": "2012-11-27T17:22:18Z", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "GET" } ], "regex": ".*changes-since.*", "uri": "*changes-since*" }, { "limit": [ { "next-available": "2012-11-27T17:22:18Z", "remaining": 12, "unit": "MINUTE", "value": 12, "verb": "GET" } ], "regex": "^/os-fping", "uri": "*/os-fping" } ] } } nova-13.0.0/doc/api_samples/os-certificates/0000775000567000056710000000000012701410205022015 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-certificates/certificate-create-resp.json0000664000567000056710000000672312701407773027432 0ustar jenkinsjenkins00000000000000{ "certificate": { "data": "Certificate:\n Data:\n Version: 1 (0x0)\n Serial Number: 1018 (0x3fa)\n Signature Algorithm: md5WithRSAEncryption\n Issuer: O=NOVA ROOT, L=Mountain View, ST=California, C=US\n Validity\n Not Before: Aug 12 07:20:30 2013 GMT\n Not After : Aug 12 07:20:30 2014 GMT\n Subject: C=US, ST=California, O=OpenStack, OU=NovaDev, CN=openstack-fake-2013-08-12T07:20:30Z\n Subject Public Key Info:\n Public Key Algorithm: rsaEncryption\n Public-Key: (1024 bit)\n Modulus:\n 00:ac:ff:b1:d1:ed:54:4e:35:6c:34:b4:8f:0b:04:\n 50:25:a3:e2:4f:02:4c:4f:26:59:bd:f3:fd:eb:da:\n 18:c2:36:aa:63:42:72:1f:88:4f:3a:ec:e7:9f:8e:\n 44:2a:d3:b8:94:7b:20:41:f8:48:02:57:91:4c:16:\n 62:f1:21:d4:f2:40:b5:86:50:d9:61:f0:be:ff:d8:\n 8d:9f:4b:aa:6a:07:38:a2:7f:87:21:fc:e6:6e:1d:\n 0a:95:1a:90:0e:60:c2:24:e9:8e:e8:68:1b:e9:f3:\n c6:b0:7c:da:c5:20:66:9b:85:ea:f5:c9:a7:de:ee:\n 16:b1:51:a0:4d:e3:95:98:df\n Exponent: 65537 (0x10001)\n Signature Algorithm: md5WithRSAEncryption\n 15:42:ca:71:cc:32:af:dc:cf:45:91:df:8a:b8:30:c4:7f:78:\n 
80:a7:25:c2:d9:81:3e:b3:dd:22:cc:3b:f8:94:e7:8f:04:f6:\n 93:04:9e:85:d4:10:40:ff:5a:07:47:24:b5:ae:93:ad:8d:e1:\n e6:54:4a:8d:4a:29:53:c4:8d:04:6b:0b:f6:af:38:78:02:c5:\n 05:19:89:82:2d:ba:fd:11:3c:1e:18:c9:0c:3d:03:93:6e:bc:\n 66:70:34:ee:03:78:8a:1d:3d:64:e8:20:2f:90:81:8e:49:1d:\n 07:37:15:66:42:cb:58:39:ad:56:ce:ed:47:c6:78:0b:0e:75:\n 29:ca\n-----BEGIN CERTIFICATE-----\nMIICNDCCAZ0CAgP6MA0GCSqGSIb3DQEBBAUAME4xEjAQBgNVBAoTCU5PVkEgUk9P\nVDEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UECBMKQ2FsaWZvcm5pYTEL\nMAkGA1UEBhMCVVMwHhcNMTMwODEyMDcyMDMwWhcNMTQwODEyMDcyMDMwWjB2MQsw\nCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UECgwJT3BlblN0\nYWNrMRAwDgYDVQQLDAdOb3ZhRGV2MSwwKgYDVQQDDCNvcGVuc3RhY2stZmFrZS0y\nMDEzLTA4LTEyVDA3OjIwOjMwWjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA\nrP+x0e1UTjVsNLSPCwRQJaPiTwJMTyZZvfP969oYwjaqY0JyH4hPOuznn45EKtO4\nlHsgQfhIAleRTBZi8SHU8kC1hlDZYfC+/9iNn0uqagc4on+HIfzmbh0KlRqQDmDC\nJOmO6Ggb6fPGsHzaxSBmm4Xq9cmn3u4WsVGgTeOVmN8CAwEAATANBgkqhkiG9w0B\nAQQFAAOBgQAVQspxzDKv3M9Fkd+KuDDEf3iApyXC2YE+s90izDv4lOePBPaTBJ6F\n1BBA/1oHRyS1rpOtjeHmVEqNSilTxI0Eawv2rzh4AsUFGYmCLbr9ETweGMkMPQOT\nbrxmcDTuA3iKHT1k6CAvkIGOSR0HNxVmQstYOa1Wzu1HxngLDnUpyg==\n-----END CERTIFICATE-----\n", "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIICXgIBAAKBgQCs/7HR7VRONWw0tI8LBFAlo+JPAkxPJlm98/3r2hjCNqpjQnIf\niE867OefjkQq07iUeyBB+EgCV5FMFmLxIdTyQLWGUNlh8L7/2I2fS6pqBziif4ch\n/OZuHQqVGpAOYMIk6Y7oaBvp88awfNrFIGabher1yafe7haxUaBN45WY3wIDAQAB\nAoGBAIrcr2I/KyWf0hw4Nn10V9TuyE/9Gz2JHg3QFKjFJox2DqygADT5WAeHc6Bq\nNKNf0NA2SL1LSpm+ql01tvOw4VjE5TF6OHiIzHuTTnXggG6vuA8rxp6L24HtkAcC\n0CBno9ggSX6jVornJPBfxpkwITYSvH57BUFVD7ovbPyWGzS5AkEA1JeUtL6zxwps\nWRr1aJ8Ill2uQk/RUIvSZOU61s+B190zvHikFy8LD8CI6vvBmjC/IZuZVedufjqs\n4vX82uDO3QJBANBSh2b2dyB4AGVFY9vXMRtALAspJHbLHy+zTKxlGPFiuz7Se3ps\n8Kehz4C/CBXgQkk194dwFSGE19/PQfyJROsCQQCFFDJZhrtBUMwMZ2zSRiN5BUGt\nbwuncS+OS1Su3Yz5VRYq2BZYEPHKtYrAFkLWQ8eRwTaWaN5pFE/fb38OgQXdAkA4\nDm0W/K0zlHbuyUxEpNQ28/6mBi0ktiWvLT0tioq6sYmXLwZA/D2JrhXrG/xt/ol3\nr8jqrfNRsLByLhAgh0N/AkEAl2eR0O97lTEgFNqzIQwVmIAn9mBO3cnf3tycvlDU\nm6eb2CS242y4QalfCCAEjxoJURdfsm3/D1iFo00X+IWF+A==\n-----END RSA PRIVATE KEY-----\n" } }nova-13.0.0/doc/api_samples/os-certificates/certificate-get-root-resp.json0000664000567000056710000000214412701407773027720 0ustar jenkinsjenkins00000000000000{ "certificate": { "data": "-----BEGIN 
CERTIFICATE-----\nMIICyzCCAjSgAwIBAgIJAJ8zSIxUp/m4MA0GCSqGSIb3DQEBBAUAME4xEjAQBgNV\nBAoTCU5PVkEgUk9PVDEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UECBMK\nQ2FsaWZvcm5pYTELMAkGA1UEBhMCVVMwHhcNMTIxMDE3MDEzMzM5WhcNMTMxMDE3\nMDEzMzM5WjBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWlu\nIFZpZXcxEzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTMIGfMA0GCSqG\nSIb3DQEBAQUAA4GNADCBiQKBgQDXW4QfQQxJG4MqurqK8nU/Lge0mfNKxXj/Gwvg\n2sQVwxzmKfoxih8Nn6yt0yHMNjhoji1UoWI03TXUnPZRAZmsypGKZeBd7Y1ZOCPB\nXGZVGrQm+PB2kZU+3cD8fVKcueMLLeZ+LRt5d0njnoKhc5xjqMlfFPimHMba4OL6\nTnYzPQIDAQABo4GwMIGtMAwGA1UdEwQFMAMBAf8wHQYDVR0OBBYEFKyoKu4SMOFM\ngx5Ec7p0nrCkabvxMH4GA1UdIwR3MHWAFKyoKu4SMOFMgx5Ec7p0nrCkabvxoVKk\nUDBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWluIFZpZXcx\nEzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTggkAnzNIjFSn+bgwDQYJ\nKoZIhvcNAQEEBQADgYEAXuvXlu1o/SVvykSLhHW8QiAY00yzN/eDzYmZGomgiuoO\n/x+ayVzbrz1UWZnBD+lC4hll2iELSmf22LjLoF+s/9NyPqHxGL3FrfatBkndaiF8\nAx/TMEyCPl7IQWi+3zzatqOKHSHiG7a9SGn/7o2aNTIWKVulfy5GvmbBjBM/0UE=\n-----END CERTIFICATE-----\n", "private_key": null } }nova-13.0.0/doc/api_samples/os-availability-zone/0000775000567000056710000000000012701410205022773 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-availability-zone/availability-zone-post-req.json0000664000567000056710000000146312701407773031105 0ustar jenkinsjenkins00000000000000{ "server" : { "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" }, "availability_zone": "nova", "personality" : [ { "path" : "/etc/banner.txt", "contents" : 
"ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ] } } nova-13.0.0/doc/api_samples/os-availability-zone/availability-zone-detail-resp.json0000664000567000056710000000302712701407773031542 0ustar jenkinsjenkins00000000000000{ "availabilityZoneInfo": [ { "hosts": { "conductor": { "nova-conductor": { "active": true, "available": true, "updated_at": null } }, "consoleauth": { "nova-consoleauth": { "active": true, "available": true, "updated_at": null } }, "network": { "nova-network": { "active": true, "available": true, "updated_at": null } }, "scheduler": { "nova-scheduler": { "active": true, "available": true, "updated_at": null } } }, "zoneName": "internal", "zoneState": { "available": true } }, { "hosts": { "compute": { "nova-compute": { "active": true, "available": true, "updated_at": null } } }, "zoneName": "nova", "zoneState": { "available": true } } ] } nova-13.0.0/doc/api_samples/os-availability-zone/availability-zone-post-resp.json0000664000567000056710000000103112701407773031256 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "k4pKvTfcA4gY", "id": "3e45fa2a-5204-466f-a684-c2a8e1c82d7f", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/3e45fa2a-5204-466f-a684-c2a8e1c82d7f", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/3e45fa2a-5204-466f-a684-c2a8e1c82d7f", "rel": "bookmark" } ] } }nova-13.0.0/doc/api_samples/os-availability-zone/availability-zone-list-resp.json0000664000567000056710000000030212701407773031244 0ustar jenkinsjenkins00000000000000{ "availabilityZoneInfo": [ { "hosts": 
null, "zoneName": "nova", "zoneState": { "available": true } } ] } nova-13.0.0/doc/api_samples/os-lock-server/0000775000567000056710000000000012701410205021604 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-lock-server/unlock-server.json0000664000567000056710000000002612701407773025314 0ustar jenkinsjenkins00000000000000{ "unlock": null }nova-13.0.0/doc/api_samples/os-lock-server/lock-server.json0000664000567000056710000000002412701407773024747 0ustar jenkinsjenkins00000000000000{ "lock": null }nova-13.0.0/doc/api_samples/os-simple-tenant-usage/0000775000567000056710000000000012701410205023232 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json0000664000567000056710000000056112701407773030606 0ustar jenkinsjenkins00000000000000{ "tenant_usages": [ { "start": "2012-10-08T21:10:44.587336", "stop": "2012-10-08T22:10:44.587336", "tenant_id": "6f70656e737461636b20342065766572", "total_hours": 1.0, "total_local_gb_usage": 1.0, "total_memory_mb_usage": 512.0, "total_vcpus_usage": 1.0 } ] } nova-13.0.0/doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json0000664000567000056710000000156512701407773032376 0ustar jenkinsjenkins00000000000000{ "tenant_usage": { "server_usages": [ { "ended_at": null, "flavor": "m1.tiny", "hours": 1.0, "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8fe0", "local_gb": 1, "memory_mb": 512, "name": "new-server-test", "started_at": "2012-10-08T20:10:44.541277", "state": "active", "tenant_id": "6f70656e737461636b20342065766572", "uptime": 3600, "vcpus": 1 } ], "start": "2012-10-08T20:10:44.587336", "stop": "2012-10-08T21:10:44.587336", "tenant_id": "6f70656e737461636b20342065766572", "total_hours": 1.0, "total_local_gb_usage": 1.0, "total_memory_mb_usage": 512.0, "total_vcpus_usage": 1.0 } } nova-13.0.0/doc/api_samples/os-networks-associate/0000775000567000056710000000000012701410205023175 5ustar 
jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-networks-associate/network-disassociate-host-req.json0000664000567000056710000000004112701407773032005 0ustar jenkinsjenkins00000000000000{ "disassociate_host": null }nova-13.0.0/doc/api_samples/os-networks-associate/network-disassociate-project-req.json0000664000567000056710000000004412701407773032501 0ustar jenkinsjenkins00000000000000{ "disassociate_project": null }nova-13.0.0/doc/api_samples/os-networks-associate/network-disassociate-req.json0000664000567000056710000000003412701407773031034 0ustar jenkinsjenkins00000000000000{ "disassociate": null }nova-13.0.0/doc/api_samples/os-networks-associate/network-associate-host-req.json0000664000567000056710000000004412701407773031310 0ustar jenkinsjenkins00000000000000{ "associate_host": "testHost" }nova-13.0.0/doc/api_samples/os-deferred-delete/0000775000567000056710000000000012701410205022370 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-deferred-delete/force-delete-post-req.json0000664000567000056710000000003412701407773027406 0ustar jenkinsjenkins00000000000000{ "forceDelete": null } nova-13.0.0/doc/api_samples/os-deferred-delete/restore-post-req.json0000664000567000056710000000002712701407773026535 0ustar jenkinsjenkins00000000000000{ "restore": null }nova-13.0.0/doc/api_samples/flavors/0000775000567000056710000000000012701410205020405 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/flavors/flavors-detail-resp.json0000664000567000056710000000672612701407773025216 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "swap": "", "vcpus": 1 }, { 
"OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "ram": 2048, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 40, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "ram": 4096, "swap": "", "vcpus": 2 }, { "OS-FLV-DISABLED:disabled": false, "disk": 80, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "ram": 8192, "swap": "", "vcpus": 4 }, { "OS-FLV-DISABLED:disabled": false, "disk": 160, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "ram": 16384, "swap": "", "vcpus": 8 } ] } nova-13.0.0/doc/api_samples/flavors/flavors-list-resp.json0000664000567000056710000000447112701407773024722 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny" }, { "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small" }, { "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium" }, { "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large" }, { "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge" } ] }nova-13.0.0/doc/api_samples/flavors/flavor-get-resp.json0000664000567000056710000000116512701407773024340 0ustar jenkinsjenkins00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "swap": "", "vcpus": 1 } } nova-13.0.0/doc/api_samples/os-agents/0000775000567000056710000000000012701410205020631 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-agents/agents-get-resp.json0000664000567000056710000000047612701407773024560 0ustar jenkinsjenkins00000000000000{ "agents": 
[ { "agent_id": 1, "architecture": "x86", "hypervisor": "hypervisor", "md5hash": "add6bb58e139be103324d04d82d8f545", "os": "os", "url": "http://example.com/path/to/resource", "version": "8.0" } ] } nova-13.0.0/doc/api_samples/os-agents/agent-post-resp.json0000664000567000056710000000041512701407773024574 0ustar jenkinsjenkins00000000000000{ "agent": { "agent_id": 1, "architecture": "x86", "hypervisor": "hypervisor", "md5hash": "add6bb58e139be103324d04d82d8f545", "os": "os", "url": "http://example.com/path/to/resource", "version": "8.0" } } nova-13.0.0/doc/api_samples/os-agents/agent-update-put-req.json0000664000567000056710000000023612701407773025516 0ustar jenkinsjenkins00000000000000{ "para": { "url": "http://example.com/path/to/resource", "md5hash": "add6bb58e139be103324d04d82d8f545", "version": "7.0" } } nova-13.0.0/doc/api_samples/os-agents/agent-update-put-resp.json0000664000567000056710000000027012701407773025676 0ustar jenkinsjenkins00000000000000{ "agent": { "agent_id": "1", "md5hash": "add6bb58e139be103324d04d82d8f545", "url": "http://example.com/path/to/resource", "version": "7.0" } } nova-13.0.0/doc/api_samples/os-agents/agent-post-req.json0000664000567000056710000000036612701407773024417 0ustar jenkinsjenkins00000000000000{ "agent": { "hypervisor": "hypervisor", "os": "os", "architecture": "x86", "version": "8.0", "md5hash": "add6bb58e139be103324d04d82d8f545", "url": "http://example.com/path/to/resource" } } nova-13.0.0/doc/api_samples/flavor-extra-specs/0000775000567000056710000000000012701410205022456 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json0000664000567000056710000000003412701407773031160 0ustar jenkinsjenkins00000000000000{ "key1": "new_value1" }nova-13.0.0/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json0000664000567000056710000000003412701407773031342 0ustar jenkinsjenkins00000000000000{ "key1": "new_value1" 
}nova-13.0.0/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json0000664000567000056710000000012112701407773031320 0ustar jenkinsjenkins00000000000000{ "extra_specs": { "key1": "value1", "key2": "value2" } }nova-13.0.0/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json0000664000567000056710000000003012701407773030633 0ustar jenkinsjenkins00000000000000{ "key1": "value1" }nova-13.0.0/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json0000664000567000056710000000012112701407773031030 0ustar jenkinsjenkins00000000000000{ "extra_specs": { "key1": "value1", "key2": "value2" } }nova-13.0.0/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json0000664000567000056710000000012112701407773031136 0ustar jenkinsjenkins00000000000000{ "extra_specs": { "key1": "value1", "key2": "value2" } }nova-13.0.0/doc/api_samples/os-extended-server-attributes/0000775000567000056710000000000012701410205024640 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-extended-server-attributes/v2.3/0000775000567000056710000000000012701410205025330 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-extended-server-attributes/v2.3/server-get-resp.json0000664000567000056710000000437012701407773031301 0ustar jenkinsjenkins00000000000000{ "server": { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T02:55:07Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/flavors/1", "rel": "bookmark" } ] }, "hostId": "3bf189131c61d0e71b0a8686a897a0f50d1693b48c47b721fe77155b", "id": "c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": 
"http://openstack.example.com/v3/servers/c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "rel": "self" }, { "href": "http://openstack.example.com/servers/c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "OS-EXT-SRV-ATTR:host": "c5f474bf81474f9dbbc404d5b2e4e9b3", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:reservation_id": "r-00000001", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:kernel_id": "a5f474bf81474f9dbbc404d5b2e4e9b3", "OS-EXT-SRV-ATTR:ramdisk_id": "b5f474bf81474f9dbbc404d5b2e4e9b3", "OS-EXT-SRV-ATTR:hostname": "fake-hostname", "OS-EXT-SRV-ATTR:root_device_name": "/dev/vda", "OS-EXT-SRV-ATTR:userdata": "fake", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T02:55:08Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-extended-server-attributes/v2.3/servers-detail-resp.json0000664000567000056710000000500112701407773032137 0ustar jenkinsjenkins00000000000000{ "servers": [ { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T02:55:03Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/flavors/1", "rel": "bookmark" } ] }, "hostId": "63cf07a9fd82e1d2294926ec5c0d2e1e0ca449224246df75e16f23dc", "id": "a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v3/servers/a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "rel": "self" }, { "href": "http://openstack.example.com/servers/a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, 
"name": "new-server-test", "OS-EXT-SRV-ATTR:host": "bc8efe4fdb7148a4bb921a2b03d17de6", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:reservation_id": "r-00000001", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:kernel_id": "a5f474bf81474f9dbbc404d5b2e4e9b3", "OS-EXT-SRV-ATTR:ramdisk_id": "b5f474bf81474f9dbbc404d5b2e4e9b3", "OS-EXT-SRV-ATTR:hostname": "fake-hostname", "OS-EXT-SRV-ATTR:root_device_name": "/dev/vda", "OS-EXT-SRV-ATTR:userdata": "fake", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T02:55:05Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/os-extended-server-attributes/server-post-resp.json0000664000567000056710000000073012701407773031013 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "UCvmH8nHXm66", "id": "a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "links": [ { "href": "http://openstack.example.com/v3/servers/a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "rel": "self" }, { "href": "http://openstack.example.com/servers/a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "rel": "bookmark" } ] } } nova-13.0.0/doc/api_samples/os-extended-server-attributes/server-get-resp.json0000664000567000056710000000405612701407773030612 0ustar jenkinsjenkins00000000000000{ "server": { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T02:55:07Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "3bf189131c61d0e71b0a8686a897a0f50d1693b48c47b721fe77155b", "id": "c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, 
"key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "OS-EXT-SRV-ATTR:host": "c5f474bf81474f9dbbc404d5b2e4e9b3", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T02:55:08Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-extended-server-attributes/servers-detail-resp.json0000664000567000056710000000444312701407773031460 0ustar jenkinsjenkins00000000000000{ "servers": [ { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T02:55:03Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "63cf07a9fd82e1d2294926ec5c0d2e1e0ca449224246df75e16f23dc", "id": "a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "OS-EXT-SRV-ATTR:host": 
"bc8efe4fdb7148a4bb921a2b03d17de6", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T02:55:05Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/os-extended-server-attributes/v2.16/0000775000567000056710000000000012701410205025414 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json0000664000567000056710000000467012701407773031370 0ustar jenkinsjenkins00000000000000{ "server": { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T02:55:07Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "3bf189131c61d0e71b0a8686a897a0f50d1693b48c47b721fe77155b", "id": "c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "OS-EXT-SRV-ATTR:host": "c5f474bf81474f9dbbc404d5b2e4e9b3", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:reservation_id": 
"r-12345678", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:kernel_id": null, "OS-EXT-SRV-ATTR:ramdisk_id": null, "OS-EXT-SRV-ATTR:user_data": null, "locked": false, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "progress": 0, "status": "ACTIVE", "host_status": "UP", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T02:55:08Z", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json0000664000567000056710000000532112701407773032230 0ustar jenkinsjenkins00000000000000{ "servers": [ { "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T02:55:03Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "63cf07a9fd82e1d2294926ec5c0d2e1e0ca449224246df75e16f23dc", "id": "a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "OS-EXT-SRV-ATTR:host": "bc8efe4fdb7148a4bb921a2b03d17de6", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:reservation_id": "r-12345678", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:kernel_id": 
null, "OS-EXT-SRV-ATTR:ramdisk_id": null, "OS-EXT-SRV-ATTR:user_data": null, "locked": false, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "progress": 0, "status": "ACTIVE", "host_status": "UP", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T02:55:05Z", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/keypairs/0000775000567000056710000000000012701410205020560 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/keypairs/keypairs-post-req.json0000664000567000056710000000013112701407773025065 0ustar jenkinsjenkins00000000000000{ "keypair": { "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9" } }nova-13.0.0/doc/api_samples/keypairs/v2.2/0000775000567000056710000000000012701410205021247 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/keypairs/v2.2/keypairs-post-req.json0000664000567000056710000000016012701407773025556 0ustar jenkinsjenkins00000000000000{ "keypair": { "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", "type": "ssh" } }nova-13.0.0/doc/api_samples/keypairs/v2.2/keypairs-post-resp.json0000664000567000056710000000450412701407773025746 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", "type": "ssh", "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", "public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", "user_id": "fake" } }nova-13.0.0/doc/api_samples/keypairs/v2.2/keypairs-import-post-resp.json0000664000567000056710000000072512701407773027257 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", "name": "keypair-803a1926-af78-4b05-902a-1d6f7a8d9d3e", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "user_id": "fake" } }nova-13.0.0/doc/api_samples/keypairs/v2.2/keypairs-import-post-req.json0000664000567000056710000000056012701407773027072 0ustar jenkinsjenkins00000000000000{ "keypair": { "name": "keypair-d20a3d59-9433-4b79-8726-20b431d89c78", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova" } }nova-13.0.0/doc/api_samples/keypairs/v2.2/keypairs-list-resp.json0000664000567000056710000000130012701407773025723 0ustar jenkinsjenkins00000000000000{ "keypairs": [ { "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", "type": "ssh", "public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" } } ] }nova-13.0.0/doc/api_samples/keypairs/v2.2/keypairs-get-resp.json0000664000567000056710000000142712701407773025541 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3", "name": "keypair-6638abdb-c4e8-407c-ba88-c8dd7cc3c4f1", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1HTrHCbb9NawNLSV8N6tSa8i637+EC2dA+lsdHHfQlT54t+N0nHhJPlKWDLhc579j87vp6RDFriFJ/smsTnDnf64O12z0kBaJpJPH2zXrBkZFK6q2rmxydURzX/z0yLSCP77SFJ0fdXWH2hMsAusflGyryHGX20n+mZK6mDrxVzGxEz228dwQ5G7Az5OoZDWygH2pqPvKjkifRw0jwUKf3BbkP0QvANACOk26cv16mNFpFJfI1N3OC5lUsZQtKGR01ptJoWijYKccqhkAKuo902tg/qup58J5kflNm7I61sy1mJon6SGqNUSfoQagqtBH6vd/tU1jnlwZ03uUroAL Generated-by-Nova\n", "user_id": "fake", "deleted": false, "created_at": "2014-05-07T12:06:13.681238", "updated_at": null, "deleted_at": null, "id": 1 } } nova-13.0.0/doc/api_samples/keypairs/keypairs-post-resp.json0000664000567000056710000000445512701407773025264 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", "public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", "user_id": "fake" } }nova-13.0.0/doc/api_samples/keypairs/keypairs-import-post-resp.json0000664000567000056710000000067612701407773026575 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", "name": "keypair-803a1926-af78-4b05-902a-1d6f7a8d9d3e", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "user_id": "fake" } }nova-13.0.0/doc/api_samples/keypairs/keypairs-import-post-req.json0000664000567000056710000000053112701407773026401 0ustar jenkinsjenkins00000000000000{ "keypair": { "name": "keypair-d20a3d59-9433-4b79-8726-20b431d89c78", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova" } }nova-13.0.0/doc/api_samples/keypairs/v2.10/0000775000567000056710000000000012701410205021326 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/keypairs/v2.10/keypairs-post-req.json0000664000567000056710000000021412701407773025635 0ustar jenkinsjenkins00000000000000{ "keypair": { "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", "type": "ssh", "user_id": "fake" } } nova-13.0.0/doc/api_samples/keypairs/v2.10/keypairs-post-resp.json0000664000567000056710000000450512701407773026026 0ustar jenkinsjenkins00000000000000{ "keypair": { 
"fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", "type": "ssh", "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", "public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", "user_id": "fake" } } nova-13.0.0/doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json0000664000567000056710000000072512701407773027336 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", "name": "keypair-803a1926-af78-4b05-902a-1d6f7a8d9d3e", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "user_id": "fake" } }nova-13.0.0/doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json0000664000567000056710000000061412701407773027151 0ustar jenkinsjenkins00000000000000{ "keypair": { "name": "keypair-d20a3d59-9433-4b79-8726-20b431d89c78", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "user_id": "fake" } } nova-13.0.0/doc/api_samples/keypairs/v2.10/keypairs-list-resp.json0000664000567000056710000000130012701407773026002 0ustar jenkinsjenkins00000000000000{ "keypairs": [ { "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", "type": "ssh", "public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" } } ] }nova-13.0.0/doc/api_samples/keypairs/v2.10/keypairs-get-resp.json0000664000567000056710000000142712701407773025620 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3", "name": "keypair-6638abdb-c4e8-407c-ba88-c8dd7cc3c4f1", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1HTrHCbb9NawNLSV8N6tSa8i637+EC2dA+lsdHHfQlT54t+N0nHhJPlKWDLhc579j87vp6RDFriFJ/smsTnDnf64O12z0kBaJpJPH2zXrBkZFK6q2rmxydURzX/z0yLSCP77SFJ0fdXWH2hMsAusflGyryHGX20n+mZK6mDrxVzGxEz228dwQ5G7Az5OoZDWygH2pqPvKjkifRw0jwUKf3BbkP0QvANACOk26cv16mNFpFJfI1N3OC5lUsZQtKGR01ptJoWijYKccqhkAKuo902tg/qup58J5kflNm7I61sy1mJon6SGqNUSfoQagqtBH6vd/tU1jnlwZ03uUroAL Generated-by-Nova\n", "user_id": "fake", "deleted": false, "created_at": "2014-05-07T12:06:13.681238", "updated_at": null, "deleted_at": null, "id": 1 } } nova-13.0.0/doc/api_samples/keypairs/keypairs-list-resp.json0000664000567000056710000000124112701407773025240 0ustar jenkinsjenkins00000000000000{ "keypairs": [ { "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" } } ] 
}nova-13.0.0/doc/api_samples/keypairs/keypairs-get-resp.json0000664000567000056710000000140012701407773025041 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3", "name": "keypair-6638abdb-c4e8-407c-ba88-c8dd7cc3c4f1", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1HTrHCbb9NawNLSV8N6tSa8i637+EC2dA+lsdHHfQlT54t+N0nHhJPlKWDLhc579j87vp6RDFriFJ/smsTnDnf64O12z0kBaJpJPH2zXrBkZFK6q2rmxydURzX/z0yLSCP77SFJ0fdXWH2hMsAusflGyryHGX20n+mZK6mDrxVzGxEz228dwQ5G7Az5OoZDWygH2pqPvKjkifRw0jwUKf3BbkP0QvANACOk26cv16mNFpFJfI1N3OC5lUsZQtKGR01ptJoWijYKccqhkAKuo902tg/qup58J5kflNm7I61sy1mJon6SGqNUSfoQagqtBH6vd/tU1jnlwZ03uUroAL Generated-by-Nova\n", "user_id": "fake", "deleted": false, "created_at": "2014-05-07T12:06:13.681238", "updated_at": null, "deleted_at": null, "id": 1 } } nova-13.0.0/doc/api_samples/os-cloudpipe/0000775000567000056710000000000012701410205021334 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-cloudpipe/cloud-pipe-create-req.json0000664000567000056710000000013212701407773026332 0ustar jenkinsjenkins00000000000000{ "cloudpipe": { "project_id": "059f21e3-c20e-4efc-9e7a-eba2ab3c6f9a" } } nova-13.0.0/doc/api_samples/os-cloudpipe/cloud-pipe-update-req.json0000664000567000056710000000014012701407773026350 0ustar jenkinsjenkins00000000000000{ "configure_project": { "vpn_ip": "192.168.1.1", "vpn_port": "2000" } }nova-13.0.0/doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.json0000664000567000056710000000007512701407773026522 0ustar jenkinsjenkins00000000000000{ "instance_id": "1e9b8425-34af-488e-b969-4d46f4a6382e" }nova-13.0.0/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json0000664000567000056710000000056412701407773026041 0ustar jenkinsjenkins00000000000000{ "cloudpipes": [ { "created_at": "2012-11-27T17:18:01Z", "instance_id": "27deecdb-baa3-4a26-9c82-32994b815b01", "internal_ip": "192.168.0.3", "project_id": "fa1765bd-a352-49c7-a6b7-8ee108a3cb0c", "public_ip": "127.0.0.1", 
"public_port": 22, "state": "down" } ] } nova-13.0.0/doc/api_samples/flavor-manage/0000775000567000056710000000000012701410205021450 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/flavor-manage/flavor-create-post-req.json0000664000567000056710000000021112701407773026657 0ustar jenkinsjenkins00000000000000{ "flavor": { "name": "test_flavor", "ram": 1024, "vcpus": 2, "disk": 10, "id": "10" } } nova-13.0.0/doc/api_samples/flavor-manage/flavor-create-post-resp.json0000664000567000056710000000117612701407773027054 0ustar jenkinsjenkins00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 10, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "10", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/10", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/10", "rel": "bookmark" } ], "name": "test_flavor", "ram": 1024, "swap": "", "vcpus": 2 } } nova-13.0.0/doc/api_samples/os-server-groups/0000775000567000056710000000000012701410205022173 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-server-groups/server-groups-post-resp.json0000664000567000056710000000030312701407773027657 0ustar jenkinsjenkins00000000000000{ "server_group": { "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {} } } nova-13.0.0/doc/api_samples/os-server-groups/server-groups-get-resp.json0000664000567000056710000000030312701407773027451 0ustar jenkinsjenkins00000000000000{ "server_group": { "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {} } } nova-13.0.0/doc/api_samples/os-server-groups/v2.13/0000775000567000056710000000000012701410205022744 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-server-groups/v2.13/server-groups-post-resp.json0000664000567000056710000000043012701407773030431 
0ustar jenkinsjenkins00000000000000{ "server_group": { "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {}, "project_id": "6f70656e737461636b20342065766572", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-server-groups/v2.13/server-groups-get-resp.json0000664000567000056710000000043012701407773030223 0ustar jenkinsjenkins00000000000000{ "server_group": { "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {}, "project_id": "6f70656e737461636b20342065766572", "user_id": "fake" } } nova-13.0.0/doc/api_samples/os-server-groups/v2.13/server-groups-list-resp.json0000664000567000056710000000051112701407773030417 0ustar jenkinsjenkins00000000000000{ "server_groups": [ { "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {}, "project_id": "6f70656e737461636b20342065766572", "user_id": "fake" } ] } nova-13.0.0/doc/api_samples/os-server-groups/v2.13/server-groups-post-req.json0000664000567000056710000000013612701407773030252 0ustar jenkinsjenkins00000000000000{ "server_group": { "name": "test", "policies": ["anti-affinity"] } } nova-13.0.0/doc/api_samples/os-server-groups/server-groups-list-resp.json0000664000567000056710000000035412701407773027653 0ustar jenkinsjenkins00000000000000{ "server_groups": [ { "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {} } ] } nova-13.0.0/doc/api_samples/os-server-groups/server-groups-post-req.json0000664000567000056710000000013612701407773027501 0ustar jenkinsjenkins00000000000000{ "server_group": { "name": "test", "policies": ["anti-affinity"] } } nova-13.0.0/doc/api_samples/os-tenant-networks/0000775000567000056710000000000012701410205022513 5ustar 
jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-tenant-networks/networks-list-res.json0000664000567000056710000000047312701407773027046 0ustar jenkinsjenkins00000000000000{ "networks": [ { "cidr": "10.0.0.0/29", "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", "label": "test_0" }, { "cidr": "10.0.0.8/29", "id": "616fb98f-46ca-475e-917e-2563e5a8cd20", "label": "test_1" } ] } nova-13.0.0/doc/api_samples/os-tenant-networks/networks-post-req.json0000664000567000056710000000024512701407773027053 0ustar jenkinsjenkins00000000000000{ "network": { "label": "public", "cidr": "172.0.0.0/24", "vlan_start": 1, "num_networks": 1, "network_size": 255 } }nova-13.0.0/doc/api_samples/os-tenant-networks/networks-post-res.json0000664000567000056710000000021312701407773027050 0ustar jenkinsjenkins00000000000000{ "network": { "cidr": "172.0.0.0/24", "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", "label": "public" } } nova-13.0.0/doc/api_samples/os-security-group-default-rules/0000775000567000056710000000000012701410205025123 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000nova-13.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.jsonnova-13.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp0000664000567000056710000000032112701407773035045 0ustar jenkinsjenkins00000000000000{ "security_group_default_rule": { "from_port": 80, "id": 1, "ip_protocol": "TCP", "ip_range": { "cidr": "10.10.10.0/24" }, "to_port": 80 } }././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.jsonnova-13.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.0000664000567000056710000000024012701407773034741 0ustar jenkinsjenkins00000000000000{ "security_group_default_rule": { "ip_protocol": 
"TCP", "from_port": "80", "to_port": "80", "cidr": "10.10.10.0/24" } }././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.jsonnova-13.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.j0000664000567000056710000000040212701407773035005 0ustar jenkinsjenkins00000000000000{ "security_group_default_rules": [ { "from_port": 80, "id": 1, "ip_protocol": "TCP", "ip_range": { "cidr": "10.10.10.0/24" }, "to_port": 80 } ] }././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.jsonnova-13.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.j0000664000567000056710000000032112701407773035012 0ustar jenkinsjenkins00000000000000{ "security_group_default_rule": { "from_port": 80, "id": 1, "ip_protocol": "TCP", "ip_range": { "cidr": "10.10.10.0/24" }, "to_port": 80 } }nova-13.0.0/doc/api_samples/os-assisted-volume-snapshots/0000775000567000056710000000000012701410205024514 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json0000664000567000056710000000015112701407773033310 0ustar jenkinsjenkins00000000000000{ "snapshot": { "id": 100, "volumeId": "521752a6-acf6-4b2d-bc7a-119f9148cd8c" } }nova-13.0.0/doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json0000664000567000056710000000040412701407773033127 0ustar jenkinsjenkins00000000000000{ "snapshot": { "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", "create_info": { "snapshot_id": "421752a6-acf6-4b2d-bc7a-119f9148cd8c", "type": "qcow2", "new_file": "new_file_name" } } } nova-13.0.0/doc/api_samples/os-evacuate/0000775000567000056710000000000012701410205021145 5ustar 
jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-evacuate/v2.14/0000775000567000056710000000000012701410205021717 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/api_samples/os-evacuate/v2.14/server-evacuate-find-host-req.json0000664000567000056710000000010012701407773030400 0ustar jenkinsjenkins00000000000000{ "evacuate": { "adminPass": "MySecretPass" } } nova-13.0.0/doc/api_samples/os-evacuate/v2.14/server-evacuate-req.json0000664000567000056710000000013412701407773026516 0ustar jenkinsjenkins00000000000000{ "evacuate": { "host": "testHost", "adminPass": "MySecretPass" } } nova-13.0.0/doc/api_samples/os-evacuate/server-evacuate-find-host-req.json0000664000567000056710000000014412701407773027636 0ustar jenkinsjenkins00000000000000{ "evacuate": { "adminPass": "MySecretPass", "onSharedStorage": "False" } } nova-13.0.0/doc/api_samples/os-evacuate/server-evacuate-find-host-resp.json0000664000567000056710000000004412701407773030017 0ustar jenkinsjenkins00000000000000{ "adminPass": "MySecretPass" } nova-13.0.0/doc/api_samples/os-evacuate/server-evacuate-resp.json0000664000567000056710000000004412701407773026126 0ustar jenkinsjenkins00000000000000{ "adminPass": "MySecretPass" } nova-13.0.0/doc/api_samples/os-evacuate/server-evacuate-req.json0000664000567000056710000000023012701407773025741 0ustar jenkinsjenkins00000000000000{ "evacuate": { "host": "b419863b7d814906a68fb31703c0dbd6", "adminPass": "MySecretPass", "onSharedStorage": "False" } } nova-13.0.0/doc/ext/0000775000567000056710000000000012701410205015234 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/ext/__init__.py0000664000567000056710000000000012701407773017353 0ustar jenkinsjenkins00000000000000nova-13.0.0/doc/ext/support_matrix.py0000664000567000056710000004446312701407773020741 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This provides a sphinx extension able to render the source/support-matrix.ini file into the developer documentation. It is used via a single directive in the .rst file .. support_matrix:: """ import re import six from six.moves import configparser from docutils import nodes from docutils.parsers import rst class SupportMatrix(object): """Represents the entire support matrix for Nova virt drivers """ def __init__(self): # List of SupportMatrixFeature instances, describing # all the features present in Nova virt drivers self.features = [] # Dict of (name, SupportMatrixTarget) enumerating # all the hypervisor drivers that have data recorded # for them in self.features. The 'name' dict key is # the value from the SupportMatrixTarget.key attribute self.targets = {} class SupportMatrixFeature(object): STATUS_MANDATORY = "mandatory" STATUS_CHOICE = "choice" STATUS_CONDITION = "condition" STATUS_OPTIONAL = "optional" STATUS_ALL = [STATUS_MANDATORY, STATUS_CHOICE, STATUS_CONDITION, STATUS_OPTIONAL] def __init__(self, key, title, status=STATUS_OPTIONAL, group=None, notes=None, cli=[]): # A unique key (eg 'foo.bar.wizz') to identify the feature self.key = key # A human friendly short title for the feature self.title = title # One of the status constants self.status = status # Detail string if status was choice/condition self.group = group # Arbitrarily long string describing the feature in detail self.notes = notes # Dict of (name, SupportMatrixImplementation) detailing # the implementation for each hypervisor driver. 
The # 'name' dict key is the value from SupportMatrixTarget.key # for the hypervisor in question self.implementations = {} # A list of CLI commands which are related to that feature self.cli = cli class SupportMatrixImplementation(object): STATUS_COMPLETE = "complete" STATUS_PARTIAL = "partial" STATUS_MISSING = "missing" STATUS_UKNOWN = "unknown" STATUS_ALL = [STATUS_COMPLETE, STATUS_PARTIAL, STATUS_MISSING, STATUS_UKNOWN] def __init__(self, status=STATUS_MISSING, notes=None): # One of the status constants detailing the implementation # level self.status = status # Arbitrary string describing any caveats of the implementation. # Mandatory if status is 'partial', optional otherwise. self.notes = notes class SupportMatrixTarget(object): def __init__(self, key, title, driver, hypervisor=None, architecture=None): """:param key: Unique identifier for the hypervisor driver :param title: Human friendly name of the hypervisor :param driver: Name of the Nova driver :param hypervisor: (optional) Name of the hypervisor, if many :param architecture: (optional) Name of the architecture, if many """ self.key = key self.title = title self.driver = driver self.hypervisor = hypervisor self.architecture = architecture class SupportMatrixDirective(rst.Directive): # The argument is the filename, e.g. support-matrix.ini required_arguments = 1 def run(self): matrix = self._load_support_matrix() return self._build_markup(matrix) def _load_support_matrix(self): """Reads the support-matrix.ini file and populates an instance of the SupportMatrix class with all the data. 
:returns: SupportMatrix instance """ cfg = configparser.SafeConfigParser() env = self.state.document.settings.env fname = self.arguments[0] rel_fpath, fpath = env.relfn2path(fname) with open(fpath) as fp: cfg.readfp(fp) # This ensures that the docs are rebuilt whenever the # .ini file changes env.note_dependency(rel_fpath) matrix = SupportMatrix() matrix.targets = self._get_targets(cfg) matrix.features = self._get_features(cfg, matrix.targets) return matrix def _get_targets(self, cfg): # The 'targets' section is special - it lists all the # hypervisors that this file records data for targets = {} for item in cfg.options("targets"): if not item.startswith("driver-impl-"): continue # The driver string will optionally contain # a hypervisor and architecture qualifier # so we expect between 1 and 3 components # in the name key = item[12:] title = cfg.get("targets", item) name = key.split("-") if len(name) == 1: target = SupportMatrixTarget(key, title, name[0]) elif len(name) == 2: target = SupportMatrixTarget(key, title, name[0], name[1]) elif len(name) == 3: target = SupportMatrixTarget(key, title, name[0], name[1], name[2]) else: raise Exception("'%s' field is malformed in '[%s]' section" % (item, "DEFAULT")) targets[key] = target return targets def _get_features(self, cfg, targets): # All sections except 'targets' describe some feature of # the Nova hypervisor driver implementation features = [] for section in cfg.sections(): if section == "targets": continue if not cfg.has_option(section, "title"): raise Exception( "'title' field missing in '[%s]' section" % section) title = cfg.get(section, "title") status = SupportMatrixFeature.STATUS_OPTIONAL if cfg.has_option(section, "status"): # The value is a string "status(group)" where # the 'group' part is optional status = cfg.get(section, "status") offset = status.find("(") group = None if offset != -1: group = status[offset + 1:-1] status = status[0:offset] if status not in SupportMatrixFeature.STATUS_ALL: raise 
Exception( "'status' field value '%s' in ['%s']" "section must be %s" % (status, section, ",".join(SupportMatrixFeature.STATUS_ALL))) notes = None if cfg.has_option(section, "notes"): notes = cfg.get(section, "notes") cli = [] if cfg.has_option(section, "cli"): cli = cfg.get(section, "cli") feature = SupportMatrixFeature(section, title, status, group, notes, cli) # Now we've got the basic feature details, we must process # the hypervisor driver implementation for each feature for item in cfg.options(section): if not item.startswith("driver-impl-"): continue key = item[12:] if key not in targets: raise Exception( "Driver impl '%s' in '[%s]' not declared" % (item, section)) status = cfg.get(section, item) if status not in SupportMatrixImplementation.STATUS_ALL: raise Exception( "'%s' value '%s' in '[%s]' section must be %s" % (item, status, section, ",".join(SupportMatrixImplementation.STATUS_ALL))) noteskey = "driver-notes-" + item[12:] notes = None if cfg.has_option(section, noteskey): notes = cfg.get(section, noteskey) target = targets[key] impl = SupportMatrixImplementation(status, notes) feature.implementations[target.key] = impl for key in targets: if key not in feature.implementations: raise Exception("'%s' missing in '[%s]' section" % (target.key, section)) features.append(feature) return features def _build_markup(self, matrix): """Constructs the docutils content for the support matrix """ content = [] self._build_summary(matrix, content) self._build_details(matrix, content) self._build_notes(content) return content def _build_summary(self, matrix, content): """Constructs the docutils content for the summary of the support matrix. The summary consists of a giant table, with one row for each feature, and a column for each hypervisor driver. 
It provides an 'at a glance' summary of the status of each driver """ summarytitle = nodes.subtitle(text="Summary") summary = nodes.table() cols = len(matrix.targets.keys()) cols += 2 summarygroup = nodes.tgroup(cols=cols) summarybody = nodes.tbody() summaryhead = nodes.thead() for i in range(cols): summarygroup.append(nodes.colspec(colwidth=1)) summarygroup.append(summaryhead) summarygroup.append(summarybody) summary.append(summarygroup) content.append(summarytitle) content.append(summary) # This sets up all the column headers - two fixed # columns for feature name & status header = nodes.row() blank = nodes.entry() blank.append(nodes.emphasis(text="Feature")) header.append(blank) blank = nodes.entry() blank.append(nodes.emphasis(text="Status")) header.append(blank) summaryhead.append(header) # then one column for each hypervisor driver impls = matrix.targets.keys() impls.sort() for key in impls: target = matrix.targets[key] implcol = nodes.entry() header.append(implcol) implcol.append(nodes.strong(text=target.title)) # We now produce the body of the table, one row for # each feature to report on for feature in matrix.features: item = nodes.row() # the hyperlink target name linking to details id = re.sub("[^a-zA-Z0-9_]", "_", feature.key) # first the to fixed columns for title/status keycol = nodes.entry() item.append(keycol) keyref = nodes.reference(refid=id) keytxt = nodes.inline() keycol.append(keytxt) keytxt.append(keyref) keyref.append(nodes.strong(text=feature.title)) statuscol = nodes.entry() item.append(statuscol) statuscol.append(nodes.inline( text=feature.status, classes=["sp_feature_" + feature.status])) # and then one column for each hypervisor driver impls = matrix.targets.keys() impls.sort() for key in impls: target = matrix.targets[key] impl = feature.implementations[key] implcol = nodes.entry() item.append(implcol) id = re.sub("[^a-zA-Z0-9_]", "_", feature.key + "_" + key) implref = nodes.reference(refid=id) impltxt = nodes.inline() 
implcol.append(impltxt) impltxt.append(implref) status = "" if impl.status == SupportMatrixImplementation.STATUS_COMPLETE: status = u"\u2714" elif impl.status == SupportMatrixImplementation.STATUS_MISSING: status = u"\u2716" elif impl.status == SupportMatrixImplementation.STATUS_PARTIAL: status = u"\u2714" elif impl.status == SupportMatrixImplementation.STATUS_UKNOWN: status = u"?" implref.append(nodes.literal( text=status, classes=["sp_impl_summary", "sp_impl_" + impl.status])) summarybody.append(item) def _build_details(self, matrix, content): """Constructs the docutils content for the details of the support matrix. This is generated as a bullet list of features. Against each feature we provide the description of the feature and then the details of the hypervisor impls, with any driver specific notes that exist """ detailstitle = nodes.subtitle(text="Details") details = nodes.bullet_list() content.append(detailstitle) content.append(details) # One list entry for each feature we're reporting on for feature in matrix.features: item = nodes.list_item() status = feature.status if feature.group is not None: status += "(" + feature.group + ")" # The hypervisor target name linked from summary table id = re.sub("[^a-zA-Z0-9_]", "_", feature.key) # Highlight the feature title name item.append(nodes.strong(text=feature.title, ids=[id])) para = nodes.paragraph() para.append(nodes.strong(text="Status: " + status + ". 
")) if feature.notes is not None: para.append(nodes.inline(text=feature.notes)) item.append(para) if feature.cli: item.append(self._create_cli_paragraph(feature)) para_divers = nodes.paragraph() para_divers.append(nodes.strong(text="drivers:")) # A sub-list giving details of each hypervisor target impls = nodes.bullet_list() for key in feature.implementations: target = matrix.targets[key] impl = feature.implementations[key] subitem = nodes.list_item() id = re.sub("[^a-zA-Z0-9_]", "_", feature.key + "_" + key) subitem += [ nodes.strong(text=target.title + ": "), nodes.literal(text=impl.status, classes=["sp_impl_" + impl.status], ids=[id]), ] if impl.notes is not None: subitem.append(self._create_notes_paragraph(impl.notes)) impls.append(subitem) para_divers.append(impls) item.append(para_divers) details.append(item) def _build_notes(self, content): """Constructs a list of notes content for the support matrix. This is generated as a bullet list. """ notestitle = nodes.subtitle(text="Notes") notes = nodes.bullet_list() content.append(notestitle) content.append(notes) NOTES = [ "Virtuozzo was formerly named Parallels in this document" ] for note in NOTES: item = nodes.list_item() item.append(nodes.strong(text=note)) notes.append(item) def _create_cli_paragraph(self, feature): ''' Create a paragraph which represents the CLI commands of the feature The paragraph will have a bullet list of CLI commands. ''' para = nodes.paragraph() para.append(nodes.strong(text="CLI commands:")) commands = nodes.bullet_list() for c in feature.cli.split(";"): cli_command = nodes.list_item() cli_command += nodes.literal(text=c, classes=["sp_cli"]) commands.append(cli_command) para.append(commands) return para def _create_notes_paragraph(self, notes): """ Constructs a paragraph which represents the implementation notes The paragraph consists of text and clickable URL nodes if links were given in the notes. 
""" para = nodes.paragraph() # links could start with http:// or https:// link_idxs = [m.start() for m in re.finditer('https?://', notes)] start_idx = 0 for link_idx in link_idxs: # assume the notes start with text (could be empty) para.append(nodes.inline(text=notes[start_idx:link_idx])) # create a URL node until the next text or the end of the notes link_end_idx = notes.find(" ", link_idx) if link_end_idx == -1: # In case the notes end with a link without a blank link_end_idx = len(notes) uri = notes[link_idx:link_end_idx + 1] para.append(nodes.reference("", uri, refuri=uri)) start_idx = link_end_idx + 1 # get all text after the last link (could be empty) or all of the # text if no link was given para.append(nodes.inline(text=notes[start_idx:])) return para def setup(app): app.add_directive('support_matrix', SupportMatrixDirective) app.add_stylesheet('support-matrix.css') nova-13.0.0/doc/ext/nova_todo.py0000664000567000056710000000647112701407773017626 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # This is a hack of the builtin todo extension, to make the todo_list # more user friendly. from sphinx.ext.todo import * import re def _(s): return s def process_todo_nodes(app, doctree, fromdocname): if not app.config['todo_include_todos']: for node in doctree.traverse(todo_node): node.parent.remove(node) # Replace all todolist nodes with a list of the collected todos. # Augment each todo with a backlink to the original location. 
env = app.builder.env if not hasattr(env, 'todo_all_todos'): env.todo_all_todos = [] # remove the item that was added in the constructor, since I'm tired of # reading through docutils for the proper way to construct an empty list lists = [] for i in range(5): lists.append(nodes.bullet_list("", nodes.Text('', ''))) lists[i].remove(lists[i][0]) lists[i]['classes'].append('todo_list') for node in doctree.traverse(todolist): if not app.config['todo_include_todos']: node.replace_self([]) continue for todo_info in env.todo_all_todos: para = nodes.paragraph() # Create a reference newnode = nodes.reference('', '') filename = env.doc2path(todo_info['docname'], base=None) link = (_('%(filename)s, line %(line_info)d') % {'filename': filename, 'line_info': todo_info['lineno']}) innernode = nodes.emphasis(link, link) newnode['refdocname'] = todo_info['docname'] try: newnode['refuri'] = app.builder.get_relative_uri( fromdocname, todo_info['docname']) newnode['refuri'] += '#' + todo_info['target']['refid'] except NoUri: # ignore if no URI can be determined, e.g. 
for LaTeX output pass newnode.append(innernode) para += newnode para['classes'].append('todo_link') todo_entry = todo_info['todo'] env.resolve_references(todo_entry, todo_info['docname'], app.builder) item = nodes.list_item('', para) todo_entry[1]['classes'].append('details') comment = todo_entry[1] m = re.match(r"^P(\d)", comment.astext()) priority = 5 if m: priority = int(m.group(1)) if priority < 0: priority = 1 if priority > 5: priority = 5 item['classes'].append('todo_p' + str(priority)) todo_entry['classes'].append('todo_p' + str(priority)) item.append(comment) lists[priority - 1].insert(0, item) node.replace_self(lists) def setup(app): app.add_config_value('todo_include_todos', False, False) app.add_node(todolist) app.add_node(todo_node, html=(visit_todo_node, depart_todo_node), latex=(visit_todo_node, depart_todo_node), text=(visit_todo_node, depart_todo_node)) app.add_directive('todo', Todo) app.add_directive('todolist', TodoList) app.connect('doctree-read', process_todos) app.connect('doctree-resolved', process_todo_nodes) app.connect('env-purge-doc', purge_todos) nova-13.0.0/doc/ext/versioned_notifications.py0000664000567000056710000000662112701407773022562 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This provides a sphinx extension able to list the implemented versioned notifications into the developer documentation. It is used via a single directive in the .rst file .. 
versioned_notifications:: """ from sphinx.util.compat import Directive from docutils import nodes from nova.objects import base from nova.objects import notification def full_name(cls): return cls.__module__ + '.' + cls.__name__ class VersionedNotificationDirective(Directive): LINK_PREFIX = 'https://git.openstack.org/cgit/openstack/nova/plain/' SAMPLE_ROOT = 'doc/notification_samples/' def run(self): notifications = self._collect_notifications() return self._build_markup(notifications) def _collect_notifications(self): notifications = [] ovos = base.NovaObjectRegistry.obj_classes() for name, cls in ovos.items(): cls = cls[0] if (issubclass(cls, notification.NotificationBase) and cls != notification.NotificationBase): payload_name = cls.fields['payload'].objname payload_cls = ovos[payload_name][0] notifications.append((full_name(cls), full_name(payload_cls), cls.sample)) return notifications def _build_markup(self, notifications): content = [] cols = ['Notification class', 'Payload class', 'Sample file link'] table = nodes.table() content.append(table) group = nodes.tgroup(cols=len(cols)) table.append(group) head = nodes.thead() group.append(head) for i in range(len(cols)): group.append(nodes.colspec(colwidth=1)) body = nodes.tbody() group.append(body) # fill the table header row = nodes.row() body.append(row) for col_name in cols: col = nodes.entry() row.append(col) text = nodes.strong(text=col_name) col.append(text) # fill the table content, one notification per row for name, payload, sample in notifications: row = nodes.row() body.append(row) col = nodes.entry() row.append(col) text = nodes.literal(text=name) col.append(text) col = nodes.entry() row.append(col) text = nodes.literal(text=payload) col.append(text) col = nodes.entry() row.append(col) ref = nodes.reference(refuri=self.LINK_PREFIX + self.SAMPLE_ROOT + sample) txt = nodes.inline() col.append(txt) txt.append(ref) ref.append(nodes.literal(text=sample)) return content def setup(app): 
app.add_directive('versioned_notifications', VersionedNotificationDirective) nova-13.0.0/doc/README.rst0000664000567000056710000000065612701407773016152 0ustar jenkinsjenkins00000000000000OpenStack Nova Documentation README =================================== Both contributor developer documentation and REST API documentation are sourced here. Contributor developer docs are built to: http://docs.openstack.org/developer/nova/ API guide docs are built to: http://developer.openstack.org/api-guide/compute/ For more details, see the "Building the Documentation" section of doc/source/development.environment.rst. nova-13.0.0/doc/notification_samples/0000775000567000056710000000000012701410205020646 5ustar jenkinsjenkins00000000000000nova-13.0.0/doc/notification_samples/service-update.json0000664000567000056710000000114112701407773024476 0ustar jenkinsjenkins00000000000000{ "priority": "INFO", "payload": { "nova_object.namespace": "nova", "nova_object.name": "ServiceStatusPayload", "nova_object.version": "1.0", "nova_object.data": { "host": "host1", "disabled": false, "last_seen_up": "2012-10-29T13:42:05Z", "binary": "nova-compute", "topic": "compute", "disabled_reason": null, "report_count": 1, "forced_down": false, "version": 9 } }, "event_type": "service.update", "publisher_id": "nova-compute:host1" } nova-13.0.0/LICENSE0000664000567000056710000002363712701407773014727 0ustar jenkinsjenkins00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
nova-13.0.0/run_tests.sh0000775000567000056710000000152512701407773016277 0ustar jenkinsjenkins00000000000000#!/bin/bash set -eu cat < REST * Wrong spelling of defined * libvirt: fix typo in test_init_host_migration_flags * docs: update refs to mitaka release schedule * doc: add how to arrange order of scheduler filters * libvirt: only get instance.flavor if needed in get_disk_mapping * Replace backtick with apostrophe in lazy-loading debug log * libvirt: fix TypeError in find_disk_dev_for_disk_bus * Fix RPC revision log entry for 4.6 * signature_utils: move to explicit image metadata * Unreference mocks are listed in the wrong order * remove API v1.1 from testing * remove /v1.1 from default paste.ini * libvirt: verify cpu bw policy capability for host * Implements proper UUID format for test_compute_cells and test_compute_utils * Add the missing return value in the comment * Updated from global requirements * xen: block BootableTestCase from py34 testing * Modify conductor to use RequestSpec object * db: querry to retrieve all pci device by parent address * db: adding columns to PciDevice table * Replace except Exception with specific exception * pci: minor fix to exception message format * Python 3 deprecated the logger.warn method in favor of warning * Check added for mandatory parameter size in schema * Remove redundant driver initialization in test * enginefacade: 'instance_metadata' * Misspelling in messages * Add lock to host-state consumption * Add lock to scheduler host state updating * Allow virt driver to define binding:host_id * [python3] Webob request body should be bytes * Replace copy.deepcopy of RequestContext with copy.copy * DriverBlockDevice must receive a BDM object, not a dict * Misspelling in message * Wrong usage of "a" * Remove unused logging import and LOG global var * Reduce the number of db/rpc calls to get instance rules * Use is_supported() to check microversion * SameHostFilter should fail if host does not have instances * VMware: add 
method for getting hosts attached to datastore * Trivial: Fix wrong comment in service version * signature_utils: handle ECC curve unavailability * Updated from global requirements * tests: Remove duplicate check * enginefacade: 'bw_usage', 'vol_usage' and 's3_image' * VMware: improve instance names on VC * VMware: add in folder support on VC * VMware: cleanup unit test global variable * signature_utils: refactor the list of ECC curves * Nuke EC2 API from api-paste and remove wsgi support * Remove cruft for things o.vo handles * Make scheduler_hints schema allow list of id * Change logging level for 'oslo_db' * Remove unused compute_api in ServerUsageController * network: Don't repopulate instance info cache from Neutron ports * Fix doc comment for get_available_resource * objects: lazy-load instance.security_groups more efficiently * VMware: cleanup unit tests * Use SpawnIsSynchronousFixture in most unit tests * Use stub_out and mock to remove mox: part 1 * Disable the in tree EC2 API by default * deprecate old glance config options * remove temporary GlanceEndpoint object * convert GlanceClientWrapper to endpoint * Use stub_out and mock to remove mox: part 2 * Add a compute API to trigger crash dump in instance * Make libvirt driver return migrate data objects for source and dest checks * Use TimeFixture from oslo_utils to override time in tests * enginefacade: 'vif' and 'task_log' * review guide: add location details for config options * libvirt: wrapper list_guests to Host's object * remove vestigial XML_NS_V11 variable * remove unused EXTENSION_DESERIALIZE_* constants * config options: Centralise 'virt.ironic' options * remove unused pipeline_factory_v3 alias * remove unused methods from integrated_helpers test class * remove unused extends_name attribute * Add upload/download vhd2 interfaces * Replace unicode with six.text_type * conductor: fix unbound local variable request_spec * Use just ids in all request templates for flavors/images * extract non 
instance methods * remove unused trigger_handler * remove unused update_dhcp_hostfile_with_text method * remove nova-cert from most functional tests * enginefacade: 'migration' * XenAPI: Fix race in rotate_xen_guest_logs * libvirt: introduce "pause" to Guest's object * libvirt: introduce "shutdown" to Guest's object * libvirt: introduce "snapshot" to Guest's object * libvirt: introduce thaw filesystems * libvirt: introduce freeze filesystems * libvirt: replace direct libvirt's call AbortJobBlock * Allow to update 'v2.1' links in sample files * Do not update links for 'versions' tests * centeralized conf:compute/emphemeral_storage_encryption * Add instance.save() when handling reboot in init instance * Add transitional support for migrate data objects to compute manager * Implements proper UUID format for few objects tests * Filter by leased=False when allocating fixed IPs * Increase informations in nova-net warnings * docs: add concept guide for certificate * Fix reclaim_instance_interval < 0 never delete instance completely * Updated from global requirements * Add placeholders for config options * Implements proper UUID format for the fake_network * Refresh stale volume BDMs in terminate_connection * Block requests 2.9.0 * Implements proper UUID format for the test_compute_api * Remove onSharedStorage from evacuate API * Fix CPU pinning for odd number of CPUs w hyperthreading * hardware: stop using instance cell topology in CPU pinning logic * Check context before returning cached value * deprecate run_tests.sh * remove archaic references to XML in api * simplify the request / response format document * Add signature_utils module * Remove XML description from extension concept * remove ctype from classes * Remove cells service from api samples that don't test cells * Add uuidsentinel test module * Remove the wrong usage of api_major_version in api sample tests * Updated from global requirements * Fix wrong method name in doc filter_scheduler * doc: update 
threading.rst * Makes GET extension info sample tests run for v2 also * update api_samples code to use better variables * Remove incorrect comments about file injection * Remove a restriction on injection files * Remove unnecessary log when search servers * Deprecated tox -downloadcache option removed * rework warning messages for extension whitelist/blacklist * Make sure bdm.volume_id is set after auto-creating volumes * Replace safe_utils.getcallargs with inspect.getcallargs * Fix wrap_exception to get all arguments for payload * Add hypervisor, aggregates, migration description * retool xen glance plugin to work with urls * always create clients with GlanceEndpoint * Implement GlanceEndpoint object * Clean up glance url handling * Use RequestSpec in the ChanceScheduler * Modify left filters for RequestSpec * Modify NUMA, PCI and num_instances filters for RequestSpec * Improve inject_nmi() in libvirt driver and add tests * Report compute-api bugs against nova * XenAPI: Expose labels for ephemeral disks * Fix use of safeutils.getcallargs * Cache SecurityGroupAPI results from neutron multiplexer * Remove the executable bit from several python files * Optimize _cleanup_incomplete_migrations periodic task * [Py34] api.openstack.compute.legacy_v2.test_servers.Base64ValidationTest * [Py34] api.openstack.test_faults.TestFaultWrapper * [Py34] Enable api.openstack.test_wsgi unit test * default host to service name instead of uuid * Remove start_service calls from the test case * Add SIGHUP handlers for compute rpcapi to console and conductor * Cache the automatic version pin to avoid repeated lookups * virt: allow for direct mounting of LocalBlockImages * Use testscenarios to set attributes directly * update API samples to use endpoints * Updated from global requirements * Add project-id and user-id when list server-groups * Fixes Python 3 compatibility for filter results * Remove duplicate default=None for option compute_available_monitors * Disable IPv6 on bridge 
devices * Don't load deleted instances * Improve Filter Scheduler doc clarity * libvirt: report pci Type-PF type even when VFs are disabled * Remove deprecated neutron auth options * Fix capitalization of IP * Add separated section for configure guest os * Add separated section for extra specs and image properties * Add a note about fixing "db type could not be determined" with py34 * neutron: skip test_deallocate_for_instance_2* in py34 job * tighten regex on objectify * Replace os.path.join() for URLs * Add hv testing for ImageMetaProps._legacy_property_map * Edit the text to be more native-English sounding * docs: add test strategy and feature classification * Fix the endpoint of /v2 on concept doc * Drop JSON decoding for supported_instances * docs: update old stuff in version section * Scheduler: honor the glance metadata for hypervisor details * Implements proper UUID format for the ComputeAPITestCase * docs: add microversions description in the concept doc * Make admin consistent * Add more concepts for servers * Make "ReSTful service" consistent * Add retry logic for detaching device using LibVirt * Fix Exception message consistency with input protocol * Remove SQLite BigInteger/Integer translation logic * xen: Drop JSON for supported_instances * vmware: Drop JSON for supported_instances * ironic: Drop JSON for supported_instances * hyperv: Drop JSON for supported_instances * libvirt: Drop JSON for supported_instances * Drop JSON for stats in virt API * Replaces izip_longest with six.moves.zip_longest * Fixes dict keys and items references for Python 3 * Scheduler: correct control flow when forcing host * Replaces longs with ints * neutron: only get port id when listing ports in validate_networks * neutron: only list ports if there is a quota limit when validating * Add reviewing point related to REST API * Revert "Enable options for oslo.reports" * Fix wrong CPU metric value in metrics_filter * Reset the compute_rpcapi in Compute manager on SIGHUP * Remove 
the unused sginfo rootwrap filter * docs: ensure third party tests pass before +2 * Config options: centralize section "scheduler" * add api-samples tox target * Remove Instance object flavor helper methods only used in tests * Remove unnecessary extra instance saves during resize * docs: using the correct format and real world example for fault message * VMware: cleanup ExtraSpecs * Remove HTTPRequestEntityTooLarge usage in test * Enables py3 unit tests for libvirt.host module * Replaces __builtin__ with six.moves.builtins * Converting nova.virt.hyperv to py3 * Hyper-V: removes *Utils modules and unit tests * docs: update services description for concept guide * docs: remove duplicated section about error handling * Remove Useless element in migrate_server shcema * Optimize "open" method with context manager * trivial: Add some logs to 'numa_topology_filter' * Updated from global requirements * Docs: update the concept guide for Host topics * Cleanup of compute api reboot method * Hyper-V: adds os-win library * Remove description about image from faults section * api-guide: add note about users * Updated from global requirements * xenapi: Add helper function and unit tests for client session * Config options: centralize section "scheduler" * Ironic: Workaround to mitigate bug #1341420 * Libvirt: Support fp plug in vhostuser vif * Remove version from setup.cfg 13.0.0.0b1 ---------- * Add note for automatic determination of compute_rpc version by service * Add note for Virtuozzo supporting snapshots * Add note for HyperV 2008 drop of support * Imported Translations from Zanata * Add note for removing conductor RPC API v2 * Add note for dropping InstanceV1 objects * Add note for force_config_drive opt change * Add note for deprecating local conductor * Revert "Detach volume after deleting instance with no host" * force releasenotes warnings to be treated as errors * Fix reno warning for API DB relnote * Adding a new vnic_type for Ironic/Neutron/Nova integration * Use 
o.vo DictOfListOfStringsField * libvirt: remove todo note not useful anymore * Modify metric-related filters for RequestSpec * Modify default filters for RequestSpec * servicegroup: stop zombie service due to exception * Add persistence to the RequestSpec object * Updated from global requirements * add hacking check for config options location * Correct some nits for moving servers in concept doc * use graduated oslo.policy * TrivialFix: remove 'deleted' flag * Make server concept guide use 'server' consistently * api-guide: fix up navigation bar * Use version convert methods from oslo_utils.versionutils * docs: reorder move servers text * docs: add clarifications to move servers * Change some wording on server_concepts.rst * Cleanup unused test code in test_scheduler.py * Modify Aggregate filters for RequestSpec * Add code-review devref for release notes * Hyper-V: refines the exceptions raised in the driver * Use o.vo FlexibleBooleanField * docs: describe migration and other movement concepts * Double 'an' in message * Unify on _schedule_instances * Add review guideline to microversion API * Remove the TestRemoteObject class * Catch FixedIpNotFoundForAddress when create server * doc: add server status to concept.rst * docs: update the concept guide shelve actions * Fixed incorrect name of 'tag' and 'tag-any' filters * Fix resource tracker VCPU counting * Add relnote for change in default setting * use NoDBTestCase for KeypairPolicyTest * doc: change policies.rst to indicate API links * Remove useless code in _poll_volume_usage function * Neutron: add logging context * Remove unused param of CertificatesController * Add user data into general concept * Fix a typo in api-guide doc * Make some classes inherit from NoDBTestCase * XenAPI: Workaround for 6.5 iSCSI bug * NFS setup for live-migration job * Fix ebtables-version release note * config options: enhance help text of section "serial_console" * Updating nova config-reference doc * Updated from global 
requirements * Prevent redundant instance.update notifications * VMware: fix docstring for cluster management * api: remove re-declared type in migrate schema * enginefacade: 'agent' and 'action' * config options: centralize section "serial_console" * Replaced private field in get_session/engine with public method * SR-IOV: Improve the vnic type check in the neutron api * Simplified boolean variable check * update connect_volume test * Enable options for oslo.reports * Reverse sort tables before archiving * scheduler: fix incorrect log message * Updated from global requirements * Add release note for API DB migration requirements * Replaced deprecated timeutils methods * Multinode job for live-migration * Use o.vo VersionPredicateField * Use flavor instead of flavour * Corrected few grammatical nitpics * Add more 'actions' for server concepts doc * libvirt: mlnx_direct vif type removal * xen: mask passwords in volume connection_data dict * Updated from global requirements * Use --concurrent with ebtables * Removed extra spaces from double line strings * Change test function name to make more sense * Change Invalid exception to a specified exception * Add 'lxd' to the list of recognized hypervisors * Add microversions schema unit test for None * Clean up legacy multi-version test constructs * Fix Nova's indirection fixture override * Remove skips for resize tests from tempest-dsvm-cells-rc * Modify Affinity filter for RequestSpec * Prepare filters for using RequestSpec object * Use ServiceList object rather than direct db call * Add relnote for ERT deprecation * Remove IN-predicate warnings * docs: update the API faults concept guide * Deprecate nova-manage service subcommand * Double detach volume causes server fault * Use JSON format instead of json format * Network: add in missing translation * cells is a sad panda about scheduler hints * VMware: expand support for Opaque networks * Fix is_volume_backed_instance() for unset image_ref * Add _LE to LOG.error 
statement in nova/service * Add service records for nova-api services * Added method is_supported to check API microversions * enginefacade: 'host_mapping' * Removes support for Hyper-V Server 2008 R2 * Fix the bug of "Error spelling of 'explicitely'" * Claims: fix log message * Fix paths for api-guide build * Remove flavors.get_flavor() only used in tests * VMware: Raise DiskNotFound for missing disk device * Remove two unneeded db lookups during delete of a resizing instance * Fix pci_stats logging in resource tracker * live-mig: Mark migration as failed on fail to schedule * Move the Migration set-status-if-exists pattern to a method * Don't track migrations in 'accepted' state * live-migrate: Change the status Migration is created with * compute: split check_can_live_migrate_destination * Replace N block_device_mapping queries with 1 * Add "unreleased" release notes page * Add reno for release notes management * XenAPI: Correct hypervisor type in Horizon's admin view * Fix typo in test_post_select_populate * Rearranges to create new Compute API Guide * Added CORS support to Nova * Aggregate Extra Specs Filter should return if extra_specs is empty * cells: skip 5 networking scenario tests that use floating IPs * force_config_drive: StrOpt -> BoolOpt * Updated from global requirements * Add test coverage for both types of not-found-ness in neutronclient for floating * Fix impotent _poll_shelved_instances tests * Fix race in _poll_shelved_instances task * Handle a NeutronClientException 404 Error for floating ips * Handle DB failures in servicegroup DB driver * Hook for live-migration job * Omit RescheduledException in instance_fault.message * Remove duplicate server.kill on test shutdown * make the driver.Scheduler as abstract class * Fix a spelling mistake in the log * objects: remove remote_object_calls from _BaseTestCase * Repair and rename test_is_volume_backed_instance_no_bdms() * Use ObjectVersionChecker fixture from oslo.versionedobjects * VMware: add in 
vif resource limitations * Untie subobject versions * Block oslo.messaging 2.8.0 * Split up test_is_volume_backed_instance() into five functions * Avoid the dual-naming confusion * enginefacade: 'provider_fw', 'console_pool' and 'console' * enginefacade: 'network' * clean up regex in tempest-dsvm-cells-rc * skip lock_unlock_server test for cells * ScalityVolume:fix how remote FS mount is detected * OpenStack typo * Remove duplicate keys in policy.json * Add missing policy rules * devref:Don't suggest decorate private method * VMware: use a constant for 'iscsi' * Config drive: make use of an instance object * Fix attibute error when cloning raw images in Ceph * Properly log BlockDeviceMappingList in _create_block_device_mapping * Exclude all BDM checks for cells * glance:add helper method to get client version * enginefacade: 'dnsdomain' and 'ec2' * enginefacade: 'certificate' and 'pci_device' * enginefacade: 'key_pair' and 'cell' * enginefacade: 'instance_mapping' * enginefacade: 'cell_mapping' * enginefacade: 'instance_info' and 'instance_extra' * Use EngineFacade from oslo_db.enginefacade * VMware: fix trivial indentations * Remove flavors.get_all_flavors() only used in tests * Make lock policy default to admin or owner * libvirt:Fix a typo of test cases * Deprecate local conductor mode * Deprecate Extensible Resource Tracker * Change image to instance in comment * VMware: use oslo_config new type PortOpt * Remove vcpu resource from extensible resource tracker * Add logging to snapshot_volume_backed method * Remove unnecessary destroy call from Ironic virt driver * cells: add debug logging to bdm_update_or_create_at_top * Drop Instance v1.x support * Check prefix with startswith() instead of slicing * Add debug logging for when boot sequence is invalid in _validate_bdm * remove the redundant policy check for SecurityGroupsOutputController * virt: add constraint to handle realtime policy * libvirt: add cpu schedular priority config * libvirt: rework membacking 
config to support future features * Do not mask original spawn failure if shutdown_instance fails * Point to cinder options in nova block alloc docs * Fix booting fail when unlimited project quota * Remove useless get_instance_faults() * Remove "Can't resolve label reference" warnings * Remove reservation_id from the logs when a schedule fails * Use RequestSpec object in HostManager * Use RequestSpec object in the FilterScheduler * Add ppcle architectures to libvirt blockinfo * Deprecated: failIf * Imported Translations from Zanata * Remove obj_relationships from objects * Delete dead test code * Add tempest-dsvm-lxc-rc * Mark set-admin-password as complete for libvirt in support matrix * Hypervisor support matrix: define pause & unpause * Revert "Implement online schema migrations" * Fix the os-extended-volumes key reference in the REST API history docs * Remove get_all method from servicegroup API * Remove SoftDeleteMixin from NovaBase * libvirt: support snapshots with parallels virt_type * Use oslo.config choices kwarg with StrOpt for servicegroup_driver * Imported Translations from Zanata * Add -constraints sections for CI jobs * Add "vnc" option group for sample nova.conf file * Updated from global requirements * Expands python34 unit tests list * Fix missing obj_make_compatible() for ImageMetaProps object * Fix error handling in nova.cmd.baseproxy * Change 'ec2-api' stackforge url to openstack url * Fixes Python 3 str issue in ConfigDrive creation * Revert "Store correct VirtCPUTopology" * Enable all extension for image API sample tests * Add tags to .gitignore * Updated from global requirements * Add a nova functional test for the os-server-groups GET API with all_projects parameter * Image meta: treat legacy vmware adapter type values * Attempt rollback live migrate at dest even if network dealloc fails * hacking check for contextlib.nested for py34 support * Print number of rows archived per table in db archive_deleted_rows * Updated from global 
requirements * Fix more inconsistency between Nova-Net and Neutron * Fix metadata service security-groups when using Neutron * Remove redundant deps in tox.ini * Add some tests for map_dev * Clean up tests for dropping obj_relationships * Fix up Service object for manifest-based backports * Fix service_version minimum calculation for compute RPC * docs: add the scheduler evolution plans * Revert "virt: Use preexec_fn to ulimit qemu-img info call" * Updated from global requirements * Ensure Glance image 'size' attribute is 0, not 'None' * Ignore errorcode=4 when executing `cryptsetup remove` command * libvirt: Don't attempt to convert initrd images * Revert "Fixes Python 3 str issue in ConfigDrive creation" * Monkey patch nova-ec2 api * Compute: remove unused parameter 12.0.0 ------ * Omnibus stable/liberty fix * Drop outdated sqlite downgrade script * Updated from global requirements * Fix Status-Line in HTTP response * Imported Translations from Zanata * Default ConvertedException code to 500 * Updated from global requirements * VMware: fix bug for config drive when inventory folder is used * Fix a typo * code-review guidelines: add checklist for config options * Add a code-review guideline document * virt: Use preexec_fn to ulimit qemu-img info call * Clean up some Instancev1 stuff in the tests * Updated from global requirements * Replaces contextlib.nested with test.nested * Sync cliutils from oslo-incubator * Make archive_deleted_rows_for_table private 12.0.0.0rc2 ----------- * load consoleauth_topic option before using it * Revert "[libvirt] Move cleanup of imported files to imagebackend" * Add more documentation for RetryFilter * Fix InstanceV1 backports to use context * Imported Translations from Zanata * Add test of claim context manager abort * Log DBReferenceError in archive_deleted_rows_for_table * Use DBReferenceError in archive_deleted_rows_for_table * Add testresources used by oslo.db fixture * Remove unused context parameter from 
db.archive_deleted_rows* methods * xenapi_device_id integer, expected string * Fix InstanceV1 backports to use context * Drop unused obj_to_primitive() override * Updated from global requirements * libvirt: remove unnecessary else in blockinfo.get_root_info * Make test cases in test_test.py use NoDBTest * XenAPI: Fix unit tests for python34 * docs: re-organise the API concept docs * VMware: specify chunk size when reading image data * Make ConsoleauthTestCase inherit from NoDBTest * Change a test class of consoleauth to no db test * Imported Translations from Zanata * Catch 3 InvalidBDM related exc when boot instance * Move create vm states to svg diagram * Ironic: Fix bad capacity reporting if instance_info is unset * Revert "[libvirt] Move cleanup of imported files to imagebackend" * Honor until_refresh config when creating default security group * remove sphinxcontrib-seqdiag * [Py34] nova.tests.unit.api.openstack.test_common * [Py34] Enable api.openstack.test_mapper unit test * [Py34] Enable test_legacy_v2_compatible_wrapper * Extend the ServiceTooOld exception with more data * Make service create/update fail if version is too old * Allow automatic determination of compute_rpc version by service * Add get_minimum_version() to Service object and DB API * Correct memory validation for live migration * devref: change error messages no need microversion * Replace f.func_name and f.func_code with f.__name__ and f.__code__ * Imported Translations from Zanata * Add a note about the 500->404 not requiring a microversion * Ensure Nova metrics derived from a set of metrics * Updated from global requirements * Fixes Python 3 str issue in ConfigDrive creation * Make secgroup rules refresh with refresh_instance_security_rules() * Remove unused refresh_security_group_members() call * Imported Translations from Zanata * Check DBReferenceError foreign key in Instance.save * Fix Instance unit test for DBReferenceError * Ironic: Fix bad capacity reporting if instance_info is 
unset * libvirt: check if ImageMeta.disk_format is set before accessing it * libvirt: check if ImageMeta.disk_format is set before accessing it * Rollback is needed if initialize_connection times out * Updated from global requirements * Add Pillow to test-requirements.txt * VMware: raise NotImplementedError for live migration methods * xapi-tools: fixes cache cleaner script * Cleanup of Translations * Add Pillow to test-requirements.txt * Update rpc version aliases for liberty * Remove orphaned code related to extended_volumes * Add checkpoint logging when terminating an instance * Add checkpoint logging when building an instance in compute manager * Removed unused method from compute/rpcapi * Remove unused read-only cell code * Change warn to debug logs when migration context is missing * Use os-testr for py34 tox target * Add sample config file to nova docs * Remove lazy-loading property compute_task_api from compute api * Remove conductor 2.x RPC API * Reserve 10 migrations for backports * Use StrOpt's parameter choices to restritct option auth_strategy * vmware: set default value in fake _db_content when creating objects * Avoid needless list copy in 'scheduler_host_subset_size' case * libvirt: Log warning for wrong migration flag config options * Slightly better translation friendly formatting * Identify more py34 tests that already pass * rebuild: Apply migration context before calling the driver * hardware: improve parse_cpu_spec to handle exclusion range * Correct Instance type check to work with InstanceV1 * Imported Translations from Zanata * Correct Instance type check to work with InstanceV1 * Only create volumes with instance.az if cinder.cross_az_attach is False * Fix the help text of monkey_patch config param * Rollback of live-migration fails with the NFS driver * Set TrustedFilter as experimental * doc: gmr: Update instructions to generate GMR error reports * rebuild: Apply migration context before calling the driver * Fix MetricWeigher to use 
MonitorMetricList * VMware: update log to be warning * Add more help text to the cinder.cross_az_attach option * Cleanup of Translations * Revert "Deprecate cinder.cross_az_attach option" * Fix some spelling typo in manual * Fix NoneType error when calling MetricsWeigher * wsgi: removing semicolon * Fix logging_sample.conf to use oslo_log formatter * Remove unused _check_string_length() * Deprecate cinder.cross_az_attach option * Neutron: update cells when saving info_cache * Fix MetricWeigher to use MonitorMetricList 12.0.0.0rc1 ----------- * Imported Translations from Zanata * Detach volume after deleting instance with no host * Remove unnecessary call to info_cache.delete * Filter leading/trailing spaces for name field in v2.1 compat mode * Give instance default hostname if hostname is empty * If rescue failed set instance to ERROR * Add some devref for AZs * Change parameter name in utility function * RT: track evacuation migrations * rebuild: RPC sends additional args and claims are done * Cells: Limit instances pulled in _heal_instances * Open Mitaka development * Fix order of arguments in assertEqual * devref: update the nova architecture doc * Imported Translations from Zanata * Fix quota update in init_instance on nova-compute restart * net: explicitly set mac on linux bridge * live-migration: Logs exception if operation failed * libvirt: add unit tests for the designer utility methods * Add test cases for some classes in objects.fields * Change ignore-errors to ignore_errors * libvirt: fix direct OVS plugging * claims: move a debug msg to a warn on missing migration * Fix order of arguments in assertEqual * Remove duplicate VALID_NAME_REGEX * Pep8 didn't check api/openstack/common.py * Updated from global requirements * libvirt: Add unit tests for methods * Devref: Document why conductor has a task api/manager * Imported Translations from Zanata * Fix nova configuration options description * libvirt:on snapshot delete, use qemu-img to blockRebase if VM is 
stopped * Allow filtering using unicode characters * Updated from global requirements * Imported Translations from Zanata * Test both NoAuthMiddleware and NoAuthMiddlewareV3 * Remove redundant variable 'context' * Add 'OS-EXT-VIF-NET:net_id' for v21 compatible mode * libvirt: Add NUMA cell count to cpu_info * Xenapi: Don't access image_meta.id when booting from a volume * Imported Translations from Zanata * Fix typo in HACKING.rst * Remove comment in wrong place * Fix string formatting in api/metadata/vendordata_json.py * Raise exception.Migration earlier in REST API layer * Remove "shelved_image_id" key from instance system metadata * Only set access_ip_* when instance goes ACTIVE * VMware: fix typo in comment * RT: Migration resource tracking uses migration context * compute: migrate/resize paths properly handle stashed numa_topology * Claims: Make sure move claims create a migration context records * libvirt:update live_migration_monitor to use Guest * VMware: create method for getting datacenter from datastore * User APIRouterV21 instead of APIRouterV3 for v2.1 unittests * Remove TestOpenStackClientV3 from nova functional tests * Rename all the ViewBuilderV3 to ViewBuilderV21 * libvirt: Split out resize_image logic from create_image * Reuse method to convert key to passphrase * Creating instance fail when inject ssh key in cells mode * Fix the usage output of the nova-idmapshift command * Make test_revoke_cert_project_not_found_chdir_fails deterministic * Reduce the number of Instance.get_by_uuid calls * Remove 'v3' from comments in Nova API code * xapi: cleanup volume sr on live migration rollback * Hyper-V: Implements attach_interface and detach_interface method * Remove unnecessary 'context' param from quotas reserve method call * VMware: Replace get_dynamic_properties with get_object_properties_dict * VMware: Replace get_dynamic_property with get_object_property * Return empty PciDevicePoolList obj instead of None * libvirt: add debug logging for lxc 
teardown paths * Add API schema for different_cell filter * Add microversion bump exception for scheduler-hint * Use six.text_type instead of str in serialize_args * Set vif and allocated when associating fixed ip * Fix ScaleIO commands in rootwrap filters * Add missing information to docstring * Add microversion rule when adding attr to request * Check unknown event name when create external server event * Don't expect meta attributes in object_compat that aren't in the db obj * CONF.allow_resize_to_same_host should check only once in controller * Updated from global requirements * Fix debug log format in object_backport_versions() * Add version 3.0 of conductor RPC interface * Remove and deprecate conductor object_backport() * Invalidate AZ cache when the instance AZ information is different * Consolidate code to get the correct availability zone of an instance * Fix order of arguments in assertEqual * Ironic: Call unprovison for nodes in DEPLOYING state * libvirt: use guest as parameter for get serial ports * Separate API schemas for v2.0 compatible API * api: allow any scheduler hints * API: Handle InstanceUnknownCell exceptions * Updated from global requirements * Add some explanation for the instance AZ field * Remove 'v3' from extension code * Remove more 'v3' references from the code * Sorting and pagination params used as filters * Freeze v1 Instance and InstanceList schema hashes * Imported Translations from Transifex * Remove unused parameter overwrite in elevated * Add missing delete policies in the sample file * Fix a few typos * ironic: convert driver to use nova.objects.ImageMeta * objects: convert config drive to use ImageMeta object * VMware: ensure that instance is deleted when volume is missing * libvirt:Rsync compression removed * xenapi: Support extra tgz images that with only a single VHD * Hyper-V: Fixes snapshoting inexistent VM issue * Hyper-V: Adds RDPConsoleOps unit tests * Rectify spelling mistake in nova * libvirt: Add a finish log * 
Remove old unused baremetal rootwrap filters * Relax restrictions on server name * network_request_obj: Clean up outdated code * Object: Fix KeyError when loading instance from db * Add os-brick's scsi_id command to rootwrap * Expose keystoneclient's session and auth plugin loading parameters * Remove and deprecate conductor compute_node_create() * Drop unused conductor manager vol_usage_update() mock * Add constraint target to tox.ini * nova-net: fix missing log variable in deallocate_fixed_ip * Provide working SQLA_VERSION attribute * Don't "lock" the DB on expand dry run * New sensible network bandwidth quota values in Nova tests * Fix Cells gate test by modifying the regressions regex * Add functional test for server group * Reject the cell name include '!', '.' and '@' for Nova API * Hyper-V: Adds HyperVDriver unit tests * claims: Remove compat code with instance dicts * Add Instance and InstanceList v2.0 objects * Teach conductor to do manifest-based object_class_action() things * Make the conductor fixture use version manifests * Update objects test infrastructure for multiple versions * Refactor Instance tests to use objects.Instance * Fix an issue with NovaObjectRegistry hook * Pull out the common bits of InstanceList into _BaseInstanceList * Pull out the common bits of Instance into _BaseInstance * Clarify max_local_block_devices config option usage * Allow to use autodetection of volume device path * Remove the blacklisted nova-cells shelve tests * Update from global requirements * objects: Hook migration object into Instance * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Detach and terminate conn if Cinder attach fails * [libvirt] Move cleanup of imported files to imagebackend * hyperv: convert driver to use nova.objects.ImageMeta 12.0.0.0b3 ---------- * Add notes explaining vmware's suds usage * Adds instance_uuid index for instance_system_metadata * Handle nova-compute failure during a soft reboot * Fix mistake in 
UT:test_detach_unattached_volume * Fix RequestSpec.instance_group hydration * Remove unused root_metadata method of BlockDeviceMappingList * Add JSON-Schema note to api_plugins.rst * Compute: update finish_revert_resize log to have some context * Revert "Remove references to suds" * Fix API directories on the doc * Fix incomplete error message of quota exceeded * Add secgroup param checks for Neutron * Implement manifest-based backports * Delete orphaned instance files from compute nodes * Fixed incorrect keys in cpu_pinning * api: deprecate the api v2 extension configuration * Remove the v3 word from help message of api_rate_limit option * Use the same pci_requests field for all filters and HostManager * objects: Add MigrationContext object * Don't query database with an empty list of tags for creation * Remove duplicate NullHandler test fixture * Add migration policy to upgrades devref * Add warning log when deprecated v2 and v3 code get used * Update ComputeNode values with allocation ratios in the RT * Update HostManager and filters to use ComputeNode ratios * Add cpu_allocation_ratio and ram_allocation_ratio to ComputeNode * VMware: adds support for rescue image * filter pre_assigned_dev_names when finding disk dev * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * rt: Rewrite abort and update_usage tests * Cleanup RT _instance_in_resize_state() * Compute: be consistent with logs about NotImplemented methods * VMware: pass network info to config drive * Remove/deprecate conductor instance_update() * Make compute manager instance updates use objects * xenapi: add necessary timeout check * Fix permission issue of server group API * Make query to quota usage table order preserved * Change v3 to v21 for devref api_plugins.rst * Remove duplicate exception * Don't trace on InstanceInfoCacheNotFound when refreshing network info_cache * Cells: Improve block device mapping update/create calls * Rm 
openstack/common/versionutils from setup.cfg * Add a warning in the microversion docs around the usage of 'latest' * Fix exception message mistake in WSGI service * Replace "vol" variable by "bdm" * Remove v3 references in unit test 'contrib' * Removed unused dependency: discover * Rename tests so that they are run * Adds unit tests to test_common.py * db: Add the migration_context to the instance_extra table * tests: Make test_claims use Instance object * api: use v2.1 only in api-paste.ini * VMware: Update to return the correct ESX iqn * Pass block_device_info when delete an encrypted lvm * Handle neutron exception on bad floating ip create request * API: remove unused parameter * Consider that all scheduler calls are IO Ops * Add RequestSpec methods for primitiving into dicts * Add a note about the 400 response not requiring a microversion * api: deprecate the concept of extensions in v2.1 * Fix precedence of image bdms over image mappings * Cells: remove redundant check if cells are enabled * Strip the extra properties out when using legacy v2 compatible middleware * Remove unused sample files from /doc dir * Expose VIF net-id attribute in os-virtual-interfaces * libvirt: take account of disks in migration data size * Add deprecated_for_removal parm for deprecated neutron_ops * Use compatibility methods from oslo * compute: Split the rebuild_instance method * Allow for migration object to be passed to _move_claim * rt: move filtering of migration by type lower in the call stack * rt: generalize claim code to be useful for other move actions * libvirt: make guest to return power state * libvirt: move domain info to guest * Xen: import migrated ephemeral disk based on previous size * cleanup NovaObjectDictCompat from external_event * cleanup NovaObjectDictCompat from agent * Catch invalid id input in service_delete * Convert percent metrics back into the [0, 1] range * Cleanup for merging v2 and v2.1 functional tests * Remove doc/source/api and doc/build before 
building docs * Fixes a typo on nova.tests.unit.api.ec2.test_api.py * Add a note about the 403 response not requiring a microversion * Pre-load expected attrs that the view builder needs for server details * Remove 'Retry-After' in server create and resize * Remove debug log message in SG API constructor * Updated from global requirements * Refactor test cases for live-migrate error case * Fixes Bug "destroy_vm fails with HyperVException" * libvirt: refactor _create_domain_setup_lxc to use Image.get_model * Set task_state=None when booting instance failed * libvirt: Fix snapshot delete for network disk type for blockRebase op * [Ironic]Not count available resources of deployed ironic node * Catch OverQuota in volume create function * Don't allow instance to overcommit against itself * n-net: add more debug logging to release_fixed_ip * Fix scheduler code to use monitor metric objects * objects: add missing enum values to DiskBus field * Move objects registration in tests directory * xenapi: convert driver to use nova.objects.ImageMeta * libvirt: convert driver to use nova.objects.ImageMeta * Updated from global requirements * VMware: Delete vmdk UUID during volume detach * Move common sample files methods in test base class * Share server POST sample file for microversion too * Fix remote_consoles microversion 2.8 not to run on /v3 * Remove merged sample tests and file for v2 tests * Move "versions" functional tests in v2.1 tests * Nil out inst.host and inst.node when build fails * Fix link's href to consider osapi_compute_link_prefix * Fix abnormal quota usage after restore by admin * Specify current directory using new cwd param in processutils.execute * Remove and deprecate unused conductor method vol_usage_update() * Replace conductor proxying calls with the new VolumeUsage object * Add a VolumeUsage object * Updated from global requirements * Move CPU and RAM allocation ratios to ResourceTracker * Pull the all_tenants search_opts checking code into a common 
utility * Gate on nova.conf.sample generation * libvirt: use proper disk_info in _hard_reboot * Update obj_reset_changes signatures to match * libvirt: only get bdm in _create_domain_setup_lxc if booted from volume * libvirt: _create_domain_setup_lxc needs to default disk mapping as a dict * libvirt: add docstring for _get_instance_disk_info * Add rootwrap daemon mode support * Removed duplicated keys in dictionary * Xenapi: Correct misaligned partitioning * libvirt:Remove duplicated check code for config option sysinfo_serial * Test cases for better handling of SSH key comments * Allow compute monitors in different namespaces * cleanup NovaObjectDictCompat from hv_spec * cleanup NovaObjectDictCompat from quota * Correct a wrong docstring * Create RequestSpec object * Clarify API microversion docs around handling 500 errors * libvirt: Fix KeyError during LXC instance boot * Xenapi: Handle missing aggregate metadata on startup * Handle NotFound exceptions while processing network-changed events * Added processing /compute URL * libvirt: enable live migration with serial console * Remove the useless require_admin_context decorator * Correct expected error code for os-resetState action * libvirt: add helper methods for getting guest devices/disks * compute: improve exceptions related to disk size checks * Improve error logs for start/stop of locked instance * pci: Remove nova.pci.device module * pci: Remove objects.InstancePCIRequests.save() * Remove unused db.security_group_rule_get_by_security_group_grantee() * Revert "Make nova-network use conductor for security groups refresh" * Make compute_api.trigger_members_refresh() issue a single db call * Fix cells use of legacy bdms during local instance delete operations * Hyper-V: Fixes serial port issue on Windows Threshold * Consolidate initialization of instance snapshot metadata * Fix collection of metadata for a snapshot of a volume-backed instance * Remove unnecessary ValueError exception * Update log's level when 
backup a volume backend instance * The API unit tests for serial console use http instead of ws * Drop scheduler RPC 3.x support * Move quota delta reserve methods from api to utils * nova.utils._get_root_helper() should be public * Host manager: add in missing log hints * Removing extension "OS-EXT-VIF-NET" from v2.1 extension-list * nova-manage: fix typo in docstring about mangaging * hyper-v: mock time.sleep in test_rmtree * Remove tie between system_metadata and extra.flavor * Fixes Hyper-V boot from volume fails when using ephemeral disk * Re-write way of compare APIVersionRequest's * Store "null api version" as 0.0 * add docstring to virt driver interface (as-is) [1 of ?] * Remove last of the plugins/v3 from unit tests * Rename classes containing 'v3' to 'v21' * Move the v2 api_sample functional tests * Updated from global requirements * Add logging when filtering returns nothing * libvirt: cleanup() serial_consoles after instance failure * Don't query database with an empty list of tags for IN clause * Libvirt: Make live_migration_bandwidth help msg more meaning * Move V2.1 API unittest to top level directory * Neutron: Check port binding status * Move legacy v2 api smaple tests * conductor: update comments for rpc and use object * Load flavor when getting instances for simple-tenant-usage * Make pagination tolerate a deleted marker * Updated from global requirements * Cleanup HTTPRequest for security_groups test * Add api samples impact to microversion devref * Use min and max on IntOpt option types * Add hacking check for eventlet.spawn() * Updated from global requirements * neutron: filter None port_ids from ports list in _unbind_ports * VMware: treat deletion exception with attached volumes * VMware: ensure that get_info raises the correct exception * Allow resize root_gb to 0 for volume-backed instances * Limit parallel live migrations in progress * Validate quota class_name * Move V2 API unittests under legacy_v2 directory * Updated from global 
requirements * Replace get_cinder_client_version in cinder.py * Avoid querying for Service in resource tracker * Remove/deprecate unused parts of the compute node object * Make ComputeNode.service_id nullable to match db schema * Add missing rules in policy.json * Add V2.1 API tests parity with V2 API tests * Fixed indentation * Simplify interface for creating snapshot of volume-backed instance * Add instance action events for live migration * Remove 'v3' directory for v2.1 json-schemas * Move v2.1 code to the main compute directory - remove v3 step3 * libvirt: qemu-img convert should be skipped when migrating * Add version counter to Service object * Fix the peer review link in the 'Patches and Reviews' policy section * Handle port delete initiated by neutron * Don't check flavor disk size when booting from volume * libvirt: make instance compulsory in blockinfo APIs * xapi: ensure pv driver info is present prior to live-migration * Move existing V2 to legacy_v2 - step 2 * Move existing V2 to legacy_v2 * Return v2 version info with v2 legacy compatible wrapper * Ironic: Add numa_topology to get_available_resource return values * Fix three typos on nova/pci directory * Imported Translations from Transifex * pci: Use PciDeviceList for PciDevTracker.pci_devs * pci: Remove get_pci_devices_filter() method * pci: Move whitelist filtering inside PCI tracker * libvirt: call host.get_capabilities after checking for bad numa versions * libvirt: log when BAD_LIBVIRT_NUMA_VERSIONS detected * Use string substitution before raising exception * Hyper-V: deprecates support for Windows / Hyper-V Server 2008 R2 * VMware: Do not untar OVA on the file system * Add hacking check for greenthread.spawn() * Ironic: Use ironicclient native retries for Conflict in ClientWrapper * Prevent (un)pinning unknown CPUs * libvirt: use instance UUID with exception InstanceNotFound * Fix notify_decorator errors * VMware: update supported vsphere 6.0 os types * libvirt: convert Scality vol driver to 
LibvirtBaseFileSystemVolumeDriver * libvirt: convert Quobyte driver to LibvirtBaseFileSystemVolumeDriver * pci: Use fields.Enum type for PCI device type * pci: Use fields.Enum type for PCI device status * More specific error messages on building BDM * Ensure test_models_sync() works with new Alembic releases * Hyper-V: Adds VolumeOps unit tests * Hyper-V: Adds MigrationOps unit tests * Suppress not image properties for image metadata from volume * Add non-negative integer and float fields * Fix DeprecationWarning when using BaseException.message * Added support for specifying units to hw:mem_page_size * Compute: use instance object for refresh_instance_security_rules * libvirt: convert GPFS volume driver to LibvirtBaseFileSystemVolumeDriver * Updated from global requirements * Add os-brick based LibvirtVolumeDriver for ScaleIO * docs: add link to liberty summit session on v2.1 API * Refactor unit test for InstanceGroup objects * Don't pass the service catalog when making glance requests * libvirt: check min required qemu/libvirt versions on s390/s390x * libvirt: ensure LibvirtConfigGuestDisk parses readonly/shareable flags * libvirt: set caps on maximum live migration time * libvirt: support management of downtime during migration * cleanup NovaObjectDictCompat from numa object * Fix test_relationships() for subobject versions * libvirt: don't open connection in driver constructor * Skip SO_REUSEADDR tests on BSD * __getitem__ method not returning value * Compute: replace incorrect instance object with dict * Fix live-migrations usage of the wrong connector information * Honour nullability constraints of Glance schema in ImageMeta * Change docstring in test to comment * libvirt: convert GlusterFS driver to LibvirtBaseFileSystemVolumeDriver * libvirt: convert SMBFS vol driver to LibvirtBaseFileSystemVolumeDriver * libvirt: convert NFS volume driver to LibvirtBaseFileSystemVolumeDriver * Introduce LibvirtBaseFileSystemVolumeDriver * Add test to check relations at or 
below current * Add documentation for the nova-cells command * libvirt:Rsync remote FS driver was added * Clean the deprecated noauth middleware * Add os_brick-based VolumeDriver for HGST connector * libvirt: add os_admin_user to use with set admin password * Fixed incorrect behaviour of method _check_instance_exists * Squashing down update method * Fix the wrong file name for legacy v2 compatible wrapper functional test * Add scenario for API sample tests with legacy v2 compatible wrapper * Skip additionalProperties checks when LegacyV2CompatibleWrapper enabled * Libvirt: correct libvirt reference url link when live-migration failed * libvirt: enable virtio-net multiqueue * Replacing unichr() with six.unichr() and reduce with six.moves.reduce() * Fix resource leaking when consume_from_instance raise exception * :Add documentation for the nova-idmapshift command * RBD: Reading rbd_default_features from ceph.conf * New nova API call to mark nova-compute down * libvirt: move LibvirtISCSIVolumeDriver into it's own module * libvirt: move LibvirtNETVolumeDriver into it's own module * libvirt: move LibvirtISERVolumeDriver into it's own module * libvirt: move LibvirtNFSVolumeDriver into it's own module * allow live migration in case of a booted from volume instance * Handle MessageTimeout to MigrationPreCheckError * Create a new dictionary for type_data in VMwareAPIVMTestCase class * resource tracker style pci resource management * Added missed '-' to the rest_api_version_history.rst * Imported Translations from Transifex * Remove db layer hard-code permission checks for keypair * Fix a couple dead links in docs * cleanup NovaObjectDictCompat from virt_cpu_topology * Adding user_id handling to keypair index, show and create api calls * Updated from global requirements * Remove legacy flavor compatibility code from Instance * libvirt: Fix root device name for volume-backed instances * Fix few typos in nova code and docs * Helper script for running under Apache2 * Raise 
NovaException for missing/empty machine-id * Fixed random failing of test_describe_instances_with_filters_tags * libvirt: enhance libvirt to set admin password * libvirt: rework quiesce to not share "sensitive" informations * Metadata: support proxying loadbalancers * formely is not correct * Remove 'scheduled_at' - DB cleanup * Remove unnecessary executable permission * Neutron: add in API method for updating VNIC index * Xen: convert image auto_disk_config value to bool before compare * Make BaseProxyTestCase.test_proxy deterministic wrt traffic/verbose * Cells: Handle instance_destroy_at_top failure * cleanup NovaObjectDictCompat from virtual_interface * Fix test mock that abuses objects * VMware: map one nova-compute to one VC cluster * VMware: add serial port device * Handle SSL termination proxies for version list * Use urlencode instead of dict_to_query_str function * libvirt: move LibvirtSMBFSVolumeDriver into it's own module * libvirt: move LibvirtAOEVolumeDriver into it's own module * libvirt: move LibvirtGlusterfsVolumeDriver into it's own module * libvirt: move LibvirtFibreChannelVolumeDriver into it's own module * VMware: set create_virtual_disk_spec method as local * Retry live migration on pre-check failure * Handle config drives being stored on rbd * Change List objects to use obj_relationships * Fixes delayed instance lifecycle events issue * libvirt-vif: Allow to configure a script on bridge interface * Include DiskFilter in the default list * Adding support for InfiniBand SR-IOV vif type * VMware: Add support for swap disk * libvirt: Add logging for dm-crypt error conditions * Service group drivers forced_down flag utilization * libvirt: Replace stubs with mocks for test_dmcrypt * clarify docs on 2.9 API change * Remove db layer hard-code permission checks for instance_get_all_hung_in_rebooting * Undo tox -e docs pip install sphinx workaround * Set autodoc_index_modules=True so tox -e docs builds module docs again * Allow NUMA based reporting for 
Monitors * libvirt: don't add filesystem disk to parallels containers unconditionally * objects: add hw_vif_multiqueue_enabled image property * Prepare for unicode enums from Oslo * rootwrap: remove obsolete filters for baremetal * Create class hierarchy for tasks in conductor * return more details on assertJsonEqual fail * Fix IronicHostManager to skip get_by_host() call * Store correct VirtCPUTopology * Add documentation for block device mapping * Show 'locked' information in server details * VMware: add resource limits for disk * VMware: store extra_specs object * VMware: Resource limits for memory * VMware: create common object for limits, reservations and shares * VMware: add support for cores per socket * Add DiskNotFound and VolumeNotFound test * Not check rotation at compute level * Instance destroyed if ironic node in CLEANWAIT * Ironic: Better handle InstanceNotFound on destroy() * Fix overloading of block device on boot by device name * tweak graphviz formatting for readability * libvirt: rename parallels driver to virtuozzo * libvirt: Add macvtap as virtual interface (vif) type to Nova's libvirt driver * cells: document upgrade limitations/assumptions * rebuild: make sure server is shut down before volumes are detached * Implement compare-and-swap for instance update * docs: add a placeholder link to mentoring docs * libvirt: Kill rsync/scp processes before deleting instance * Updated from global requirements * Add console allowed origins setting * libvirt: move the LibvirtScalityVolumeDriver into it's own module * libvirt: move the LibvirtGPFSVolumeDriver into it's own module * libvirt: move the LibvirtQuobyteVolumeDriver into the quobyte module * libvirt: move volume/remotefs/quobyte modules under volume subdir * Add missing policy for limits extension * Move to using ovo's remotable decorators * Base NovaObject on VersionedObject * Document when we should have a microversion * libvirt: do relative block rebase only with non-null base * Add 
DictOfListOfStrings type of field * Get py34 subunit.run test discovery to work * Enable python34 tests for nova/tests/unit/scheduler/test*.py * libvirt: mark NUMA huge page mappings as shared access * libvirt:Add a driver API to inject an NMI * virt: convert hardware module to use nova.objects.ImageMeta 12.0.0.0b2 ---------- * Replace openssl calls with cryptography lib * libvirt: move lvm/dmcrypt/rbd_utils modules under storage subdir * Fix Instance object usage in test_extended_ips tests * Fix test_extended_server_attributes for proper Instance object usage * Fix test_security_groups to use Instance object properly * Refactor test_servers to use instance objects * Switch to using os-brick * Updated from global requirements * VMware: remove redundant check for block devices * Remove unused decorator on attach/detach volume * libvirt: test capability for supports_migrate_to_same_host * Added removing of tags from instance after its deletion * Remove unused import of the my_ip option from the manager * Scheduler: enhance debug messages for multitenancy aggregates * VMware: Handle missing vmdk during volume detach * Running microversion v2.6 sample tests under '/v2' endpoint * VMware: implement get_mks_console() * Add MKS protocol for remote consoles * Add MKS console support * libvirt: improve logging in the driver.py code * Fix serializer supported version reporting in object_backport * Updated from global requirements * Revert "Add error message to failed block device transform" * tox: make it possible to run pep8 on current patch only * Fix seven typos on nova documentation * Add two fields to ImageMetaProps object * Check flavor type before add tenant access * Switch to the oslo_utils.fileutils * hypervisor support matrix: fix snapshot for libvirt Xen * libvirt: implement get_device_name_for_instance * libvirt: Always default device names at boot * Remove unused import of the compute_topic option from the DB API * Remove unused call to _get_networks_by_uuids() 
* libvirt: fix disk I/O QOS support with RBD * Updated from global requirements * Remove unnecessary oslo namespace import checks * VMware: Fixed redeclared CONF = cfg.CONF * Execute _poll_shelved_instances only if shelved_offload_time is > 0 * Switch to oslo.reports * Support Network objects in set_network_host * Fix Filter Schedulers doc to refer to all_filters * Fixup uses of mock in hyperv tests * Cleanup log lines in nova.image.glance * Revert "Add config drive support for Virtuozzo containers" * Virt: fix debug log messages * Virt: use flavor object and not flavor dict * Add VersionPredicate type of field * Remove unnecessary method in FilterScheduler * Use utf8_bin collation on the flavor extra-specs table in MySQL * docs: clear between current vs future plans * cleanup NovaObjectDictCompat subclassing from pci_device * libvirt: make unit tests concise by setup guest object * libvirt: introduce method to wait for block device job * Decouple instance object tests from the api fakes module * Fixed typos in self parameter * Hyper-V: restart serial console workers after instance power change * Only work with ipv4 subnet metadata if one exists * Do not import using oslo namespace * Refresh instance info cache within lock * Remove db layer hard-code permission checks for fixed_ip_associate_* * Add middleware filterout Microversions http headers * Correct backup_type param description * Fix a request body template for secgroup tests * Images: fix invalid exception message * Updated from global requirements * rebuild: fix rebuild of server with volume attached * objects: send PciDeviceList 1.2 to all code that can handle it * Fix libguestfs failure in test_can_resize_need_fs_type_specified * Fix the incorrect PciDeviceList version number * objects: Don't import CellMapping from the objects module * Deprecate the osapi_v3.enabled option * Remove conductor api from resource tracker * Fix test_tracker object mocks * Fix Python 3 issues in nova.utils and nova.tests * 
Remove db layer hard-code permission checks for instance_get_all_by_host_and_not_type * Support all_tenants search_opts for neutron * libvirt : remove broken olso_config choices option * Convert instance_type to object in prep_resize * VMware: clean up exceptions * Revert "Remove useless db call instance_get_all_hung_in_rebooting" * VMware: Use virtual disk size instead of image size * Remove db layer hard-code permission checks for provider_fw_rule_* * Remove db layer hard-code permission checks for archive_deleted_rows* * Revert "Implement compare-and-swap for instance update" * Add tool to build a doc latex pdf * make test_save_updates_numa_topology stable across python versions * Update HACKING.rst for running tests and building docs * Cleanup quota_class unittest with appropriate request context * Remove db layer hard-code permission checks for quota_class_create/update * Remove db layer hard-code permission checks for quota_class_get_all_by_name * Improve functional test base for microversion * Remove db layer hard-code permission checks for reservation_expire * Introducing new forced_down field for a Service object * Use stevedore for loading monitor extensions * libvirt: Remove dead code path in method clear_volume * Switch to oslo.service library * Include project_id in instance metadata * Convert test_compute_utils to use Instance object * Fix for mock-1.1.0 * Port crypto to Python 3 * Add HostMapping object * Remove useless db call instance_get_all_hung_in_rebooting * Cleanup unused method fake_set_snapshot_id * Handle KeyError when volume encryption is not supported * Expose Neutron network data in metadata service * Build Neutron network data for metadata service * Implement compare-and-swap for instance update * Added method exists to the Tag object * Add DB2 support * compute: rename ResizeClaim to MoveClaim * Fix the little spelling mistake of the comment * Remove db layer hard-code permission checks for quota_create/update * Fix the typo from 
_pre_upgrade_294 to _pre_upgrade_295 for tests/unit/db/test_migration * Ironic:check the configuration item api_max_retries * Modified testscenario for micro version 2.4 * Add some notifications to the evacuate path * Make evacuate leave a record for the source compute host to process * Fix incorrect enum in Migration object and DB model * Refactoring of the os-services module * libvirt: update docstring in blockinfo module for disk_info * Ignore bridge already exists error when creating bridge * libvirt: handle rescue flag first in blockinfo.get_disk_mapping * libvirt: update volume delete snapshot to use Guest * libvirt: update live snapshot to use Guest object * libvirt: update swap volume to use Guest * libvirt: introduce GuestBlock to wrap around Block API * libvirt: rename GuestVCPUInfo to VCPUInfo * libvirt: save the memory state of guest * removed unused method _get_default_deleted_value * Remove flavor migration from db_api and nova-manage * Rework monitor plugin interface and API * Adds MonitorMetric object * virt: add get_device_name_for_instance to the base driver class * libvirt: return whether a domain is persistent * Cells: fix indentation for configuration variable declaration * VMware: add unit tests for vmops attach and detach interface * Remove unneeded OS_TEST_DBAPI_ADMIN_CONNECTION * Switch from MySQL-python to PyMySQL * virt: fix picking CPU topologies based on desired NUMA topology * Port test_exception to Python 3 * devref: virtual machine states and transitions * Consolidate the APIs for getting consoles * Remove db layer hard-code permission checks for floating_ip_dns * Fix typo in model doc string * virt: Fix AttributeError for raw image format * log meaningful error message on download exception * Updated from global requirements * Add bandit for security static analysis testing * Handle unexpected clear events call * Make on_shared_storage optional in compute manager * snapshot: Add device_name to the snapshot bdms * compute: Make 
swap_volume with resize updates BDM size * Make Nova better at keeping track of volume sizes in BDM * API: make sure a blank volume with no size is rejected * Ironic: Improve driver logs * Drop MANIFEST.in - it's not needed with PBR * Libvirt: Define system_family for libvirt guests * Convert RT compute_node to be a ComputeNode object * glance:check the num_retries option * tests: Move test_resource_tracker to Instance objects * Remove compat_instance() * Enable python34 tests for nova/tests/unit/objects/test*.py * Soft delete system_metadata when destroy instance * Remove python3 specific test-requirements file * Try luksFormat up to 3 times in case the device is in use * rootwrap: update ln --symbolic filter for FS and FC type volume drivers * Add wording to error message in TestObjectVersions.test_relationships * Close temporary files in virt/disk/test_api.py * Add BlockDeviceType enum field * Add BlockDeviceDestinationType enum field * Add BlockDeviceSourceType enum field * Avoid recursion in object relationships test * tests: move a test to the proper class in test_resource_tracker * Remove db layer hard-code permission checks for network_set_host * Block subtractive operations in migrations for Kilo and beyond * Remove db layer hard-code permission checks for network_disassociate * libvirt: Correct domxml node name * Test relationships of List objects * libvirt: configuration for interface driver options * Fix Python 3 issues in nova.db.sqlalchemy * Update test_db_api for oslo.db 2.0 * Fix is_image_extendable() thinko * Validate maximum limit for quota * utils: ignore block device mapping in system metadata * libvirt: add in missing doc string for hypervisor_version * Remove useless policy rule from fake_policy.py * Replace ascii art architecture diagram with svg image * Adds MonitorMetricTypeField enum field * Unfudge tox -e genconfig wrt missing versionutils module * virt: update doctrings * hypervisor support matrix: add feature "evacuate" * XenAPI: 
Refactor rotate_xen_guest_logs to avoid races * hypervisor support matrix: add feature "serial console" * hypervisor support matrix: add CLI commands to features * Fix typos detected by toolkit misspellings * hypervisor support matrix: fix "evacuate" for s390 and hyper-v * Make live migration create a migration object record * Cells: add instance cell registration utility to nova-manage * fix typos in docs * Logging corrected * Check mac for instance before disassociate in release_fixed_ip * Add the rule of separate plugin for Nova REST API in devref * Use flavor object in compute manager 12.0.0.0b1 ---------- * Changes conf.py for Sphinx build because oslosphinx now contains GA * Fix testing object fields with missing instance rows * Change group controller of V2 test cases * Reduce window for allocate_fixed_ip / release_fixed_ip race in nova-net * Make NoValidHost exceptions clearer * Hyper-V: Fixes method retrieving free SCSI controller slot on V1 * Refactor network API 'get_instance_nw_info' * Removed extra '-' from rest_api_version_history.rst * Remove an useless variable and fix a typo in api * VMware: convert driver to use nova.objects.ImageMeta * Bypass ironic server not available issue * Fix test_create_security_group_with_no_name * Remove unused "id" and "rules" from secgroup body * cells: add devstack/tempest-dsvm-cells-rc for gating * Add common function for v2.1 API flavor_get * Fix comment typo * Fix up instance flavor usage in compute and network tests * Fix up ec2 tests for flavors on instances * Fix up xenapi tests for instance flavors * Fix up some bits of resource_tracker to use instance flavors * Register the vnc config options under group 'vnc' * Cells: cell scheduler anti-affinity filter * Cells: add in missing unit test for get_by_uuid * VMware driver: Increasing speed of downloading image * Hyper-V: Fix virtual hard disk detach * Add flag to force experimental run of db contract * Make readonly field tests use exception from 
oslo.versionedobjects * Fixes "Hyper-V destroy vm fails on Windows Server 2008R2" * Add microversion to allow server search option ip6 for non-admin * Updated from global requirements * VMware: Handle port group not found case * Imported Translations from Transifex * libvirt: use correct translation format * Add explicit alembic dependency * network: add more debug logging context for race bug 1249065 * Add virt resource update to ComputeNode object * xenapi: remove bittorrent entry point lookup code * Use oslo-config-generator instead of generate_sample.sh * Add unit tests for PCI utils * Support flavor object in migrate_disk_and_power_off * Remove usage of WritableLogger from oslo_log * libvirt: Don't fetch kernel/ramdisk files if you already have them * Allow non-admin to list all tenants based on policy * Remove redundant policy check from security_group_default_rule * Return bandwidth usage after updating * Update version for Liberty * neutron: remove deprecated allow_duplicate_networks config option * Validate maximum limit for integer * Improve the ability to resolve capabilities from Ironic * Fix the wrong address ref when the fixed_ip is invalid * The devref for Nova stable API * Fix wrong check when use image in local * Fixes TypeError when libvirt version is BAD_LIBVIRT_CPU_POLICY_VERSIONS 12.0.0a0 -------- * Remove hv_type translation shim for powervm * cells: remove deprecated mute_weight_value option * Make resize api of compute manager to send flavor object * VMware: detach cinder volume when instance destroyed * Add unit tests for the exact filters * test: add MatchType helper class as equivalent of mox.IsA * Validate int using utils.validate_integer method * VMware: use min supported VC version in fake driver * Updated from global requirements * Added documentation around database upgrades * Avoid always saving flavor info in instance * Warn when CONF torrent_base_url is missing slash * Raise invalid input if use invalid ip for network to attach 
interface * Hyper-V: Removes old instance dirs after live migration * DB downgrades are no longer supported * Add Host Mapping table to API Database * VMware: verify vCenter server certificate * Implement online schema migrations * Hyper-V: Fixes live migration configdrive copy operation * Avoid resizing disk if the disk size doesn't change * Remove openstack/common/versionutils module * Fix TestObjEqualPrims test object registration * Remove references to suds * VMware: Remove configuration check * Remove and deprecate conductor task_log methods * Remove unused compute utils methods * Make instance usage audit use the brand new TaskLog object * Add a TaskLog object * Updated from global requirements * Fix noVNC console access for an IPv6 setup * hypervisor support matrix: add status "unknown" * VMware: typo fix in config option help * Sync with latest oslo-incubator * Associating of floating IPs corrected * Minor refactor in nova.scheduler.filters.utils * Cleanup wording for the disable_libvirt_livesnapshot workaround option * Remove cell api overrides for force-delete * libvirt: convert imagebackend to support nova.virt.image.model classes * virt: convert disk API over to use nova.virt.image.model * Cells: Skip initial sync of block_device_mapping * Pass Down the Instance Name to Ironic Driver * Handle InstanceNotFound when sending instance update notification * Add an index to virtual_interfaces.uuid * Updated from global requirements * Add config drive support for Virtuozzo containers * Update formatting of microversion 2.4 documentation * Consolidates scheduler utils tests into a single file * Send Instance object to cells instance_update_at_top * VMware: use vCenter instead of VC * fix "down" nova-compute service spuriously marked as "up" * Improve formatting of rest_api_version_history * Link to microversion history in docs * libvirt: fix live migration handling of disk_info * libvirt: introduce method to get domain XML * libvirt: introduce method 
detach_device to Guest object * Remove db layer hard-code permission checks for quota_usage_update * pass environment variables of proxy to tox * Remove db layer hard-code permission checks for quota_get_all_* * Fixed some misspellings * Clean up Fake_Url for unit test of flavor_access * Updated from global requirements * Add AggregateTypeAffinityFilter multi values support * volume: log which encryptor class is being used * VMware: Don't raise exception on resize of 0 disk * Hyper-V: sets supports_migrate_to_same_host capability * libvirt: remove _get_disk_xml to use get_disk from Guest * libvirt: introduce method to attach device * libvirt: update tests to use Mock instead of MagicMock * libvirt: Remove unnecessary JSON conversions * objects: fix parsing of NUMA cpu/mem properties * compute: remove get_image_metadata method * compute: only use non_inheritable_image_properties if snapshotting * objects: add os_require_quiesce image property * libvirt: make default_device_names DRY-er * virt: Move building the block_device_info dict into a method * Objects: update missing adapter types * Add error handling for creating secgroup * libvirt: handle code=38 + sigkill (ebusy) in destroy() * Removed a non-conditional 'if' statement * Map uuid db field to instance_uuid in BandwidthUsage object * Hyper-V: Fix missing WMI namespace issue on Windows 2008 R2 * Replace metaclass registry with explicit opt-in registry from oslo * Fix an objects layering violation in compute/api * Remove assertRemotes() from objects tests * Use fields from oslo.versionedobjects * Convert test objects to new field formats * Begin the transition to an explicit object registry * Set default event status to completed * Add a hacking rule for consistent HTTP501 message * Add and use raise_feature_not_supported() * Objects: fix typo with exception * Remove useless volume when boot from volume failed * Hyper-V: Lock snapshot operation using instance uuid * Refactor show_port() in neutron api * Ironic: 
Don't report resources for nodes without instances * libvirt: Remove unit tests for _hard_reboot * Adds hostutilsv2 to HyperV * libvirt: introduce method to delete domain config * libvirt: introduce method to get vcpus info * libvirt: Don't try to confine a non-NUMA instance * Removed explicit return from __init__ method * libvirt: introduce method resume to Guest object * libvirt: introduce method poweroff to Guest object * libvirt: make _create_domain return a Guest object * Raise InstanceNotFound when save FK constraint fails * Updated from global requirements * Add new VIF type VIF_TYPE_TAP * libvirt: Disable NUMA for broken libvirt * Handle FlavorNotFound when augmenting migrated flavors * virt: convert VFS API to use nova.virt.image.model * virt: convert disk mount API to use nova.virt.image.model * virt: introduce model for describing local image metadata * Remove unused instance_group_policy db calls * Improve compute swap_volume logging * libvirt: introduce method get_guest to Host object * libvirt: introduce a Guest to wrap around virDomain * Remove unused exceptions * Extract helper method to get image metadata from volume * Fix _quota_reserve test setup for incompatible type checking * Fixes referenced path in nova/doc/README.rst * Updated from global requirements * Handle cells race condition deleting unscheduled instance * Compute: tidy up legacy treatment for vif types * Allow libvirt cleanup completion when serial ports already released * objects: define the ImageMeta & ImageMetaProps objects * Ensure to store context in thread local after spawn/spawn_n * Ironic: Parse and validate Node's properties * Hyper-V: Fix SMBFS volume attach race condition * Remove unit_test doc * Make blueprints doc a reference for nova blueprints * Remove jenkins, launchpad and gerrit docs * Prune development.environment doc * docs: fixup libvirt NUMA testing docs to match reality * Fix some issues in devref for api_microversions * nova response code 403 on block device 
quota error * Updated from global requirements * Remove unused variables from images api * Compute: improve logging using {} instead of dict * snapshot: Copy some missing attrs to the snapshot bdms * bdm: Make sure that delete_on_termination is a boolean * Get rid of oslo-incubator copy of middleware * Make nova-manage handle completely missing flavor information * Use oslo_config choices support * Let soft-deleted instance_system_metadata readable * Make InstanceExternalEvent use an Enum for status * Add error message to failed block device transform * network: fix instance cache refresh for empty list * Imported Translations from Transifex * Add common function for v2 API flavor_get * Remove cell policy check * VMware: replace hardcoded strings with constants * Add missing @require_context * Standardize on assertJsonEqual in tests * Tolerate iso style timestamps for cells rpc communication * Force the value of LC_ALL to be en_US.UTF-8 * libvirt: disconnect_volume does not return anything * Remove hash seed comment from tox.ini * Allow querying for migrations by source_compute only * libvirt: Do not cache number of CPUs of the hypervisor * Create instance_extra entry if it doesn't update * Ignore Cinder error when shutdown instance * Remove use of builtin name * Hyper-V: Fixes cold migration / resize issue * Fix cells capacity calculation for n:1 virt drivers * VMware: Log should use uuid instead of name * VMware: fill in instance metadata when resizing instances * VMware: fill in instance metadata when launching instances * Add the swap and ephemeral BDMs if needed * Updated from global requirements * Block oslo.vmware 0.13.0 due to a backwards incompatible change * hypervisor support matrix: update libvirt KVM (s390x) * Hyper-V: ensure only one log writer is spawned per VM * Prevent access to image when filesystem resize is disabled * Share admin password func test between v2 and v2.1 * VMware: remove dead function in vim_util * Fix version unit test on Python 3 
* Resource tracker: remove invalid conductor call from tests * Remove outdated TODO comment * Disable oslo.vmware test dependency on Python 3 * Run tests with PyMySQL on Python 3 * Drop explicit suds dependency * improve speed of some ec2 keypair tests * Add nova object equivalence based on prims * Cleanups for pci stats in preparation for RT using ComputeNode * Replace dict.iteritems() with six.iteritems(dict) * Add a maintainers file * virt: make sure convert_all_volumes catches blank volumes too * compute utils: Remove a useless context parameter * make SchedulerV3PassthroughTestCase use NoDBTest * Don't use dict.iterkeys() * VMware: enforce minimum support VC version * Split up and improve speed of keygen tests * Replace dict(obj.iteritems()) with dict(obj) * libvirt: Fix cpu_compare tests and a wrong method when logging * Detect empty result when calling objects.BlockDeviceMapping.save() * remove _rescan_iscsi from disconnect_volume_multipath_iscsi * Use six.moves.range for Python 3 * Use EnumField for instance external event name * Revert "Detach volume after deleting instance with no host" * Removed unused methods and classes * Removed unused variables * Removed unused "as e/exp/error" statements * Resource tracker: use instance objects for claims * Remove db layer hard-code permission checks for security_group_default_rule_destroy * Avoid AttributeError at instance.info_cache.delete * Remove db layer hard-code permission checks for network_associate * Remove db layer hard-code permission checks for network_create_safe * Pass project_id when create networks by os-tenant-networks * Disassociate before deleting network in os-tenant-networks delete method * Remove db layer hard-code permission checks for v2.1 cells * Move unlock_override policy enforcement into V2.1 REST API layer * tests: libvirt: Fix test_volume_snapshot_delete tests * Add a finish log * Add nova-idmapshift to rootwrap filters * VMware: Missing docstring on parameter * Update docs layout * 
Add note to doc explaining scope * Show 'reserved' status in os-fixed-ips * Split instance event/tag correctly * libvirt: deprecate libvirt version usage < 0.10.2 * Fix race between resource audit and cpu pinning * Set migration_type for existing cold migrations and resizes * Add migration_type to Migration object * Add migration_type and hidden to Migration database model * libvirt: improve logging * Fix pip-missing-reqs * objects: convert HVSpec to use named enums * objects: convert VirtCPUModel to use named enums * Ironic: Fix delete instance when spawning * Retry a cell delete if host constraint fails * objects: introduce BaseEnumField to allow subclassing * Add policy to cover snapshotting of volume backed instances * objects: add a FlexibleBoolean field type * Don't update RT status when set instance to ERROR * Delete shelved_* keys in n-cpu unshelve call * Fix loading things in instance_extra for old instances * VMware: remove invalid comment * neutron: log hypervisor_macs before raising PortNotUsable * VMware: use get_object_properties_dict from oslo.vmware * VMware: use get_datastore_by_ref from oslo.vmware * Unshelving volume backed instance fails * Avoid useless copy in get_instance_metadata() * Fix raise syntax for Python 3 * Replace iter.next() with next(iter) * libvirt: use instance UUID with exception InstanceNotFound * devref: add information to clarify nova scope * Refactor an unit test to use urlencode() * Additional cleanup after compute RPC 3.x removal * Drop compute RPC 3.x support * libvirt: deprecate the remove_unused_kernels config option * Updated from global requirements * libvirt: Use 'relative' flag for online snapshot's commit/rebase operations * Remove db layer hard-code permission checks for quota_destroy_all_* * Replace unicode with six.text_type * Replace dict.itervalues() with six.itervalues(dict) * Use compute_node consistently in ResourceTracker * Fix the wrong comment in the test_servers.py file * Move ebrctl to compute.filter * 
libvirt: handle NotSupportedError in compareCPU * Hypervisor Support Matrix renders links in notes * Update fake flavor's root and ephemeral disk size * Code clean up db.instance_get_all_by_host() * use block_dev.get_bdm_swap_list in compute api * Catch SnapshotNotFound exception at os-volumes * Rename _CellProxy.iteritems method to items on py3 * Overwrite NovaException message * API: remove unuseful expected error code from v2.1 service delete api * Fix quota-update of instances stuck in deleting when nova-compute startup finish * API: remove admin require from certificate_* from db layer * API: Add policy enforcement test cases for pci API * API: remove admin require for compute_node(get_all/search_by_hyperviso) from db * API: remove admin require for compute_node_create/update/delete from db layer * API: remove admin require from compute_node_get_all_by_* from db layer * Share deferred_delete func tests between v2 and v2.1 * VMware: add support for NFS 4.1 * Compute: remove reverts_task_state from interface attach/detach * VMware: ensure that the adapter type is used * Fix failure of stopping instances during init host * Share assisted vol snapshots test between v2 and v2.1 * Compute: use instance object for _deleted_old_enough method * API: remove instance_get_all_by_host(_and_node) hard-code admin check from db * Remove db layer hard-code permission checks for service_get_by_host* * Remove db layer hard-code permission checks for service_get_by_compute_host * Detach volume after deleting instance with no host * libvirt: safe_decode xml for i18n logging * Fix scheduler issue when multiple-create failed * Move our ObjectListBase to subclass from the Oslo one * Fix cinder v1 warning with cinder_catalog_info option reference * Deprecate nova ironic driver's admin_auth_token * Handle return code 2 from blkid calls * Drop L from literal integer numbers for Python 3 * Libvirt: Use tpool to invoke guestfs api * Minor edits to support-matrix doc * hacking: remove 
unused variable author_tag_re * Update kilo version alias * Refactor tests that use compute's deprecated run_instance() method * Helper scripts for running under Apache2 * downgrade log messages for memcache server (dis)connect events * don't report service group connection events as errors in dbdriver * Updated from global requirements * Switch to _set_instance_obj_error_state in build_and_run_instance * Add SpawnFixture * Log the actual instance.info_cache when empty in floating ip associate * unify libvirt driver checks for qemu * VMware: Allow other nested hypervisors (HyperV) * servicegroup: remove get_all method never used as public * libvirt: add todo note to avoid call to libvirt from the driver * libvirt: add method to compare cpu to Host * libvirt: add method to list pci devices to Host * libvirt: add method to get device by name to Host * libvirt: add method to define instance to host * libvirt: add method to get cpu stats to host * monitor: remove dependance with libvirt * Clean up ComputeManager._get_instance_nw_info * Updated from global requirements * Cells: Call compute api methods with instance objects * Correct docstring info on two parameters * Start the conversion to oslo.versionedobjects * Cleanup conductor unused methods * Revert "Ironic: do not destroy if node is in maintenance" * fix network setup on evacuate * Reschedules sometimes do not allocate networks * Incorrect argument order passed to swap_volume * Mark ironic credential config as secret * Fix missing format arg in compute manager * objects: remove field ListOfEnumField * Cleaning up debug messages from previous change in vmops.py * Remove orphaned tables - iscsi_targets, volumes * console: clean tokens do not happen for all kind of consoles * Fix import order * Skip only one host weight calculation * Fix typo for test cases * VMWare: Isolate unit tests from requests * Imported Translations from Transifex * Cleanup docs landing page * Updated from global requirements * Add ability 
to inject routes in interfaces.template * tests: make API signature test also check static function * Make test_version_string_with_package_is_good work with pbr 0.11 * Fix disconnect_volume issue when find_multipath_device returns None * Updated from global requirements * Fix assert on call count for encodeutils.safe_decode mock * Don't wait for an event on a resize-revert * minor edit to policy_enforcement.rst * Update self with db result in InstanceInfoCache.save * libvirt: retry to undefine network filters during _post_live_migration * Wedge DB migrations if flavor migrations are not complete * Removed twice declared variables * Removed variables used not in the scope that they are declared * libvirt: add method to get hardware info to Host * libvirt: avoid call of listDefinedDomains when post live migration * Remove unused db.aggregate_metadata_get_by_metadata_key() call * Removed 'PYTHONHASHSEED=0' from tox.ini * Changed logic in _compare_result api_samples_test_base * Convert bandwidth_usage related timestamp to UTC native datetime * Drop use of 'oslo' namespace package 2015.1.0 -------- * Add a method to skip cells syncs on instance.save * Add some testing for flavor migrations with deleted things * Add support for forcing migrate_flavor_data * Virt: update shared storage log information message * Fixed functional in tests_servers, to pass with random PYTHONHASHSEED * Adds toctree to v2 section of docs * Fixes X509 keypair creation failure * Update rpc version aliases for kilo * libvirt/utils.py: Remove 'encryption' flag from create_cow_image * Libvirt: Correct logging information and progress when LM * libvirt/utils.py: Remove needless code from create_cow_image * libvirt/utils.py: Clarify comment in create_cow_image function * Fix documentation for scheduling filters * libvirt: check qemu version for NUMA & hugepage support * Add security group calls missing from latest compute rpc api version bump * Add security group calls missing from latest compute 
rpc api version bump * Make objects serialize_args() handle datetimes in positional args * Imported Translations from Transifex * view hypervisor details rest api should be allowed for non-admins * n-net: turn down log level when vif isn't found in deallocate_fixed_ip * Associate floating IPs with first v4 fixed IP if none specified * Correct the help text for the compute option * Convert NetworkDuplicated to HTTPBadRequest for v2.1 API * Remove comment inconsistent with code * Remove db layer hard-code permission checks for fixed_ip_get_* * Fixed nova-network dhcp-hostsfile update during live-migration * Remove db layer hard-code permission checks for network_get_all_by_host * Remove db layer hard-code permission checks for security_group_default_rule_create * Remove db layer hard-code permission checks for floating_ips_bulk * sync oslo: service child process normal SIGTERM exit * Remove downgrade support from the cellsv2 api db * Fix migrate_flavor_data() to catch instances with no instance_extra rows * libvirt: use importutils instead of python built-in 2015.1.0rc2 ----------- * Imported Translations from Transifex * Updated from global requirements * Control create/delete flavor api permissions using policy.json * Add config option to disable handling virt lifecycle events * Ironic: pass injected files through to configdrive * libvirt: Allow discrete online pCPUs for pinning * Fix migrate_flavor_data() to catch instances with no instance_extra rows * libvirt: unused imported option default_ephemeral_format * libvirt: introduce new method to guest tablet device * Fix migrate_flavor_data string substitution * Object: Fix incorrect parameter set in flavor save_extra_specs * Fix max_number for migrate_flavor data * remove downgrade support from our database migrations * Add policy check for extension_info * Cleanup unnecessary session creation in floating_ip_deallocate * Fix inefficient transaction usage in floating_ip_bulk_destroy * Control create/delete flavor 
api permissions using policy.json * Fix handling of pci_requests in consume_from_instance * Use list of requests in InstancePCIRequests.obj_from_db * Add numa_node field to PciDevicePool * scheduler: re-calculate NUMA on consume_from_instance * VMware: remove unused method * VMware: enable configuring of console delay * Don't query compute_node through service object in nova-manage * Fixed test in test_tracker to work with random PYTHONHASHSEED * Update rpc version aliases for kilo * remove the CONF.allow_migrate_to_same_host * Fix kwargs['migration'] KeyError in @errors_out_migration decorator * Add equality operators to PciDeviceStats and PciDevice objects * libvirt: Add option to ssh to prevent prompting * Validate server group affinity policy * VMware: use oslo.vmware methods for handling tokens * Remove db layer hard-code permission checks for network_get_associated_fixed_ips * tests: use numa xml automatic generation in libvirt tests * Resource tracker: unable to restart nova compute * Include supported version information * Release Import of Translations from Transifex * Fixed tests in test_glance to pass with random PYTHONHASHSEED * Refactored tests in test_neutron_driver to pass with random PYTHONHASHSEED * refactored test in vmware test_read_write_util to pass with random PYTHONHASHSEED * fixed tests in test_matchers to pass with random PYTHONHASHSEED * fix for vmware test_driver_api to pass with random PYTHONHASHSEED * Update hypervisor support matrix with kvm on system z * Fix kwargs['migration'] KeyError in @errors_out_migration decorator * VMware: remove unused parameter for VMOPS spawn * libvirt: make _get_instance_disk_info conservative * refactored tests to pass in test_inject to pass with random PYTHONHASHSEED * fixed tests in test_iptables_network to work with random PYTHONHASHSEED * refactored tests in test_objects to pass with random PYTHONHASHSEED * fixed tests in test_instance to pass with random PYTHONHASHSEED * Replace ssh exec calls with 
paramiko lib * Fix handling of pci_requests in consume_from_instance * Use list of requests in InstancePCIRequests.obj_from_db * Share hide server add tests between v2 and v2.1 * Share V2 and V2.1 images functional tests * change the reboot rpc call to local reboot * 'deleted' filter does not work properly * Spelling mistakes in nova/compute/api.py * Use kwargs from compute v4 proxy change_instance_metadata * Delay STOPPED lifecycle event for all domains, not just Xen * Use kwargs from compute v4 proxy change_instance_metadata * compute: stop handling virt lifecycle events in cleanup_host() * Replace BareMetalDriver with IronicDriver in option help string * tests: introduce a NUMAServersTest class * Fix test_set_admin_password_bad_state() * Fix test_attach_interface_failure() * Fix test_swap_volume_api_usage() * Resource tracker: unable to restart nova compute * Forbid booting of QCOW2 images with virtual_size > root_gb * Pass migrate_data to pre_live_migration * Fixed order of arguments during execution live_migrate() * update .gitreview for stable/kilo * Add min/max of API microversions to version API * VMware: Fix attribute error in resize * Release bdm constraint source and dest type * Fix check_can_live_migrate_destination() in ComputeV4Proxy * compute: stop handling virt lifecycle events in cleanup_host() * Store context in local store after spawn_n * Fixed incorrect dhcp_server value during nova-network creation * Share multiple create server tests between v2 and v2.1 * Remove power_state.BUILDING * libvirt: cleanup unused lifecycle event handling variables from driver * Add min/max of API microversions to version API * Pass migrate_data to pre_live_migration * libvirt: add debug logging to pre_live_migration * Don't ignore template argument in get_injected_network_template * Refactor some service tests and make them not require db * Remove and deprecate unused conductor service calls * Convert service and servicegroup to objects * Add numa_node field to 
PciDevicePool * Ironic: do not destroy if node is in maintenance * libvirt: remove unnecesary quotes * VMware: fix log warning * libvirt: quit early when mempages requested found * VMware: validate CPU limits level * Remove and deprecate conductor get_ec2_ids() * Remove unused metadata conductor parameter * Replace conductor get_ec2_ids() with new Instance.ec2_ids attribute * Add EC2Ids object and link to Instance object as optional attribute * neutron: reduce complexity of allocate_for_instance (security_groups) * neutron: reduce complexity of allocate_for_instance (requested_networks) * Avoid indexing into an empty list in getcallargs * Fixed order of arguments during execution live_migrate() * Fix check_can_live_migrate_destination() in ComputeV4Proxy 2015.1.0rc1 ----------- * Add compute RPC API v4.0 * Reserve 10 migrations for backports * Honor uuid parameter passed to nova-network create * Update compute version alias for kilo * Refactor nova-net cidr validation in prep for bug fix * Fix how service objects are looked up for Cells * websocketproxy: Make protocol validation use connection_info * scheduler: re-calculate NUMA on consume_from_instance * Prevent scheduling new external events when compute is shutdown * Print choices in the config generator * Manage compute node that exposes no pci devices * libvirt: make fakelibvirt more customizable * Use cells.utils.ServiceProxy object within cells_api * Fix Enum field, which allows unrestricted values * consoleauth: Store access_url on token authorization * tests: add a ServersTestBase class * tests: enhance functional tests primitives * libvirt: Add version check when pinning guest CPUs * Open Liberty development * xenapi: pull vm_mode and auto_disk_config from image when rescue * VMware: Fix attribute error in resize * Allow _exec_ebtables to parse stderr * Fix rebuild of an instance with a volume attached * Imported Translations from Transifex * Stacktrace on live migration monitoring * Add 'docker' to the 
list of known hypervisor types * Respect CONF.scheduler_use_baremetal_filters * Make migration 274 idempotent so it can be backported * Add 'suspended' lifecycle event * Fix how the Cells API is returning ComputeNode objects * Ironic: fix log level manipulation * Fix serialization for Cells Responses * libvirt: fix disablement of NUMA & hugepages on unsupported platforms * Optimize periodic call to get_by_host * Fix multipath device discovery when UFN is enabled * Use retrying decorator from oslo_db * virt: Make sure block device info is persisted * virt: Fix block_device tests * instance termination with update_dns_entries set fails * Filter fixed IPs from requested_networks in deallocate_for_instance * Fixes _cleanup_rbd code to capture ImageBusy exception * Remove old relation in Cells for ComputeNode and Service * consoleauth: remove an instance of mutation while iterating * Add json-schema for v2.1 fixed-ips * Share V2 and V2.1 tenant-networks functional tests * Share migrations tests between V2 and V2.1 * Merging instance_actions tests between V2 and V2.1 * Share V2 and V2.1 hosts functional tests * Add serialization of context to FakeNotifier * Handle nova-network tuple format in legacy RPC calls * remove usage of policy.d which isn't cached * Update check before migrating flavor * Expand Origin header check for serial console * libvirt: reuse unfilter_instance pass-through method * No need to create APIVersionRequest every time * Libvirt: preallocate_images CONFIG can be arbitrary characters * Add some tests for the error path(s) in RBD cleanup_volumes() * VMware: add instance to log messages * Hyper-V: checks for existent Notes in list_instance_notes * Fix incorrect statement in inline neutronv2 docs * Imported Translations from Transifex * Vmware:Find a SCSI adapter type for attaching iSCSI disk * Avoid MODULEPATH environment var in config generator * Be more forgiving to empty context in notification * Store cells credentials in transport_url properly * 
Fix API links and labels * Stale rc.local file - vestige from cloudpipe.rst * Remove stale test + opensssl information from docs * Add the last of the oslo libraries to hacking check * Cancel all waiting events during compute node shutdown * Update hypervisor support matrix for ironic wrt pause/suspend * Scheduler: deprecate mute_weight_value option on weigher * Pass instance object to add_instance_fault_from_exc * Remove dead vmrc code * Add vnc_keymap support for vmware compute * Remove compute/api.py::update() * add ironic hypervisor type * Removes XML MIME types from v2 API information * API: fix typo in unit tests * Add field name to error messages in object type checking * Remove obsolete TODO in scheduler filters * Expand valid server group name character set * Raise exception when backup volume-backed instance * Libvirt SMB volume driver: fix volume attach * Adds Compute API v2 docs * PCI tracker: make O(M * N) clean_usage algo linear * Fix v2.1 list-host to remove 'services' filter * Fix incorrect http_conflict error message * Link to devstack guide for appropriate serial_console instructions * Skip socket related unit tests on OSX * Add debug logging to quota_reserve flow * Fix missing the cpu_pinning request * Hyper-V: Sets *DataRoot paths for instances * Refactored test in test_neutron_driver to pass with random PYTHONHASHSEED * fixed tests in test_neutrounv2 to pass with random PYTHONHASHSEED * Refactored test in linux_net to pass with random PYTHONHASHSEED * refactored tests in test_wsgi to pass with random PYTHONHASHSEED * fixed tests in test_simple_tenant_usage to pass with random PYTHONHASHSEED * Refactored test_availability_zone to work properly with random PYTHONHASHSEED * fixed test in test_disk_config to work with random PYTHONHASHSEED * Fixed test to work with random PYTHONHASHSEED * Fix _instance_action call for resize_instance in cells * Add some logging in the quota.reserve flow * Check host cpu_info if no cpu_model for guest * Move 
ComputeNode creation at init stage in ResourceTracker * Releasing DHCP in nova-network fixed * Fix PCIDevicePool.to_dict() when the object has no tags * Convert pci_device_pools dict to object before passing to scheduler * Sync from Oslo-Incubator - reload config files * Fix v2.1 hypervisor servers to return empty list * Add support for cleaning in Ironic driver * Adjust resource tracker for new Ironic states * Ironic: Remove passing Flavor's deploy_{kernel, ramdisk} * don't 500 on invalid security group format * Adds cleanup on v2.2 keypair api and tests * Set conductor use_local flag in compute manager tests * Use migration object in resource_tracker * Move suds into test-requirements.txt * Make refresh_instance_security_rules() handle non-object instances * Add a fixture for the NovaObject indirection API * Add missing `shows` to the RPC casts documentation * Override update_available_resources interval * Fix for deletes first preexisting port if second was attached to instance * Avoid load real policy from policy.d when using fake policy fixture * Neutron: simplify validate_networks * Switch to newer cirros image in docs * Fix common misspellings * Scheduler: update doctring to use oslo_config * Skip 'id' attribute to be explicitly deleted in TestCase * Remove unused class variables in extended_volumes * libvirt: remove volume_drivers config param * Make conductor use instance object * VMware: add VirtualVmxnet3 to the supported network types * Fix test cases still use v3 prefix * Typo in oslo.i18n url * Fix docs build break * Updated from global requirements * Fix typo in nova/tests/unit/test_availability_zones.py * mock out build_instances/rebuild_instance when not used * Make ComputeAPIIpFilterTestCase a NoDBTestCase * Remove vol_get_usage_by_time from conductor api/rpcapi * default tox cmd should also run 'functional' target * VMware: Consume the oslo.vmware objects * Release bdm constraint source and dest type * VMware: save instance object creation in 
test_vmops * libvirt: Delay only STOPPED event for Xen domain * Remove invalid hacking recheck for baremetal driver * Adds Not Null constraint to KeyPair name * Fix orphaned ports on build failure * VMware: Fix volume relocate during detach 2015.1.0b3 ---------- * Fix AggregateCoreFilter return incorrect value * Remove comments on API policy, remove core param * Add policy check for consoles * Sync from oslo-incubator * Rename and move the v2.1 api policy into separated files * Disable oslo_messaging debug logging * heal_instance_info_cache_interval help clearer * Forbid booting of QCOW2 images with virtual_size > root_gb * don't use oslo.messaging in mock * BDM: Avoiding saving if there were no changes * Tidy up sentinel comparison in pop_instance_event * Tidy up dict.setdefault() usage in prepare_for_instance_event * Remove duplicate InvalidBDMVolumeNotBootable * libvirt: make default value of numa cell memory to 0 when not defined * Add the instance update calls from Compute * Save bdm.connection_info before calling volume_api.attach_volume * Add InstanceMapping object * Add CellMapping object * load ram_allocation_ratio when asked * Remove pci_device.update_device helper function * Tox: reduce complexity level to 35 * Remove db layer hard-code permission checks for service_get_all * Expand help message on some quota config options * Test fixture for the api database * remove duplicate calls to cfg.get() * Remove context from remotable call signature * Actually stop passing context to remotable methods * Remove usage of remotable context parameter in service, tag, vif * Remove usage of remotable context parameter in security_group* * Remove usage of remotable context parameter in pci_device, quotas * let fake virt track resources * doc: fix a docstext formatting * Update unique constraint of compute_nodes with deleted column * Modify filters to get instance info from HostState * Add the RPC calls for instance updates * Implement instance update logic in 
Scheduler * Log exception from deallocate_port_for_instance for triage * Remove usage of remotable context parameter in migration, network * Remove usage of remotable context parameter in compute_node, keypair * Remove usage of remotable context parameter in instance* objects * Remove usage of remotable context parameter in fixed_ip, flavor, floating_ip * Remove usage of remotable context parameter in ec2 object * libvirt: partial fix for live-migration with config drive * Added assertJsonEqual method to TestCase class * VMware: Improve reporting of path test failures * libvirt test_cpu_info method fixed random PYTHONHASHSEED compatibility * Remove usage of remotable context parameter in bandwidth, block_device * Remove usage of remotable context parameter in agent, aggregate * Remove db layer hard-code permission checks for pci * Objects: use setattr rather than dict syntax in remotable * Split out NovaTimestampObject * libvirt: Resize down an instance booted from a volume * add neutron api NotImplemented test cases for Network V2.1 * Stop using exception.message * Remove unused oslo logging fixture * libvirt: don't allow to resize down the default ephemeral disk * Add api microvesion unit test case for wsgi.action * Change some comments for instance param * Hyper-V: Adds VMOps unit tests (part 2) * Add get_api_session to db api * Use the proper database engine for nova-manage * Add support for multiple database engines * Virt: update fake driver to use UUID as lookup key * VMware: use instance UUID as instance name * VMware: update test_vm_util to use instance object * Handle exception when doing detach_interface * Variable 'name' already declared in 'for' loop * Handle RESIZE_PREP status when nova compute do init_instance * Move policy enforcement into REST API layer for v2.1 api volume_attachment * Remove the elevated context when get network * Handles exception when unsupported virt-type given * Fix confusing log output in nova/nova/network/linux_net.py * 
Workaround for race condition in libvirt * remove unneeded teardown related code * Fixed archiving of deleted records * libvirt: Remove minidom usage in driver.py * Stop spamming logs when creating context * Fix ComputeNode backport for Service.obj_make_compatible * Break out the child version calculation logic from obj_make_compatible() * Fix PciDeviceDBApiTestCase with referential constraint checking * Verify all quotas before updating the db * Add shadow table empty verification * Add @wrap_exception() for 3 compute functions * Remove FK on service_id and make service_id nullable * Using Instance object instead of db call * Revert "Removed useless method _get_default_deleted_value." * Remove db layer hard-code permission checks for network_count_reserved_ips * implement user negative testing for flavor manage * refactor policy fixtures to allow use of real policy * libvirt: remove unnecessary flavor parameter * Compute: no longer need to pass flavor to the spawn method * Update some ResizeClaimTestCase tests * Move InstanceClaimTestCase.test_claim_and_audit * Handle exception when attaching interface failed * Deprecate Nova in tree EC2 APIs * cells: don't pass context to instance.save in instance_update_from_api * ensure DatabaseFixture removes db on cleanup * objects: introduce numa topology limits objects * Add a test that validates object backports and child object versions * Fix ArchiveTestCase on MySQL due to differing exceptions * VMware: fix VM rescue problem with VNC console * VMware: Deprecation warning - map one nova-compute to one VC cluster * compute: don't trace on InstanceNotFound in reverts_task_state * Fix backporting objects with sub-objects that can look falsey * neutron: deprecate 'allow_duplicate_networks' config option * Fix Juno nodes checking service.compute_node * Fix typo in _live_migration_cleanup_flags method * libvirt: add in missing translation for exception * Move policy enforcement into REST API layer for v2.1 extended_volumes * 
Remove useless policy rules for v2.1 api which removed/disabled * Remove db layer hard-code permission checks for service_get_all_by_* * Fix infinite recursion caused by unnecessary stub * Websocket Proxy should verify Origin header * Improve 'attach interface' exception handling * Remove unused method _make_stub_method * Remove useless get_one() method in SG API * Fix up join() and leave() methods of servicegroup * network: Fix another IPv6 test for Mac * Add InstanceList.get_all method * Use session with neutronclient * Pass correct context to get_by_compute_node() * Revert "Allow force-delete irrespective of VM task_state" * Fix kwargs['instance'] KeyError in @reverts_task_state decorator * Fix copy configdrive during live-migration on HyperV * Move V2 sample files to respective directory * V2 tests -Reuse server post req/resp sample file * V2.1 tests - Reuse server post req/resp sample file * Remove an unused config import in nova-compute * Raise HTTPNotFound for Port/NetworkNotFound * neutronv2: only create client once when adding/removing fixed IPs * Stop stacktracing in _get_filter_uuid * libvirt: Fix live migration failure cleanup on ceph * Sync with latest oslo-incubator * Better logging of resources * Preserve preexisting ports on server delete * Move oslo.vmware into test-requirements.txt * Remove db layer hard-code permission checks for network_get_by_uuid * Refactor _regex_instance_filter for testing * Add instance_mappings table to api database * ec2: clean up in test_cinder_cloud * Remove unused method queue_get_for * Remove make_ip_dict method which is not used * Remove unused method delete_subnet * Remove unused method disable_vlan * Remove unused method get_request_extensions * Fix wrong log output in nova/nova/tests/unit/fake_volume.py * Updated from global requirements * Remove db layer hard-code permission checks for network_get_by_cidr * Add cell_mappings table to api database * Ban passing contexts to remotable methods * Fix a remaining case 
of passing context to a remotable in scheduler * Fix several cases of passing context to quota-related remotable methods * Fix some cases of passing context to remotables with security groups * Replace RPC topic-based service queries with binary-based in cells * Replace RPC topic-based service queries with binary-based in scheduler * Fix some straggling uses of passing context to remotable methods in tests * VMware: remove code invoking deprecation warning * Fix typo in nova/scheduler/filters/utils.py * Remove db layer hard-code permission checks for network_delete_safe * Don't add exception instance in LOG.exception * Move policy enforcement into REST API layer for v2.1 servers * Move policy enforcement into REST API layer for v2.1 api attach_interfaces * Remove db layer hard-code permission checks for flavor-manager * Remove db layer hard-code permission checks for service_delete/service_get * Remove db layer hard-code permission checks for service_update * Fix 'nova show' return incorrect mac info * Use controller method in all admin actions tests * Remove db layer hard-code permission checks for flavor_access * Modify filters so they can look to HostState * let us specify when samples tests need admin privs * Updated from global requirements * Remove cases of passing context to remotable methods in Flavor * Remove cases of passing context to remotable methods in Instance * Fix up PciDevice remotable context usage * libvirt: add comment for vifs_already_plugged=True in finish_migration * neutron: check for same host in _update_port_binding_for_instance * Move policy enforcement into REST API layer for v2.1 security groups * Keep instance state if lvm backend not impl * Replace RPC topic-based service queries in nova/api with binary-based * Remove service_get_by_args from the DB API * Remove usage of db.service_get_by_args * Make unit tests inherit from test.NoDBTestCase * Fixed incorrect behavior of method sqlalchemy.api._check_instance_exists * Remove db layer 
hard-code permission checks for migrations_get* * vmware: support both hard and soft reboot * xenapi: Fix session tests leaking state * libvirt: Cleanup snapshot tests * Change instance disappeared during destroy from Warning to Info * Replace instance flavor delete hacks with proper usage * Add delattr support to base object * Use flavor stored with instance in vmware driver * Use flavor stored with instance in ironic driver * Modify AggregateAPI methods to call the Scheduler client methods * Create Scheduler client methods for aggregates * Add update and delete _aggregate() method to the Scheduler RPC API * Instantiate aggregates information when HostManager is starting * Add equivalence operators to NUMACell and NUMAPagesTopology * Adds x509 certificate keypair support * Better round trip for RequestContext<->Dict conversion * Make scheduler client reporting use ComputeNode object * Prevent update of ReadOnlyDict * Copy the default value for field * neutron: add logging during nw info_cache refresh when port is gone * Add info for Standalone EC2 API to cut access to Nova DB * VMware: Fix disk UUID in instance's extra config * Update config generator to use new style list_opts discovery * Avoid KeyError Exception in extract_flavor() * Imported Translations from Transifex * Updated from global requirements * Move policy enforcement into REST API layer for v2.1 create backup * Truncate encoded instance sys meta to 255 or less * Adds keypair type in nova-api * Switch nova.virt.vmwareapi.* to instance dot notation * Allow disabling the evacuate cleanup mechanism in compute manager * Change queries for network services to use binary instead of topic * Add Service.get_by_host_and_binary and ServiceList.get_by_binary * Compute: update config drive settings on instance * Fix docstrings for assorted methods * Config driver: update help text for force_config_drive * libvirt-numa.rst: trivial spelling fixes * Ensure bridge deleted with brctl delbr * create noauth2 * enhance 
flavor manage functional tests * Add API Response class for more complex testing * Add more log info around 'not found' error * Remove extended addresses from V2.1 update & rebuild * Switch nova.virt.hyperv.* to instance dot notation * Revert instance task_state when compareCPU fails * Libvirt: Fix error message when unable to preallocate image * Switch nova.virt.libvirt.* to instance dot notation * Add nova-manage commands for the new api database * Add second migrate_repo for cells v2 database migrations * Updated from global requirements * Force LANGUAGE=en_US in test runs * neutron: consolidate common unbind ports logic * Sync oslo policy change * Remove compute_node field from service_get_by_compute_host * Fix how the Service object is loading the compute_node field * Remove compute_node from service_get_by_cn Cells API method * Remove want_objects kwarg from nova.api.openstack.common.get_instance * Switch nova.virt.* to use the object dot notation * add string representation for context * Remove db layer hard-code permission checks for migration_create/update * Disables pci plugin for v2.1 & microversions * Fix logic for checking if az can be updated * Add obj_alternate_context() helper * libvirt: remove libvirt import from tests so we only use fakelibvirt * capture stdout and logging for OSAPIfixture test * remove unused _authorize_context from security_group_default_rules.py * Switch nova.context to actually use oslo.context * Fixed incorrect indent of test_config_read_only_disk * Fixed incorrect assertion in test_db_api * Remove TranslationFixture * Replace fanout to False for CastAsCall fixture * Make ConsoleAuthTokensExtensionTestV21 inherit from test.NoDBTestCase * Remove db layer hard-code permission checks for task_log_get* * Remove db layer hard-code permission checks for task_log_begin/end_task * Api: remove unusefull compute api from cells * Remove db layer hard-code permission checks for service_create * Imported Translations from Transifex * 
Change v3 import to v21 in 2.1 api unit test * Fix NotImplementedError handling in interfaces API * Support specifing multiple values for aggregate keys * Remove attach/detach/swap from V2.1 extended_volumes * Make metadata cache time configurable * Remove db layer hard-code permission checks for fixed_ip_disassociate_all_by_timeout * Move policy enforcement into REST API layer for v2.1 api assisted_volume_snapshots * Fix tiny typo in api microversions doc * Fixes Hyper-V: configdrive is not migrated to destination * ensure that ram is >= 1 in random flavor creation * Fixes 500 error message and traces when no free ip is left * db: Add index on fixed_ips updated_at * Display host chosen for instance by scheduler * PYTHONHASHSEED bug fix in test_utils * fixed tests in test_vm_util to work with random PYTHONHASHSEED * Add microversion allocation on devref * Remove OS-EXT-IPS attributes from V2.1 server ips * Remove 'locked_by' from V2.1 extended server status * Remove 'id' from V2.1 update quota_set resp * Fix bad exception logging * VMware: Ensure compute_node.hypervisor_hostname is unique * Inherit exceptions correctly * Remove en_US translation * Move policy enforcement into REST API layer for v2.1 cloudpipe * Move policy enforcement into REST API layer for v2.1 security_group_default_rules * linux_net.metadata_accept(): IPv6 support * Enforce in REST API layer on v2.1 api remote consoles * Remove accessips attribute from V2.1 POST server resp * Move policy enforcement into REST API layer for v2.1 floating_ip_dns * Fix bad interaction between @wsgi.extends and @wsgi.api_version * Enforce in REST API layer on v2.1 shelve api * Move policy enforcement into REST API layer for v2.1 api evacuate * Add manual version comparison to microversion devref document * Switch to uuidutils from oslo_utils library * Add developer documentation for writing V2.1 API plugins * Convert nova.compute.* to use instance dot notation * Better power_state logging in 
_sync_instance_power_state * Use instance objects in fping/instance_actions/server_metadata * Fix misspellings words in nova * Fix KeyErrors from incorrectly formatted NovaExceptions in unit tests * Move policy enforcement into REST API layer for v2.1 floating ips * Switch nova.network.* to use instance dot notation * Revert : Switch off oslo.* namespace check temporarily * Move policy enforcement into REST API layer for v2.1 networks related * Remove db layer hard-code permission checks for v2.1 agents * Move v2.1 virtual_interfaces api policy enforcement into REST API layer * fix 'Empty module name' exception attaching volume * Use flavor stored with instance in libvirt driver * Handle 404 in os-baremetal-nodes GET * API: Change the API cpu_info to be meaning ful * Updated from global requirements * Make compute unit tests inherit from test.NoDBTestCase * Request objects in security_groups api extensions * Reuse is_int_like from oslo_utils * VMware: fix network connectivity problems * Move policy enforcement into REST API layer for v2.1 admin password * Fix the order of base classes in migrations test cases * Libvirt: Allow missing volumes during delete * Move policy enforcement into REST API layer for v2.1 server_diagnostics * Fix wrong log when reschedule is disabled * Replace select-for-update in fixed_ip_associate * Move policy enforcement into REST API layer for v2.1 fping * Consolidate use api request version header * Copy image from source host when ImageNotFound * VMware: update get_available_datastores to only use clusters * Add useful debug logging when policy checks fail * Remove unused conductor methods * Call notify_usage_exists() without conductor proxying * Updated from global requirements * Make notifications use BandwidthUsageList object * libvirt: Fix migration when image doesn't exist * Fix a typo of devref document for api_plugin * console: add unit tests for baseproxy * libvirt: log host capabilities on startup * Allow configuring proxy_host 
and proxy_port in nova.conf * Fixes novncproxy logging.setup() * Add descriptions to some assertBooleans * Remove update_store usage * Enforce policy checking in REST API layer for v2.1 server_password * Add methods that convert any volume BDM to driver format * Split scheduler weight test on ram * Split scheduler weight test on metrics * Split scheduler weight test on ioops * Fix 500 when deleting a not existing ec2 security group * Remove backwards compat oslo.messaging entries from setup.cfg * Change utils.vpn_ping() to return a Boolean * Enable retry when there are multiple force hosts/nodes * Use oslo.log * switch LOG.audit to LOG.info * Add catch FlavorExtraSpecsNotFound in V2 API * tests: remove duplicate keys from dictionary * Add blkid rootwrap filter * Fix idempotency of migration 269 * objects: fix issue in test cases for instance numa * VMware: Accept image and block device mappings * nova flavor manage functional test * extract API fixture * Fix V2 hide server address functional tests * Remove unused touch command filter * Add a test for block_device_make_list_from_dicts * Move policy enforcement into REST API layer for v2.1 floating_ip_pools * libvirt: address test comments for zfcp volume driver changes * libvirt: Adjust Nova to support FCP on System z systems * Fix BM nodes extension to deal with missing node properties * VMware: update the support matrix for security groups * Ignore 'dynamic' addr flag on gateway initialization * Adds xend to rootwrap.d/compute.filters * Create volume in the same availability zone as instance * Wrap IPv6 address in square brackets for scp/rsync * fake: fix public API signatures to match virt driver * Added retries in 'network_set_host' function * Use NoDBTestCase instead of TestCase * Change microversion header name * VMware: ensure that resize treats CPU limits correctly * Compute: pass flavor object to migrate_disk_and_power_off * extract method from fc volume discovery * Set instance NUMA topology on HostState * 
Support live-migrate of instances in PAUSED state * Fix DB access by FormatMappingTestCase * api: report progress when instance is migrating * libvirt: proper monitoring of live migration progress * libvirt: using instance like object * libvirt: convert tests from mox to mock * XenAPI: Fix data loss on resize up * Delete instance files from dest host in revert-resize * Pass the capabilities to ironic node instance_info * No need to re-fetch instance with sysmeta * Switch nova.api.* to use instance dot notation * Objectify calls to service_get_by_compute_host * Refactor how to remove compute nodes when service is deleted * Move policy enforcement into REST API layer for v2.1 admin actions * Contrail VIF Driver changes for Nova-Compute * libvirt : Fix slightly misleading parameter name, validate param * libvirt: cleanup setattr usage in test_host * libvirt: add TODOs for removing libvirt attribute stubs * Expand try/except for get_machine_ips * Switch nova.compute.manager to use instance dot notation * libvirt: stub out VIR_CONNECT_LIST_DOMAINS_INACTIVE * libvirt: stub out VIR_SECRET_USAGE_TYPE_ISCSI for older libvirt * Change calls to service information for Hypervisors API * Add handling for offlined CPUs to the nova libvirt driver * Make compute API create() use BDM objects * Remove redundant tearDown from ArchiveTestCase * libvirt: switch LibvirtConnTestCase back to NoDBTestCase * Replace usage of LazyPluggable by stevedore driver * Don't mock time.sleep with None * Libvirt: Support ovs plug in vhostuser vif * Removed duplicate key from dictionary * Fixes Attribute Error when trying to spawn instance from vhd on HyperV * Remove computenode relationship on service_get * Remove nested service from DB API compute_nodes * libvirt: Use XPath instead of loop in _get_interfaces * fixed tests to work with random PYTHONHASHSEED * Imported Translations from Transifex * Make the method _op_method() public * Quiesce boot from volume instances during live snapshot * Fix "Host 
Aggregate" section of the Nova Developer Guide * network: Fix another IPv6 test for Mac * Pre-load default filters during scheduler initialization * Libvirt: Gracefully Handle Destroy Error For LXC * libvirt: stub VIR_CONNECT_LIST_DOMAINS_ACTIVE for older libvirts * Fix VNC access, when reverse DNS lookups fail * Remove now useless requirements wsgiref * Add JSON schema for v2.1 add network API * Handle MessagingException in unshelving instance * Compute: make use of dot notation for console access * Compute: update exception handling for spice console * Add missing api samples for floating-ips api(v2) * Move v2.1 rescue api policy enforcement into REST API layer * Move policy enforcement into REST API layer for v2.1 ips * Move policy enforcement into REST API layer for v2.1 multinic * Move policy enforcement into REST API layer for v2.1 server_metadata * VMware: fix resize of ephemeral disks * VMware: add in a utility method for detaching devices * VMware: address instance resize problems * Fixes logic in compute_node_statistics * Cover ListOfObjectField for relationship test * Replace oslo-incubator with oslo_context * Libvirt: add in unit tests for driver capabilities * Ironic: add in unit tests for driver capabilities * Tests: Don't require binding to port 4444 * libvirt: fix overly strict CPU model comparison in live migration * Libvirt: vcpu_model support * IP filtering is not accurate when used with limit * Change how the API is getting a list of compute nodes * Change how Cells are getting the list of compute nodes * Change how HostManager is calling the service information * Move scheduler.host_manager to use ComputeNode object * patch out nova libvirt driver event thread in tests * Change outer to inner join in fixed IP DB API func * Small cleanup in pci_device_update * Remove useless NotFound exception catching for v2/v2.1 fping * V2.1 cleanup: Use concrete NotFound exception instead of generic * Drop deprecated namespace for oslo.rootwrap * Add 
vcpu_model to instance object * Pass instance primitive to instance_update_at_top() * Adds infrastructure for microversioned api samples * Libvirt: Support for generic vhostuser vif * Pull singleton config check cruft out of SG API * hacking: Got rid of unnecessary TODO * Remove unused function in test * Remove unused function * hardware: fix reported host mempages in numa cell * objects: fix numa obj relationships * objects: remove default values for numa cell * Move policy enforcement into REST API layer for v2.1 suspend/resume server * Move policy enforcement into REST API layer for v2.1 api console-output * Move policy enforcement into REST API layer for v2.1 deferred_delete * Move migrate-server policy enforce into REST API * Add API schema for v2.1 tenant networks API * Move policy enforcement into REST API layer for v2.1 lock server * Libvirt: cleanup rescue lvm when unrescue * Sync simple_tenant_usage V2.1 exception with V2 and add test case * IP filtering can include duplicate instances * Add recursive flag to obj_reset_changes() * Compute: use dot convension for _poll_rescued_instances * Add tests for nova-manage vm list * libvirt: add libvirt/parallels to hypervisor support matrix * Compute: update reboot_instance to use dot instance notation * Fix incorrect compute api config indentation * libvirt: fix emulator thread pinning when doing strict CPU pinning * libvirt: rewrite NUMA topology generator to be more flexible * libvirt: Fix logically inconsistent host NUMA topology * libvirt: utils canonicalize now the image architecture property * A couple of grammar fixes in help strings * Implement api samples test for os-baremetal-nodes Part 2 * Compute: use consistant instance dot notation * Log warning if CONF.my_ip is not found on system * libvirt: remove _destroy_instance_files shim * virt: Fix interaction between disk API tests * network: Fix IPv6 tests for Mac * Use dot notation on instance object fields in _delete_instance * libvirt: memnodes shuold 
be set to a list instead of None * Cleanup add_fixed_ip_to_instance tests * Cleanup test_instance_dns * Fix detach_sriov_ports to get context to be able to get image metadata * Implement api samples test for os-baremetal-nodes * Fix description of parameters in nova functions * Stop making the database migration backend lazy pluggable * Updated from global requirements * Libvirt: Created Nova driver for Quobyte * Adds keypair type database migration * libvirt: Enable serial_console feature for system z * Make tests use sha256 as openssl default digest algorithm * Improved performance of db method network_in_use_on_host * Replace select-for-update in floating_ip_allocate_address * Move policy enforcement into REST API layer for v2.1 pause server * Libvirt: update log message * Update usage of exception MigrationError * Extract preserve ephemeral on rebuild from servers plugin * VMware: update get_vm_resize_spec interface * VMware: Enable spawn from OVA image * Raise bad request for missing 'label' in tenant network * CWD is incorrectly set if exceptions are thrown * VMware: add disk device information to VmdkInfo * Use controller methods directly in test_rescue * Call controller methods directly in test_multinic * Add version specific test cases for microverison * Change v2.1 API status to CURRENT * Remove wsgi_app usage from test_server_actions * Change some v2.1 extension names to v2 * Add VirtCPUModel nova objects * Add enum fieldtype field * Convert v2.1 extension_info to show V2 API extension list * Remove compability check for ratelimit_v3 * Keep instance state if ssh failed during migration * Cleanup and removal of unused code in scheduler unit tests * Fix incorrect use of mock in scheduler test * Make test re-use HTTPRequest part 5 * Refactor test_filter_scheduler use of fakes * consoliate set_availability_zones usage * Warn about zookeeper service group driver usage * Updated from global requirements * Update matrix for kvm on ppc64 * Switch off oslo.* 
namespace check temporarily * Switch to using oslo_* instead of oslo.* * Adjust object_compat wrapper order * Add more tests for tenant network API * Sync with oslo-incubator * Make compute use objects usage 'best practice' * Enable BIOS bootmenu on AMI-based images 2015.1.0b2 ---------- * libvirt: fix console device for system z for log file * Fix references to non-existent "pause" section * libvirt: generate proper config for PCS containers * libvirt: add ability to add file and block based filesystem * libvirt: add ploop disks format support * Fix improper use of Stevedore * libvirt: Fail when live block migrating instance with volumes * Add notification for suspend * Add API schema for v2.1 networks API * Remove v1.1 from v2.1 extension description * Add _LW for missing translations * Treat LOG.warning and LOG.warn same * Add JSON schema for v2.1 'quota_class' API * Add missing setup.cfg entry for os-user-data plugin * Add api_version parameter for API sample test base class * Add suggestion to dev docs for debugging odd test failures * Add max_concurrent_builds limit configuration * Fixes Hyper-V configdrive network injection issue * Update Power State after deleting instance * Remove temporary power state variables * Make obj_set_defaults() more useful * Adds devref for API Microversions * PCI NUMA filtering * Ensure publisher_id is set correctly in notifications * libvirt: Use XPath instead of loop in _get_all_block_devices * libvirt: Use XPath instead of loop in get_instance_diagnostics * fix typo in rpcapi docstring * Fix conductor servicegroup joining when zk driver is used * Do not treat empty key_name as None * Failed to discovery when iscsi multipath and CHAP both enabled * Fix network tests response code checking * Remove unused error from v2.1 create server * Fix corrupting the object repository with test instance objects * Change cell_type values in nova-manage * Fix bad mocking of methods on Instance * Updated from global requirements * VMware: fix 
resume_state_on_host_boot * Fix cells rpc connection leak * Remove redundant assert of mock volume save call * Don't create block device mappings in the API cell * Add formal doc recording hypervisor feature capability matrix * Ironic: Adds config drive support * libvirt-xen: Fix block device prefix and disk bus * libvirt-xen: don't request features ACPI or APIC with PV guest * Make EC2 compatible with current AWS CLI * libvirt: remove pointless loop after live migration finishes * Remove useless argparse requirement * add asserts of DriverBlockDevice save call parameters * fix call of DriverVolumeBlockDevice save in swap_volume * Use a workarounds group option to disable live snaphots * libvirt : Add support for --interface option in iscsiadm * Cells: Fix service_get_by_compute_host * Expand instances project_id index to cover deleted as well * Remove unused conductor parameter from get_host_availability_zone() * Fixes Hyper-V instance snapshot * Add more status when do _poll_rebooting_instances * Adds barbican keymgr wrapper * libvirt: avoid setting the memnodes where when it's not a supported option * Make code compatible with v4 auth and workaround webob bug * Fix likely undesired use of redirection * Save bdm in swap_volume * doc: document manual testing procedure for serial-console * nova net-delete network is not informative enough * Improvement in 'network_set_host' function * Fix typo in nova/virt/disk/vfs/localfs.py * Fix expected error in V2.1 add network API * libvirt: fix failure when attaching volume to iso instance * Add log message to is_luks function * Access migration fields like an object in finish_revert_resize * Remove unused migration parameter from _cleanup_stored_instance_types * object: serialize set to list * Fix leaking exceptions from scheduler utils * Adds tests for Hyper-V LiveMigration utils * Adds tests for Hyper-V VHD utils * libvirt: fix missing block device mapping parameter * libvirt: add QEMU built-in iSCSI initiator support * 
Add update_or_create flag to BDM objects create() * Typos fixed * Remove unused method from test_metadata * libvirt: Support iSCSI live migration for different iSCSI target * Add JSON schema for "associate_host" API * Add migrate_flavor_data to nova-manage * Adds logging to ComputeCapabilitiesFilter failures * Add flavor fields to Instance object * Fix up some instance object creation issues in tests * Fix misspellings in hardware.py * VMware: add in utility methods for copying and deleting disks * Apply v2.1 API to href of version API * Revert "Raise if sec-groups and port id are provided on boot" * libvirt: always pass image_meta when getting guest XML * libvirt: assume image_meta is non-None in blockinfo module * libvirt: always pass image meta when getting disk info from bdm * Calls to superclass' __init__ function is optional * Enforce DB model matches results of DB migrations * Add missing foreign keys for sqlite * Fix an indentation in server group api samples template * Allow instances to attach to shared external nets * Handle ironic_client non-existent case * Cells: Record initial database split in devref * Use a workarounds option to disable rootwrap * virt: Fix images test interaction * libvirt: add parallels virt_type * Convert nova-manage list to use Instance objects * Create a 'workarounds' config group * Updated from global requirements * don't use exec cat when we can use read * don't assert_called_once_with with a real time * Network: correct VMware DVS port group name lookup * Refactor ComputeCapabilitiesFilter as bugfix preparation * libvirt: Set SCSI as the default cdrom bus on System z * Adds common policy authorizer helper functions for Nova V2.1 API * Adds skip_policy_check flag to Compute/Network/SecurityGroup API * Make test re-use HTTPRequest part 4 * libvirt: update uri_whitelist in fakelibvirt.Connection * Revert "Adds keypair type database migration" * Support for ext4 as default filesystem for ephemeral disks * Raise NotFound if 
attach interface with invalid net id or port id * Change default value of multi_instance_display_name_template * Check for LUKS device via 'isLuks' subcommand * disk: use new vfs method and option to extend * Replace select-for-update in fixed_ip_associate_pool * Remove unused content_type_params() * libvirt: always pass image meta when getting disk mapping * libvirt: always pass image meta when getting disk info * Add API schema for v2.1 server reboot actions * objects: fix typo in changelog of compute_node * Add API schema for v2.1 'removeFloatingIp' * Add API schema for v2.1 'addFloatingIp' * Add parameter_types.ip_address for cleanup * Reply with a meaningful exception when ports are over the quota limit * Adds keypair type database migration * A minor change of CamelCase parameter * Imported Translations from Transifex * Remove N331 hacking rules * GET details REST API next link missing 'details' * Add missing indexes in SQLite and PostgreSQL * libvirt: cleanup warning log formatting in _set_host_enabled * Revert temporary hack to monkey patch the fake rpc timeout * Remove H238 comment from tox.ini * libvirt: use image_meta when looking up default device names * Fix bdm transformation for volume backed servers * Removed host_id check in ServersController.update * Fix policy validation in JSONSchema * Adds assert_has_no_errors check * Removed useless method _get_default_deleted_value * virt: make tests pass instance object to get_instance_disk_info * libvirt: rename conn variable in LibvirtConnTestCase * Raise if sec-groups and port id are provided on boot * Begin using ironic's "AVAILABLE" state * Transform IPAddress to string when creating port * Break base service group driver class out from API * Remove unused _get_ip_and_port() * Updated from global requirements * Add method for getting the CPU pinning constraint * libvirt: Consider CPU pinning when booting * Make ec2/cloud.py use get_instance_availability_zone() helper * HACKING.rst: Update the location 
of unit tests' README.rst * Remove unused method log_db_contents * Make use of controller method in test_flavor_manage * libvirt: Use XPath instead of loop in _get_disk_xml * Avoid bdms db call when cleaning deleted instance * Ignore warnings from contextlib.nested * Cleanup bad JSON files * Switch to oslo.vmware API for reading and writing files * Make test re-use HTTPRequest part 1 * Make test re-use HTTPRequest part 2 * Make test re-use HTTPRequest part 3 * Remove HTTPRequestV3 in scheduler_hints test * Hyper-V: Adds instance missing metrics enabling * ephemeral file names should reflect fs type and mkfs command * Reschedule queries to nova-scheduler after a timeout occurs * libvirt: remove use of utils.instance_sys_meta * libvirt: remove use of fake_instance.fake_instance_obj * Remove redundant catch for InstanceNotFound * Add to_dict() method to PciDevicePool object * libvirt: rename self.conn in LibvirtVolume{Snapshot||Usage}TestCase * libvirt: rename self.libvirtconnection in LibvirtDriverTestCase * libvirt: convert LibvirtConnTestCase to use fakelibvirt fixture * Remove unused network rpcapi calls * Added hacking rule for assertEqual(a in b, True/False) * Add API schema for v2.1 createImage API * Fix errors in string formatting operations * libvirt: Create correct BDM object type for conn info update * Fixes undocumented commands * Make _get_instance_block_device_info preserve root_device_name * Convert tests to NoDBTestCase * Fixes Hyper-V should log a clear error message * Provide compatibliity for db.compute_node_statistics * Update network resource when shelve offload instance * Update network resource when rescheduling instance * libvirt: Expanded test libvirt driver * Adds "file" disk driver support to Xen libvirt driver * Virt: remove unused 'host' parameter from get_host_uptime * Don't translate logs in tests * Don't translate exceptions in tests * disk/vfs: introduce new option to setup * disk/vfs: introduce new method get_image_fs * initialize 
objects with context in block device * Remove unused controller instance in test_config_drive * Fix v2.1 os-tenant-networks/networks API * Use controller methods in test_floating_ips * Cleanup in test_admin_actions * Calling controller methods directly in test_snapshots * Add checking changePassword None in _action_change_password(v2) * Add more exceptions handle when change server password (v2) * Share admin_password unit test between V2 & V2.1 * Share server_actions unit test between V2 & V2.1 * Fix server_groups schema on v2.1 API * Implement a safe copy.copy() operation for Nova models * clean up extension loading logging * Hyper-V: Fixes wrong hypervisor_version * console: introduce baseproxy and update consoles cmd * libvirt: update get_capabilities to Host class * libvirt: add get_connection doc string in Host class * Enable check for H238 rule * Call ComputeNode instead of Service for getting the nodes * Remove mox dependency * Fix JSONFilter docs * libvirt: move _get_hypervisor_* functions to Host class * libvirt: don't turn time.sleep into a no-op in tests * Adds Hyper-V generation 2 VMs implementation * VMware: ensure that correct disk details are returned * Improve api-microversion hacking check * Add unit test for getting project quota remains * Fix py27 gate failure - test_create_instance_both_bdm_formats * Reduce complexity of the _get_guest_config method * Cleanups in preparation of flavor attributes on Instance * Add flavor column to instance_extra table * docs: document manual testing procedure for NUMA support * Add setup/cleanup_instance_network_on_host api for neutron/nova-network * Remove useless requirements * Make get_best_cpu_topology consider NUMA requested CPU topology * Make libvirt driver expose sibling info in NUMA topology * VMware: snapshot as stream-optimized image * VMware: refactor utility functions related to VMDK * Get settable user quota maximum correctly * Add missing policy for nova in policy.json * Fix typo in 
nfs_mount_options option description * increase fake rpc POLL_TIMEOUT to 0.1s * work around for until-failure * Fix inconsistencies in the ComputeNode object about service * Fixed incorrect initialization of availability zone tests * Revert "initialize objects with context in block device" * Fix wrong instructions for rebuilding API samples * Performance: leverage dict comprehension in PEP-0274 * Sync with latest oslo-incubator * initialize objects with context in VirtualInterface object tests * initialize objects with context in Tag object tests * initialize objects with context in Service object tests * Fixes Hyper-V boot from volume live migration * Expansion of matching XML strings logic * Xenapi: Attempt clean shutdown when deleting instance * don't use debug logs for object validation * create some unit of work logging in n-net * Make service-update work in API cells * oslo: remove useless modules * Do not use deprecated assertRaisesRegexp() * Honor shared storage on resize revert * Stub out instance action events in test_compute_mgr * Remove unused instance_group_metadata_* DB APIs * initialize objects with context in block device * Reduce the complexity of the create() method * speed up tests setting fake rpc polling timeout * xenapi: don't send terminating chunk on errors * Make service-delete work in API cells * Add version as request param for fake HTTPRequest * Fix OverQuota headroom KeyError in nova-network allocate_fixed_ip * Updated from global requirements * Make numa_usage_from_instances consider CPU pinning * Cleanup in admin_actions(v2.1api) * Cache ironic-client in ironic driver * tests: fix handling of TIMEOUT_SCALING_FACTOR * libvirt: remove/revert pointless logic for getVersion call * libvirt: move capabilities helper into host.py * libvirt: move domain list helpers into Host class * libvirt: move domain lookup helpers into Host class * Fix live migration RPC compatibility with older versions * Added _get_volume_driver method in libvirt 
driver * fix wrong file path in docstring of hacking.checks * Make ec2 auth support v4 signature format * VMware: driver not handling port other than 443 * libvirt: use XPath in _get_serial_ports_from_instance * Remove non existent rule N327 from HACKING.rst * Replace Hacking N315 with H105 * Enable W292 * Fix and re-gate on H306 * Move to hacking 0.10 * Fix nova-manage shell ipython * Make service-list output consistent * Updated from global requirements * Make V2.1 servers filtering (--tenant-id) same as V2 * Fix failure rebuilding instance after resize_revert * Move WarningsFixture after DatabaseFixture so emit once * libvirt: Use arch.from_host instead of platform.processor * Cells: Improve invalid hostname handling * Fix obj_to_primitive() expecting the dict interface methods * Remove unused XML_WARNING variable in servers API * Guard against missing X-Instance-ID-Signature header * libvirt: not setting membacking when mempages are empty host topology * remove pylint source code annotations * Cleanup XML for api samples tests for Nova REST API * remove all traces of pylint testing infrastructure * initialize objects with context in SecurityGroupRule object tests * initialize objects with context in SecurityGroup object tests * initialize objects with context in base object tests * initialize objects with context in Migration object tests * initialize objects with context in KeyPair object tests * initialize objects with context in InstanceNUMATopology object tests * initialize objects with context in InstanceGroup object tests * initialize objects with context in InstanceFault object tests * Fix error message when no IP addresses available * Update WSGI SSL IPv6 test and SSL certificates * Catch more specific exception in _get_power_state * Add WarningsFixture to only emit DeprecationWarning once in a test run * Maintain the creation order for vifs * Update docstring for wrap_exception decorator * Doc: Adds python-tox to Ubuntu dependencies * Added hacking 
rule for assertTrue/False(A in B) * ironic: use instance object in driver.py * Add LibvirtGPFSVolumeDriver class * Make pagination work with deleted marker * Return 500 when unexpected exception raising when live migrate v2 * Remove no need LOG.exception on attach_interface * Make LOG exception use format_message * make IptablesRule debug calls meaningful * Switch to tempest-lib's packaged subunit-trace * Update eventlet API in libvirt driver * initialize objects with context in Instance object tests * initialize objects with context in Flavor object tests * initialize objects with context in FixedIP object tests * initialize objects with context in EC2 object tests * initialize objects with context in ComputeNode object tests * initialize objects with context in BlockDeviceMapping object tests * Nuke XML support from Nova REST API - Phase 3 * Return floating_ip['fixed_ip']['instance_uuid'] from neutronv2 API * Add handling of BadRequest from Neutron * Add numa_node to PCIDevice * Nuke XML support from Nova REST API - Phase 2 * Remove unused methods in nova utils * Use get_my_ipv4 from oslo.utils * Add cpu pinning check to numa_fit_instance_to_host * Add methods for calculating CPU pinning * Remove duplicated policy check at nova-network FlatManager * boot instance with same net-id for multiple --nic * XenAPI: Check image status before uploading data * XenAPI: Refactor message strings to remove locals * Cellsv2 devref addition * Nuke XML support from Nova REST API - Phase 1 * hardware: fix numa topology from image meta data * Support both list and dict for pci_passthrough_whitelist * libvirt: Add balloon period only if it is not None * Don't assume contents of values after aggregate_update * Add API schema for server_groups API * Remove unused function _get_flavor_refs in flavor_access extension * Make rebuild server schema 'additionalProperties' False * Tests with controller methods in test_simple_tenant_usage * Convert wsgi call to controller in 
test_virtual_interfaces * Fix the comment of host index api * Imported Translations from Transifex * Use controller methods directly in test_admin_password * Drop workarounds for python2.6 * VMware: add in utility method for copying files * Remove lock files when remove libvirt images * Change log when set_admin_password failed * Catch InstanceInvalidState for start/stop action * Unshelving a volume backed instance doesn't work * Cache empty results in libvirt get_volume_connector * VMware: improve the performance of list_instances * VMware: use power_off_instance instead of power_off * VMware: refactor unit tests to use _get_info * libvirt: clean instance's directory when block migration fails * Remove unused scheduler driver methods * Reuse methods from netutils * VMware: make use of oslo.vmware logout * Remove unused directory nova/tests/unit/bundle * Prevent new code from using namespaced oslo imports * Move metadata filtering logic to utils.py * Make test_consoles to directly call controller methods * Catch expected exceptions in remote console controller * Make direct call to controller test_server_password * Cleanup in test_keypairs not to use wsgi_app * Add ipv6 support to fake network models * Add host field when missing from compute_node * Remove condition check for python2.6 in test_glance * Cleanup in test_availability_zone not to use wsgi_app * Call controller methods directly in test_evacuate * VMware: Use datastore_regex for disk stats * Add support for clean_shutdown to resize in compute api layer * Fix Instance relationships in two objects * objects: remove NovaObjectDictCompat from Tag object * libvirt: introduce new helper for getting libvirt domain * libvirt: remove pointless _get_host_uuid method * libvirt: pass Host object into firewall class * Cleanup in server group unit tests * Enhance EvacuateHostTestCase test cases * Call controller methods directly in test_console_output * Make direct call to controller in test_console_auth_tokens * 
Populates retry info when unshelve offloaded instance * Catch NUMA related exceptions for create server v2.1 API * Remove unnecessary cleanup from ComputeAPITestCase * extract RPC setup into a fixture 2015.1.0b1 ---------- * Fix recent regression filling in flavor extra_specs * remove detail method from LimitsController * Remove instance_uuids from request_spec * libvirt: remove unused get_connection parameter from VIF driver * libvirt: sanitize use of mocking in test_host.py * libvirt: convert test_host.py to use FakeLibvirtFixture * libvirt: introduce a fixture for mocking out libvirt connections * Expand valid resource name character set * Set socket options in correct way * Make resize server schema 'additionalProperties' False * Make lock file use same function * Remove unused db.api.dnsdomain_list * Remove unused db.api.instance_get_floating_address * Remove unused db.api.aggregate_host_get_by_metadata_key * Remove unused db.api.get_ec2_instance_id_by_uuid * Join instances column before expecting it to exist * ec2: Change FormatMappingTestCase to NoDBTestCase * libvirt: enhance driver to configure guests based on hugepages * Fix ironic delete fails when flavor deleted * virt: pass instance object to block_stats & get_instance_disk_info * Add pci_device_pools to ComputeNode object * Handle invalid sort keys/dirs gracefully * hardware: determine whether a pagesize request is acceptable * objects: add method to verify requested hugepages * hardware: make get_constraints to return topology for hugepages * hardware: add method to return requested memory page size * Cleanup in ResourceExtension ALIAS(v2.1api) * Replace use of handle_schedule_error() with set_vm_state_and_notify() * Fix set_vm_state_and_notify passing SQLA objects to send_update() * Imported Translations from Transifex * Libvirt: use strutils.bool_from_string * Use constant for microversions header name (cleanup) * Adds support for versioned schema validation for microversions api * Add support for 
microversions API special version latest * Adds API microversion response headers * Use osapi_compute worker for api v2 service * initialize objects with context in Aggregate object tests * Replace the rest of the non-object-using test_compute tests * Fix using anyjson in fake_notifier * Fix a bug in _get_instance_nw_info() where we re-query for sysmeta * Corrects link to API Reference on landing page * libvirt: disk_bus setting is being lost when migration is reverted * libvirt: enable hyperv enlightenments for windows guests * libvirt: enhance to return avail free pages on cells * libvirt: move setting of guest features out into helper method * libvirt: add support for configuring hyperv enlightenments in XML * libvirt: change representation of guest features * libvirt: add support for hyperv timer source with windows guests * libvirt: move setting of clock out into helper method * libvirt: don't pass a module import into methods * Reject non existent mock assert calls * VMware: remove unused method in the fake module * Use oslo db concurrency to generate nova.conf.sample * Make instance_get_all_*() funtions support the smart extra.$foo columns * Make cells send Instance objects in build_instance() * Fix spelling error in compute api * objects: fix changed fields for instance numa cell * Hyper-V: Fix volume attach issue caused by wrong constant name * Move test_extension_info from V3 dir to V2.1 * Make create server schema 'additionalProperties' False * Make update server schema 'additionalProperties' False * Updated from global requirements * Update devref with link to kilo priorities * Add vision of nova rest API policy improvement in devref * objects: remove dict compat support from all XXXList() objects * objects: stop conductor manager using dict field access on objects * objects: allow creation of objects without dict item compat * Remove duplicated constant DISK_TYPE_THIN * Hyper-V: Fix retrieving console logs on live migration * Remove 
FlavorExtraSpecsNotFound catch in v3 API * Add API schema for v2.1 block_device_mapping_v1 * Add API schema for v2.1 block_device_mapping extension * VMware: Support volume hotplug * fix import of oslo.concurrency * libvirt: set guest cpu_shares value as a multiple of guest vCPUs * Make objects use the generalized backport scheme * Fix base obj_make_compatible() handling ListOfObjectsField * VMware: make use of oslo.vmware pbm_wsdl_loc_set * Replace stubs with mocks * Updated from global requirements * use more specific error messages in ec2 keystone auth * Add backoff to ebtables retry * Add support for clean_shutdown to rescue in compute api layer * Add support for clean_shutdown to shelve in compute api layer * Add support for clean_shutdown to stop in compute api layer * Extend clean_shutdown to the compute rpc layer * initialize objects with context in compute manager * Add obj_as_admin() to NovaPersistentObject * Bump major version of Scheduler RPC API to 4.0 * Use model_query from oslo.db * Only check db/api.py for session in arguments * Small cleanup in db.sqlalchemy.api.action_finish() * Inline _instance_extra_get_by_instance_uuid_query * libvirt: Convert more tests to use instance objects * virt: Convert more tests to use instance objects * virt: delete unused 'interface_stats' method * objects: fix version changelog in numa * libvirt: have _get_guest_numa_config return a named tuple * simplify database fixture to the features we use * extract the timeout setup as a fixture * Stop neutron.api relying on base neutron package * Move pci unit test from V3 to V2.1 * Clarify point of setting dirname in load_standard_extensions * Remove support for deprecated header X_ROLE * move all conf overrides to conf_fixture * move ServiceFixture and TranslationFixture * extract fixtures from nova.test to nova.test.fixtures * libvirt: Fix NUMA memnode assignments to host cells * libvirt: un-cruft _get_guest_numa_config * Make scheduler filters/weighers only load once * 
Refactor unit tests for scheduler weights * Fix cells RPC version 1.30 compatibility with dict-based Flavors * Objects: add in missing translation * network:Separate the translatable messages into different catalogs * objects: introduce numa pages topology as an object * check the configuration num_vbd_unplug_retries * Doc: minor fixes to unit testing devref * Doc: Update i18n devref * VMware: remove flag in tests indicating VC is supported * virt: use instance object for attach in block_device * VMware: clean up unit tests * Do not compute deltas when doing migration * Modify v21 alias name for compatible with v2 * Clean bdms and networks after deleting shelved VM * move eventlet GREENDNS override to top level * fix pep8 errors that apparently slipped in * include python-novaclient in abandon policy * replace httplib.HTTPSConnection in EC2KeystoneAuth * Re-revert "libvirt: add version cap tied to gate CI testing" * ironic: remove non-standard info in get_available_resource dict * hyperv: use standard architecture constants for CPU model * xenapi: fix structure of data reported for cpu_info * ironic: delete cpu_info data from get_available_resource * vmware: delete cpu_info data from get_available_resource * pci: move filtering of devices up into resource tracker * Libvirt: Fsfreeze during live-snapshot of qemu/kvm instances * libvirt: Fixes live migration for volume backed instances * Updated from global requirements * Remove unused db.api.fixed_ip_get_by_address_detailed * VMware: Remove unused _check_if_folder_file_exists from vmops * VMware: Remove unused _get_orig_vm_name_label from vmops * VMware: enable a cache prefix configuration parameter * Hyper-V: attach volumes via SMB * etc: replace NullHandler by Python one * Add cn_get_all_by_host and cn_get_by_host_and_node to ComputeNode * Add host field to ComputeNode * Reject unsupported image to local BDM * Update LVM lockfile name identical to RAW and Qcow * Fix invalid read_deleted value in 
_validate_unique_server_name() * Adds hacking check for api_version decorator * Parse "networks" attribute if loading os-networks * Fixes interfaces template identification issue * VMware: support passing flavor object in spawn * Libvirt: make use of flavor passed by spawn method * Virt: change instance_type to flavor * rename oslo.concurrency to oslo_concurrency * Support macvtap for vif_type being hw_veb * downgrade 'No network configured!' to debug log level * Remove unnecessary timeutils override cleanup * Cleanup timeutils override in tests/functional/test_servers * Downgrade quota exceeded log messages * libvirt: Decomposition plug hybrid methods in vif * Remove unused cinder code * Libvirt normalize numa cell ids * Remove needless workaround in utils module * Check for floating IP pool in nova-network * Remove except Exception cases * Fixes multi-line strings with missing spaces * Fix incorrectly formatted log message * libvirt: check value of need_legacy_block_device_info * Fixed typo in testcase and comment * Share server access ips tests between V2 & V2.1 * Workflow documentation is now in infra-manual * Add a validation format "cidr" * Use a copy of NEW_NETWORK for test_networks * Adds global API version check for microversions * Implement microversion support on api methods * Fix long hostname in dnsmasq * This patch fixes the check that 'options' object is empty correctly * Assert order of DB index members * Updated from global requirements * object-ify flavors manager side of the RPC * Add CPU pinning data to InstanceNUMACell object * Enforce unique instance uuid in data model * libvirt: Handle empty context on _hard_reboot * Move admin_only_action_common out of v3 directory(cleanup) * Compute Add build_instance hook in compute manager * SQL scripts should not manage transactions * Clear libvirt test on LibvirtDriverTestCase * Replacement `_` on `_LW` in all LOG.warning part 4 * Replacement `_` on `_LW` in all LOG.warning part 3 * Convert v3/v2.1 
extension info to present v2 API format * Adds NUMA CPU Pinning object modeling * objects: Add several complex field types * VMware: ephemeral disk support * Imported Translations from Transifex * Fix disconnecting necessary iSCSI sessions issue * VMware: ensure that fake VM deletion returns a task * Compute: Catch binding failed exception while init host * libvirt: Fix domain creation for LXC * Xenapi: Allow volume backed instances to migrate * Break V2 XML Support * Libvirt: SMB volume driver * libvirt: Enable console and log for system z guests * libvirt: Set guest machine type on system z * Drop support for legacy server groups * Libvirt: Don't let get_console_output crash on missing console file * Hyper-V: Adds VMOps unit tests (part 1) * VMware: allow selection of vSAN datastores * libvirt: enhance config memory backing to handle hugepages * VMware: support spawn of stream-optimized image * libvirt: reuse defined method to return instance numa topology * Remove the volume api related useless policy rules * Error code for creating secgroup default rule * Don't mock external locks with Semaphore * Add shelve and unshelve info into devref doc * VMware: optimize resource pool usage * Added objects Tag and TagList * libvirt: video RAM setting should be passed in kb to libvirt * Switch to moxstubout and mockpatch from oslotest * Check that volume != root device during boot by image * Imported Translations from Transifex * Make a flavorRef validation strict * Add missing indexes from 203 migration to model * Fix type of uniq_security_groups0project_id0name0deleted * Correct columns covered in migrations_instance_uuid_and_status_idx * Add debug log for url not found * Optimize 'floating_ip_bulk_create' function * factor out _setup_logging in test.py * extract _setup_timeouts in test.py * Scheduler: return a namedtuple from _get_group_details * Use "is_neutron_security_groups" check * Fix function name mismatch in test case * VMware: prevent exception with 
migrate_disk_and_power_off * Fix URL mapping of image metadata PUT request * Compute: catch correct exception when host does not exists * Fix URL mapping of server metadata PUT request * objects: move numa host and cell to objects * objects: introduce numa objects * Code cleanup: quota limit validation * Add api validation schema for image_metadata * Correct InvalidAggregateAction translation&format * Remove blanks before ':' * Port virtual-interfaces plugin to v2.1(v3) API * Catch ComputeServiceUnavailable on v2 API * GET servers API sorting REST API updates * Add API validation schema for volume_attachments * Changed testcase 'test_send_on_vm_change' to test vm change * VMware: associate instance with storage policy * VMware: use storage policy in datastore selection * VMWare: get storage policy from flavor * Share CreateBackup unit test between V2 & V2.1 * Share suspend_server unit test between V2 & V2.1 * Share pause_server unit test between V2 & V2.1 * Share lock_server unit test between V2 & V2.1 * VMware: enable VMware driver to use new BDM format * Use admin only common test case in admin action unit test cases * objects: move virt numa instance to objects * Fix v2.1 API os-simple-tenant-usage policy * Set vm state error when raising unexpected exception in live migrate * Add delete not found unit testcase for floating_ip api * Improve error return code of floating_ips in v2/v2.1 api * Port floating_ips extension to v2.1 * Removing the headroom calculation from db layer * Make multiple_create unit tests share between v2 and v2.1 * Set API version request information on request objects * Change definition of API_EXTENSION_NAMESPACE to method * Adds APIVersionRequest class for API Microversions * Updated from global requirements * remove test.ReplaceModule from test.py * Added db API layer to add instance tag-list filtering support * Added db API layer for CRUD operations on instance tags * Implement 'personality' plugin for V2.1 * Fix API samples/templates 
of multinic-add-fixed-ip * move the integrated tests into the functional tree * Sync latest from oslo-incubator * Fix use of conf_fixture * Make network/* use Instance.get_flavor() * Make metadata server use Instance.get_flavor() * Fix use of extract_flavor() in hyper-v driver * Check server group policy on migrate/evacuate * VMware: fix exception when multiple compute nodes are running * Add API json schema for server_external_event(v2.1) * Port v2 quota_classes extension to work in v2.1(v3) framework * Share unit test case for server_external_events api * Add API schema for v2.1/v3 scheduler_hints extension * Make compute/api.py::resize() use Instance.get_flavor() * Make get_image_metadata() use Instance.get_flavor() * Fix instance_update() passing SQLA objects to send_update() * Fix EC2 volume attachment state at attaching stage * Fixes Hyper-V agent IDE/SCSI related refactoring * dummy patch to let tox functional pass * Remove Python 2.6 classifier * Make aggregate filters use objects * hardware: clean test to use well defined fake flavor * Enable pep8 on ./tools directory * objects: Add test for instance _save methods * Error code for creating duplicate floating_ip_bulk * Use HTTPRequest instead of HTTPRequestV3 for v2/v2.1 tests * objects: make instance numa topology versioned in db * Clean up in test_server_diagnostics unit test case * Add "x-compute-request-id" to a response header * Prevent admin role leak in context.elevated * Hyper-V: Refactors Hyper-V VMOps unit tests * Hyper-V: Adds Hyper-V SnapshotOps tests * Introduce a .z version element for backportable objects * Adds new RT unit tests for _sync_compute_node * Fix for extra_specs KeyError * Remove old Baremetal Host Manager * Remove unused network_api.get_instance_uuids_by_ip_filter() * Remove unused network_api.get_floating_ips_by_fixed_address() * add abandon_old_reviews script * Remove havana compat from nova.cert.rpcapi * Retry ebtables on race * Eventlet green threads not released back to pool 
* Hyper-V: Adds LiveMigrationOps unit tests * Hyper-V: Removes redundant utilsfactory tests from test_hypervapi * Hyper-V: Adds HostOps unit tests * Make nova-api use quotas object for create_security_group * Make nova-api use quotas object for count() and limit_check() * Add count and limit_check methods to quota object * Make neutronapi get networks operations return objects * Hyper-V: fix tgt iSCSI targets disconnect issue * Network object: add missing translations * Adapting pylint runner to the new message format * Cleanup v2.1 controller inheritance * Load extension 2 times fix load sequence issue * Make get_next_device_name() handle an instance object * Add obj_set_defaults() to NovaObject * Switch to oslo.config fixture * Remove VirtNUMAHostTopology.claim_test() method * Instances with NUMA will be packed onto hosts * Make Instance.save() update numa_topology * objects: remove VirtPageSize from hardware.py * VMware: enable backward compatibility with existing clusters * Make notifications use Instance.get_flavor() * Make notify_usage_exists() take an Instance object * Convert hardware.VirtCPUTopology to nova object * Updated from global requirements * Replacement `_` on `_LW` in all LOG.warning part 2 * compute: rename hvtype.py to hv_type.py * Replacement `_` on `_LW` in all LOG.warning part 1 * Replacement `_` on `_LE` in all LOG.exception * Use opportunistic approach for migration testing * Replacement `_` on `_LI` in all LOG.info - part 2 * Replacement `_` on `_LI` in all LOG.info - part 1 * Add ALL-IN operator to extra spec ops * Sync server_external_events v2 to v2.1 Part 2 * Sync server_external_events v2 to v2.1 Part 1 * Fix connecting unnecessary iSCSI sessions issue * Add API validation schema for services v2.1 plugin * Fix exception handling in _get_host_metrics() * initialize objects with context in network manager tests * initialize objects with context in flavors * initialize objects with context in compute api * initialize objects with 
context in resource tracker * Use common get_instance call in API plugins part 3 * Clean the test cases for service plugins * initialize objects with context in server groups api * initialize objects with context in cells * tests: update _get_instance_xml to accept custom flavor object * libvirt: vif tests should use a flavor object * Compute: improve test_compute_utils time * Compute: improve usage of Xen driver support * libvirt: introduce new 'Host' class to manage the connection * Add CHAP credentials support * Document the upgrade plans * Move test_hostops into nova/tests/unit * Fix get_all API to pass search option filter to cinder api * VMware: remove ESX support for getting resource pool * objects: Makes sure Instance._save methods are called * Add support for fitting instance NUMA nodes onto a host * VMware: remove unnecessary brackets * Imported Translations from Transifex * Port volume_attachments extension to v2.1 API * Only filter once for trusted filters * Indicate whether service is down for mc driver * Port assisted-volume-snapshots extension to v2.1 * Updated from global requirements * Add custom is_backend_avail() method * Fixes differencing VHDX images issue on Hyper-V * Add debug log when over quota exception occurs * Fix rule not found error in sec grp default rule API * Convert service v3 plugin to v2.1 API * Decrease admin context usage in _get_guest_config * Catch NotImplemented nova exceptions in API extension * Add API json schema to volumes api(v2.1) * Don't modify columns_to_join formal parameter in _manual_join_columns * Limit tcp/udp port to be empty string in json-schema * Fix the cell API with string rpc_port failed * Add decorator expected_errors for security_group extension * Fix bulk floating ip ext to show uuid and fixed_ip * Use session in cinderclient * Make objects.Flavor._orig_projects a list * Refactor more compute tests to use Instance objects * Use Instance.get_flavor() in more places * Support instance_extra fields in 
expected_attrs on Instance object * Adds host power actions support for Hyper-V * Exceptions: finish sentence with fullstop * Type conflict in trusted_filter.py using attestation_port default value * Get EC2 metadata localip return controller node ip * Rename private functions in db.sqla.api * Enable hard-reboot on more states * Better error message when check volume status * libvirt: use qemu (qdisk) disk driver for Xen >= 4.2.0 * Add resource types for JSON-Schema validation * Add integer types for JSON-Schema * Revert pause/unpause state when host restart * Extends use of ServiceProxy to more methods in HostAPI in cells * Nova devref: Fix the rpc documentation typos * Remove duplicated code in services api integrated test case * Share server_password unit test between V2 & V2.1 * Key manager: ensure exception reason is translated * Virt: update spawn signature to pass instance_type * Compute: set instance to ERROR if resume fails * Limit InstanceList join to system_metadata in os-simple-tenant-usage * Pass expected_attrs to instance_get_active_by_window_joined * VMware: remove unused parameter (mountpoint) * Truncate encoded instance message to 255 or fewer * Only load necessary instance info for use in sync power state * Revert "Truncate encoded instance message to 255" * VMware: refactor cpu allocations * Fixes spawn issue on Hyper-V * Refine HTTP error code for os-interface * Share migrations unit test between V2 & V2.1 * Use common get_instance call in API plugins part 2 * make get_by_host use slave in periodic task * Add update_cells to BandwidthUsage.create() * Fix usage of BandwidthUsage.create() * Updated from global requirements * Hard reboot doesn't re-create instance folder * object-ify flavors api and compute/api side of RPC * Allow passing columns_to_join to instance_get_all_by_host_and_node() * Don't make a no-op DB call * Remove deprecated affinity filters * Generalize dependent object backporting * GET servers API sorting compute/instance/DB 
updates * Hyper-V: cleanup basevolumeutils * Specify storage IP for iscsi connector * Fix conductor processes race trying to join servicegroup (zk driver) * Remove unused db.api.floating_ip_set_auto_assigned * Remove unused db.api.flavor_extra_specs_get_item * Remove unused oslo.config import * Create instance_extra items atomically with the instance itself * Shelve_offload() should give guests a chance to shutdown * Fixes Hyper-V driver WMI issue on 2008 R2 * Fix circular reference error when live migration failed * Fix live migration api stuck when migrate to old nova node * Remove native security group api class * libvirt: pin emulator threads to union of vCPU cpuset * libvirt: add classes for emulator thread CPU pinning configuration * libvirt: set NUMA memory allocation policy for instances * Fixed quotas double decreasing problem * Convert v3 console plugin to v2.1 * Virt: make use of the InstanceInfo object * Virt: create an object InstanceInfo * Metadata service: make use of get_instance_availability_zone * Metadata service: remove check for the instance object type * Metadata: use instance objects instead of dictionary * VMware: Fix problem transferring files with ipv6 host * VMware: pass vm_ref to _set_machine_id * VMware: pass vm_ref to _get_and_set_vnc_config * Add API schema for aggregates set_metadata API * Compute: Add start notification for resume * VMware: fix regression for 'TaskInProgress' * Remove havana compat from nova.console.rpcapi * Remove havana compat from nova.consoleauth.rpcapi * Share console-auth-tokens tests between V2 & V2.1 * Raise HTTPNotFound in V2 console extension * Add 'instance-usage-audit-log' plugin for V2.1 * Truncate encoded instance message to 255 * Deduplicate some INFO and AUDIT level messages * move all tests to nova/tests/unit * Add tox -e functional * Don't touch info_cache after refreshing it in Instance.refresh() * Drop max-complexity to 47 * Aggregate.save() shouldn't return a value * Remove useless host 
parameter in virt * Use real disk size to consider a resize down * Add virtual interface before add fixed IP on nova-network * image cache clean-up to clean swap disk * Make unit test floating ips bulk faster * Remove flush_operations in the volume usage output * Updated from global requirements * xenapi plugins must target only Python 2.4 features * libvirt: add classes for NUMA memory binding configuration * libvirt: add in missing translation for LVM migration * Config bindings: remove redundant brackets * Config drive: delete deprecated config var config_drive_tempdir * Refactor Ironic driver tests as per review comment * Switch default cinder API to V2 * Remove deprecated spicehtml5 options * Fix xen plugin to retry on upload failure * Log sqlalchemy exception message in migration.py * Use six.text_type instead of unicode * XENAPI add duration measure to log message * Quotas: remove deprecated configuration variable * Glance: remove deprecated config options * Cinder: remove deprecated configuration options * Neutron: remove deprecated config options * object: update instance numa object to handle pagesize * hardware: make cell instance topology to handle memory pages * hardware: introduce VirtNUMATopologyCellInstance * hardware: fix in doctstring the memory unit used * virt: introduce types VirtPageSize and VirtPagesTopology * Clearer default implmentation for dhcp_options.. 
* Fix instance_usage_audit_log test to use admin context * VMware: remove unused method _get_vmfolder_ref * libvirt: safe_decode domain.XMLDesc(0) for i18n logging * VMware: trivial fix for comment * Fix the uris in documentation * Make test_security_groups nose compatible * Make test_quotas compatible with nosetests * Return HTTP 400 if use invalid fixed ip to attach interface * Fixed typos in nova.objects.base docstrings * Add note on running single tests to HACKING.rst * Use sizelimit from oslo.middleware * Use oslo.middleware * Make resource tracker always use Flavor objects * maint:Don't translate debug level logs * Make console show and delete exception msg better * Change error code of floating_ip_dns api(v2.1) * Make scheduler code use object with good practice * Switch Nova to use oslo.concurrency * scheduler: Remove assert on the exact number of weighers * Update docstring for check_instance_shared_storage_local * remove use of explicit lockutils invocation in tests * Delay STOPPED lifecycle event for Xen domains * Remove warning & change @periodic_task behaviour * Fix nova-compute start issue after evacuate * Ignore DiskNotFound error on detaching volumes * Move setup_instance_group to conductor * Small doc fix in compute test * libvirt: introduce config to handle cells memory pages caps * Fixes DOS issue in instance list ip filter * Use 404 instead of 400 when security_group is non-existed * Port security-group-default-rules extension into v2.1 * Port SecurityGroupRules controller into v2.1 * error if we don't run any tests * Revert "Switch Nova to use oslo.concurrency" * Updated from global requirements * Remove admin context which is not needed * Add API validation schema for disk_config * Make test_host_filters a NoDBTestCase * Move group affinity filters tests to own test file * Split out metrics filter unit tests * Splits out retry filter unit tests * Split out compute filters unit tests * Update hooks from oslo-incubator copy * Split out aggregate 
disk filter unit tests * Split out core filter unit tests * Split out IO Ops filter unit tests * Split out num instances filter unit tests * Split and fix the type filters unit tests * Split and fix availability zone filter unit tests * Split out PCI passthrough filter unit tests * Use common get_instance call in API plugins * Fix nova evacuate issues for RBD * DB API: Pass columns_to_join to instance_get_active_by_window_joined * Read flavor even if it is already deleted * Use py27 version of assertRaisesRegexp * update retryable errors & instance fault on retry * xenapi: upload/download params consistency change * Use assertRaisesRegexp * Drop python26 support for Kilo nova * Switch Nova to use oslo.concurrency * Remove param check for backup type on v2.1 API * Set error state when unshelve an instance due to not found image * fix the error log print in encryptor __init__.py * Remove unused compute_api in extend_status * Compute: maint: adjust code to use instance object format * VMware: use instance.uuid instead of instance['uuid'] * Network: manage neutron client better in allocate_for_instance * Split out agg multitenancy isolation unit tests * Split agg image props isolation filter unit tests * Separate isolated hosts filter unit tests * Separate NUMA topology filter unit tests * resource-tracker: Begin refactor unit tests * Faster get_attrname in nova/objects/base.py * Hyper-V: Skip logging out in-use targets * Compute: catch more specific exception for _get_instance_nw_info * typo in the policy.json "rule_admin_api" * Fix the unittest use wrong controller for SecurityGroups V2 * host manager: Log the host generating the warning * Add API validation schema for floating_ip_dns * Remove `domain` from floating-ip-dns-create-or-update-req body * Port floating_ip_dns extention to v2.1 * Remove LOG outputs from v2.1 API layer * Run build_and_run_instance in a separate greenthread * VMware: Improve the efficiency of vm_util.get_host_name_for_vm * VMware: Add 
fake.create_vm() * Use wsgi.response for v2.1 API * Use wsgi.response for v2.1 unrescue API * Add API schema for v2.1 "resize a server" API * Remove use of unicode on exceptions * Fix error in comments * Make pci_requests a proper field on Instance object * libvirt: fully parse PCI vendor/product IDs to integer data type * Remove uncessary instance.save in nova compute * api: add serial console API calls v2.1/v3 * Add API validation schema for cloudpipe api * Remove project id in ViewBuilder alternate link * Handle exception better in v2.1 attach_interface * Cleanup of tenant network tests * Port floating_ips_bulk extention to v2.1 * Make v2.1 tests use wsgi_app_v21 and remove wsgi_app_v3 * Translate 'powervm' hypervisor_type to 'phyp' for scheduling * Give a reason why NoValidHost in select_destinations * ironic: use instance object for `_add_driver_fields` * ironic: use instance object for `_wait_for_active` * ironic: use instance object for `get_info` * ironic: use instance object for `rebuild` * ironic: use instance object for plug_vifs * Revert "Replace outdated oslo-incubator middleware" * Set logging level for glanceclient to WARN * Nova should be in charge of its log defaults * Reduce the complexity of _get_guest_config() * VMware: fix compute node exception when no hosts in cluster * libvirt: use instance object for detach_volume * libvirt: use instance object for attach_volume * libvirt: use instance object for resume_state_on_host_boot * libvirt: treat suspend instance as an object * VMware: Remove redundant fake.reset() in test_vm_util * VMware: add tests for spawn with config drive enabled * Adds tests for Hyper-V Network utils * Adds tests for Hyper-V Host utils * Fix order of arguments in assertEqual * Replace custom patching in `setUp` on HypervisorsSampleJsonTests * Console: delete code for VMRCConsole and VMRCSessionConsole * VMware: delete the driver VMwareESXDriver * Replacement `_` on `_LE` in all LOG.error * VMware: rename vmware_images to 
images * Remove unuseful parameter in cloudpipe api(v2/v2.1) * Moves trusted filter unit tests into own file * Port update method of cloudpipe_update to v2.1(v3) * Clean up iSCSI multipath devices in Post Live Migration * Check fixed-cidr is within fixed-range-v4 * Porting baremetal_nodes extension to v2.1/v3 * Port fixed_ip extention to v2.1 * Separate filter unit tests for agg extra specs * Move JSON filter unit tests into own file * Separate compute caps filter unit tests * Separate image props filter unit tests * Separate disk filters out from test_host_filters * Separate and refactor RAM filter unit tests * Remove duplicate test * Reduce the complexity of stub_out_db_network_api() * Remove duplicate index from model * Remove useless join in nova.virt.vmwareapi.vm_util * fixed typo in test name * Separate and refactor affinity filter tests * Pull extra_specs_ops tests from test_host_filters * Remove outdated docstring for XenApi driver's options * VMware: attach config drive if booting from a volume * Remove duplicated comments in virt/storage_users * Compute: use instance object for vm_state * libvirt: use six.text_type when setting text node value in guest xml * Allow strategic loading of InstanceExtra columns * Create Nova Scheduler IO Ops Weighter * Put a cap on our cyclomatic complexity * Add notification for server group operations * Clean up the naming of PCI python modules * Port os-networks-associate plugin to v2.1(v3) infrastructure * Port os-tenant-networks plugin to v2.1(v3) infrastructure * Cleanup of exception handling in network REST API plugin * Fix instance_extra backref * Refactor compute tests to not use _objectify() * Refactor compute and conductor tests to use objects * Fix genconfig - missed one import from oslo cleanup * Handle Forbidden error from network_api.show_port in os-interface:show * Replace outdated oslo-incubator middleware * VMware: Improve logging on failure due to invalid guestId * Ironic: Continue pagination when listing 
nodes * Fix unit test failure due to tests sharing mocks * libvirt: fully parse PCI addresses to integer data type * libvirt: remove pointless HostState class * Porting SecurityGroup related controller into v2.1 * Allow force-delete irrespective of VM task_state * Use response.text for returning unicode EC2 metadata * Remove unused modules copied from oslo-incubator * Remove unused code in pci_manager.get_instance_pci_devs() * VMWare: Remove unused exceptions * Switch to nova's jsonutils in oslo.serialization * VMware: mark virtual machines as 'belonging' to OpenStack * XenAPI: Inform XAPI who is connecting to it * Rename cli variable in ironic driver * Add more input validation of bdm param in server creation * Return HTTP 400 if use an in-use fixed ip to attach interface * VMware: get_all_cluster_refs_by_name default to {} * Minor refactor of _setup_instance_group() * add InstanceGroup.get_by_instance_uuid * Add instance_group_get_by_instance to db.api * Updated from global requirements * Add supported_hv_specs to ComputeNode object * Pass block device info in pre_live_migration * Use 400 instead of 422 for security_groups v2 API * Port floating_ip_pools extention to v2.1 * Imported Translations from Transifex * Sync with latest oslo-incubator * Don't translate unit test logs * Optimize get_instance_nw_info and remove ipam * Convert migrate reqeusts to use joins * Use database joins for fixed ips to other objects * Keep migration status if instance still resizing * Don't log every (friggin) migration version step during unit tests * Remove init for object list in api layer * Revise compute API schemas and add tests * Add Quota roll back for deallocate fix ip in nova-network * Update README for openstack/common * Fix libvirt watchdog support * VMware: add support for default pbm policy * Remove unused imports from neutron api * Cleanup tenant networks plugin config creation * Port os-networks plugin to v2.1(v3) infrastructure * Use reasonable timeout for rpc 
service_update() * Finish objects conversion in the os-interface API 2014.2 ------ * Fix pci_request_id break the upgrade from icehouse to juno * Fix pci_request_id break the upgrade from icehouse to juno * Updated translations * vfs: guestfs logging integration * Fix broken cert revocation * Port cloudpipe extension to v2.1 * Cleanup log marker in neutronv2 api * Add 'zvm' to the list of known hypervisor types * Fix wrong exception return in fixed_ips v2 extention * Extend XML unicode test coverage * Remove unnecessary debug/info logs of normal API ops * Refactor of test case of floating_ips * Make v2.1 API tests use v2 URLs(test_[r-v].*) * Make v2.1 API tests use v2 URLs(test_[f-m].*) * Break out over-quota calculation code from quota_reserve() * Fix image metadata returned for volumes * Log quota refresh in_use message at INFO level for logstash * Break out over-quota processing from quota_reserve() * Remove obsolete vmware/esx tools * Fix broken cert revocation * Remove baremetal virt driver * Update rpc version aliases for juno * VMware: Set vmPathName properly in fake driver * Port disk_config extension for V2.1 * Allow backup operation in paused and suspend state * Update NoMoreFixedIps message description * Make separate calls to libvirt volume * Correct VERSION of NetworkRequest * Break out quota usage refresh code from quota_reserve() * libvirt: abort init_host method on libvirt that is too old * Mask passwords in exceptions and error messages * Support message queue clusters in inter-cell communication * neutronv2: translate 401 and 404 neutron client errors in show_port * Log id in raise_http_conflict_for_instance_invalid_state() * Use image metadata from source volume of a snapshot * Fix KeyError for euca-describe-images * Optimize 'fixed_ip_bulk_create' function * Remove 'get_host_stats' virt driver API method * Suppressed misleading log in unshelve, resize api * Imported Translations from Transifex * libvirt: add _get_launch_flags helper method in 
unit test * Refactoring of contrib.test_networks tests * Make v2.1 API tests use v2 URLs(test_[a-e].*) * Port fping extension to work in v2.1/v3 framework * Use oslo.utils * Correctly catch InstanceExists in servers create API * Fix the os_networks display to show cidr properly * Avoid using except Exception in unit test * nova-net: add more useful logging before raising FixedIpLimitExceeded * libvirt: convert conn test case to avoid DB usage * libvirt: convert driver test suite to avoid DB usage * Mask passwords in exceptions and error messages * Disable libvirt NUMA topology support if libvirt < 1.0.4 * Resource tracker: use brackets for line wrap * VMWare: Remove unnecessery method * console: make unsupported ws scheme in python < 2.7.4 * VMWare: Fix nova-compute crash when instance datastore not available * Disable libvirt NUMA topology support if libvirt < 1.0.4 * VMware: remove _get_vim() from VMwareAPISession * Compute: use an instance object in terminate_instance * VMware: remove unnecessary deepcopy * Destroy orig VM during resize if triggered by user * Break out quota refresh check code from quota_reserve() * move integrated api client to requests library * Fix unsafe SSL connection on TrustedFilter * Update rpc version aliases for juno * Fix the os_networks display to show cidr properly * libvirt: convert mox to mock in test_utils * Remove kombu as a dependency for Nova * Adds missing exception handling in resize and rebuild servers API * Remove keystoneclient requirement * Destroy orig VM during resize if triggered by user * VMware: Fix deletion of an instance with no files * console: introduce a new exception InvalidConnectionInfo * Remove the nova-manage flavor sub-command * support TRACE_FAILONLY env variable * Ensure files are closed promptly when generating a key pair * libvirt: convert volume snapshot test case to avoid DB usage * libvirt: convert volume usage test case to avoid DB usage * libvirt: convert LibvirtNonblockingTestCase to avoid DB 
usage * libvirt: convert firewall tests to avoid DB usage * libvirt: convert HostStateTestCase to avoid DB usage * libvirt: split firewall tests out into test_firewall.py * libvirt: convert utils test case to avoid DB usage * Add VIR_ERR_CONFIG_UNSUPPORTED to fakelibvirt * Remove indexes that are prefix subsets of other indexes * remove scary error message in tox * Cleanup _convert_block_devices * Enhance V2 disk_config extension Unit Test * Add developer policy about contractual APIs * Reserve 10 migrations for backports * libvirt: Make sure volumes are well detected during block migration * Remove websocketproxy workaround * Fix unsafe SSL connection on TrustedFilter 2014.2.rc1 ---------- * Remove xmlutils module * libvirt: Make sure NUMA cell memory is in Kb in XML * Fix disk_allocation_ratio on filter_scheduler.rst * Remove unused method within filter_scheduler test * Open Kilo development * Correct missing vcpupin elements for numa case * VMware: remove unused variable from tests * Imported Translations from Transifex * VMWare: Fix VM leak when deletion of VM during resizing * Logging detail when attach interface failed * Removes unused code from wsgi _to_xml_node * Fix XML UnicodeEncode serialization error * Add @_retry_on_deadlock to _instance_update() * Remove duplicate entry from .gitignore file * console: fix bug when invalid connection info * console: introduce a new exception InvalidToken * cmd: update the default behavior of serial console * console: make websocketproxy handles token from path * VMware: Remove tests for None in fake._db_content['files'] * Fix creating bdm for failed volume attachment * libvirt: consider vcpu_pin_set when choosing NUMA cells * Fix hook documentation on entry_points config * Remove local version of generate_request_id * fix usage of obj_reset_changes() call in flavor * Fix Bad except clauses order * Typo in exception name - CellsUpdateProhibited * Log original error when attaching volume fails * Retry on closing of luks 
encrypted volume in case device is busy * VMware: Remove VMwareImage.file_size_in_gb * VMware: remove unused argument from _delete_datastore_file() * xenapi: deal with reboots while talking to agent * Ironic: Do not try to unplug VIF if not associated * Fix Typo in method name - parse_Dom * Adds openSUSE support for developer documentation * VMware: Remove class orphaned by ESX driver removal * Fixes missing ec2 api address disassociate error on failure * Fixes potential reliablity issue with missing CONF import * Updated from global requirements * Port extended_ips/extended_ips_mac extension to V2.1 * the value of retries is error in _allocate_network * Ironic driver must wait for power state changes * Fallback to legacy live migration if config error * libvirt: log exception info when interface detach failed * libvirt: support live migration with shared instances dir * Fix SecurityGroupExists error when booting instances * Undo changes to obj_make_compatible * Clarify virt driver test comments & log statement * move integrated api client to requests library * VMware: Make DatastorePath hashable * Remove usage of self.__dict__ for message var replacement * VMware: trivial formatting fix in fake driver * VMware: Improve logging of DatastorePath in error messages * VMware: Use vm_mode constants * Imported Translations from Transifex * Updated from global requirements * do not use unittest.TestCase for tests * Neutron: Atomic update of instance info cache * Reduce the scope of RT work while holding the big lock * libvirt: convert CacheConcurrencyTestCase to avoid DB usage * Give context to the warning in _sync_power_states * remove test_multiprocess_api * add time to logging in unit tests * XenAPI: clean up old snapshots before create new * Return vcpu pin set as set rather than list * Fix start/stop return active/stopped immediately in EC2 API * consistently set status as REBUILD when rebuilding * Add test case for vim header check * Add missing instance action 
record for start of live migration * Reduce the log level for the guestfs being missing * Sync network_info if instance not found before _build_resources yield * Remove the AUDIT log message about loaded ext * Fix unset extra_spec for a flavor * Add further debug logging for multiprocess test * Revert "libvirt: support live migrate of instances with conf drives" * Revert "libvirt: Uses correct imagebackend for configdrive" * Fixes server list filtering on metadata * Add good path test cases of osapi_compute_workers * Be less confusing about notification states * Remove unused py33 tox env * fix_typo_in_heal_instance_info_cache * Refactor test_get_port_vnic_info 2 and 3 * Revert "libvirt: reworks configdrive creation" * Making nova.compute.api to return Aggregate Objects * Scheduler: add log warning hints * Change test function from snapshot to backup * Fixes Hyper-V dynamic memory issue with vNUMA * Update InstanceInvalidState output * Add unit test for glanceclient ssl options * Fix Broken links in devref/filter_scheduler.rst * Change "is lazy loaded" detection method in db_api test * Handle VolumeBDMPathNotFound in _get_disk_over_committed_size_total * Handle volume bdm not found in lvm.get_volume_size * Updated from global requirements * Address nits in I6b4123590 * Add exists check to fetch_func_sync in libvirt imagebackend * libvirt: avoid changing UUID when redefining nwfilters * Vmware:Add support for ParaVirtualSCSIController * Fix floating_ips_bulk unit test name * refactor flavor manage tests in prep for object-ify flavors * refactor flavor db fakes in prep for object-ify flavors * move dict copy in prep for object-ify flavors * tests: kill worker pids as well on timeouts * Close standard fds in test child process * Mitigating performance impact with getting pci requests from DB * Return None from get_swap() if input is not swap * Require tests for DB migrations * VMware: fix broken mock of ds_util.mkdir * Fix KeyError for euca-describe-images * Fixes 
HyperV VM Console Log * FIX: Fail to remove the logical volume * correct _sync_instance_power_state log message * Add support for hypervisor type in IronicHostManager * Don't list entire module autoindex on docs index * Add multinic API unit test * Add plan for kilo blueprints: project priorities * make flavors use common limit and marker * Raise an exception if qemu-img fails * Libvirt: Always teardown lxc container on destroy * Mark nova-baremetal driver as deprecated in Juno, removed in K * libvirt: Unnecessary instance.save(s) called * Add progress and cell_name into notifications * XenAPI: run vhd-util repair if VHD check fails * Get instance_properties from request_spec * libvirt: convert encrypted LVM test to avoid DB usage * libvirt: convert test_dmcrypt to avoid DB usage * libvirt: convert test_blockinfo.py to avoid DB usage * libvirt: convert test_vif.py to avoid DB usage * libvirt: remove pointless class in util test suite * libvirt: avoid need for lockutils setup running test cases * VMware: Remove host argument to ds_util.get_datastore() * Fix DB migration 254 by adding missing unittest * postgresql: use postgres db instead of template1 * Assume VNIC_NORMAL if binding:vnic_type not set * mock.assert_called_once() is not a valid method * db: Add @_retry_on_deadlock to service_update() * Update ironic states and documentation * XenAPI improve post snapshot coalesce detection * Catch NotImplementedError on reset_network for xen * VMware: Fix usage of assertEqual in test_vmops * Add more information to generic _add_floating_ip exception message * bring over pretty_tox.sh from tempest * Console: warn that the Nova VMRC console driver will be deprecated in K * virt: use compute.vm_mode constants and validate vm mode type * compute: tweaks to vm_mode APIs to align with arch/hvtype * Fix NUMA fit testing in claims and filter class * consolidate apirequest tests to single file * ensure that we safely encode ec2 utf8 responses * instance_topology_from_instance 
handles request_spec properly * NUMA _get_constraint auto assumed Flavor object * Imported Translations from Transifex * Fix 'force' parameter for quota-update * Update devref * default=None is unneeded in config definitions * Remove unused elevated context param from quota helper methods * Remove stale code from ObjectListBase * Split up libvirt volume's connect_volume method * Record instance faults during boot process * ironic/baremetal: add validation of host manager/state APIs * virt: move assertPublicAPISignatures into base test class * libvirt: avoid 30 second long test in LXC mount setup * Remove all redundant `setUp` methods * fix up assertEqual(None...) check to catch more cases * Fix object version hash test * disk/vfs: make docstring conventional to python * disk/vfs: ensure guestfs capabilities * NIST: increase RSA key length to 2048 bit * Fix incorrect exception when bdm with error state volume * ironic: Clean LOG usage * Improve secgroup create error message * Always log the releasing, even under failure * Fix race condition in update_dhcp * Make obj_make_compatible consistent * Correct baremetal/ironic consume_from_instance.. 
* Fix parsing sloppiness from iscsiadm discover * correct inverted subtraction in quota check * Add quotas for Server Groups (quota checks) * Add quotas for Server Groups (V2 API change) * check network ambiguity before external network auth * Updated from global requirements * libvirt: Consider numa_topology when booting * Add the NUMATopologyFilter * Make HostManager track NUMA usage * API boot process sets NUMA topology for instances * Make resource tracker track NUMA usage * Hook NUMA topology checking into claims * Stash numa-related flavor extra_spec items in system_metadata * Fixes network_get_all_by_host to use indexes * Add plan for kilo blueprints: when is a blueprint needed * Bump FakeDriver's resource numbers * delete python bytecode before every test run * Stop using intersphinx * Don't swallow exceptions in deallocate_port_for_instance * neutronv2: attempt to delete all ports * Proxy nova baremetal commands to Ironic * Increase sleeps in baremetal driver * Improve logging of external events on the compute node * virt: use compute.virttype constants and validate virt type * compute: Add standard constants for hypervisor virt types * Fix test_create_instance_invalid_key_name * Fix `confirmResize` action status code in V2 * Remove unnecessary imageRef setting from tests * Add unit test for add_floating_ip API * Remove unused config "service_down_time" reference * Clarify logging in lockutils * Make sure libvirt VIR_ERR_NO_DOMAIN errors are handled correctly * Adds LOG statements in multiprocess API test * Block sqlalchemy migrate 0.9.2 as it breaks all of nova * Xen: Attempt to find and cleanup orphaned SR during delete * Nova-net: fix server side deallocate_for_instance() * Method for getting NUMA usage from an instance * Ironic: save DB calls for getting flavor * Imported Translations from Transifex * Fix 'os-interface' resource name for Nova V2.1 * Add new unit tests for PCI stats * Fixes AttributeError with api sample test fail * Fix 
"revertResize/confirmResize" for V2.1 API * Add unit test to os-agent API * check the block_device_allocate_retries * Support SR-IOV networking in libvirt * Support SR-IOV networking in nova compute api and nova neutronv2 * Support SR-IOV networking in the PCI modules * Add request_id in PciDevice * Replace pci_request flavor storage with proper object usage * Adds a test for raw_cpu_arch in _node_resource * Stop stack tracing when trying to auto-stop a stopped instance * Add quotas for Server Groups (V2 API compatibility & V2.1 support) * Fixes Hyper-V volume mapping issue on reboot * Libvirt-Enable support for discard option for disk device * libvirt: set pae for Xen PVM and HVM * Add warning to periodic_task with interval 0 * document why we disable usb_tablet in code * Fix 'os-start/os-stop' server actions for V2.1 API * Fix 'createImage' server actions for V2.1 API * Add unit test to aggregate api * Handle exception better in v2 attach_interface * Fix integrated test cases for assisted-volume-snapshots * libvirt: start lxc from block device * Remove exclude coverage regex from coverage job * Pass instance to set_instance_error_state vs. 
uuid * Add InstancePCIRequests object * Drop verbose and useless nova-api log information * Add instance_extra_update_by_uuid() to DB API * Add pci_requests to instance_extra table * Add claims testing to VirtNUMAHostTopology class * Expose numa_topology to the resource tracker * libvirt: fix bug when releasing port(s) * Specify correct operation type when NVH is raised * Ironic: don't canonicalize extra_specs data * VMware: add tests for image fetch/cache functions * VMware: spawn refactor image fetch/cache * Ironic: Fix direct use of flavor and instance module objects * Ironic driver fetches extra_specs when needed * Maint: neutronclient exceptions from a more appropriate module * Check requirements.txt files for missing (used) requirements * Sync oslo-incubator module log: * Add amd64 to arch.canonicalize() * Sync oslo lockutils to nova * libvirt: deprecated volume_drivers config parameter * VMware: Remove get_copy_virtual_disk_spec from vmops and vm_util * maint: various spelling fixes * Fix config generator to use keystonemiddleware * libvirt: improve unit test time * VMware: prevent race condition with VNC port allocation * VMware: Fix return type of get_vnc_console() * VMware: Remove VMwareVCVMOps * Network: enable instance deletion when dhcp release fails * Adds ephemeral storage encryption for LVM back-end images * Don't elevate context when rescheduling * Ironic driver backports: patch 7 * Improve Ironic driver performance: patch 6 * Import Ironic Driver & supporting files - part 5 * Import Ironic Driver & supporting files - part 4 * Import Ironic Driver & supporting files - part 3 * Import Ironic Driver & supporting files - part 2 * Import Ironic Driver & supporting files - part 1 * Add sqlite dev packages to devref env setup doc * Add user namespace support for libvirt-lxc * Move to oslo.db * api: add serial console API calls v2 * compute: add get_serial_console rpc and cells api calls * compute: add get_serial_console in manager.py * virt: add method 
get_serial_console to driver * Clean up LOG import in floating_ips_bulk v2 api 2014.2.b3 --------- * Update invalid state error message on reboot API * Fix race condition with vif plugging in finish migrate * Fix service groups with zookeeper * xenapi: send chunk terminator on subprocess exc * Add support for ipv6 nameservers * Remove unused oslo.config import * Support image property for config drive * warn against sorting requirements * VMware: remove unused _get_vmdk_path from vmops * virt: use compute.arch constants and validate architectures * Change v3 quota-sets API to v2.1 * always set --no-hosts for dnsmasq * Allow _poll_bandwidth_usage task to hit slave * Add bandwidth usage object * VMware: spawn refactor enlist image * VMware: image user functions for spawn() * Change v3 flavor_manage API to v2.1 * Port used_limits & used_limits_for_admin into v2.1 * Add API schema for v2.1 access_ips extension * Add API schema for v2.1 "rebuild a server" API * Add API schema for v2.1 "update a server" API * Enabled qemu memory balloon stats * Reset task state 'migrating' on nova compute restart * Pass certificate, key and cacert to glanceclient * Add a policy for handling retrospective vetos * Adds Hyper-V soft shutdown implementation * Fix swap_volumes * Add API schema for v2.1/v3 multiple_create extension * Return hydrated net info from novanet add/remove_fixed_ip calls * Add API schema for v2.1/v3 availability_zone extension * Add API schema for v2.1/v3 server_metadata API * Fixes a Hyper-V list_instances localization issue * Adds list_instance_uuids to the Hyper-V driver * Change v3 admin_actions to v2.1 * Change v3 aggregate API to v2.1 * Convert v3 ExtendedAvailabilityZone api to v2.1 * Convert v3 hypervisor plugin to v2.1 * Convert server_usage v3 plugin to v2.1 API * Convert v3 servers return_reservation_id behaviour to v2.1 * the headroom infomation is incomplete * Port volumes extension to work in v2.1/v3 framework * vmwareapi oslo.vmware library integration 
* Allow forceDelete to delete running instances * Port limits extension to work in v2.1/v3 framework * Port image-size extension to work in v2.1/v3 framework * Port v2 image_metadata extension to work in v2.1(v3) framework * Port v2 images extension to work in v2.1(v3) framework * Convert migrate_server v3 plugin to v2.1 * Changes V3 evacuate extension into v2.1 * console: add typed console objects * virt: setup TCP chardevice in libvirt driver * Remove snapshot_id from _volume_snapshot_create() * Check min_ram and min_disk when boot from volume * Add API schema for v2.1 "create a server" API * InstanceNUMAToplogy object create remove uuid param * Change v3 flavor_access to v2.1 * Convert rescue v3 plugin to v2.1 API * Change v3 security_groups API to v2.1 * Changes V3 remote_console extension into v2.1 * Use common get_instance function in v2 consoles extension * Add API schema for v2.1/v3 user_data extension * Convert v3 cells API to v2.1 * Convert v3 server metadata plugin to v2.1 * Convert multiple-create v3 plugin to v2.1 * Convert v3 flavor extraspecs plugin to v2.1 * Fix scheduler_available_filters help * cmd: add nova-serialproxy service * console: add serial console module * Changes V3 server_actions extension into v2.1 * Change v3 version API to v2.1 * Change v3 shelve to v2.1 * Process power state syncs asynchronously * Made unassigned networks visible in flat networking * Add functions to setup user namespaced filesystems * Adds nova-idmapshift cli utility * Add idmap to libvirt config * Allow hard reboots when still attempting a soft reboot * Decrease amount of queries while adding aggregate metadata * Adds Hyper-V serial console log * Store original state when suspending * Fix NoopQuotasDriver.get_settable_quotas() * Use instance objects consistently in suspend tests * Instance objects: fix indentation issue * libvirt: Add method for getting host NUMA topology * Add instance_extra table and related objects * Change v3 availability-zone API to v2.1 * 
Move and generalize decorator serialize_args to nova.objects.base * Convert v3 certificate API to v2.1 * Make neutronapi use NetworkRequest for allocate_for_instance() * Use NetworkRequest objects through to nova-network * Add extension block_device_mapping_v1 for v2.1 * Catch BDM related InvalidBDM exceptions for server create v2.1 * Changes block_device_mapping extension into v2.1 * Fix rootwrap for non openstack.org iqn's * Let update_available_resource hit slave * Plumb NetworkRequest objects through conductor and compute RPC * Updates available resources after live migration * Convert compute/api to use NetworkRequest object and list * Refactor the servers API to use NetworkRequest * Cells: Update set_admin_password for objects * Remove libvirt legacy LVM code * libvirt: reworks configdrive creation * Handle non dict metadata in server metadata V2 API * Fix wrong disk type limitation for disk IO throttling * Use v2.1 URLs instead of v3 ones in API unit tests * VMware: Add in support for CPU shares in event of resource contention * VMware: add resource limits for CPU * Refactor admin_action plugin and test cases * Fix error in log when log exception in guestfs.py * Remove concatenation with translated messages * Port simple_tenant_usage into v2.1 * Convert console_output v3 plugin to v2.1 * GET servers API sorting enhancements common utilities * Add _security_group_ensure_default() DBAPI method * Fix instance boot when Ceph is used for ephemeral storage * Add NetworkRequest object and associated list * Remove use of str on exceptions * Fix the current state name as 'shutting-down' * Explicitly handle exception ConsoleTypeUnavailable for v2 consoles * Convert v3 server diagnostics plugin to v2.1 * Porting v3 evacuate testcases to v2 * libvirt: Uses correct imagebackend for configdrive * Add v2.1 API router and endpoint * Change v3 keypairs API to v2.1 * Backport V3 hypervisor plugin unit tests to V2 * Remove duplicated negative factors in keypair test * filter: 
add per-aggregate filter to configure max_instances_per_host * Updated from global requirements * Mask passwords in exceptions and error messages * Make strutils.mask_password more secure * A minor change to a comments * Check vlan parameter is valid * filter: add per-aggregate filter to configure disk_allocation_ratio * Deprecate cinder_* configuration settings * Allow attaching external networks based on configurable policy * Fix CellStateManagerFile init to failure * Change v3 extended_status to v2.1 * Fixes Hyper-V volume discovery exception message * Use default quota values in test_quotas * libvirt: add validation of migration hostname * Add a Set and SetOfIntegers object fields * Add numa_topology column to the compute_node table * Preserve exception text during schedule retries * Change v3 admin-password to v2.1 * Make Object FieldType from_primitive pass objects * Change V3 access_ips extension into v2.1 * Update RESP message when failed to create flavor * Cleanup of V2 console output tests and add missing tests * Convert multinic v3 plugin to v2.1 * Change 'changes_since'/'changes-since' into v2.1 style for servers * Backport v3 multinic tests to v2 * Change ViewBuilder into v2.1 for servers * Change v3 agents API to v2.1 * Change v3 attach_interface to v2.1 * Backport V3 flavor extraspecs API unit tests to V2 * Return BadRequest instead of UnprocessableEntity for volumes API * Convert create_backup v3 plugin to v2.1 API * Update instance state after compute service died for rebuilded instance * Make floatingip-ip-delete atomic with neutron * Add v3 versions plugin unit test to v2 * Remove duplicated code in test_versions * Change v3 hosts to v2.1 * Change v3 extended_server_attibutes to v2.1 * Make test_killed_worker_recover faster * Change v3 flavor_rxtx to v2.1 * fix typo in docstring * libvirt: driver used memory tests cleanup * Avoid refreshing PCI devices on instance.save() * Updated from global requirements * Change v3 flavors to v2.1 * neutronv2: 
treat instance as object in deallocate_for_instance * Fix class name for ServerGroupAffinityFilter * Adds Hyper-V Compute Driver soft reboot implementation * Add QuotaError handling to servers rebuild API * Allow to create a flavor without specifying id * XenAPI: Remove interrupted snapshots * Fix typo in comment * Fix V2 unit tests to test hypervisor API as admin * Create compute api var at __init__ * libvirt: support live migrations of instances with config drives * Change v3 os-user-data extension to v2.1 * Remove duplicated code in test_user_data * Convert v3 server SchedulerHints plugin to v2.1 * Convert deferred_delete v3 plugin to v2.1 API * Backport some v3 scheduler hints API UT to v2 API * Change error status code for out of quota to be 403 instead of 413 * Correct seconds of a day from 84400 to 86400 * VMware: add adapter type constants * Fix comment typo * scheduler sends select_destinations notifications * Fix for volume detach error when use NFS as the cinder backend * objects: Add base test for obj_make_compatible() * objects: Fix InstanceGroup.obj_make_compatible() * Restore backward compat for int/float in extra_specs * Convert v3 config drive plugin to v2.1 * Fix sample files miss for os-aggregates * Backport v3 config_drive API unittest to v2 API * Backport some v3 availability zones API UT to v2 API * Handle non-ascii characters in spawn exception msg * Log warning message if volume quota is exceeded * Remove _instance_update usage in _build_instance * Treat instance like an object in _build_instance * Remove _instance_update usage in _default_block_device_names * Add missing flags to fakelibvirt for migration * Adds tests for Hyper-V Volume utils * Fix ability to generate object hashes in test_objects.py * Fix expected error details from jsonschema * Extend the docstring for obj_make_compatible() with examples * HyperV Driver - Fix to implement hypervisor-uptime * Port os-server-groups extension to work in v2.1/v3 framework * Fix the exception 
for a nonexistent flavor * Add api extension for new network fields * Use real exceptions for network create and destroy * Support reserving ips at network create time * Adds get_instance_disk_info to compute drivers * Use rfc3986 library to validate URL paths and URIs * Send create.end notification even if instance is deleted * Allow three periodic tasks to hit slave * Fixes Hyper-V unit test path separator issue * Share common test settings in test_flavor_manage * Shelve should give guests a chance to shutdown * Rescue should give guests a chance to shutdown * Resize should give guests a chance to shutdown * Power off commands should give guests a chance to shutdown * objects: Make use of utils.convert_version_to_tuple() * tests: fix test_compute to have predictable service list * libvirt: make sysinfo serial number configurable * Fixes Hyper-V resize down exception * Make usage_from_instances consider current usage * VMware: ensure test case for init_host in driver * Add some v2 agents API tests * Libvirt: Do not raise ENOENT exception * Add missing create() method on SecurityGroupRule object * Add test for get_instance_disk_info to test_virt_drivers * Move fake_quotas and fake_get_quotas into a class * Objectify association in neutronapi * Objectify last uses of direct db access in network/floating_ips * Update migration defaults * libvirt: reduce indentation in is_vif_model_valid_for_virt * Fixes Hyper-V boot from volume root device issue * Fixes Hyper-V vm state issue * Imported Translations from Transifex * Share unittest between v2 and v2.1 for hide_server_addresses extension * Check compulsory flavor create parameters exist * Treat instance like an object in _default_block_device_names * Change 'image_ref'/'flavor_ref' into v2 style for servers * Change 'admin_password' into v2 style for servers extension * Image caching tests: use list comprehension * Move _is_mapping to more central location * Stop augmenting oslo-incubators default log levels * Track 
object version relationships * Remove final use of glance_stubs * Removes GlanceClient stubs * Pull transfer module unit tests from glance tests * VMware: remove specific VC support from class VMwareVolumeOps * VMware: remove Host class * Image cache tests: ensure that assertEquals has expected param first * VMware: spawn refactor _configure_config_drive * VMware: refactor spawn() code to build a new VM * VMware: Fix type of VM's config.hardware.device in fake * VMware: Create fake VM with given datastore * VMware: Remove references to ebs_root from spawn() * VMware: Create VMwareImage object for image metadata * Image caching: update image caching to use objects * Report all objects with hash mismatches in a single go * Include child_versions in object hashes * Direct-load Instance.fault when lazy-loading * VMware: Remove unused variable in test_configdrive * Raise HTTPNotFound error from V2 cert show API * Add dict and json methods to VirtNUMATopology classes * virt: helper for processing NUMA topology configuration * Raise Not Implemented error from V2 diagnostics API * Make NovaObjectSerializer work with dicts * Updated from global requirements * neutronv2: treat instance like object in allocate_for_instance * nova-network: treat instance like object in allocate_for_instance * Treat instance like object in _validate_instance_group_policy * Treat instance like an object in _prebuild_instance * Treat instance like an object in _start_building * Add graphviz to list of distro packages to install * Fixes Hyper-V agent force_hyperv_utils_v1 flag issue * ec2: Use S3ImageMapping object * ec2: Add S3ImageMapping object * Remove unused db api methods * Get EC2 snapshot mappings with nova object * Use EC2SnapshotMapping for creating mappings * Add EC2SnapshotMapping object * Fix NotImplementedError in floating-ip-list * filter: add per-aggregate filter to configure max_io_ops_per_host * Hacking: a new hacking check was added that used an existing number * Fix hacking 
check for jsonutils * VMware: revert deletion of cleanup_host * Use flavor in confirm-resize to drop claim * Add new db api get functions for ec2_snapshot * Partial oslo-incubator sync -- log.py * Add unit tests for libvirt domain creation * Fix Trusted Filter to work with Mt. Wilson `vtime` * Fix 202 responses to contain valid content * Fix EC2 instance type for a volume backed instance * libvirt: add serial ports config * Split EC2 ID validator to validator per resource type * libvirt: do not fail instance destroy, if mount_device is missing * libvirt: persist lxc attached volumes across reboots and power down * Resize block device after swap to larger volume * Make API name validation failure deterministic * VMware: spawn refactor add VirtualMachineConfigInfo * libvirt: Fix kwargs for _create_image * VMware: fix crash when VC driver boots * baremetal: Remove depenency on libvirt's fetch_image method * libvirt: Remove unecessary suffix defaulting * Drop instance_group_metadata from the database * Neutron v2 API: fix get_floating_ip_pools * libvirt: Allow specification of default machine type * Fix rebuild with cells * Added hacking check for jsonutils * Consistently use jsonutils instead of specific implementation * Convert network/api.py uses of vif database functions to objects * Convert last use of direct database instance fetching from network api * libvirt: skip disk resize when resize_instance is False * libvirt: fix _disk_resize to make sure converted image will be restored * Backport some v3 certificate API unittest to v2 API * Backport some v3 aggregate API unittest to v2 API * Imported Translations from Transifex * More informative nova-scheduler log after NoValidHost is caught * Remove metadata/metadetails from instance/server groups * Prepend /dev/ to root_device_name in get_next_device_name * Lock attach_volume * Adjust audit logs to avoid negative disk info * Convert network/api.py to use FloatingIP object * Correct some IPAddress DB interaction in 
objects * docs - Set pbr 'warnerrors' option for doc build * docs - Fix errors,warnings from document generation * Provide a quick way to run flake8 * Add support for select_destinations in Scheduler client * Create a Scheduler client library * VMware: handle case when VM snapshot delete fails * Use common get_instance function in v2 attach_interface * Add some v2 flavor_manage API tests * Backport v3 api unittest into v2 api for attach_interface extension * Fix the error status code of duplicated agents * Handle ExternalNetworkAttachForbidden exception * Allow empty volumes to be created * docs - Fix errors,warnings from document generation * docs - Fix exception in docs generation * docs - Fix docstring issues in virt tree * VMware: test_driver_api: Use local variables in closures * VMware: Remove ds_util.build_datastore_path() * Use v1 as default for cinder_catalog_info * Fix live-migration failure in FC multipath case * Optimize instance_floating_address_get_all * Enhance PCI whitelist * Add a missing instance=instance in compute/mgr * Correct returned HTTP status code (Use 403 instead of 413) * Fix wrong command for _rescan_multipath * add log exception hints in some modules * Fix extension parameters in test_multiple_create * Standardize logging for v3 api extensions * Standardize logging for v2 api extensions * Add ListOfDictOfNullableString field type * Enable terminate for EC2 InstanceInitiatedShutdownBehavior * Remove duplicate test of passing glance params * Convert glance unit tests to not use stubs * Add decorator expected_errors for ips v3 extension * Return 404 instead of 501 for unsupported actions * Return 404 when floating IP pool not found * Makes versions API output deterministic * Work on document structure and doc building * Catch NeutronClientException when showing a network * Add API schema for v2.1/v3 security_groups extension * Add API schema for v2.1/v3 config_drive extension * Remove pre-icehouse rpc client API compat * makes sure 
correct PCI device allocation * Adds tests for Hyper-V VM Utils * Make nova-api use quotas object for reservations * VMware: implement get_host_ip_addr * Boot an instance with multiple vnics on same network * Optimize db.floating_ip_deallocate * Fixes wrong usage of mock.assert_not_called() * Code change for nova support cinder client v2 * libvirt: saving the lxc rootfs device in instance metadata * Add method for deallocating networks on reschedule * DB: use assertIsNotNone for unit test * Add expire reservations in backport position * Make network/api.py use Network object for associations * Migrate test_glance from mox to mock * Add instanceset info to StartInstance response * Adds verbosity to child cell update log messages * Removes unnecessary instructions in test_hypervapi * Diagnostics: add validation for types * Add missed discoverable policy rules for flavor-manage v3 * Rename rbd.py to rbd_utils.py in libvirt driver directory * Correct a maybe-typo in pci_manager * libvirt: make guestfs methods always return list of tuples * Revert "Deallocate the network if rescheduling for * libvirt: volume snapshot delete for network-attached disks * libvirt: parse disk backing chains from domain XML * Handle MacAddressInUseClient exception from Neutron when creating port * Updated from global requirements * Remove instance_info_cache_delete() from conductor * Make spawn_n() stub properly ignore errors in the child thread work * Update devref out-of-tree policy grammar error * Compute: add log exception hints * Handle NetworkAmbiguous error when booting a new instance with v3 api * Handle FloatingIpPoolNotFound exception in floating ip creation * Add policy on how patches and reviews go hand in hand * Add hacking check for explicit import of _() * VMware: Do not read opaque type for DVS network * VMware: add in DVS VXLAN support * Network: add in a new network type - DVS * Network: interface attach and detach raised confusing exception * Deprecate metadata_neutron_* 
configuration settings * Log cleanups for nova.network.neutron.api * Remove ESXDriver from Juno * Only get image location attributes if including locations * Use JSON instead of json in the parameter descriptions * Add a retry_on_deadlock to reservations_expire * docs - Fix doc build errors with SQLAlchemy 0.9 * docs - Fix indentation for RPC API's * docs - Prevent eventlet exception during docs generation * docs - Add an index for the command line utilities * docs - fix missing references * Change LOG.warn to LOG.debug in _shutdown_instance * EC2: fixed AttributeError when metadata is not found * Import Ironic scheduler filters and host manager * EndpointNotFound deleting volume backend instance * Fix nova boot failure using admin role for another tenant * docs - Fix docstring issues * Update scheduler after instance delete * Remove duplicate index from block_device_mapping table * Fix ownership checking in get_networks_by_uuid * Raises NotImplementedError for LVM migration * Convert network/api.py fixedip calls to use FixedIP object * Convert network/api.py get calls to use Network object * Add extensible resources to resource tracker (2) * Make DriverBlockDevice save() context arg optional * Improved error logging in nova-network for allocate_fixed_ip() * Issue multiple SQL statements in separate engine.execute() calls * Move check_image_exists out of try in _inject_data * Fix fake_update in test_update_missing_server * Add unit tests to cells conductor link * Revert "libvirt: add version cap tied to gate CI testing" * Use Ceph cluster stats to report disk info on RBD * Add trace logging to allocate_fixed_ip * Update devref setup docs for latest libvirt on ubuntu * libvirt re-define guest with wrong XML document * Improve logging when python-guestfs/libguestfs isn't working * Update dev env docs on libvirt-dev(el) requirement * Parse unicode cpu_info as json before using it * Fix Resource tracker should report virt driver stats * Fix _parse_datetime in simple 
tenant usage extension * Add API schema for v2.1/v3 cells API * Fix attaching config drive issue on Hyper-V when migrate instances * Allow to unshelve instance booted from volume * libvirt: add support for guest NUMA topology in XML config * libvirt: remove pointless LibvirtBaseVIFDriver class * libvirt: remove 'vif_driver' config parameter * libvirt: remove use of CONF.libvirt.virt_type in vif.py * Handle NotImplementedError in server_diagnostics v3 api * Remove useless check in _add_retry_host * Initialize Ironic virt driver directory * Live migration is broken for NFS shared storage * Fix ImportError during docs generation * Updated from global requirements * Extend API schema for "create a server" extensions * Enable cloning for rbd-backed ephemeral disks * Add include_locations kwarg to nova.image.API.get() * Add index for reservations on (deleted, expire) * VMWare Driver - Ignore datastore in maintenance mode * Remove outdated docstring for nova.network.manager * libvirt: remove 3 unused vif.py methods * Turn on pbr's autodoc feature * Remove api reference section in devref * Deduplicate module listings in devref * VMware: Resize operation fails to change disk size * Use library instead of CLI to cleanup RBD volumes * Move libvirt RBD utilities to a new file * Properly handle snatting for external gateways * Only use dhcp if enable_dhcp is set on the network * Allow dhcp_server to be set from new field * Set python hash seed to 0 in tox.ini * Make devref point to official devstack vagrant repo * Stop depending on sitepackages libvirt-python * libvirt: driver tests use non-mocked BDMs * Fix doc build errors in models.py * Make several ec2 API tests inherit from NoDBTestCase * Stub out rpc notifications in ec2 cloud unit tests * Add standard constants for CPU architectures * virt: switch order of args to assertEqual in guestfs test * virt: move disk tests into a sub-directory * virt: force TCG with libguestfs unless KVM is enabled in libvirt * Do not pass 
instances without host to compute API * Pass errors from detach methods back to api proc * libvirt: add tests for _live_snapshot and _swap_volume methods * libvirt: fill in metadata when launching instances * Increase min required libvirt to 0.9.11 * Rollback quota when confirm resize concurrently completed * API: Enable support for tenant option in nova absolute-limits * libvirt: removing lxc specific disk mapping * Method to filter non-root block device mappings * VMware: remove local variable * Use hypervisor hostname for compute trust level * Remove unused cell_scheduler_method * Fix the i18n for some warnings in compute utils * Fix FloatingIP.save() passing FixedIP object to sqlalchemy * Scheduler: throw exception if no configured affinity filter * xenapi: Attach original local disks during rescue * libvirt: remove VIF driver classes deprecated in Icehouse * Move logs of restore state to inner logic * Clean nova.compute.resource_tracker:_update_usage_from_instances * Fix and Gate on E265 * Log translation hint for nova.api * Fix duplicated images in test_block_device_mapping * Add Hyper-V driver in the "compute_driver" option description * reduce network down time during live-migration * Augment oslo's default log levels with nova specific ones * Make the coding style consistent with other Controller in plugins/v3 * Fix extra metadata didn't assign into snapshot image * Add i18n log markers in disk api * VMware: improve log message for attachment of CDROM * Raise NotImplemented default-security-group-rule api with neutron * vmwareapi: remove some unused fake vim methods * Correct image_metadata API use of nova.image.glance * Revert "Add extensible resources to resource tracker" * Update database columns nullable to match model * Updated from global requirements * Make quotas APIv3 extension use Quotas object for create/update * Make quotas APIv2 extension use Quotas object for create/update * Add quota limit create/update methods to Quotas object 2014.2.b2 
--------- * libvirt: VM diagnostics (v3 API only) * Add ibmveth model as a supported network driver for KVM * libvirt: add support for memory tuning in config * libvirt: add support for memory backing parameters * libvirt: add support for per-vCPU pinning in guest XML * libvirt: add parsing of NUMA topology in capabilities XML * handle AutoDiskConfigDisabledByImage at API layer * Rollback quota in os_tenant_network * Raise specific error of network IP allocation * Convert to importutils * Catch CannotResizeDisk exception when resize to zero disk * VMware: do not cache image when root_gb is 0 * Turn periodic tasks off in all unit tests * Rename virtutils to the more common libvirt_utils * Check for resize path on libvirt instance delete * Return status for compute node * servers list API support specify multi-status * Deprecate scheduler prep_resize * Updated from global requirements * Fix nova cells exiting on db failure at launch * Remove unneeded calls in test_shelve to start instances * Correct InvalidAggregateAction reason for Xen * Handle a flavor create failed better * Add valid method check for quota resources * VMware: power_off_instance support * Add debug log for availability zone filter * Fix typo * Fix last of direct use of object modules * Check instance state before attach/detach interface * Fix error status code for cloudpipe_update * Fix unit tests related to cloudpipe_update * Add API schema for v2.1/v3 reset_server_state API * Adjust audit logs to avoid negative mem/cpu info * Re-add H803 to flake8 ignore list * Fix nova/pci direct use of object modules * Gate on F402/pep8 * Inject expected results for IBM Power when testing bus devices * Add extensible resources to resource tracker * libvirt: define XML schema for recording nova instance metadata * Sync loopingcall from oslo * Add APIv2 support to make host optional on evacuate * Add differencing vhdx resize support in Hyper-V Driver * Imported Translations from Transifex * Add context as param 
to cleanup function * Downgrade the warn log in network to debug * Correct use of nova.image.glance in compute API * Keep Migration status in automatic confirm-resize * Removes useless stub of glanceclient create * Remove rescue/unrescue NotImplementedError handle * Add missing foreign key on pci_devices.compute_node_id * Revert "Add missing image to instance booted from volume" * Add debug log for pci passthrough filter * Cleanup and gate on hacking E711 and E712 rule * Keep resizing&resized instances when compute init * Commit quota when deallocate floating ip * Remove unnecessary error log in cell API * Remove stubs in favor of mock in test_policy * Remove translation for debug message * Fix error status code for agents * Remove warn log for over quota * Use oslo.i18n * Cleanup: remove unused argument * Implement methods to modify volume metadata * Minor tweaks to hypervisor_version to int * update ignore list for pep8 * Add decorator expected_errors for v3 attach_interfaces * Add instance to debug log at compute api * Don't truncate osapi_glance_link or osapi_compute_link prefixes * Add decorator expected_errors to V3 servers core * Correctly reject request to add lists of hosts to an aggregate * Do not process events for instances without host * Fix Cells ImagePropertiesFilter can raise exceptions * libvirt: remove flawed get_num_instances method impl * libvirt: remove unused list_instance_ids method * libvirt: speed up _get_disk_over_committed_size_total method * Partial oslo-incubator sync * VMware: Remove unnecessary deepcopy()s in test_configdrive * VMware: Convert vmops to use instance as an object * VMware: Trivial indentation cleanups in vmops * VMware: use datastore classes in file_move/delete/exists, mkdir * VMware: use datastore classes get_allowed_datastores/_sub_folder * VMware: DatastorePath join() and __eq__() * VMware: consolidate datastore code * VMware: Consolidate fake_session in test_(vm|ds)_util * Make BDM dict __init__ behave more like a 
dict * VMware: support the hotplug of a neutron port * Deallocate the network if rescheduling for Ironic * Make sure that metadata handler uses constant_time_compare() * Enable live migration unit test use instance object * Move volume_clear option to where it's used * move the cloudpipe_update API v2 extension to use objects * Avoid possible timing attack in metadata api * Move injected_network_template config to where it's used * Don't remove delete_on_terminate volumes on a reschedule * Defer raising an exception when deleting volumes * Xen: Cleanup orphan volume connections on boot failure * Adds more policy control to cells ext * shelve doesn't work on nova-cells environment * libvirt: add migrateToURI2 method to fakelibvirt * libvirt: fix recent test changes to work on libvirt < 0.9.13 * Update requirements to include decorator>=3.4.0 * Cleanup and gate on hacking E713 rule * libvirt: add version cap tied to gate CI testing * Small grammar fix in libvirt/driver.py. fix all occurrences * Correct exception for flavor extra spec create/update * Fixes Hyper-V SCSI slot selection * xenapi: Use netuils.get_injected_network_template * libvirt: Support IPv6 with LXC * Improve shared storage checks for live migration * XenAPI: VM diagnostics for v3 API * Move retry of prep_resize to conductor instead of scheduler * Retry db.api.instance_destroy on deadlock * Translations: add LC to all LOG.critical messages * Remove redundant code in Libvirt driver * Virt: fix typo (flavour should be flavor) * Fix and gate on H305 and H307 * Remove unused instance variables from HostState * Send compute.instance.create.end after launched_at is set * VMware: validate the network_info is defined * Security groups: add missing translation * Standardization of nova.image.API.download * Catch InvalidAggregateAction when deleting an aggregate * Restore ability to delete aggregate metadata * Nova-api service throws error when SIGHUP is sent * Remove cell api overrides for lock and unlock * 
Don't mask out HostState details in WeighedHost * vmware: VM diagnostics (v3 API only) * Use pool/volume_name notation when deleting RBD volumes * Add instanceset info to StopInstance response * Change compute updates from periodic to on demand * Store volume backed snapshot in current tenant * libvirt+lxc: Unmount guest FS from host on error * libvirt: speed up get_memory_mb_used method * libvirt: speed up get_vcpus method * libvirt: speed up get_all_block_devices method * libvirt: speed up list_instances method * libvirt: speed up list_instance_uuids method * Updated from global requirements * Fix interfaces template for two interfaces and IPv6 * Fix error status code for multinic * libvirt: fix typo in fakelibvirt listAllDomains() * Refactors VIF configuration logic * Add missing test coverage for MultiplePortsNotApplicable compute/api * Make the block device mapping retries configurable * Catch image and flavor exceptions in _build_and_run_instance * Restore instance flavor info when driver finish_migration fails * synchronize 'stop' and power state periodic task * Fix more re-definitions and enable F811/F813 in gate * Prepend '/dev/' to supplied dev names in the API * Handle over quota exception from Neutron * Remove pause/unpause NotImplementedError API layer * Add test cases for 2 block_device functions * Make compute api use util.check_string_length * add comment about why snapshot/backup have no lock check * VM diagnostics (v3 API only) * VM diagnostics: add serializer to Diagnostics object * VM diagnostics: add methods to class to update diagnotics * object-ify API v2 availability_zone extension * object-ify availability_zones * add get_by_metadata_key to AggregateList object * xenapi: make boot from volume use volumeops * libvirt: Avoid Glance.show on hard_reboot * Add host_ip to compute node object * VMware: move fake.py to the test directory * libvirt: convert cpuset XML handling to use set instead of string * virt: add method for formatting CPU sets 
to strings * Fixes rbd backend image size * Prevent max_count > 1 and specified ip address as input * Add aggregates.rst to devref index * VMware: virt unrescue method now supports objects * VMware: virt rescue method now supports objects * Remove duplicate python-pip from Fedora devref setup doc * Do not fail cell's instance deletion, if it's missing info_cache * libvirt: more efficient method to list domains on host * vmwareapi: make method signatures match parent class * Remove duplicate keys from dictionaries * virt: split CPU spec parsing code out into helper method * virt: move get_cpuset_ids into nova.virt.hardware * Fix duplicate definitions of variables/methods * change the firewall debugging for clarity * VMware: consolidate common constants into one file * Require posix_ipc for lockutils * hyperv: make method signatures match parent class * Format eph disk with specified format in libvirt * Resolve import dependency in consoleauth service * Add 'anon' kwarg to FakeDbBlockDeviceDict class * Make cells rpc bdm_update_or_create_at_top use BDM objects * Improve BlockDeviceMapping object cells awareness * Add support for user_id based authentication with Neutron * VMware: add in test utility to get correct VM backing * Change instance disappeared during destroy from Error to Warning * VMware: Fix race in spawn() when resizing cached image * VMware: add support for driver method instance_exists * Object-ify APIv3 agents extension * Object-ify APIv2 agents extension * Avoid re-adding iptables rules for instances that have disappeared * libvirt: Save device_path in connection_info when booting from volume * sync periodic_task fix from incubator * Fix virt BDM __setattr__ and __getattr__ * Handle InstanceUserDataTooLarge at api layer * Updated from global requirements * Mask node.session.auth.password in volume.py _run_iscsiadm debug logs * Nova api service doesn't handle SIGHUP properly * check ephemeral disk format at libvirt before use * Avoid referencing 
stale instance/network_info dicts in firewall * Use mtu setting from table instead of flag * Add debug log for core_filter * VMware: optimize VM spawn by caching the vm_ref after creating VM * libvirt: Add configuration of guest VCPU topology * virt: add helper module for determining VCPU topology * Change the comments of SOFT_DELETED race condition * Fix bad log message with glance client timeout * Move the instance_type_id judgment to the except-block * Update port binding when unshelve instance * Libvirt: Added suffix to configdrive_path required for rescue * sync policy logging fix from incubator * Sync process utils from olso * Remove instance_uuids argument to _schedule * Add __repr__ handler for NovaObjects * Pass instance to _reschedule rather than instance_uuid * Pass instance to _set_instance_error_state * Pass instance to _error_out_instance_on_exception * Add APIv3 support to make host optional on evacuate * Move rebuild to conductor and add find host logic * VMware: validate that VM exists on backend prior to deletion * VMware: remove duplicate key from test_instance dict * ConfigDriveBuilder refactor for tempdir cleanliness * VMware: cleanup the constructors of the compute drivers * Fix wrong lock name for operating instance external events * VMware: remove unused parameter 'network_info' * VM diagnostics: introduce Diagnostics model object * Fixes internal server error for add/remove tenant flavor access request * add repr for event objects * Sync oslo lockutils to nova * Neutronv2 api does not support neutron without port quota * Be explicit about objects in _shutdown_instance() * Pass instance object into _shutdown_instance() * Skip none value attributes for ec2 image bdm output * Fixed wrong assertion in test_vmops.py * Remove a not used function _get_ip_by_id * make lifecycle event logs more clear * xenapi: make method signatures match parent class * libvirt: make method signatures match parent class * virt: add test helper for checking public 
driver API method names * virt: fix signature of set_admin_password method * virt: use context & instance as param names in migrate APIs * virt: add get_instance_disk_info to virt driver API * vmwareapi: remove unused update_host_status method * libvirt: remove hack from ensure_filtering_rules_for_instance * libvirt: remove volume_driver_method API * libvirt: add '_' prefix to remaining internal methods * Imported Translations from Transifex * Fake driver: remove unused method get_disk_available_least * Baremetal driver: remove unused states * Fix nova/network direct use of object modules * Fix rest of API objects usage * Fix rest of compute objects usage * Clean conntrack records when removing floating ip * Updated from global requirements * Enforce task_state is None in ec2 create_image stop instance wait loop * Update compute rpcapi tests to use instance object instead of dict * Fix run_instance() rpc method to pass instance object * Fix error in rescue rpcapi that prevents sending objects * add checksums to udp independent of /dev/vhost-net * Use dot notation to access instance object fields in ec2 create_image * vmwareapi: remove unused fake vim logout method * vmware: remove unused delete_disk fake vim method * Revert "Sync revert and finish resize on instance.uuid" * Add test cases for block_device * Add assert_called check for "brclt addif" test * Log when nova-conductor connection established * Xen: Remove extraneous logging of type information * Fix agent_id with string type in API samples files for os-agents v2 * Fix update agent return agent_id with string for os-agents v3 * VMware: Fix fake raising the wrong exception in _remove_file * VMware: refactor get_datastore_ref_and_name * libvirt: introduce separate class for cpu tune XML config * libvirt: test setting of CPU tuning data * Make Evacuate API use Instance objects * VMware: create utility function for reconfiguring a VM * effectively disable libvirt live snapshotting * Fix exception raised when a 
requested console type is disabled * Add missing image to instance booted from volume * Use default rpc_response_timeout in unit tests * vmware: Use exc_info when logging exceptions * vmware: Reuse existing StorageError class * vmware: Refactor: fold volume_util.py into volumeops.py * Use ebtables to isolate dhcp traffic * Replace nova.utils.cpu_count() with processutils.get_worker_count() * Sync log and processutils from oslo * libvirt: add '_' prefix to host state information methods * libvirt: add '_' prefix to some get_host_* methods * Deprecate and remove agent_build_get_by_triple() * Object-ify xenapi driver's use of agent_build_get_by_triple() * Add Agent object * Move the error check for "brctl addif" * Add API schema for v2.1/v3 quota_sets API * Add API schema for v2.1/v3 flavors_extraspecs API * Add API schema for v2.1/v3 attach_interfaces API * Add API schema for v2.1/v3 remote_consoles API * Use auth_token from keystonemiddleware * Use _set_instance_obj_error_state in compute manager set_admin_password * api: remove unused function * api: remove useless get_actions() in consoles * Do not allow resize to zero disk flavor * api: remove dead code in WSGI XML serializer * Updated from global requirements * Standardize logging for nova.virt.libvirt * Fix log debug statement in compute manager * Add API schema for v2.1/v3 aggregates API * Fix object code direct use of other object modules * Fix the rest of direct uses of instance module objects * Imported Translations from Transifex * Add API schema for v2.1/v3 flavor_manage API * Forcibly set libvirt uri in baremetal virtual power driver * Synced jsonutils and its dependencies * Sync revert and finish resize on instance.uuid * Object-ify APIv3 availability_zone extension * Fix bug in TestObjectVersions * libvirt: add '_' prefix to all get_guest_*_config methods * libvirt: remove unused 'get_disks' method * Downgrade some exception LOG messages in the ec2 API * Conductor: remove irrelevant comment * Added 
statement for ... else * Avoid traceback logs from simple tenant usage extension * Fix detaching pci device failed * Adds instance lock check for live migrate * Don't follow HTTP_PROXY when talking to localhost test server * Correct the variable name in trusted filter * Target host in evacuate can't be the original one * Add API schema for v2.1/v3 hosts API * Object-ify APIv3 flavor_extraspecs extension * Object-ify APIv2 flavorextraspecs extension * Catch permission denied exception when update host * Fix resource cleanup in NetworkManager.allocate_fixed_ip * libvirt: Support snapshot creation via libgfapi * Allow evacuate from vm_state=Error * xenapi: reorder volume_utils * Replace assertTrue/False with assertEqual/NotEqual * Replace assert* with more suitable asserts in tests * Replace assertTrue/False with assertIn/NotIn * VMware: remove unused code in vm_util.py * Not count disabled compute node for statistics * Instance and volume cleanup when a build fails * wrap_instance_event() shouldn't swallow return codes * Don't replace instance object with dict in _allocate_network() * Determine shared ip from table instead of flag * Set reasonable defaults for new network values * Adds network fields to object * Add new fields to the networks table * Log exception if max scheduling attempts exceeded * Make remove_volume_connection() use objects * Create lvm.py module containing helper API for LVM * Reduce unit test times for glance * Should not delete active snapshot when instance is terminated * Add supported file system type check at virt layer * Don't store duplicate policies for server_group * Make exception handling in get_image_metadata more specific * live migrate conductor tasks to use nova.image.API * Fix Flavor object extra_specs and projects handling * Drop support for scheduler 2.x rpc interface * Drop support for conductor 1.x rpc interface * Deprecate glance_* configuration settings * Update websocketproxy to work with websockify 0.6 * XenAPI: 
disable/enable host will be failed when using XenServer * Remove traces of now unused host capabilities from scheduler * Fix BaremetalHostManager node detection logic * Add missing stats info to BaremetalNodeState * Replace assertTrue(not *) with assertFalse(*) * Clean nova.compute.api.API:_check_num_instances_quota * Fix the duplicated image params in a test * Imported Translations from Transifex * Fix "fixed_ip" parameters in unit tests * Removes the use of mutables as default args * Add API schema for v2.1/v3 create_backup API * Catch ProcessExecutionError in revoke_cert * Updated from global requirements * Sync oslo lockutils to nova * devref policy: code is canonical source of truth for API * Log cleanups for nova.virt.libvirt.volume * Log cleanups for nova.virt.libvirt.imagecache * Rename VolumeMapping to EC2VolumeMapping * ec2: Convert to use EC2InstanceMapping object * Add EC2InstanceMapping object for use in EC2 * Add hook for network info update * Enhance and test exception safety in hooks * Object-ify server_password APIv3 extension * Object-ify server_password APIv2 extension * Move the fixed_ips APIv2 extension to use objects * Completely object-ify the floating_ips_bulk V2 extension * Add bulk create/destroy functionality to FloatingIP * Cleanup and gate on pep8 rules that are stricter in hacking 0.9 * VMware: update file permissions and mode * Downgrade log level when create network failed * Updated from global requirements * libvirt: Use VIR_DOMAIN_AFFECT_LIVE for paused instances * Initialize objects field in ObjectsListBase class * Remove bdms from run_instance RPC conductor call * Sync "Prevent races in opportunistic db test cases" * Imported Translations from Transifex * Check the network_info obj type before invoke wait function * Migrate nvp-qos to generic name qos-queue * Add test for HypervisorUnavailable on conductor * Test force_config_drive as a boolean as last resort * Add helper functions for getting local disk * Add more logging to 
nova-network * Make resize raise exception when no valid host found * Fix doc for service list * Handle service creation race by service workers * Add configurable HTTP timeout to cinder API calls * Prevent clean-up of migrating instances on compute init * Deprecate neutron_* configuration settings * Skip migrations test_walk_versions instead of pass * Remove duplicate code in Objects create() function * Fix object change detection * Fix object leak in nova.tests.objects.test_fields.TestObject * Failure during termination should always leave state as error() * Make check_instance_shared_storage() use objects * Save connection info in libvirt after volume connect * Remove unused code from test_compute_cells * libvirt: Don't pass None for image_meta parameter in tests * Revert "Allow admin user to get all tenant's floating IPs" * libvirt: Remove use of db for flavor extra specs in tests * libvirt: Close opened file explicitly * Network: ensure that ports are 'unset' when instance is deleted * Don't translate debug level logs in nova * maint: Fixes wrong docstring of method get_memory_mb_used * Ensure changes to api.QUOTA_SYNC_FUNCTIONS are restored * Fix the wrong dest of 'vlan' option and add new 'vlan_start' option * Add deprecation warning to nova baremetal virt driver * Fixes typo error in Nova * Attach/detach interface to paused instance with affect live flag * Block device API missing translations for exceptions * Enabled swap disk to be resized when resizing instance * libvirt: return the correct instance path while cleanup_resize * Remove the device handling from pci device object * Use new pci device handling code in pci_manager * Separate the PCI device object handling code * xenapi: move find_vbd_by_number into volume utils * Virt: remove unnecesary return code * Fixes hyper-v volume attach when host is AD member * Remove variability from object change detection unit test * Remove XML namespace from some v3 extensions 2014.2.b1 --------- * xenapi: Do not 
retry snapshot upload on 500 * Fix H401,H402 violations and re-enable gating * Bump hacking to 0.9.x series * Change listen address on libvirt live-migration * Make get_console_output() use objects * Add testing for hooks * Handle string types for InstanceActionEvent exc_tb serialization * Revert "Remove broken quota-classes API" * Revert "Remove quota-class logic from context and make unit tests pass" * Fix cold-migrate missing retry info after scheduling * Disable rescheduling instance when no retry info * Fix infinitely reschedule instance due to miss retry info * Use VIF details dictionary to get physical_network * Fix live_migration method's docstring * Add subnet routes to network_info when Neutron is used * fix nova test_enforce_http_true unit test * novncproxy: Setup log when start nova-novncproxy * Make sure domain exists before referencing it * Network: add instance to the debug statement * V3 Pause: treat case when driver does not implement the operation * Don't translate debug level logs in nova.virt * Remove duplicate method * websocketproxy: remove leftover debug output * Remove unnecessary else block in compute manager set_admin_password * Treat instance objects like objects in set_admin_password flow * Move set_admin_password tests from test_compute.py to api/mgr modules * Fix a wrong comment in the code * maint: correct docstring parameter description * libvirt: Remove dated docstring * Add unit tests for ipv4/ipv6 format validation * Cleanup allocating networks when InstanceNotFound is raised * Add test to verify ironic api contracts * VMware: spawn refactor - phase 1 - test for spawn * Revert "Fix migration and instance resize update order" * Simplify filter_scheduler.populate_retry() * libvirt: Use os_command_line when kernel_id is set * libvirt: Make nwfilter driver use right filterref * libvirt: convert cpu features attribute from list to a set * Don't log TRACE info in notify_about_instance_usage * xenapi: add tests for find_bad_volumes * 
Revert "Remove traces of now unused host capabilities from scheduler" * Check the length of aggregate metadata * Add out of tree support dev policy * Deprecate instance_get_by_uuid() from conductor * Make metadata password routines use Instance object * Make SecurityGroupAPI use Object instead of instance_get_by_uuid() * Add development policies section to devref * Add read_only field attribute * Fix api direct use of instance module objects * Fix direct use of block_device module objects * Fix InstanceActionEvent traceback parameter not serializable * Fix state mutation in cells image filter * libvirt: split and test finish_migration disk resize * Use no_timer_check with soft-qemu * Add missing translation support * Update HACKING.rst to include N320 * Add tests to avoid inconsistent extension names * VMware: spawn refactor - Datastore class * VMware: remove dsutil.split_datastore_path * VMware: spawn refactor - DatastorePath class * Updated from global requirements * VMware: Fix memory leaks caused by caches * Allow user to specify image to use during rescue - V3 API changes * VMware: create utility functions * Check if volume is bootable when creating an instance * VMware: remove unused parameters in imagecache * xenapi: virt unrescue method now supports objects * libvirt: virt unrescue method now supports objects * libvirt: virt rescue method now supports objects * xenapi: virt rescue method now supports objects * Remove useless codes for server_group * Catch InstanceInfoCacheNotFound during build_instances * Do not replace the aggregate metadata when updating az * Move oslotest to test only requirements * libvirt: merge two utils tests files * libvirt: remove redundant 'libvirt_' prefix in test case names * xenapi: refactor detach volume * Add API schema for v2.1/v3 migrate_server API * Adds IVS unit tests for new VIF firewall logic * Don't set CONF options directly in unit tests * Fix docstring typo in need_legacy_block_device_info * Revert "Partially remove 
quota-class logic from nova.quotas" * Revert "Remove quota_class params from rest of nova.quota" * Revert "Remove quota_class db API calls" * Revert "Convert address to str in fixed_ip_obj.associate" * String-convert IPAddr objects for FixedIP.attach() * Updated from global requirements * Run instance root device determination fix * xenapi: tidy up volumeops tests * Don't return from a finally block * Support detection of fixed ip already in use * Rewrite nova policy to use the new changes of common policy * Treat instance objects as objects in unrescue API flow * Treat instance objects as objects in rescue API flow * Refactor test_rescue_unrescue into compute api/manager unit tests * Sync oslo network utils * Fix EC2 not found errors for volumes and snapshots * xenapi: refactor volumeops attach * xenapi: remove calls to call_xenapi in volumeops * xenapi: move StorageError into global exception.py * Virt: ensure that instance_exists uses objects * Use objects through the run_instance() path * Deprecate run_instance and remove unnecessary code * Change conductor to cast to build_and_run_instance * Fix migration and instance resize update order * remove cpu feature duplications in libvirt * Add unit test trap for object change detection * Sync periodic_task from oslo-incubator * VCDriver - Ignore host in Maintenance mode in stats update * Enable flake8 F841 checking * Imported Translations from Transifex * Reverse order of cinder.detach() and bdm.delete() * Correct exception info format of v3 flavor manage * Imported Translations from Transifex * Handle NetworkInUse exception in api layer * Correct exception handling when create aggregate * Properly skip coreutils readlink tests * Record right action name while migrate * Imported Translations from Transifex * Fix for multiple misspelled words * Refactor test to ensure file is closed * VM in rescue state must have a restricted set of actions * versions API: ignore request with a body * xenapi: fix live-migrate with 
volume attached * Add helper methods to convert disk * XenAPI: Tolerate multiple coalesces * Add helpers to create per-aggregate filters * Ensure live-migrate reverts if server not running * Raise HTTPInternalServerError when boot_from_volume with cinder down * Imported Translations from Transifex * [EC2]Correct the return status of attaching volume * Fix security group race condition while creating rule * VMware: spawn refactor - phase 1 - copy_virtual_disk * Catch InstanceNotFound exception if migration fails * Inject expected results for IBM Power when testing bus * Fix InstanceActionTestCase on PostgreSQL/MySQL * Fix ReservationTestCase on PostgreSQL * VMware: deprecate ESX driver from virt configuration * Add new ec2 instance db API calls * Remove two unused db.api methods * Fix direct use of aggregate module objects * Fix tests/compute direct use of instance module objects * share neutron admin auth tokens * Fix nova image-show with queued image * Catch missing Glance image attrs with None * Align internal image API with volume and network * Do not wait for neutron event if not powering on libvirt domain * Mask block_device_info auth_password in virt driver debug logs * Remove all mostly untranslated PO files * Payload meta_data is empty when remove metadata * Handle situation when key not memcached * Fix nova/compute direct use of instance module objects * Address issues with objects of same name * Register objects in more services * Imported Translations from Transifex * Default dhcp lease time of 120s is too short * Add VIF mac address to fixed_ips in notifications * Call _validate_instance_group_policy in _build_and_run_instance * Add refresh=True to get_available_nodes call in build_and_run_instance * Add better coverage support under tox * remove unneeded call to network_api on detach_interface * Cells: Pass instance objects to build_instances * XenAPI: Add logging information for cache/download duration * Remove spaces from SSH public key comment * 
Make hacking test more accurate * Fix security group race condition while listing and deleting rules * On rebuild check for null image_ref * Add a reference to the nova developer documentation * VMware: use default values in get_info() when properties are missing * VMware: uncaught exception during snapshot deletion * Enforce query order for getting VIFs by instance * Fix typo in comment * Allow admin user to get all tenant's floating IPs * Defer applying iptable changes when nova-network start * Remove traces of now unused host capabilities from scheduler * Add log translation hints * Imported Translations from Transifex * Fix CIDR values denoting hosts in PostgreSQL * Sync common db and db/sqlalchemy * Remove quota_class db API calls * Remove quota_class params from rest of nova.quota * Fix wrong quota calculation when deleting a resizing instance * Ignore etc/nova/nova.conf.sample * Fix wrong method name assert_called_once * Correct pci resources log * Downgrade log when attach interface can't find resources * Fixes Hyper-V iSCSI target login method * VMware: spawn refactor - phase 1 - fetch_image * vmware:Don't shadow builtin function type * Partially remove quota-class logic from nova.quotas and test_quotas * Convert address to str in fixed_ip_obj.associate * Accurate exception info in api layer for aggregate * minor corrections to devref rpc page * libvirt: Handle unsupported host capabilities * Fix the duplicated extension summaries * Imported Translations from Transifex * Raise more information on V2 API volumes when resource not found * Remove comments since it's pointless * Downgrade and fix log message for floating ip already disassociated * Fix wrong method name for test_hacking * Imported Translations from Transifex * Add specific regexp for timestamps in v2 xml * VMWare: spawn refactor - phase 1 - create_virtual_disk * VMware: spawn refactor - phase 1 - power_on_vm * Move tests into test_volume_utils * Tidy up xenapi/volume_utils.py * Updated from 
global requirements * VMware: Fix usage of an alternate ESX/vCenter port * VMware: Add check for datacenter with no datastore * Remove unused instance_update() method from virtapi * Make baremetal driver use Instance object for updates * Rename quota_injected_file_path_bytes * Imported Translations from Transifex * Fixes arguments parsing when executing command * Remove explicit dependency on amqplib * Deprecate action_event_*() from conductor * Remove conductor usage from compute.utils.EventReporter * Unit test case for more than 1 ephemeral disks in BDM * Network: replace neutron check with decorator * Update links in README * Add mailmap entry * XenAPI: Remove unneeded instance argument from image downloading * XenAPI: adjust bittorrent settings * Fix a minor comments error * Code Improvement * Fix the explanation of HTTPNotFound for cell showing v2 API * Add Nova API Sample file & test for get keypair * Add a docstring to hacking unit tests * Make libvirt driver use instance object for updates * Make vmwareapi/vmops use Instance object for updates * Convert xenapi/vmops uses of instance_update to objects * Make xenapi agent code use Instance object for updates * Check object's field * Use Field in fixed_ip * Remove logging in libvirt _connect_auth_cb to avoid eventlet locking * Fix v3 API extension names for camelcase * VMware: prevent image snapshot if no root disk defined * Remove unnecessary cleanup in test * Raise HTTPForbidden from os-floating-ips API rather than 404 * Improve hacking rule to avoid author markers * Remove and block DB access in dhcpbridge * Improve conductor error cases when unshelving * Dedup devref on unit tests * Shrink devref.unit_tests, since info is in wiki * Fix calls to mock.assert_not_called() * VMware: reduce unit test times * Fix wrong used ProcessExecutionError exception * Clean up openstack-common.conf * Revert "Address the comments of the merged image handler patch" * Remove duplicated import in unit test * Fix security group 
list when not defined for an instance * Include pending task in log message on skip sync_power_state * Make cells use Fault obj for create * libvirt: Handle `listDevices` unsupported exception * libvirt: Stub O_DIRECT in test if not supported * Deprecate instance_fault_create() from conductor * Remove conductor usage from add_instance_fault_from_exc() * Add create() method to InstanceFault object * Remove use of service_* conductor calls from xenapi host.py * Updated from global requirements * Optimize validate_networks to query neutron only when needed * Remove quota-class logic from context and make unit tests pass * VMware: spawn refactor - phase 1 - execute_create_vm * xenapi: fixup agent tests * Don't translate debug level logs in nova.spice, storage, tests and vnc * libvirt: Refresh volume connection_info after volume snapshot * Fix instance cross AZ check when attaching volumes * Raise descriptive error for over volume quota * Fix broken version responses * Don't translate debug level logs in objectstore, pci, rdp, servicegroup * Don't translate debug level logs in cloudpipe, hacking, ipv6, keymgr * Don't translate debug level logs in nova.cert, console and consoleauth * Don't translate debug level logs in nova.cmd and nova.db * Don't translate debug level logs in nova.objects * Don't translate debug level logs in nova.compute * Fix bad Mock calls to assert_called_once() * VCDriver - No longer returns uptime due to multiple hosts * Make live_migration use instance objects * wrap_check_security_groups_policy is already defined * Updated from global requirements * Don't translate debug level logs in nova.conductor * Don't translate debug level logs in nova.cells * Use strtime() specific timestamp regexp * Use datetime object for fake network timestamps * Use datetime object for stub created_at timestamp * Verify created_at cloudpipe timestamp is isotime * Verify next-available limit timestamps are isotime * Verify created/updated timestamps are isotime * Use 
timeutils.isotime() in images view builder * Use actual fake timestamp in API templates * Normalize API extension updated timestamp format * Regenerate API samples for GET /extensions * objects: remove unused utils module * objects: restore some datetime field comments * Add fault wrapper for rescue function * Add x-openstack-request-id to nova v3 responses * Remove unnecessary wrapper for 5 compute APIs * Update block_device_info to contain swap and ephemeral disks * Hacking: add rule number to HACKING.rst * Create the image mappings BDMs earlier in the boot * Delete in-process snapshot when deleting instance * Imported Translations from Transifex * Fixed many typos * VMware: remove unneeded code * Rename NotAuthorized exception to Forbidden * Add warning to periodic_task with interval 0 * Fix typo in unit tests * Remove a bogus and unnecessary docstring * Don't translate debug level logs in nova.api * Don't translate debug level logs in nova.volume * VMware: remove duplicate _fake_create_session code * libvirt: Make `fakelibvirt.libvirtError` match * ec2utils: Use VolumeMapping object * ec2: create volume mapping using nova object * Add VolumeMapping object for use in EC2 * Add new ec2 volume db API calls * Remove legacy block device usage in ec2 API * Deprecate instance_get_active_by_window_joined() from conductor * Deprecate instance_get_all_by_filters() from conductor * Don't translate debug level logs in nova.network * Fix bad param name in method docstring * Nova should pass device_id='' instead of None to neutron.update_port() * Set default auth_strategy to keystone * Support multi-version pydevd * replace NovaException with VirtualInterfaceCreate when neutron fails * Spice proxy config setting to be read from the spice group in nova.conf * xenapi: make auto_config_disk persist boot flag * Deprecate compute_unrescue() from conductor * Deprecate instance_destroy() from conductor * libvirt: fix comment for get_num_instances * Fix exception message being 
changed by nested exception * DescribeInstances in ec2 shows wrong image-message * Imported Translations from Transifex * Remove unused nova.crypto.compute_md5() * VMware: spawn refactor - phase 1 - get_vif_info * Remove comments and to-do for quota inconsistency * Set the volume access mode during volume attach * Fix a typo in compute/manager::remove_volume_connection() * XenAPI: Use local rsync rather than remote if possible * Delete image when backup operation failed on snapshot step * Fix migrate_instance_*() using DB for floating addresses * Ignore errors when deleting non-existing vifs * Use eventlet.tpool.Proxy for DB API calls * Improve performance for checking hosts AZs * Correct the log in conductor unshelve_instance * Imported Translations from Transifex * Make instance_exists() take an instance, not instance_name * Xen: Retry plugin call after connection reset * Remove metadata's network-api dependence on the database * Add helper method to determine disk size from instance properties * Deprecate nova-manage flavor subcommand * Updated from global requirements * Imported Translations from Transifex * VMware: remove unused variable * Scheduler: enable scheduler hint to pass the group name * Loosen import_exceptions to cover all of gettextutils * Don't translate debug level scheduler logs * VMWare - Check for compute node before triggering destroy * Update version aliases for rpc version control * make ec2 errors not useless * VMware: ensure rescue instance is deleted when instance is deleted * Ensure info cache updates don't overwhelm cells * Remove utils.reset_is_neutron() to avoid races * Remove unnecessary call to fetch info_cache * Remove deprecated config option names: Juno Edition * Don't overwrite instance object with dict in _init_instance() * Add specific doc build option to tox * Fix up import of conductor * Use one query instead of two for quota_usages * VMware: Log additional details of suds faults * Disable nova-manage network commands with 
Neutron V2 * Fix the explanations of HTTPNotFound for keypair's API * remove unneeded call to network_api on rebuild_instance * Deprecate network_migrate_instance_* from conductor * Deprecate aggregate_host_* operations in conductor * Convert instance_usage_audit() periodic task to objects * Return to using network_api directly for migrations * Make _is_multi_host() use objects * Remove unneeded call to fetch network info on shutdown * Instance groups: add method get_by_hint * Imported Translations from Transifex * GET details REST API next link missing 'details' * Don't explode if we fail to unplug VIFs after a failed boot * nit: correct docstring for FilterScheduler.schedule_run_instance * Revert "Fix network-api direct database hits in metadata server" * ec2: use BlockDeviceMappingList object * ec2: use SecurityGroup object * ec2: get services using ServiceList object * ec2: remove db.instance_system_metadata usage * Remove nova-clear-rabbit-queues * Allow -1 as the length of "get console output" API * Fix AvailabilityZone check for hosts in multiple aggregates * Move _get_locations to module level plus tests * Define constants for the VIF model types * Imported Translations from Transifex * Make aggregate host operations use Aggregate object * Convert poll_rescued_instances() periodic task to objects * Make update_available_resource() use objects * Add get_by_service() method to ComputeNodeList object * Add with_compute_node to service_get() * Make _get_compute_info() use objects * Pass configured auth strategy to neutronclient * Imported Translations from Transifex * Make quota rollback checks more robust in conductor tests * Updated from global requirements * Remove duplicate code from nova.db.sqlalchemy.utils * Downgrade the log level when automatic confirm_resize fails * Refactor unit tests for image service CRUD * Finish _delete_instance() object conversion * Make detach_volume() use objects * Add lock on API layer delete floating IP * ec2: Convert 
instance_get_by_uuid calls to objects * Fix network-api direct database hits in metadata server * Scheduler: remove test scheduling methods that are not used * Add info_cache as expected attribute when evacuate instance * Make compute manager use network api method return values * Allow user to specify image to use during rescue - V2 API changes * Allow user to specify image to use during rescue * Use debug level logging in unit tests, but don't save them * Update user_id length to match Keystone schema in volume_usage_cache * Avoid the possibility of truncating disk info file * Read deleted instances during lifecycle events * Add RBAC policy for ec2 API security groups calls * compute: using format_message() to convert exception to string * support local debug logging * Fix bug detach volume fails with "KeyError" in EC2 * Fix straggling uses of direct-to-database queries in nova-network * Xen: Do not resize root volumes * Remove mention of nova-manage.conf from nova-manage.rst * XenAPI: Add host information to glance download logs * Check image exists before calling inject_data * xenapi: Cleanup tar process on glance error * Missing catch InstanceNotFound in v3 API * Recover from POWERING-* state on compute manager start-up * Remove the unused _validate_device_name() * Adds missing expected_errors for V3 API multinic extension * Correct test boundary for libvirt_driver.get_info * Updated from global requirements * Update docs to reflect new default filters * Enable ServerGroup scheduler filters by default * Revert "Use debug level logging during unit tests" * Remove redundant tests from Qcow2TestCase * libvirt: remove_logical_volumes should remove each separately * VMware: Fixes the instance resize problem * Fix anti-affinity server-group boot failure * Nova utils: add in missing translation * Add exception handling in "nova diagnostics" * mark vif_driver as deprecated and log warning * Revert object-assuming changes to _post_live_migration() * Revert 
object-assuming changes to _post_live_migration() * libvirt: optimize pause mode support * Check for None or timestamp in availability zone api sample * Refactor Network API * Require admin context for interfaces on ext network * remove redundant copy of test_cache_base_dir_exists * Make sure leases are maintained until release * Add tests for remaining expected conductor exceptions * Fix Jenkins translation jobs * libvirt: pause mode is not supported by all drivers * Reduce config access in scheduler * VMWare: add power off vm before detach disk during unrescue * Reduce logging in scheduler * xenapi: add a test for _get_partitions * Refactor network_utils to new call_xenapi pattern * Sync request_id middleware bug fix from oslo * Make example 'entry_points' parameter a dictionary * Localized error exception message on delete host aggregate * Note that XML support *may* be removed * Change errors_out_migration decorator to work with RPC * low hanging fruit oslo-incubator sync * Fix description of ServerGroupAffinityFilter * Added test cases in ConfKeyManagerTestCase to verify fixed_key * Moved the registration of lifecycle event handler in init_host() * Change NotFound to InstanceNotFound in server_diagnostics.py * Remove unnecessary passing of task_state to check_instance_state * Rename instance_actions v3 to server_actions * Drop nova-rpc-zmq-receiver man-page * Correct the keypairs-get-resp.json API sample file * Make hypervisor_version an int in fakeVirt driver * Ensure network interfaces are in requested order * Reserve 10 migrations for backports * XenAPI: Calculate disk_available_least * Open Juno development 2014.1.rc1 ---------- * Fix getting instance events on subsequent attempts * Improved logs for add/remove security group rules * VMware: remove double import * VMware: clean up VNC console handling * Make conductor expect ActionEventNotFound for action methods * Remove zmq-receiver from setup.cfg * Add a note about deprecated group filters * Fix the 
section name in CONTRIBUTING.rst * Fix display of server group members * Add new style instance group scheduler filters * Automatically create groups that do not exist * Add InstanceGroup.get_by_name() * Remove unnecessary check for CONF.notify_on_state_change * Add nova.conf.sample to gitignore * Use binding:vif_details to control firewall * Disable volume attach/detach for suspended instances * Updated from global requirements * Persist image format to a file, to prevent attacks based on changing it * Add test cases for validate_extra_spec_keys * Catch InstanceInLocked exception for rescue and instance metadata APIs * Imported Translations from Transifex * Make 'VDI too big' more verbose * Use osapi_glance_link_prefix for image location header * postgres incompatibility in InstanceGroup.get_hosts() * Add missing test for None in sqlalchemy query filter * Use instance data instead of flavor in simple_tenant_usage extension * Sync oslo imageutils, strutils to Nova * Use correct project/user id in conductor.manager * fix the extension of README in etc/nova * Tell pip to install packages it sees globally * Change exception type from HTTPBadRequest to HTTPForbidden * Don't attempt to fill faults for instance_list if FlavorNotFound * Bypass the database if limit=0 for server-list requests * Fix availability-zone option miss when creates an instance * No longer any need to pass admin context to aggregate DB API methods * Updated Setting up Developer Environment for Ubuntu * Change libvirt close callback to use green thread * Re-work how debugger CLI opts are registered * Imported Translations from Transifex * _translate_from_glance() can cause an unnecessary HTTP request * Add UNSHELVING and RESCUING into IoOPSFilter consideration state * VMware: fix booting from volume * Do not add current tenant to private flavor access * Disable oslo.messaging debug logs * Update vm_mode when rebuilding instance with new image * VMware: fix list_instances for multi-node driver * 
VMware: Add utility method to retrieve remote objects * Use project/user from instance for quotas * Refactors unit tests of image service detail() * Refactors nova.image.glance unit tests for show() * Revert deprecation warning on Neutron auth * V2 API: remove unused imports * Change HTTPUnprocessableEntity to HTTPBadRequest * Rename _post_live_migration instance_ref arg * Add a decorator decorator that checks func args * Updated from global requirements * Instance groups: cleanup * Use the list when get information from libvirt * Remove unused quota_* calls from conductor * Use correct project/user for quotas * Include proper Content-Type in the HTTP Headers * Fix inconsistent quota usage for security group * Handling unlimited values when updating quota * Fix service API and cells * Remove unnecessary stubbing in test_services * InvalidCPUInfo exception added to except block * VMware: fix exception when no objects are returned * Don't allow empty or 0 volume size for images * Wait till message handling is done on service stop * Remove PciDeviceList usage in pci manager * Fix the rpc module import in the service module * Revert "VMware Driver update correct disk usage stat" * Catch HostBinaryNotFound exception in V2 API * Ignore InstanceNotFound while getting console output * Raise error on nova-api if missing subnets/fixed_ips on networks/port * Fix the explanations of HTTPNotFound for new APIs * Remove the nova.config.sample file * Refuse to block migrate instances with config drive * Include next link when default limit is reached * Catch NotImplementedError on Network Associate * VMware: add a file to help config the firewall for vnc * Change initial delay for servicegroup api reporting * Fix KeyError if neutron security group is not TCP/UDP/ICMP and no ports * Prevent rescheduling on block device failure * Check if nfs/glusterfs export is already mounted * Make compute API resize methods use Quotas objects * Remove commented out code in test_cinder_cloud * 
Update quantum to neutron in comment * Add deleted_at attribute in glance stub on delete() * Add API sample files of "unshelve a server" API * Remove unused method from fake_network.py * Don't refresh network cache for instances building or deleting * GlanceImageService static methods to module scope * Remove XenAPI driver deprecation warning log message * VMware: bug fix for host operations when using VMwareVCDriver * xenapi: boot from volume without image_ref * Use HTTPRequestV3 instead of HTTPRequest in v3 API tests * Cells: Send instance object for instance_delete_everywhere * Fix "computeFault" when v3 API "GET /versions/:(id)" is called * VMware: ensure that the task completed for resize operation * Change parameters of add_timestamp in ComputeDriverCPUMonitor class * Cells API calls return 501 when cells disabled * Add version 2.0 of conductor rpc interface * Added missing raise statement when checking the config driver format * Make NovaObject report changed-ness of its children * Increase volume creation max waiting time * Remove action-args from nova-manage help * VMware: fix rescue disk location when image is not linked clone * Fix comment for block_migration in nova/virt/libvirt/driver.py * Don't import library guestfs directly * Correct inheritance of nova.volume.cinder.API * VMware: enable booting an ISO with root disk size 0 * Remove bad log message in get_remote_image_service * Raise NotImplementedError in NeutronV2 API * Remove block_device_mapping_destroy() from conductor API * Make sure instance saves network_info when we go ACTIVE * Fix sqlalchemy utils test cases for SA 0.9.x * Fix equal_any() DB API helper * Remove migration_update() from conductor API * Remove instance_get() from conductor API * Remove aggregate_get_by_host() from conductor API * add support for host driver cleanup during shutdown * Add security_group_rule to objects registry * Remove aggregate_get() from conductor API * Delete meaningless lines in test_server_metadata.py * 
Imported Translations from Transifex * Move log statement to expose actually info_cache value * Fix input validation for V2 API server group API extension * Adds test for rebuild in compute api * Specify spacing on periodic_tasks in manager.py * network_info cache should be cleared before being rescheduled * Don't sync [system_]metadata down to cells on instance.save() * Fixes the Hyper-V agent individual disk metrics * VMware: remove unused code (_delete method in vmops.py) * Fix docstring for shelve_offload_instance in compute manager * Block database access in nova-network binary * Make nova-network use conductor for security groups refresh * Make nova-network use quotas object * Reverts change to default state_path * Fix raise_http_conflict_for_instance_invalid_state docstring * Cells: Pass instance objects to update/delete_instance_metadata * Don't detach root device volume * Revert "Adding image multiple location support" * Revert "Move libvirt RBD utilities to a new file" * Revert "enable cloning for rbd-backed ephemeral disks" * Add helper method for injecting data in an image * Add helper method for checking if VM is booting from a volume * Libvirt: Repair metadata injection into guests * Make linux_net use objects for last fixed ip query * Add get_by_network() to FixedIPList * Update aggregate should not allow duplicated names * Recover from REBOOT-* state on compute manager start-up * VMware: raise an exception for unsupported disk formats * VMware: ensure that deprecation does not appear for VC driver * rename ExtensionsResource to ExtensionsController * Ensure is_image_available handles V2 Glance API * libvirt: fix blockinfo get_device_name helper * Log Content-Type/Accept API request info * Remove the docker driver * xenapi: Speed up tests by not waiting on conductor * Updated from global requirements * xenapi: Fix test_rescue test to ensure assertions are valid * VMware: image cache aging * Add py27local tox target * Fix broken API os-migrations * 
Catch FloatingIpNotFoundForHost exception * Fix get_download_hander() typo * Handle IpAddressGenerationClient neutron * Delete ERROR+DELETING VMs during compute startup * VMware: delete vm snapshot after nova snapshot * Fix difference between mysql & psql of flavor-show * Add version 3.0 of scheduler rpc interface * Make libvirt wait for neutron to confirm plugging before boot * Task cleanup_running_deleted_instances can now use slave * Do not add HPET timer config to non x86 targets * Make test computations explicit * Instance groups: only display valid instances for policy members * Don't allow reboot when instance in rebooting_hard * VMware: add missing translations * Fix typo and add test for refresh_instance_security_rules * Add declaration of 'refresh_instance_security_rules' to virt driver * Remove mention of removed dhcp_options_enabled * Fix compute_node stats * Fix: Unshelving an instance uses original image * Noted that tox is the preferred unit tester * Updated development.environment.rst * Use instance object instead of _instance_update() * Remove compute virtapi BDM methods * enable cloning for rbd-backed ephemeral disks * Move libvirt RBD utilities to a new file * Fixup debug log statements in the nova compute manager * Use debug level logging during unit tests * Fix debug message formatting in server_external_events * VMware: VimException __str__ attempts to concatenate string to list * Mark ESX driver as deprecated * Volume operations should be blocked for non-null task state * xenapi: fix spawn servers with ephemeral disks * Fixes NoneType vcpu list returned by Libvirt driver * Add conversion type to LOG.exception's string * Remove compute API get_instance_bdms method * Move run_instance compute to BDM objects * Move live migration callbacks to BDM objects * Instance groups: validate policy configuration * Add REST API for instance group api extension * VMware: boot from iso support * Store neutron port status in VIF model * Correct network_model 
tests and __eq__ operator * Make network_cache more robust with neutron * Error out failed migrations * Fix BDM legacy usage with objects * Fix anti-affinity race condition on boot * Initial scheduler support for instance_groups * Add get_hosts to InstanceGroup object * Add instance to instance group in compute.api * Add add_members to InstanceGroup object * Remove run-time dependency on fixtures module by the nova baremetal * Make compute manager prune instance events on delete and migrate * Make compute manager's virtapi support waiting for events * Add os-server-external-events V3 API * Add os-server-external-events API * Add external_instance_event() method to compute manager * Fix invalid vim call in vim_util.get_dynamic_properties() * Rescue API handle NotImplementedError * VMware: Add a test helper to mock the suds client * VMware: Ensure test VM is running in rescue tests * Move _poll_volume_usage periodic task to BDM objects * Move instance_resize code paths to BDM objects * Make swap_volume code path use BDM objects * Fix log messages typos in rebuild_instance function * Move detach_volume and remove_vol_connection to BDM objects * Move instance delete to new-world BDM objects * VMware ESX: Boot from volume must not relocate vol * Fix development environment docs for redhat-based systems * neutron_metadata_proxy_shared_secret should not be written to log file * VMware: create datastore utility functions * Address the comments of the merged image handler patch * Ignore the image name when booting from volume 2014.1.b3 --------- * Fix typo in devref * VMware: refactor _get_volume_uuid * Add return value to some network API methods * Fixing host_ip configuration help message * No longer call check_uptodate.sh in pep8 * notifier middleware broken by oslo.messaging * regenerate the config file to support 1.3.0a9 * Add doc update for 4 filters which is missing in filter_scheduler.rst * Remove 3 unnecessary variables in scheduler * Adding image multiple location 
support * Move all shelve code paths to BDM objects * Move rebuild to BDM objects * sync sslutils to not conflict with oslo.messaging * Accurate comment in compute layer * Refactor xenapi/host.py to new call_xenapi pattern * Add a missing space in a log message * VMware: iscsi target discovery fails while attaching volumes * Remove warn log in quota function on API layer * Sync the latest DB code from oslo-incubator * Prevent thrashing when deploying many bm instances * Support configuring libvirt watchdog from flavors * Add watchdog device support to libvirt driver * Remove extra space at the end of help string * Port libvirt copy_image tests to mock * Updated from global requirements * Sync latest Guru Meditation Reports from Oslo * Skip sqlite-specific tests if sqlite is not configured * VMware: add in debug information for network selection * vmwareapi:Fix nova compute service down issue when injecting pure IPv6 * Make compute use quota object existing function * Fixes api samples for V2 os-assisted-volume-snapshots * Raise exception if volume snapshot id not found instead of return * Added os-security-groups prefix * VMware Driver update correct disk usage stat * attach/detach interface should raise exception when instance is locked * Restore get_available_resource method in docker driver * Make compute manager use InstanceInfoCache object for deletes * Deprecate conductor instance_type_get() and remove from VirtAPI * Make restore_instance pass the Instance object to compute manager * Use uuid instead of name for lvm backend * Adds get_console_connect_info API * Remove log_handler module from oslo-incubator sync * Remove deleted module flakes from openstack-common.conf * When a claim is rejected, explain why * Move xenapi/agent.py to new call_xenapi style * xenapi plugins: Make sure subprocesses finish executing * Update Oslo wiki link in README * Refactor pool.py to remove calls to call_xenapi * Move vbd plug/unplug into session object * xenapi: make session 
calls more discoverable * Make error notifications more consistent * Adds unit test for etc/nova/policy.json data * Support IPv6 when booting instances * xenapi: changes the debug log formatting * libvirt: raises exception when attempt to resize disk down * xenapi: stop destroy_vdi errors masking real error * Make resource_tracker use Flavor object * Make compute manager use Flavor object * Make baremetal driver use Flavor object instead of VirtAPI * Sync latest config file generator from oslo-incubator * Fixes evacuate doesn't honor enable password conf for v3 * Removed copyright from empty files * Fix the explanations of HTTPNotFound response * VMware: support instance objects * Add support for tenant_id based authentication with Neutron * Remove and recreate interface if already exists * Prevent caller from specifying id during Aggregate.create() * Enable flake8 H404 checking * Imported Translations from Transifex * Fix logic for aggregate_metadata_get_by_host_with_key test case * Use oslo-common's logging fixture * Re-Sync oslo-incubator fixtures * Updated from global requirements * Update pre_live_migration to take instance object * Remove unused method inject_file() * Remove db query from deallocate_fixed_ip * update deallocate_for_instance to take instance obj * Update server_diagnostics to use instance object * Move the metrics update to get_metrics * Unmount the NFS and GlusterFS shares on detach * Add a caching scheduler driver * libvirt: image property variable already defined * Replaces exception re-raising in Hyper-V * Remove blank space after print * VMware: add instance detail to detach log message * libvirt: Enable custom video RAM setting * Remove trailing comma from sample JSON * Add pack_action_start/finish helper to InstanceAction object * Rewrite InstanceActionEvent object testcase using mock * Clean up _make_*_list in object models to use base.obj_make_list * libvirt: remove explicit /dev/random rng default * Document virt driver methods that 
take Instance objects * Make interface attach and detach use objects * Pass instance object to soft_delete() and get_info() * libvirt: setting a correct driver name for iscsi volumes * libvirt: host specific virtio-rng backend * Fix HTTP methods for test_attach_interfaces * Fix the calls of webob exception classes * VMware: remove unused parameter from _wait_for_task * Downgrade the log level for floating IP associate * Removing redundant validation for rebuild request * VMware: add a test for driver capabilities * Catch HostBinaryNotFound exception when updating a service * VMware: ensure that datastore name exists prior to deleting disk * Move compute's _get_instance_volume_block_device_info to BDM objects * Use disk_bus and device_type in attaching volumes * Add device bus and type to virt attach_volume call * Make volume attach use objects * compute: invalid gettext message format * VMware: fix the VNC port allocation * VMware: fix datastore selection when token is returned * Hyper-V log cleanups * vmware: driver races to create instance images * Introduce Guru Meditation Reports into Nova * Updated from global requirements * Revert "VMware: fix race for datastore directory existence" * Use instance object for delete * Update ubuntu dev env instructions * VMware: fix race for datastore directory existence * libvirt: adding a random number generator device to instances * Add 'use_slave' to instance_get_all_by_filter in conductor * Fix the validation of flavor_extraspecs v2 API * Make webob.exc.HTTPForbidden return correct message * Use image from the api in run_instance, if present * Remove unused variables in the xenapi.vmops module * Describe addresses in ec2 api broken with neutron * Cleanup v3 test_versions * Fix import order in log_handler * Emit message which merged user-supplied argument in log_handler * Adds service request parameter filter for V3 API os-hosts request * Fix comment typo in nova/compute/api.py * stop throwing deprecation warnings on init 
* Remove broken quota-classes API * VMware: fix instance lookup against vSphere * Add a new compute API method for deleting retired services * Fix instance_get_all_by_host to actually use slave * Periodic task poll_bandwidth_usage can use slave * Partially revert "XenAPI: Monitor the GC when coalescing" * Mark XML as deprecated in the v2 API * adjust version definition for v3 to be only json * Fix option indenting in compute manager * Adds create backup server extension for the V3 API * Catch InstanceNotFound exceptions for V2 API instance_actions * Sync log.py from oslo * Make floating_ips module use FloatingIP for associations * Remove __del__ usage in vmwareapi driver * Fixed spelling errors in nova * LibVirt: Disable hairpin when using Neutron * VMware: optimize instance reference access * Serialize the notification payload in json * Add resource tracking to unshelve_instance() * Typo in the name 'libvirt_snapshot_compression' * Refactor driver BDM attach() to cover all uses * Fix assertEqual parameter order post V3 API admin-actions-split * Fix copyright messages after admin actions split for V3 API * Catch InstanceNotFound exceptions for V2 API virtual interfaces * Correct the assert() order in test_libvirt_blockinfo * Use disk_bus when guessing the device name for vol * libvirt: add virtio-scsi disk interface support * libvirt: configuration element for virtual controller * VMware: factor out management of controller keys and unit numbers * Remove unused notifier and rpc modules from oslo sync * Imported Translations from Transifex * Remove XML support from schemas v3 * Treat port attachment failures correctly * Add experimental warning for Cells * Add boolean convertor to "create multiple servers" API * VMware: prevent race for vmdk deletion * VMware: raise more specific exceptions * Disable IGMP snooping on hybrid Linux bridge * libvirt: remove retval from libvirt _set_host_enabled() * VMware: remove unused class * compute: format_message is a method not 
an attribute * MetricsWeigher: Added support of unavailable metrics * Fix incorrect kwargs 'reason' for HTTPBadRequest * Fix the indents of v3 API sample docs * Refactor get_iscsi_initiator to a common location * Fix compute_node_update() compatibility with older clients * XenAPI: Add the mechanism to attach a pci device to a VM * Remove underscore for the STATE_MAP variable * XenAPI: Add the support for updating the status of the host * libvirt: support configurable wipe methods for LVM backed instances * Fix InstanceNotFound error in _delete_instance_files * Ensure parent dir exists while injecting files * Convert post_live_migration_at_destination to objects * Convert remove_fixed_ip_to_instance to objects * Convert add_fixed_ip_to_instance to objects * Fix invalid facilities documented in rootwrap.conf * VMware: improve unit test time * Replace assertEqual(None, *) with assertIsNone in tests * Add comment/doc about utils.mkfs in rootwrap * Add mkfs to the baremetal-deploy-helper rootwrap * libvirt-volume: improve unit test time * Move consoleauth_manager option into nova.service and fix imports * libvirt: improve unit test time * Imported Translations from Transifex * Make is_neutron() thread-safe * Update the mailmap * Rewrite InstanceAction object test cases using mock * Make floating_ips module use FloatingIP for updates * Make floating_ips module use FloatingIP for (de-)allocations * Make floating_ips module use FloatingIP for all get queries * Make floating_ips module use Service object * Make floating_ips module use Instance object * Make floating_ips module use Network object * Make floating_ips module use FixedIP object * Fix break in vm_vdi_cleaner after oslo changes * Fixes the Hyper-V VolumeOpsTestCase base class * libvirt: Uses available method get_host_state * Add V3 api for pci support * Update docstring for baremetal opportunistic tests * Fix upper bound checking for flavor create parameters * Fixed check in image cache unit test * Count memory 
Explicitly teardown on error in libguestfs setup()
number generator device * VMware: fix instance rescue bug * Fix run_tests.sh lockutils when run with -d * Adds tests to sqlachemy.api._retry_on_deadlock * Replace detail for explanation msgs on webob exceptions * Allow operators to customize max header size * Prevent caller from specifying id during Migration.create() * Prevent caller from specifying id during KeyPair.create() * Prevent caller from specifying id during Service.create() * Prevent caller from specifying id during ComputeNode.create() * Clean IMAGE_SNAPSHOT_PENDING state on compute manager start up * Fix trivial typo in libvirt test comment * Refactoring metadata/base * Removes XML namespace from V3 API test_servers * correct the bugs reference url in man documents * Objectify instance_action for cell scheduler * Remove tox locale overrides * libvirt: use to_xml() in post_live_migration_at_destination * Removes os-instance-usage-audit-log from the V3 API * VMware: update test name * VMware: improve unit test performance * Fix english grammar in the quota error messages * Removes os-simple-tenant-usage from the V3 API * Fix a couple of unit test typos * Add HEAD api response for test s3 server BucketHandler * Removes XML support from security_groups v3 API * Hyper-V driver RDP console access support * Make consoleauth token verification pass an Instance object * Adds RDP console support * Fix migrations changing the type of deleted column * Add hpet option for time drifting * Typo in backwards compat names for notification drivers * Support building wheels (PEP-427) * Fix misspellings in nova * Disable file injection in baremetal by default * Drop unused dump_ SQL tables * Convert rescue_instance to objects * Convert set_admin_password to objects * The object_compat decorator should come first * Default video type to 'vga' for PowerKVM * Sync latest db.sqlalchemy from oslo-incubator * Guard against oversize flavor rxtx_factor float * Make libvirt use Flavor object instead of using VirtAPI * Fix 
instance metadata tracking during resets * Make delete_instance_metadata() use objects * Break out the meat of the object hydration process * V2 Pause: treat case when driver does not implement the operation * VMware: fix bug for exceptions thrown in _wait_for_task * Nova Docker: Metadata service doesn't work * nova: use RequestContextSerializer for notifications * Fix auto instance unrescue after poll period * Fix typos in hacking check warning numbers * Fix exception handling miss in remote_consoles * Don't try to restore VM's in state ERROR * Make it possible to disable polling for bandwidth usage * XenAPI: Monitor the GC when coalescing * Revert "Allow deleting instances while uuid lock is held" * report port number for address already in use errors * Update my mailmap * libvirt: Adds missing tests to copy_image * Sync latest gettextutils from oslo-incubator * Make change_instance_metadata() use objects * Add XenAPI driver deprecation warning log message * Adds host_ip to hypervisor show API * VMware: update the default 'task_poll_interval' time * Fixes Hyper-V VHDX snapshot bigger than instance * Define common "name" parameter for Nova v3 API * Stacktrace on error from libvirt during unfilter * Disable libvirt driver file injection by default * Add super call to db Base class * Fix baremetal stats type * Fix bittorrent URL configuration option * Fix VirtualInterfaceMacAddressException message * Add serializer capability to fake_notifier * Avoid deadlock when stringifying NetworkInfo model * Add hacking test to block cross-virt driver code usage * Hyper-V: Change variable in debug log message * Rename API schema modules with removing "_schema" * Fixed naming issue of variable in a debug statement formatting * Use new images when spawning BM instances * Remove get_instance_type and get_active_by_window from nova compute API * Make the simple_tenant_usage API use objects * Add instance_get_active_by_window_joined to InstanceList * Update nova.conf.sample for 
python-keystoneclient 0.5.0 * Add ESX quality warning * Set SCSI as the default cdrom bus for PowerKVM * Enforce FlavorExtraSpecs Key format * Fix scheduler_hints parameter of v3 API * Remove vi modelines * VMware: Remove some unused variables * Fix a bug in v3 API doc * Move logging out of BDM attach method * Add missing translation support * libvirt: making set_host_enabled to be a private methods * Remove unused variable * Call get_pgsql_connection_info from _test_postgresql_opportunistically * Port to oslo.messaging * Sync latest config file generator from oslo-incubator * Test guestfs without support for close_on_exit * Make nova-network use FixedIP object for vif queries and bulk create * Make nova-network use FixedIP for host and instance queries * Make nova-network use FixedIP object for associations * Make nova-network use FixedIP for get_by_address() queries * Add FixedIP.floating_ips dynamic property * Add FloatingIP object implementation * Add FixedIP Object implementation * Deal with old versions of libguestfs * Destroy docker container if spawn fails to set up network * Adds suspend server extension for V3 API * Adds pause server extension for V3 API * Removes XML namespace definitions from V3 API plugins * Remove XML support from migrations pci multiple_create v3 API plugins * Remove extra space in log message * Allow deleting instances while uuid lock is held * Add 'icehouse-compat' to [upgrade_levels] compute= * Make os-service API return correct error messages * Make fixed_ip_get_by_address() take columns_to_join * Refactor return value of fixed_ip_associate calls * Make nova-network use Network object for deleting networks * Make nova-network use Network for associations * Make nova-network use Network object for set_host() operation * Make nova-network use Network object for updates * Make nova-network use Network object for remaining "get" queries * Make nova-network use NetworkList for remaining "all" queries * Make nova-network use Network 
object for get-all-by-host query * Make nova-network a "conductor-using service" * Ignore 'dynamic' addr flag on bridge configuration * Remove XML support from some v3 API plugins * xenapi: clean up step decorator fake steps * Use objects internally in DriverBlockDevice class * Make snapshot_volume_backed use new-world objects * Make volume_snapshot_{create,delete} use objects * Move compute API is_volume_backed to BDM objects * Add block device mapping objects implementation * XenAPI: Wait for VDI on introduce * Shelve: The snapshot should be removed when delete instance * Revert "Allow deleting instances while uuid lock is held" * Retry reservation commit and rollback on deadlock * Adds lock server extension for V3 API * Remove duplicated method in mock_key_mgr * Add quality warning for non-standard libvirt configurations * Add docker driver removal warning * Remove V3 API XML entry points * Remove XML support from admin_password V3 API plugin * Remove XML support from certificates v3 API * Remove XML support from some v3 API plugins(e.g. 
services) * Remove XML support from some extension v3 API plugins * Remove XML support from some server v3 API plugins * Remove XML support from quota and scheduler_hints v3 API plugins * Remove XML support from flavor v3 API plugins * Revert "Fix race conditions between imagebackend and imagecache" * Remove XML support from v3 API plugins * Remove unused methods * Remove trace XML from unittests * removing xml from servers.py * Remove xml unit tests for v3 api plugins * Remove v3 xml API sample tests * Adds dmcrypt utility module * Adds ephemeral_key_uuid field to instance * Error message is malformed when removing a sec group from an instance * Do not set root device for libvirt+Xen * Docker Set Container name to Instance ID * Fix init of pci_stats in resource tracker * Catch NotImplementedError in get_spice_console in v2/v3 API * Minor changes to make certificates test cases use HTTPRequestV3 * VMware: Only include connected hosts in cluster stats * disk/api.py: refactors extends and adds missing tests * Make nova-network use Network to create networks * Make obj_to_primitive() handle netaddr types * Add Network object * Make service workers gracefully handle service creation race * support stevedore >= 0.14 * Increase the default retry for iscsi connects * Finish compacting pre-Icehouse database migrations * Compact pre-Icehouse database migrations <= 210 * Compact pre-Icehouse database migrations <= 200 * Compact pre-Icehouse database migrations <= 190 * Fix cache lock for image not consistent * VMware: fix image snapshot with attached volume * Use block_device_info at post_live_migration_at_destination * Update policy check on each action for certificates * Use (# of CPUs) workers by default * Remove policy check in db layer for aggregates * Remove unused configurations * VMware: fix exception when using multiple compute nodes * Remove copyright from empty files in nova * disk/api.py: resize2fs fails silently + adds tests * remove 2 unused function in 
libvirt: move unnecessary comment
ec2 api should check 'max_count'&'min_count' params
Ensure that exception raised in neutron are handled correctly * Fix updating device names when defaulting * libvirt: Fix confusing use of mox.StubOutWithMock * Sync request_id middleware for nova * Calculate default security group into quota usage * Allow run_image_cache_manager_pass to hit db slave * Consolidate the blockdev related filters * VMware: upload images to temporary directory * Refactor CIDR field to use netaddr.IPNetwork * Make nova-network use Instance objects * Make nova-network use Service object * Allow _check_instance_build_time to hit db slave * Set objects indirection API in metadata service * libvirt: Configuration element for sVirt support * VMware: unnecessary session reconnection * Add API schema for v3 multinic API * API schema for v3 console_output API * Workers verification for WSGI service * Remove unused dict BYTE_MULTIPLIERS * Optimize libvirt live migration workflow at source * libvirt, fix test tpool_execute_calls_libvirt * Using staticmethod to mock LibvirtDriver._supports_direct_io * Use the mangle checksum fill rule regardless to the multi_host * Enabled Libvirt driver to read 'os_command_line' from image properties * Update nova.conf.sample * Capture exception for JSON load in virt.storage_users * Ensure that headers are utf8, not unicode * Attribute snapshot not defined in libvirt/config.py * ec2 api should check 'max_count'&'min_count' para * nova docker driver cannot find cgroup in /proc/mounts on RHEL * VMware: fix rescue with disks are not hot-addable * VMware: bug fix for VM rescue when config drive is configured * Define common API parameter types * Fixed a problem in iSCSI multipath * Fix unhandled InvalidServerState exceptions in server start/stop * Cells rebuild regression fix * Fix potential fd leak * Rename instance_type to flavor in libvirt virt driver tests * Rename instance_type to flavor in vmware virt driver tests * Improve error message in services API * Make image props filter handle old vm_modes * XenAPI: Use 
direct IO for writing config drive * Avoid unnecessary use of rootwrap for some network commands * Remove unused copyright from nova.api.__init__ * replace type() to isinstance() in nova * Make availability_zone optional in create for aggregates * libvirt: Fix infinite loop waiting for block job * baremetal: stop deployment if block devices are not available * Cleanup 'deleting' instances on restart * Ignore duplicate delete requests * Let drivers override default rebuild() behaviour * Enable compute_node_update to tolerate deadlocks * xenapi: resize up ephemeral disks * xenapi: refactor generate_ephemeral * xenapi: refactor resize_up_root_vdi * Abstract add_timestamp out of ComputeDriverCPUMonitor class * Revert "Whitelist external netaddr requirement" * The private method _text_node should be used as function * Add finer granularity to host aggregate APIs * Remove unused import * Adds new method nova.utils.get_hash_str * Make nova/quota use keypair objects * VMware: update test file names * Ensure instance action event list in order * Docker Driver doesn't respect CPU limit * libvirt: stop overwriting LibvirtConfigCPU in get_host_capabilities * Cleanup the flake8 section of tox.ini * Use the full string for localisation * Don't deallocate/reallocate networks on reschedules * Cleanup object usage in the rebuild path * Fix test case with wrong parameter in test_quota_classes * Remove unused variables in imagebackend.py * Remove unused code in test_attach_interfaces.py * Whitelist external netaddr requirement * Better exception handling for deletes during build * Translate the snapshot_pending state for old instances * Prevent Instance.refresh() from returning a new info cache * Extends V3 os-hypervisor api for pci support * Sync config generator from oslo-incubator * Imported Translations from Transifex * Remove uneeded dhcp_opts initialization * Update class/function name for test_extended_availability_zone.py * Allow deleting instances while uuid lock is held * 
xenapi: add support for vcpu_pin_set * xenapi: more info when assert_can_migrate fails * fix ips to 'ips' in APIRouter * Hyper-V:Preserve config drive image after the instance is resized * fix log message in APIRouter * VMware: use session.call_method to invoke api's * Rename instance_type to flavor in hyper-v virt driver * Rename instance_type to flavor in xenapi virt driver * Compact pre-Icehouse database migrations <= 180 * Change when exists notification is sent for rescue * Revert change of default FS from ext3 to etx4 * Convert nova.compute.manager's _spawn to objects * Add alias as prefix for flavor_rxtx v3 * Remove unused code in nova/api/ec2/__init__.py * Remove unused import * VMware: improve connection issue diagnostic * Fixes messages logged on Glance plugin retries * Aggregate: Hosts isolation based on image properties * Fix for qemu-nbd hang * Return policy error, not generic error * Fix lxc rootfs attached two devices in some action * Removes disk-config extension from v3 api * Fix typo'ed deprecated flag names in libvirt.imagebackend * Disable libguestfs' default atexit handlers * Add API schema for v3 extended_volumes API * Catch InstanceIsLocked exception on server actions * Fix inconsistent "image" value on _get_image() * Add API schema for v3 keypairs API * Add API schema for v3 flavor_access API * Add API schema for v3 agents API * Add API schema for v3 admin_password API * Adds a PREPARED state after baremetal node power on * Make scheduler rpcapi use object serializer * Update log message when remove pci device * Add unit test for ListOfStrings field in object models * Sync oslo db.sqlalchemy.utils to nova * Remove duplicated test * Fixing availability-zone not take effect error * Fix image cache periodic task concurrent access bug * Fix interprocess locks for run_tests.sh * lxc: Fix a bug of baselineCPU parse failure * platform independence for test_virt unit tests * Imagecache: fix docstring * libvirt: Set "Disabled Reason" to None when 
enable nova compute * Change log from ERROR to WARNING when instance absent * VMware: clean up unnecessary help message of options * Don't use deprecated module commands * Add apache2 license header to appropriate files for enabling H102 * XenAPI: Allow use of clone_vdi on all SR types * Remove unused variables in test_conductor.py * Do not use contextlib.nested if only mock one function * Remove update_service_capabilities from nova * Adds user_data extension to nova.api.v3.extensions * Add wsgiref to requirements.txt * pass the empty body into the controller * Imported Translations from Transifex * Revert recent change to ComputeNode * sync oslo service to fix SIGHUP handling * Fix parameter checking about quota update api * Spelling fix resouce=>resource * Change default ephemeral FS to ext4 * When inject admin password, no need to generate temp file * Make _change_index_columns use existing utility methods * Fix interprocess locks when running unit-tests * Cleanup object usage in the delete path * Change RPC post_live_migration_at_destination from call to cast * Pass rbd_user id and conf path as part of RBD URI for qemu-img * Allow some instance polling periodic tasks to hit db slave * Sync timeutils from oslo-incubator * Catch NotImplementedError for vnc in the api * List NotImplementedError as a client exception for vnc * remove vmwareapi.vmops.get_console_output() * Object-ify build_and_run_instance * Retry on deadlock in instance_metadata_update * use 'os_type' in ephemeral filename only if mkfs defined * ValueError should use '%' instead of ',' * Setting the xen vm device id on vm record * Rename instance_type to flavor in nova.utils and nova.compute.utils * Rename instance_type to flavor in nova.cloudpipe * Serialize instance object while building request_spec * Make rebuild use Instance objects * Remove deprecated config aliases * Changed error message to match usage * Add configurable 120s timeout ovs-vsctl calls * Clarify rebuild_instance's recreate 
parameter * Clean swap_volume rollback, on libvirt exception * Image cache: move all of the variables to a common place * baremetal: set capabilites explicitly * Remove docker's unsupported capabilities * Set a sane default for state_path * Fix incorrect exception on os-migrateLive * barematal: Cleanup the calls to assertEqual * Refactor time conversion helper function for objects in db api * Fixes ConfigDrive bug on Windows * Remove smoketests * Revert graceful shutdown patch * Handle InstanceUserDataMalformed in create server v2 api * Enable remote debugging for nova * Fix race in unit tests, which can cause gate job to fail * Add boolean convertor to cells sync_instances API * Initialize iptables rules on initialization of MetadataManager * vmwareapi: raise on get_console_output * hyperv: remove get_console_output method * List NotImplementedError as client exception * api: handle NotImplementedError for console output * Make Serializer/Conductor able to backlevel objects * Make ec2 use Flavor object * Move restore and rebuild operations to Flavor objects * Add flavor access methods to Instance object * Rename instance_type to flavor in nova.network tree * Stop, Rescue, and Delete should give guest a chance to shutdown * Remove middleware ratelimits from v3 api * Remove unused variables in neutron api interface and neutron tests * Remove unneeded call to conductor in network interface * Return client tokens in EC2 DescribeInstances * Require List objects to be able to backlevel their contents * Make Instance object compatible with older compute nodes * Deprecate/remove scheduler select_hosts() * Pass Instance object to console output virt driver api * Send Instance object to validate_console_port * Pass Instance object to compute vnc rpc api * Update vnc virt driver api to take Instance object * Add error as not-in-progress migration status * Don't replace instance.info_cache on each save * Add boolean convertors for migrate_live API * VMWare: bug fix for Vim 
exception handling * XenAPI: Synchronize on all VBD plug/unplug per VM * Add IPAddress field type in object models * Fixes errors on start/stop unittest * Use a dictionary to eliminate the inner loop in _choose_host_filters() * Correct uses of :params in docstrings * Delete iSCSI devices after volume detached * Prevent spoofing instance_id from neutron to nova * Replaces call to lvs with blockdev * Refactor PXE DHCP Option support * Normalize the weights instead of using raw values * Compact pre-Icehouse database migrations <= 170 * XenAPI: Speedup host_ref cannot change - get it once * Updated from global requirements * Rename instance_type to flavor in test_utils and nova.tests.utils * Rename instance_type to flavor in baremetal virt driver * VMware: fix bug when more than one datacenter exists * Sync oslo lockutils for "fix lockutils.lock() to make it thread-safe" * Move calls to os.path.exists() in libvirt imagebackend * Ensure api_paste_conf is an absolute path * Log exception in _heal_instance_info_cache * Raise better exception if duplicate security groups * Remove the largely obsolete basepath helper * libvirt: Custom disk_bus setting is being lost on hard_reboot * Libvirt: Making the video driver element configurable * Give migrations tests more time to run * Remove the api_thread_pool option from libvirt driver * baremetal: volume driver refactoring and tests * Sync middleware audit, base, and notifier from oslo * Get test_openAuth_can_refuse_None_uri to cleanup after itself * Hide injected_file related quotas for V3 API * Make obj_from_primitive() preserve version information * Cells: check states on resize/rebuild updates * Make flavor_access extension use Flavor object * libvirt: add a test to guard against set_host_enabled raising an error * Fix UnboundLocalError in libvirt.driver._close_callback * Quota violations should not cause a stacktrace in the logs * Enforce permissions in snapshots temporary dir * Sync rpc fix from oslo-incubator * Fix 
changes-since filter for list-servers API * Make it possible to override test timeout value * Imported Translations from Transifex * libvirt: consider minimal I/O size when selecting cache type * Setup destination disk from virt_disk_size * Add Flavor object * Add atomic flavor access creation * Add extra_resources field to compute_nodes table * Recommend the right call instead of datetime.now() * libvirt: remove unused imports from fake libvirt utils * VMware: fix disk extend bug when no space on datastore * Fix monkey_patch docstring bug * Change unit test for availability_zones.reset_cache * Make compute support monitors and store metrics * Added a new scheduler metrics weight plugin * LXC: Image device should be reset in mount() and teardown() * Add shutdown option to cleanup running periodic * xenapi: Update VM memory overhead estimation * Misc typos in nova * Add default arguments for Connection class * Update Instance from database after destroy * Libvirt: Adding video device to instances * Configuration element for describing video drivers * Don't log stacktrace for UnexpectedTaskStateError * Extends V3 servers api for pci support 2014.1.b1 --------- * LOG.warn() and LOG.error() should support translation * Minor change for typo from patch 80b11279b * network_device_mtu should be IntOpt * Fix HTTP response code for network APIs and improve error message * Use password masking utility provided in Oslo * Sync log.py from Oslo-incubator * xenapi: stop hang during glance download * Clean up test cases for compute.manager._check_instance_build_time * Recover from IMAGE-* state on compute manager start-up * Document when config options were deprecated * VMware: Fix unhandled session failure issues * Use utils method when getting instance metadata and system metadata * Add status mapping for shutoff instance when resize * Fix docstring on SnapshotController * Fix trivial typo 'descirption' * Compact pre-Icehouse database migrations <= 160 * Compact pre-Icehouse 
database migrations <= 150 * Compact pre-Icehouse database migrations <= 140 * Remove redundant body validation for createBackup * Change evacuate test hostnames to preferable ones * Change conductor live migrate task to use select_destinations() * Ensure proper notifications are sent when build finishes * Periodic task _heal_instance_info_cache can now use slave db * docker: access system_metadata as a dict * Don't overwrite marker when checking if it exists * There is no need to set VM status to ERROR on a failed migration * DB migration 209: Clean up child rows as well * Cleanup ec2/metadata/osapi address/port listen config option help * Recover from build state on compute manager start-up * Comply with new hacking 0.8 release * Correct network_device_mtu help string * Remove last of AssertEquals * Fix Neutron Authentication for Metadata Service * Update help for osapi_compute_listen_port * libvirt: host update disable/enable report HTTP 400 * Catch InstanceIsLocked exception on server actions * VMware: enable driver to work with postgres database * Make test_evacuate from compute API DRYer * Fix testcase config option imports * Fix "in" comparisons with one element tuples * Remove _security_group_chain_name from nova/virt/firewall.py * Remove duplicate setting of os_type in libvirt config builder * Fix logic in LibvirtConnTestCase._check_xml_and_uri * Remove unused flag 'host_state_interval' * Make object compat work with more positional args * Fix LibvirtGenericVIFDriver.get_config() for quota * Fix a tiny double quote matching in field obj model * Move flags in libvirt's volume to the libvirt group * Check Neutron port quota during validate_networks in API * Failure during termination should always leave state as Error(Deleting) * Remove duplicate FlavorNotFound exception handling in server create API * Make check more pythonic * Make sure report_interval is less than service_down_time * Set is_public to False by default for volume backed snapshots * Delete 
instance faults when deleting instance * Pass Instance object to spice compute rpc api * Pass Instance object to get_spice_console virt api * Remove update_service_capabilities from scheduler rpc api * Remove SchedulerDependentManager * powervm: remove powervm virt driver from nova * libvirt: Provide a port field for GlusterFS network disks * Add API input validation framework * Remove duplicate BuildAbortException block * Remove compute 2.x rpc api * Add v3 of compute rpc API * Fix incorrect argument position in DbQuotaDriver * Change ConductorManager to self.db when record cold_migrate event * instance state will be stuck in unshelving when unshelve fails * Fix some i18n issue in nova/compute/manager.py * Don't gate on E125 * Supplement 'os-migrateLive' in actions list * Corrected typo in host_manager * Fix a lazy-load exception in security_group_update() * fakevirt: return hypervisor_version as an int instead of a string * Bump to sqlalchemy-migrate 0.8.2 * ComputeFilter shouldn't generate a warning for disabled hosts * Remove cert 1.X rpc api * Add V2 rpc api for cert * Remove console 1.X rpc api * Do not hide exception in update_instance_cache_with_nw_info * Wrong handling of Instance expected_task_state * XenAPI: Fix caching of images * Extend LibvirtConfigGuest to parse guest cpu element info * Rename instance_type parameter in migrate_disk_and_power_off to flavor * convert min_count and max_count to type int in nova v3 api * Add decorator expected_errors for flavors_extraspecs v3 * Remove nullable=True in models.py which is set by default * baremetal: Make api validate mac address * Use 204 instead of 202 for delete of keypairs v3 * Fix log message format issue for api * Remove "set()" from CoreAPIMissing exception * Move flag in libvirt's vif to the libvirt group * Move flag in libvirt's utils to the libvirt group * Move flags in libvirt's imagebackend to the libvirt group * Extend the scheduler HostState for metrics from compute_node * docker: return 
hypervisor_version as an int rather than a string * Sync Log Levels from OSLO * Removes check CONF.dhcp_options_enabled from nova * Improved debug ability for log message of cold migration * Adjust the order of notification for shelve instance * Add FloatField for objects * XenAPI: Fix config section usage * Fix performance of Server List with Neutron for Admins * Add context as parameter for two libvirt APIs * Add context as parameter for resume * xenapi: move session into new client module * xenapi: stop key_init timeout failing set password * xenapi: workaround vbd.plug race * Address infinite loop in nova compute when getting network info * Use of logging in native thread causes deadlock connecting to libvirtd * Add v3 api samples for shelve * Imported Translations from Transifex * libvirt: Fix log message when disable/enable a host * Fix missing format specifier in ImagePropertiesFilter log message * Sync the DB2 communication error code change from olso * baremetal: refactor out powervm dependency * handle migration errors * Make compute manager _init_instance use native objects * Fix for reading the xenapi_device_id from image metadata * Check if reboot request type is None * Use model_query() instead of session.query in db.instance_destroy * Fix up spelling mistake * Periodic task _poll_unconfirmed_resizes can now use slave db * Include image block device maps in info * Sync local from oslo * objects: declare some methods as static * Handle UnicodeEncodeError in validate_integer * Remove traces of V3 personality extension from api samples * Removes os-personalities extension from the V3 API * VMware: add support for VM diagnostics * Remove useless api sample template files for flavor-rxtx v3 * Fix libvirt evacuate instance on shared storage fails * Fixes get_vm_storage_paths issue for Hyper-V V2 API * Clean up how test env variables are parsed * Add missing argument max_size in libvirt driver * VMware: Always upload a snapshot as a preallocated disk * Fix 
empty selector XML bug * Libvirt:Instance resize confirm issue against NFS * Add V2 rpc api for console * Fix sample parameter of agent API * VMware: fix snapshot failure when host in maintenance mode * Clean up unused variables * Add a driver method to toggle instance booting * Fix cells instance_create extra kwarg * handle empty network info in instance cache * Remove deprecated instance_type alias from nova-manage * xenapi: kernel and ramdisk missing after live-migrate * Remove V2 API version of coverage extensions * Remove V3 API version of coverage extension * Update openstack/common/periodic_task * Use 201 instead of 200 for action create of flavor-manage v3 * Enforce metadata string type on key/value pairs * Fixes RequestContext initialization failure * Move flags in libvirt's imagecache to the libvirt group * Move base_dir_name option to somewhere more central * Move some libvirt specific flags into a group * Removed unused instance object helper function * Update openstack/common/lockutils * Rename InstanceType exceptions to Flavor * Added monitor (e.g. 
CPU) to monitor and collect data * Conditionalise automatic enabling of disabled host * Users with admin role in Nova should not re-auth with Neutron * Use 400 instead of 422 for invalid input in v3 servers core * Fix limits v3 follow API v3 rules * Remove used_limits extension from the V3 API * Remove reduntant call to update_instance_info_cache * Add flavor-extra-specs to core for V3 API * Add flavor-access to core for V3 API * Remove unused libvirt_ovs_bridge flag * Fix AttributeError(s) from get_v4/6_ips_by_interface * Raising exception for invalid floating_ip's ID * libvirt: Allow delete to complete when a volume disconnect fails * replace assertNotEquals with assertNotEqual * Add V3 api samples for access_ips * Add v3 api samples for scheduler-hints * Add v3 api samples for availability_zone * Add V3 API sample for server's actions * Cache Neutron Client for Admin Scenarios * More instance_type -> flavor renames in db.api * Cache compute node info in Hypervisor api * Reverse the quota reservation in revert_resize * Rename virtapi.instance_type_get to flavor_get * Xenapi: Allow windows builds with xentools 6.1 and 6.2 * Make baremetal support metadata for ephemeral block-device-mapping * Make baremetal_deploy_helper understand ephemeral disks * Removed unused methods from db.api * Fix type mismatch errors in NetworkTestCase * VMware: Detach volume should not delete vmdk * xenapi: Fix agent update message format * xenapi: Fix regression issue in agent update * Shrink the exception handling range * Moved quota headroom calculations into quota_reserve * Remove dup of LibvirtISCSIVolumeDriver in LibvirtISERVolumeDriver * Replace assertEquals with assertEqual - tests/etc * libvirt: pass instance to a log() call in the standard way * xenapi: Move settings to their own config section * domainEventRegisterAny called too often * Allow configuring the wsgi pool size * driver tests (loose ends): replace assertEquals with assertEqual * baremetal: replace assertEquals with 
assertEqual * image tests: replace assertEquals with assertEqual * virt root tests: replace assertEquals with assertEqual * Remove unnecessary steps for cold snapshots * baremetal: Make volume driver use a correct source device * Update quota-class-set/quota-set throw 500 error * Add log_handler to implement the publish_errors config option * Imported Translations from Transifex * Enable non-ascii characters in flavor names * Move docker specific options into a group * Check return code of command instead of checking stderr * Added tests for get_disk_bus_for_disk_dev function * Checking existence of index before dropping * add hints to api_samples documentation * xenapi: check for IP address in live migration pre check * Remove live_snapshot plumbing * Remove unused local variable in test_compute * Make v3 admin_password parameters consistent * Flavor name should not contain only white spaces * fix a typo error in test_libvirt_vif.py * Remove unused local variables in test case * Rename _get_vm_state to _get_vm_status * Ensure deleted instances' status is always DELETED * Let resource_tracker report right migration status * Imported Translations from Transifex * nit: fix indentation * Always pass context to compute driver destroy() * Imported Translations from Transifex * db tests: replace assertEquals with assertEqual * compute tests: replace assertEquals with assertEqual * Catch exception while building due to instance being deleted * Refactor UnexpectedTaskStateError for handling of deleting instances * Parted 'invalid option' in XenAPI driver * Specify DB URL on command-line for schema_diff.py * Fix `NoopQuotaDriver.get_(project|user)_quotas` format * Send delete.end with latest instance state * Add missing fields in DriverBlockDevice * Fix the boto version comparison * Add test for class InsertFromSelect * Process image BDM earlier to avoid duplicates * Clean BDM when snapshoting volume-backed instances * Remove superflous 'instances' joinedload * Fix OLE 
error for HyperV * Make the vmware pause/unpause unit tests actually test something * Fixes the destroy() method for the Docker virt driver * xenapi: converting XenAPIVolumeTestCase to NoDB * Move `diff_dict` to compute API * Add compatibility for InstanceMetadata and primitives * Issue brctl/delif only if the bridge exists * ensure we don't boot oversized images * Add V3 API samples for config-drive * Remove duplicated test * Add notification for host operation * Sync log from oslo * Replace assertEquals with assertEqual - tests/scheduler * Make non-admin users can unshelve a server * Fix interface-attach removes existing interfaces from db * Correct exception handling * Utilizes assertIsNone and assertIsNotNone - tests/etc * Use elevated context in resource_tracker.instance_claim * Add updates and notifications to build_and_run_instance * Add network handling to build_and_run_instance * Make unshelve use new style BDM * Make _get_instance_nw_info() use Instance object * Convert evacuation code to use objects * Deprecate two security_group-related methods from conductor * Make metadata server use objects for Instance and Security Groups * Replace assertEquals with assertEqual - tests/api * Remove security_group-related methods from VirtAPI * Make virt/firewall use objects for Security Groups and Rules * Drop auth_token configs for api-paste.ini * Add auth_token settings to nova.conf.sample * Use _get_server_admin_password() * Pass volume_api to get_encryption_metadata * Comments for db.api.compute_node_*() methods * Fix migration 185 to work with old fkey names * Adds V3 API samples for user-data * Enforce compute:update policy in V3 API * tenant_id implies all_tenants for servers list in V3 API * Move get_all_tenants policy enforcement to API * all_tenants=0 should not return instances from all tenants * Utilizes assertIsNone and assertIsNotNone - tests/virt * xenapi: workaround for failing vbd detach * xenapi: strip base_mirror after live-migrate * xenapi: 
refactor get_all_vdis_in_sr * Remove unused expected_sub_attrs * Remove useless variable from libvirt/driver.py * Add a metadata type validation when creating vm * Update schema_diff.py to use 'postgresql' URLs * Disable nova-compute on libvirt connectivity exceptions * Make InstanceInfoCache load base attributes * Add SecurityGroupRule object * Add ephemeral_mb record to bm_nodes * Stylistic improvement of models.ComputeNodeStat * clean up numeric expressions in tests * replaced e.message with unicode(e) * Add DeleteFromSelect to avoid database's limit * Imported Translations from Transifex * Utilizes assertIsNone and assertIsNotNone - tests/api * Include name/level in unit test log messages * Remove instance_type* proxy methods from nova.db.api * Add InstanceList.get_by_security_group() * Make security_group_rule_get_by_security_group() honor columns * Claim IPv6 is unsupported if no interface with IPv6 configured * Pass thru credentials to allow re-authentication * network tests: replace assertEquals with assertEqual * Nova-all: Replace basestring by six for python3 compatability * clean up numeric expressions with byte constants * Adds upper bound checking for flavor create parameters * Remove fake_vm_ref from test_vmwareapi.py * xen tests: replace assertEquals with assertEqual * Fix tests to work with mysql+postgres concurrently * Enable extension access_ips for v3 API * Correct update extension point's check_func for v3 server's controller * Updates the documentation for nova unit tests * Remove consoleauth 1.X rpc api * consoleauth: retain havana rpc client compat * Pull system_metadata for notifications on instance.save() * Allow _sync_power_states periodic task to hit slave DB * Fix power manager hangs while executing ipmitool * Update my mailmap * Stored metrics into compute_nodes as a json dictionary * Bad except clauses order causes wrong text in http response * Add nova.db.migration.db_initial_version() * Fix consoleauth check_token for rpcapi v2 * 
Nova db/api.py docstring cleanups.. * Adds XML namespace example for disk config extension * Remove multipath mapping device descriptor * VMware: fix VM resize bug * VMware: fix bug for reporting instance UUID's * Remove extra space in tox.ini * Fix migrate w/ cells * Add tests for compute (child) cell * Call baselineCPU for full feature list * Change testing of same flavor resize * Fix bad typo in cloudpipe.py * Fix compute_api tests for migrate * Replace basestring by six for python3 compatability * Add flavor-manage to core for V3 API * Refactor unit tests code for python3 compatability * make libvirt driver get_connection thread-safe * Remove duplicates from exceptions list * Apply six for metaclass * Add byte unit constants * Add block device handling to build_and_run_instance * Reply with a meaningful exception, when libvirt connection is broken * Fix getting nwinfo for Instance obj * Make cells info_cache updates more tolerant * Raise an error if module import fails * Drop RPC securemessage.py and crypto module * Remove deprecated libvirt VIF driver code * nova.exception does not have a ProcessExecutionError * Fix setting backdoor port in service start * Sync lockutils from oslo * Fix wrong description when updating quotas * Expose additional status in baremetal API extension * migrate server doesn't raise correct exception * Make security_group_get() more flexible about joins * Make Object FieldType take an object name instead of a class * Hyper-v: Change the hyper-v error log for debug when resize failed * Adds V3 API samples for the disk-config extension * Utilizes assertIn - tests/etc * Fix all scripts to honor the enabled_ssl_apis flag * Updated from global requirements * Fix i18n issue for nova/compute/manager.py * Change tab to blank space in hypervisors-detail-resp * Fixing ephemeral disk creation * Merging two mkfs commands * xenapi: ephemeral disk partition should fill disk * Fix the ConsolesController class doc string * xenapi: Speeding up the 
easy cases of test_xenapi * xenapi: Speeding up more tests by switching to NoDB * Remove .pyc files before generating sample conf * xenapi: migrate multiple ephemeral disks * Fail quickly if file injection for boot volume * Add obj_make_compatible() * Updated from global requirements * Make cells 'flavorid' for resizes * Fixes unicode issue in the Hyper-V driver * Add missing ' to extra_specs debug message * VMware: Fix ValueError unsupported format character in log message * graceful-shutdown: add graceful shutdown into compute * remove unused network module from certificates api extension * Sync fixture module from oslo * Fixes Invalid tag name error when using k:v tagname * Fix tests for migration 227 to check sqlite * Adds V3 API samples for console output * Add V2 rpc api for consoleauth * Update version aliases for rpc version control * Improve object instantiation syntax in some tests * A nicer calling convention for object instantiation * Updates OpenStack Style Commandments link * Updated from global requirements * Adding support for multiple hypervisor versions * Manage None value for the 'os_type' property * Add CIDR field type * Validate parameters of agent API * Adding Read-Only volume attaching support to Nova * Update timeutils.py from oslo * Fix docstring related to create_backup API * powervm tests: replace assertEquals with assertEqual * Add V3 API sample for admin-password * Remove duplicated test cases * Add extension access_ips for v3 API * Ensure migration 209 works with NULL fkey values * Cells: Fix instance deletes * Uses oslo.imageutils * Add testr concurrency option for run_tests.sh * Fix the image name of a shelved server * xenapi: test_driver should use NoDBTestCase * xenapi: Speedup vm_util and vmops tests * xenapi: speedup test_wait_for_instance_to_start * Remove xenapi rpm building code * Fixes datastore selection bug * Fixes Hyper-V snapshot spawning issue * Make SecurityGroup receive context * Fix DB API mismatch with sqlalchemy API 
* Remove aggregate metadata methods from conductor and virtapi * Make XenAPI use Aggregate object * libvirt: add missing i18n support * Adds V3 API samples for attach-interfaces * Make aggregate methods use new-world objects * Add missing key attribute to AggregateList.get_by_host() * Fix i18n issue for nova/virt/baremetal/virtual_power_driver.py * Fix scheduler rpcapi deprecated method comment * Send notifications on keypair create/delete * Use `versionutils.is_compatible` for Dom0 plugin * Use `versionutils.is_compatible` for Nova Objects * Improve logging messages in libvirt driver * xenapi: stop agent errors stopping build * Fix NovaObject versioning attribute usage * xenapi: removes sleep after final upload retry * xenapi: stop using get_all_vdis_in_sr in spawn * populate local-ipv4 address in config drive * Harden version checking for boto * Handle MarkerNotFound better in Flavor API * Sanitize passwords when logging payload in wsgi * Remove unnecessary "LOG.error()" statement * xenapi: simplify _migrate_disk_resizing_up * xenapi: revert on _migrate_disk_resizing_up error * xenapi: make _migrate_disk_resizing_up use @step * libvirt tests: replace assertEquals with assertEqual * Use the oslo fixture module * Port server actions unittests to V3 API Part 2 * Remove unused method _get_res_pool_ref from VMware * Imported Translations from Transifex * Check for None when cleaning PCI dev usage * Fix vmwareapi driver get_diagnostics calls * Remove instance_info_cache_update() from conductor * compute api should throw exception if soft reboot invalid state VM * Make a note about Object deepcopy helper * Avoid caching quota.QUOTAS in Quotas object * Remove transitional callable field interface * Make the base object infrastructure use Fields * Migrate some tests that were using callable fields * Migrate NovaPersistentObject and ObjectListBase to Fields * Migrate Instance object to Fields * Utilizes assertIn - tests/api/etc * Utilizes assertIn - tests/virt * Utilizes 
assertIn - tests/api/contrib * Utilizes assertIn - tests/api/v3 * Make scheduler disk_filter take swap into account * Add variable to expand for format string * Make quota sets update type handling a bit safer * Add test_instance_get_active_by_window_joined * Fixes error on live-migration of volume-backed vm * Migrate PciDevice object to Fields * Migrate InstanceInfoCache object to Fields * Migrate InstanceFault object to Fields * Migrate Service object to Fields * Migrate ComputeNode object to Fields * Migrate Quotas object to Fields * Migrate InstanceGroup object to Fields * Migrate InstanceAction and InstanceActionEvent objects to Fields * Move exception definitions out of db api * Remove unused scheduler rpcapi from compute api * Libvirt: disallow live-mig for volume-backed with local disk * xeanpi: pass network_info to generate_configdrive * Replace incorrect Null checking to return correctly * Fix nova DB 215 migration script logic error * Xenapi: set hostname when performing a network reset * Fix "resource" length in project_user_quotas table * Migrate SecurityGroup object to Fields * Migrate Migration object to Fields * VMware: fix regression attaching iscsi cinder volumes * Remove whitespace from cfg options * cleanup after boto 2.14 fix * Add boto special casing for param changes in 2.14 * xenapi: simplify PV vs HVM selection logic * fix missing host when unshelving * Fix a typo of tabstop * Fix error message of os-cells sync_instances api * Log which filter failed when on log level INFO * Migrate KeyPair object to Fields * Migrate Aggregate object to Fields * Make field object support transitional call-based interface * Add Field model and tests * Fix conductor's object change detection * Remove obsolete redhat-eventlet.patch * Move is_volume_backed_instance to new style BDM * Add a get_root_bdm utility function * Libvirt: allow more than one boot device * Libvirt: make boot dev a list in GuestConfig * Remove compute_api_class config option * Libvirt: 
add boot_index to block device info dicts * Fixes Hyper-V issue with VHD file format * Update log message for add_host_to_aggregate * Correct use of ConfigFilesNotFoundError * hyperv tests: replace assertEquals with assertEqual * Utilizes assertNotIn * VMware tests: replace assertEquals with assertEqual * Fix incorrect root partition size and compatible volume name * Imported Translations from Transifex * Utilize assertIsInstance * Fix typos in nova/api code * Make `update_test` compatible with nose * Add a custom iboot power driver for nova bm * Fix FK violation errors in InstanceActionTestCase * Fix test_shadow_tables() on PostgreSQL/MySQL * Fix PCI devices DB API tests * Fix DB API tests depending on the order of rows * Use print function rather than print statement * Update default for running_deleted_instance_action * Drop unused BM start_console/stop_console methods * VMware: Network fallback in case specified one not found * baremetal: Add missing method to volume driver * baremetal: Use network API to get fixed IPs * Replace decprecated method aliases in tests * catch exception in start and stop server api * Ensure that the netaddr import is in the 3rd party section * Fix status code of server's action confirm_resize for v3 * Remove duplicated method in test_compute_api.py * Create flavor-access for the tenant when creating a private flavor * Fix root disk not be detached after deleting lxc container * fallocate image only when user has write access * Fixes typo in ListTargets CLI in hyperv driver * Fixes typos in nova/db code * Fixes typos in the files in the nova folder * Avoid clobbering {system_,}metadata dicts passed to instance update * Baremetal: Be more patient with IPMI and BMC * VMware: fix bug with booting from volumes * Fixes typos in nova/compute files * Fixes typos in virt files * Fix docstring for disk_cachemodes * Plug Vif into Midonet using Neutron port binding * VMware: remove deprecated configuration variable * Fix races in v3 cells 
extension tests * Add V3 API samples for consoles * Update allowvssprovider in xenstore_data * Fix races in cells extension tests * Move `utils.hash_file` -> `imagecache._hash_file` * Remove `utils.timefunc` function * Remove `utils.total_seconds` * Remove `utils.get_from_path` * Fix divergence in attach_interfaces extensions * Replace assert_ with assertTrue * Fixes several misc typos in scheduler code * Fix libvirt test on systems with real iSCSI devices * Reserve 10 migrations for backports * Sync three-part RPC versions support from Oslo * Remove unused dict functions from utils * Avoid mutable default args in _test_populate_filter_props * XenAPI: Add versioning for plugins * Add Docstring to some scheduler/driver.py methods * Libvirt: default device bus for floppy block devs * Fix filter_properties of unshelve API * hyperv: Initialize target_iqn in attach_volume * Log if a quota_usages sync updates usage information 2013.2.rc1 ---------- * Open Icehouse development * baremetal: Fix misuse of "instance" parameter of attach/detach_volume * Fix the wrong params of attach/detach interface for compute driver * Imported Translations from Transifex * Adds missing entry in setup.cfg for V3 API shelve plugin * Avoid spamming conductor logs with object exceptions * Prefix `utils.get_root_helper` with underscore * Remove `utils.debug` * Remove `utils.last_octet` * Remove `utils.parse_mailmap` * Updated from global requirements * Remove unecessary `get_boolean` function * Make Exception.format_message aware of Messages * Disable lazy gettext * VMware: Check for the propSet attribute's existence before using * VMware: fix bug for invalid data access * Make rbd.libvirt_info parent class compatible * Host aggregate configuration throws exception * VMware: Handle cases when there are no hosts in cluster * VMWare: Disabling linked clone doesn't cache images * Fixes inconsistency in flavors list with marker * Fix indentation in virt.libvirt.blockinfo module * Update 
jsonutils.py from oslo * Fix loading instance fault in servers view * Refactor test cases related to instance object * Use system locale for default request language * Update attach interface api to use new network model * Adds V3 API specific urlmap tests * Catch volume errors during local delete * Fix processutils.execute errors on windows * Fixes rescue doesn't honor enable password conf for v3 * VMware: Fix bug for root disk size * Fix incorrect exception raised during evacuate * Full sync of quota_usages * Fix log format error in lazy-load message * xenapi: reduce impact of errors during SR.scan * Forced scheduling should be logged as Audit not Debug * xenapi: Resize operations could be faster * Resource limits check sometimes enforced for forced scheduling * Skip test if sqlite3 not installed * Add notification for pause/unpause instance * Make LiveMigrateTask use build_request_spec() * Ensure image property not set to None in build_request_spec() * Make sure periodic task sync_power_states continues on error * get_all_flavors uses id as key to be unique * fix the an Unexpected API Error issue in flavor API * Adds V3 API samples for srvcs, tenant usage, server_diagnostics * VMware: Fix SwitchNotFound error when network exists * Fix unicode string values missing in previous patch * Fix stopping instance in sync_power_states * Remove deprecated task states * plug_vif raise NotImplementedError instead of pass * Check instance exists or not when evacuate * xenapi: ignore 500 errors from agent resetnetwork * Add flavor name validation when create flavor * xenapi: enforce filters after live-migration * xenapi: set vcpu cap to ensure weight is applied * Get image metadata in to_xml for generating xml * Add notification on deleting instance without host * Fix V3 API flavor returning empty string for attributes * Fix v3 server rebuild deserializer checking with wrong access_ip key * Windows instances require the timezone to be "localtime" * Don't wrap Glance 
exceptions in NovaExceptions * Update rootwrap with code from oslo * fix typo & grammer in comment 363-364 * Make Instance.refresh() extra careful about recursive loads * Log object lazy-loads * Ensure we don't end up with invalid exceptions again * Fix console db can't load attribute pool * Fix HTTP response for PortNotFound during boot (v3 API) * Fixes assertion bug in test_cells_weights.py * Remove _get_compute_info from filter_scheduler.py * VMware: fix bug for incorrect cluster access * Add V3 API samples for security-groups * Correct lock path for storage-registry-lock * Moved registration of lifcycle events handler in init_host() * Rebuilding stopped instance should not set terminated_at * Require oslo.config 1.2.0 final * Removes pre_live_migration need for Fixed IPs * Move call to _default_block_device_names() inside try block * Fix several flake8 issues in the plugins/xenserver code * Fix type is overwritten when UPDATE cell without type specified * Adds v3 API samples for hide server addresses and keypairs * Always filter out multicast from reflection * VMware: fix bug with booting from volume * VMware: enable VNC access without user having to enter password * Remove exceptions.Duplicate * Add v3 API samples for rescue * Added 'page_size' param to image list * Fix SecurityGroupsOutputTest v3 security group tests * Fixes file mode bits of compute/manager.py * Adds v3 API samples for hosts extension * Only update PCI stats if they are reported from the host * xenapi: Cleanup pluginlib_nova * Fix Instance object assumptions about joins * Bring up interface when enslaving to a bridge * v3 API samples for servers * xenapi: refactor: move UpdateGlanceImage to common * Imported Translations from Transifex * Fixes modules with wrong file mode bits in virt package * Adds v3 API samples for ips and server_metadata extensions * Fix V3 API server metadata XML serialization * libvirt: add test case for _hard_reboot * Add tests for pre_live_migration * Adds V3 API 
samples for evacuate,ext-az,ext-serv-attrs * Add V3 API samples for ext-status,hypervisor,admin-actions * Code change for regex filter matching * Convert TestCases to NoDBTestCase * VMware: ensure that resource exists prior to accessing * Fixes modules with wrong file mode bits * Fixes test scripts with wrong bitmode * Update sample config generator script * Instance object incorrectly handles None info_cache * Don't allow pci_devices/security_groups to be None * Allow for nested object fields that cannot be None * Object cleanups * Convert TestCases to NoDBTestCase * Convert TestCases to NoDBTestCase * Actually fix info_cache healing lazy load * Fixes host stats for VMWareVCDriver * libvirt: ignore false exception due to slow NFS on resize-revert * Syncs install_venv_common.py from oslo-incubator * Correct deleted_at value in notification messages * VMwareVCDriver Fix sparse disk copy error on spawn * Remove unused _instance_update() method in compute api * Change service id to compute for compute/api.py * XenAPI raise InstanceNotFound in _get_vm_opaque_ref * Replace OpenStack LLC with OpenStack Foundation * Send notification for any updates to instance objects * Add flag to make baremetal.pxe file injection optional * Force textmode consoles on baremetal * Typo: certicates=>certificates in nova.conf * Remove print statement from test_quotas that fails H233 check * Fix for os-availability-zone/detail returning 500 * Convert TestCases to NoDBTestCase * Fixes the usage of PowerVMFileTransferFailed class * MultiprocessWSGITest wait for workers to die bug * Prune node stats at compute node delete time * VMware: datastore regex not honoured * VMware: handle exceptions from RetrievePropertiesEx correctly * VMware: Fix volume detach failure * Remove two unused config options in baremetal * Adds API samples and unitests for os-server-usage V3 extension * xenapi: Make rescue safer * Add V3 API samples for quota-sets/class-sets,inst-usage-audit-log * Fix problem with 
starting Windows 7 instances using VMware Driver * VMware: bug fix for instance deletion with attached volume * Fix migration 201 tests to actually test changes * Don't change the default attach-method * Fix snapshot failure with VMwareVCDriver * Fix quota direct DB access in compute * Add new-world Quota object * Fix use of bare list/dict types in instance_group object * Fix non-unicode string values on objects * Add missing get_available_nodes() refresh arg * Make Instance.Name() not lazy-load things * Add debugging to ComputeCapabilitiesFilter * xenapi: fix pep8 violations in nova plugins * Retry on deadlock in instance_metadata_delete * Make virt drivers use a consistent hostname * [VMware] Fix problem transferring files with ipv6 host * VMware: Fix ensure_vlan_bridge to work properly with existing DVS * Fix network info injection in pure IPv6 environment * delete a non existent flavor extra spec returns 204 * Don't use ModelBase.save() inside of transaction * send the good binding to neutron after live-migration * Add linked clone related unit tests for VMware Hyper * Ensure anti affinity scheduling works * pci passthrough bug fix:hasattr dones not work for dict * Fix rename q_exc to n_exc (from quantum to neutron) * Improve "keypair data is invalid" error message * Enable fake driver can live migration * Don't use sudo to discover ipv4 address * xenapi: Fix rescue * Fix create's response is different with requested for sec-grps V3 * Fix logging of failed baremetal commands * Add v3 API samples for os-extended-volumes * Better help for generate config * Fix hyper-v vhd real size bigger than flavor issue * Remove unused and duplicate code * Policy check for forced_host should be before the instance is created * Remove cached console auth token after migration * Don't generate notifications when reaping running_deleted instances * Add instance_flavor_id to the notification message * Edits for nova.conf.sample * xenapi: fix where root_gb=0 causes problems * Wire 
in ConfKeyManager._generate_hex_key! * Drop unused logger from keymgr/__init__.py * Move required keymgr classes out of nova/tests * Translate more REST API error messages * pci passthrough fails while trying to decode extra_info * Update requirements not to boto 2.13.0 * Port server actions unittests to V3 API Part 1 * Remove unused method in scheduler driver * Ignore H803 from Hacking * Fixes misuse of assertTrue in virt test scripts * Add missing notifications for rescue/unrescue * Libvirt: volume driver set correct device type * Make v3 API versions extensions core * Make Instance.save() log missing save handlers * Don't fail if volume has no image metadata * Get image properties instead of the whole image * Remove extra 'console' key for index in extensions consoles v3 * Fix V3 API server extension point exception propagation * VMware: nova-compute crashes if VC not available * Update mailmap for jhesketh * Code change for nova support glance ipv6 address * disassociate_address response should match ec2 * Adds V3 API samples for remote consoles, deferred delete * Fix asymmetric view of object fields * Use test.TestingException where possible * Add encryption support for volumes to libvirt * VMware: fix driver support for hypervisor uptime * Wrong arguments when calling safe_utils.getcallargs() * Add key manager implementation with static key * Remove duplication in disk checks * Change the duplicate class name TestDictMatches in test_matches.py * Add alias as prefix to request params for config_drive v3 * xenapi: Add per-instance memory overhead values * Fixes misuse of assertTrue in test scripts * Remove unused and wrong code in test_compute.py * Remove cases of 'except Exception' in tests.image * Remove _assert_compute_node_has_enough_memory from filter_scheduler.py * Fix regression issues with cells target filter * Remove out of date list of jenkins jobs * Don't lose exception info * Add filter for soft-deleted instances to periodic cleanup task * Don't 
return query from db API * Update fedora dev env instructions * Only return requested network ID's * Ensure get_all_flavors returns deleted items * Fix the order of query output for postgres * Fix migration 211 to downgrade with MySQL * Removed duplicated class in exception.py * Fix console api pass tuple as topic to console rpc api * Enable test_create_multiple_servers test for V3 API * VMware image clone strategy settings and overrides * Reduce DB load caused by heal instance info cache * Clean up object comparison routines in tests * Clean up duplicated change-building code in objects * disable direct mounting of qcow2 images by default * xenapi: ensure finish_migration cleans on errors * xenapi: regroup spawn steps for better progress * xenapi: stop injecting the hostname during resize * xenapi: add tests for finish_migration and spawn * xenapi: tidy ups to some spawn related methods * xenapi: move kernel/ramdisk methods to vm_utils * xenapi: ensure pool based migrate is live * Fix live-migrate when source image deleted * Adds v3 API samples for limits and simple tenant usage * Return a NetworkInfo object instead of a list * Fix compute_node_get_all() for Nova Baremetal * Add Neutron port check for the creation of multiple instances * Remove unused exceptions * Add V3 API samples for flavor-manage,flavor-extra-specs * Add V3 API samples for flavors,flavor-rxtx,flavor-access * Catch more accuracy exception for _lookup_by_name * Fixes race cond between delete and confirm resize * Fixes unexpected exception message in ProjectUserQuotaNotFound * Fixes unexpected exception message in PciConfigInvalidWhitelist * Add missing indexes back in from 152 * Fix the bootfile_name method call in baremetal * update .mailmap * Don't stacktrace on ImageNotFound in image_snapshot * Fix PCIDevice ignoring missing DB attributes * Revert "Call safe_encode() instead of str()" * Avoid errors on some actions when image not usable * Add methods to get image metadata from instance * Fix 
inconsistent usages for network resources * Revert baremetal v3 API extension * Fixes misuse of assertTrue in compute test scripts * add conf for number of conductor workers * xenapi: Add efficient impl of instance_exists() 2013.2.b3 --------- * Updated from global requirements * Fix failure to emit notification on Instance.save() * MultiprocessWSGITest wait for workers to die bug * Synchronize the key manager interface with Cinder * Remove indirect dependency from requirements.txt * Clean up check for migration 213 * Add V3 API samples for instance-actions,extenions * fix conversion type missing * Enable libvirt driver to use the new BDM format * Allow block devices without device_name * Port to oslo.messaging.Notifier API * Add expected_errors for extension aggregates v3 * Refresh network info cache for secgroups * Port "Make flavors is_public option .." to v3 tree * Add missing Aggregate object tests * Generalize the _make_list() function for objects * PCI passthrough Libvirt vm config * Add columns_to_join to instance_update_and_get_original * XenAPI: Allow 10GB overhead on VHD file check size * Adds ephemeral storage support for Hyper-V * Adds Hyper-V VHDX support * Create mixin class for common DB fields * Deprecate conductor migration_get() * Change finish_revert_resize paths to use objects * Change finish_resize paths to use objects * Change resize_instance paths to use objects * VMware: Nova boot from cinder volume * VMware: Multiple cluster support using single compute service * Nova support for vmware cinder driver * Adds Hyper-V dynamic memory support * xenapi: Fix download_handler fallback * Ensure old style images can be resized * Add nova.utils.get_root_helper() * Inherit base image properties on instance creation * Use utils.execute instead of subprocess * Fixes misuse of assertTrue in Cells test scripts * Remove versioning from IOVisor APIs PATH * Revert "Importing correlation_id middleware from oslo-incubator" * update neutronclient to 2.3.0 
minimum * Adds metrics collection support in Hyper-V * Port all rpcapi modules to oslo.messaging interface * Fix a gross duplication of context code in objects tests * Make compute_api use Aggregate objects * Add Aggregate object model * Add dict and list utility functions for object typing * VMware: remove conditional suds validation * Limit instance fault messages to 255 characters * Add os-assisted-volume-snapshots extension * Scheduler rpcapi 2.9 is not backwards compatible * Adds support for Hyper-V WMI V2 namespace * Port flavormanage extension to v3 API Part 2 * Add os-block-device-mapping to v3 API * Improves Hyper-V vmutils module for subclassing * xenapi: add support for auto_disk_config=disabled * Check ephemeral and swap size in the API * Adds V3 API samples for cells and multinic * Increase volume created checking retries to 60 * Fix changes_since for V3 API * Make v3 API console-output extension core * Makes v3 API keypairs extension core * Add support for API message localization * Fix typo and indent error in isolated_hosts_filter.py * Adds 'instance_type' param to build_request_spec * Guest-assisted-snaps libvirt implementation * Improve EC2 API error responses * Remove EC2 postfix from InvalidInstanceIDMalformedEC2 * Introduce InternalError EC2 error code * Introduce UnsupportedOperation EC2 error code * Introduce SecurityGroupLimitExceeded EC2 error code * Introduce IncorrectState EC2 error code * Introduce AuthFailure EC2 error code * Fix ArchiveTestCase on PostgreSQL * Fix AggregateDBApiTestCase on PostreSQL and MySQL * Port Cheetah templates to Jinja2 * Libvirt: call capabilites before getVersion() * Remove _report_driver_status from compute/manager.py * Interpret BDM None size field as 0 on compute side * Add test cases for resume_state_on_host_boot * Add scheduler support for PCI passthrough * Fix v3 swap volume with wrong signature * vm_state and task_state not updated during instance delete * VMware: use VM uuid for volume attach and 
detach * xenapi: support raw tgz image download * xenapi: refactor - extract image_utils * Add block_device_mapping_get_all_by_instance to virtapi * Sync rpc from oslo-incubator * Fix the multi-instance quota message * Fix virtual power driver fails silently * VMware: Config Drive Support * xenapi: skip metadata updates when VM not found * Make resource_tracker record host_ip * Disable compute fanout to scheduler * Make image_props_filter use information from DB not RPC * Make compute_capabilities_filter use information from DB not RPC * XenAPI: More operations with LVM-based SRs * XenAPI: make_partition fixes for Dom0 * Fix wrong method call in baremetal * powervm: make start_lpar timeout * Disable retry filter with force_hosts or force_nodes * Call safe_encode() instead of str() * Fix usage of classmethod in various places * Fix V3 API quota_set tests using V3 url and request * Handle port over-quota when allocating network for instance * Fix warning log message typo in resource_tracker.instance_claim * Sync filetuils from oslo-incubator * Fix VMware fakes * DRY up use of @wrap_exception() decorator * Remove unused fake run_instance() method * Use ExceptionHelper to bypass @client_exceptions * Added new hypervisor to support Docker containers * Introduce InvalidPermission.Duplicate EC2 error code * Fix and gate on H302 (import only modules) * On snapshot errors delete the image * Remove dis/associate actions from security_groups v3 * Add volume snapshot delete API test case * Assisted snapshots compute API plumbing * Adds V3 API samples for agents, aggregates and certificates * Adds support for security_groups for V3 API server create * powervm: Use FixedIntervalLoopingCall for polling LPAR status * xenapi: agent not inject ssh-key if cloud-init * Tenant id filter test is not correct * Add PCI device tracker to compute resource tracker * PCI devices resource tracker * PCI device auto discover * Add PCI device filters support * Avoid swallowing exceptions in 
network manager * Make compute_api use Service and ComputeNode objects * Adding VIF Driver to support Mellanox Plugin * Change prep_resize paths to use objects * Make backup and snapshot use objects * Deprecate conductor migration_create() * Make inject_network_info use objects * Convert reset_network to use instance object * Make compute_api use objects for lock/unlock * Add REUSE_EXT in _swap_volume call to blockRebase * Remove unused _decompress_image_file from powervm operator class * powervm: actually remove files after migration * Fix to disallow server name with all blank spaces (v3 API) * Add mock to test-requirements * xenapi: Improve test_xenapi unit testing performance * Sets policy settings so V3 API extensions are discoverable * Pass objects for revert and confirm resizes * Convert _poll_unconfirmed_resizes to use Migration object * Make compute_api confirm/revert resize use objects * Make compute_api migrate/resize paths use instance objects * Fix race when running initialize_gateway_device() * fix bad usage of exc_info=True * Use implicit nullable=True in sqlalchemy model * Introduce Invalid* EC2 error codes * Improve parameter related EC2 error codes * Disconnect from iSCSI volume sessions after live migration * Correct default ratelimits for v3 * Improve db_sqlalchemy_api test coverage * Safe db.api.compute_node_get_all() performance improvement * Remove a couple of unused stubs * Fix Instance object issues * Adds API version discovery support for V3 * Port multiple_create extension to V3 API * Add context information to download plugins * Adds V3 API samples for migrations * Filter network by project id * Added qemu guest agent support for qemu/kvm * PCI alias support * Add PCI stats * Raise timeout in fake RPC if no consumers found * Stub out instance_update() in build instance tests * Mock out action event calls in build instance test * powervm: revert driver to pass for plug_vifs * Remove capabilities.enabled from test_host_filters * xenapi: 
through-dev raw-tgz image upload to glance * Add PCI device object support * Store CONF.baremetal.instance_type_extra_specs in DB * Pci Device DB support * VMware: remove redundant default=None for config options * Move live-migration control flow from scheduler to conductor * Fix v3 extensions inherit from wrong controller * Fix network creation in Vlan mode * compute rpcapi 2.29 is not backwards compatible * Fix the message of coverage directory error * Fix error messages in v3 aggregate API * compute rpcapi 2.37 is not backwards compatible * use 'exc_info=True' instead of import traceback * Add env to make_subprocess * Remove unused nova.common module * Adds Flavor ID validations * Imported Translations from Transifex * Add DocStrings for function allocate_for_instance * Removes V3 API images and image_metadata extensions * Powervm driver now logs ssh stderr to warning * Update availability_zone on time if it was changed * Add db.block_device_mapping_get_by_id * Add volume snapshot APIs to driver interface * Pass the destination file name to download modules * Fix typo in baremetal docs * VMware: clean up get_network_with_the_name * Stylistic improvement of compute.api.API.update() * Removes fixed ips extension from V3 API * Libvirt: fix KeyError in set_vif_bandwidth_config * Add expected_errors for migrations v3 * Add alias as prefix to request params for user_data v3 * Fix migrations index * Should finish allocating network before VM reaches ACTIVE * Fixes missing host in Hyper-V get_volume_connector * Fix various cells issues due to object changes * Document CONF.default_flavor is for EC2 only * Revert task state when terminate_instance fails * Revert "Make compute_capabilities_filter use ..." 
* Add resource tracking to build_and_run_instance * Link Service.compute_node with ComputeNode object * Add ComputeNode object implementation * Add Service object implementation * Make compute_api use KeyPair objects * Add KeyPair object * Fix spice/vnc console api samples tests * Fix network manager tests to use correct network host * Stub out get_console_topic() in test_create_console * Stub out instance_fault_create() in compute tests * Fix confirm_resize() mock in compute tests * Fix rpc calls on pre/post live migration tests * Stub out setup_networks_on_host() in compute tests * maint: remove redundant disk_cachemode validation entry * Fix unicode key of azcache can't be stored to memcache * XenAPI: SR location should default to location stored in PBD * XenAPI: Generic Fake.get_all_records_where implementation * XenAPI: Return platform_version if no product_version * XenAPI: Support local connections * Delete expired instance console auth tokens * Fix aggregate creation/update with null or too long name * Fix live migration test for no scheduler running * Fix get_diagnostics() test for no compute consumer * Stubout reserve_block_device_name() in test * Stubout deallocate_for_instance() in compute tests * Stub out net API sooner in servers API test * PCI utils * Object support for instance groups * Add RBD supporting to libvirt for creating local volume * Add alias as prefix to request params for availability_zone v3 * Remove deprecated legacy network info model in Hypervisor drivers * Correct the authorizer for extended-volumes v3 * emit warning while running flake8 without virtual env * Adds Instance UUID to rsync debug logging * Fixes sync issue for user level resources * Fix Fibre Channel attach for single WWN * nova.conf configurable gzip compression level * Stub out more net API methods floating IP DNS test * Enable CastAsCall for test_api_samples * Stub out attach_volume() in test_api_samples * Fix remove_fixed_ip test with CastAsCall * Add 
add_aggregate_to_host() to FakeDriver * Fix api samples image service stub * Add CastAsCall fixture * Enable consoleauth service during ec2 tests * Disable periodic tasks during integration tests * Use ExceptionHelper to bypass @client_exceptions * Clean up some unused wrap_exception() stuff * Add new compute method for building an instance * VMware: provide a coherent message to user when viewing console log * Use new BDM syntax when determining boot metadata * Allow more than one ephemeral device in the DB * Port flavormanage extension to v3 API part 1 * Correct the status code to 201 for create v3 * Pop extra keys from context in from_dict() * Don't initialize neutronv2 state at module import * Remove instance exists check from rebuild_instance * Remove unused variables in test_compute_cells * Fix fake image_service import in v3 test_disk_config * Updates tools/config/README * xenapi: Added iPXE ISO boot support * Log exception details setting vm_state to error * Fix instance metadata access in xenapi * Fix prep_resize() stale system_metadata issue * Implement hard reboot for powervm driver * Use the common function is_neutron in servers.py * Make xenapi capabilities['enabled'] use service enabled * Remove duplicate test from V3 version of test_hosts * Remove unused nova.tests.image.fake code * Remove unused fake run_instance() method * Remove use of fake_rabbit in Nova * libvirt: fix {attach,detach}_interface() * Added test case in test_migrations for migration 208 * Add flag to make IsolatedHostsFilter less restrictive * Add unique constraint to AggregateMetadata * Fix a typo in test_migrations for migration 209 * Remove duplicate variable _host_state * enhance description of share_dhcp_address option * Adds missing V3 API scheduler hints testcase * [v3] Show detail of an quota in API os-quota-sets * Remove legacy network model in tests and compute manager * Remove redundant _create_instance method from test_compute * Add jsonschema to Nova requirements.txt * 
Remove docstrings in tests * Fix scheduler prep_resize deprecated comments * Search filters for get_all_system_metadata should use lists * fix volume swap exception cases * Set VM back to its original state if cold migration failed * Enforce flavor access during instance boot * Stub out entry points in LookupTorrentURLTestCase * Port volumes swap to the new API-v3 * correct the name style issue of ExtendedServerAttributes in v3 api * Fix IVS vif to correctly delete interfaces on unplug * Adding support for iSER transport protocol * libvirt: allow passing 'os_type' property to glance * Fixes auto confirm invalid error * Fix ratelimiting * quantum pxeboot-port support for baremetal * baremetal: Log IPMI power on/off timeouts * VMware: Added check for datastore state before selection * Boot from image destination - volume * Virt driver flag for different BDM formats * Refactor how BDMs are handled when booting * Change RPC to use new BDM format for instance boot * Make API part of instance boot use new BDM format * Add Migration object * Fix untranslated log messages in libvirt driver * Fix migration 210 tests for PostgreSQL * Handle InstanceInvalidState of soft_delete * Don't pass RPC connection to pre_start_hook * VMware: Ensure Neutron networking works with VMware drivers * Unimplemented suspend/resume should not change vm state * Fix project_user_quotas_user_id_deleted_idx index * Allow Cinder to specify file format for NFS/GlusterFS * Add migration with missing fkeys * Implement front end rate-limiting for Cinder volume * Update mailmap * Fixup some non-unity-ness to conductor tests * Add scheduler utils unit tests * Convert admin_actions ext tests to unit tests * Unit-ify the compute API resize tests * Raises masked AssertionError in _test_network_api * Have tox install via setup.py develop * Set launch_index to right value * Add passing a logging level to processutils.execute * Clear out service disabled reason on enable for V3 API * Fix HTTP response for 
PortInUse during boot (v3 API) * Adds infra for v3 API sample creation * Remove deprecated CONF.fixed_range * Offer a paginated version of flavor_get_all * Port integrated tests for V3 API * Refactor integrated tests to support V2 and V3 API testing Part 2 * Refactor integrated tests to support V2 and V3 API testing * Fix cells manager RPC version * Upgrade to Hacking 0.7 * Fix logic in add_host_to_aggregate() * Enforce compute:update policy in API * Removed the duplicated _host_state = None in libvirt driver * Sync gettextutils from oslo-incubator * Fix typo in exception message * Fix message for server name with whitespace * Demote personalities from core of API v3 as extensions os-personality * Port disk_config API to v3 Part 2 * remove _action_change_password the attribute in V3 server API * Fix exception handling in V3 API coverage extension * Remove "N309 Python 3.x incompatible construct" * Allow swap_volume to be called by Cinder * Remove trivial cases of unused variables * Handle NeutronClientException in secgroup create * Fix bad check for openstack versions (vendor_data/config drive) * Make compute_capabilities_filter use information from DB not RPC * Make affinity_filters use host_ip from DB not RPC * db: Add host_ip and supported_instances to compute_nodes * Add supported_instances to get_available_resource to all virt drivers * libvirt: sync get_available_resources and get_host_stats * Clean up unimplemented methods in the powervm driver * Make InvalidInstanceIDMalformed an EC2 exception * Fix one port can be attached to more devices * Removed code duplication in test_get_server_*_by_id * Add option for QEMU Gluster libgfapi support * Moves compute.rpcapi.prep_resize call to conductor * Fix get_available_resource docstrings * Fix spelling in image_props_filter * Fix FK violation in ConsoleTestCase * Fix ReservationTestCase on PostgreSQL * Fix instance_group_delete() DB API method * Fix capitalization, it's OpenStack * Add test cases to validate 
neutron ports * Add expected_errors for extension quota_classes v3 * Fix leaking of image BDMs * Moved tests for server.delete * Fix VMwareVCDriver to support multi-datastore * Fixes typo in __doc__ of /libvirt/blockinfo.py * User quota update should not exceed project quota * Port "Accept is_public=None .." to v3 tree * Remove clear_rabbit_queues script * Don't need to init testr in run_tests.sh * Imported Translations from Transifex * Deprecate conductor's compute_reboot() interface * Deprecate conductor's compute_stop() interface * Make compute_api use InstanceAction object * Add basic InstanceAction object * Add delete() operation to InstanceInfoCache * Make compute_api use Instance.destroy() * Add Instance.destroy() * Make compute_api use Instance.create() * Change swap_volume volume_api calls to use ID * Fix H501: Do not use locals() for string formatting * fix libguestfs mount order when inspecting * Imported Translations from Transifex * powervm: add test case for get_available_resource * Fix to allow ipv6 in host_ip for ESX/vSphere driver * Improve performance of driver's get_available_nodes * Cleanup exception handling on evacuate * Removed code for modular exponentiation, pow() already does this * Remove unsafe XML parsing * Fix typo with network manager service_name * Remove old legacy network info model in libvirt driver * maint: remove redundant default=None for config options * Fix simultaneous timeout with smart iptables usage * xenapi: send identity headers from glance plugin * Catch ldap ImportError * xenapi: refactor - extract get_virtual_size * xenapi: refactor - extract get_stream_funct_for * xenapi: test functions for _stream_disk * Check host exists before evacuate * Fix EC2 API Fault wrapper * Fix deferred delete use of objects * Remove unsafe XML parsing * Update BareMetal driver to current nova.network.model * Personality files can be injected during server rebuild * Need to allow quota values to be set to zero * Merged flavor_disabled 
extension into V3 core api * Merged flavorsextraspecs extension into core API * Code dedup in test_update_* * Move tests test_update_* to separate class * VMware: fix rescue/unrescue instance * Add an exception when doesn't have permissions to operate vm on hyper-v * Remove dead capabilities code * Spelling correction in test_glance.py * Enhance object inheritance * Enable no_parent and file_only security * Add Instance.create() * Pull out instance object handling for use by create also * Make fake_instance handle security groups * Fix instance actions testing * Sync models with migrations * Wrong unique key name in 200 migration * Remove unused variable * Make NovaObject.get() avoid lazy-load when defaulting * Fix migration downgrade 146 with mysql * Remove the indexes on downgrade to work with MySQL * Downgrade MySQL to the same state it used to be * Format CIDR strings as per storage * Fix migration downgrade 147 with mysql * Fix typo in compute.rpcapi comments * Imported Translations from Transifex * Avoid extra glance v2 locations call! 
* xenapi: Adding BitTorrent download handler * xenapi: remove dup code in make_step_decorator * Retry failed instance file deletes * xenapi: retry when plugin killed by signal * Do not use context in db.sqla.api private methods * Finish DB session cleanup * Clean up session in db.sqla.api.migration_* methods * Clean up session in db.sqla.api.network_* and sec_groups_* methods * Don't inject files while resizing instance * Convert CamelCase attribute naming to camel_case for servers V3 API * Convert camelCase attribute naming to camel_case * Add plug-in modules for direct downloads of glance locations * Allow user and admin lock of an instance * Put fault message in the correct field * Fix Instance objects with empty security groups * db: Remove deprecated assert_unicode attribute * VlanManager creates superfluous quota reservations * xenapi: allow non rsa key injection * Add expected_errors for extensions simple_tenant_usage v3 * Clean destroy for project quota * Add expected_errors for extension Console v3 * Add expected_errors for extension baremetal v3 * Clean up session in db.sqla.api.get_ec2 methods * Clean up db.sqla.api.instance_* methods * remove improper usage of 'assert' * Support networks without gateway * Raise 404 when instance not found in admin_actions API * Switch to Oslo-Incubator's EnvFilter rootwrap * xenapi: Moving Glance fetch code into image/glance:download_vhd * Performs hard reboot if libvirt soft reboot raises libvirtError * xenapi: Rename imageupload image * Make nbd reservation thread-safe * Code dedup in class QuotaReserveSqlAlchemyTestCase * Fix multi availability zone issue part 1 * Fix instance_usage_audit_log v3 follow REST principles * Update mailmap * Add obj_attr_is_set() method to NovaObject * Add ObjectActionFailed exception and make Instance use it * Fix change detection logic in conductor * Convert pause/unpause to use objects * Make delete/soft_delete use objects * Refactor compute API's delete to properly do local 
soft_deletes * Add identity headers while calling glanceclient * xenapi: Reduce code duplication in vmops * vendor-data minor format / style cleanups * maint: remove unused exceptions * Add support for Neutron https endpoint * Add index to reservations.uuid column * Refactor EC2 API error handling code * Cleanup copy/paste in test_quota_sets * Make EvacuateTest DRYer * Add expected_errors for extensions quota_sets and hypervisors * Remove generic exception catching for admin_actions API v3 * Demote admin-passwd from core of API v3 as extensions os-admin-password * handle auto assigned flag on allocate floating ip * Add expected_errors for extension shelve v3 * Use cached nwinfo for secgroup rules * Sync config.generator from Oslo * Remove * import from xenserver plugins * EC2-API: Fix ambiguous ipAddress/dnsName issue * xenapi: no image upload retry on certain errors * Add error checking around host service checking * add vendor_data to the md service and config drive * Moves compute.rpcapi.prep_resize call to scheduler.manager * Removed scheduler doc costs section * Fix formatting on scheduler documentation * Add expected_errors for extension server_diagnostics V3 * Fix extensions agent follow API v3 rules * XenAPI: Change the default SR to be the pool default * Fix flavor_access extension follow API V3 rules * Add notification for live migration call * Correct status code and response for quota_sets API v3 * Fixes for v3 API servers tests * Remove sleep from service group db and mc tests * [xenapi] Unshadow an important test case class * Fix and Gate on H303 (no wildcard imports) * Remove unreachable code * powervm: pass on unimplemented aggregate operations * Fix timing issue in SimpleTenantUsageSample test * Code dedup in virt.libvirt.test_imagecache.test_verify_checksum_* * Move tests test_verify_checksum_* to separate class * Logging virtual size of the QCOW2 * Add expected_errors for extension certificates v3 * Support setting block size for block devices * 
Set the image_meta for the instance booted from a volume * return 429 on API rate limiting occur * Add task_state filter for nova list * Port server_usage API to v3 part 2 * Port server_usage API to v3 part 1 * Adds factory methods to load Hyper-V utils classes * Fix 2 pep8 errors in tests * Enabled hacking check for Python3 compatible print (H233) * Fix race between aggregate list and delete * Enforce authenticated connections to libvirt * Enabled the hacking warning for Py3 compatible octal literals (H232) * Remove fping plugin from V3 API * Moves scheduler.rpcapi.prep_resize call on compute.api to conductor * Fix some Instance object class usage errors * xenapi: remove pv detection * Add expected_errors for extension keypair and availablity_zone * Add expected_errors for extension console_output v3 * Fix extension hosts follow API v3 rules * Use project quota as default user quota * Adds NoAuthMiddleware for V3 * xenapi: remove propagate xenapi_use_agent key * Update references with new Mailing List location * MinDisk size based on the flavor's Disk size * Use RetrievePropertiesEx instead of RetrieveProperties * Speed up test BareMetalPduTestCase.test_exec_pdutool * Port ips-extended to API-v3 ips core API Part 2 * Disable per-user rate limiting by default * Support EC2 API wildcards for DescribeTags filters * Remove the monkey patching of _ into the builtins * Sync lockutils from Oslo * Set lock_path in tests * Port ips-extended to API-v3 ips core API Part 1 * Fix postgresql failures related to Data type to API-v3 fixed-ip * Bypass queries which cause a contradiction * Add basic BDM format validation in the API layer * Servers API for the new BDM format * Fixes Hyper-V issues on versions prior to 2012 * Add expected_errors for extension instance_actions v3 * Fix extension server_meta follow API v3 rules * Ensure that uuid is returned with mocked instance * Code dedup in class InstanceTypeExtraSpecsTestCase * Add expected_errors for extension cells V3 * Add 
expected_errors for extension_info V3 * Add latest oslo DB support * Add note why E712 is ignored * Clarify instance_type vs flavor in nova-manage * Fix leaky network tests * Fix HTTP response for PortNotFound during boot * Don't pass empty image to filter on live migration * Start using hacking 0.6 * Set VM back to its original state if cold migration failed * xenapi: ensure vcpu_weight configured correctly * Fix failing network manager unit tests * Add expected_errors for extensions services and server_password v3 * Update oslo.config.generator * Fix the is_volume_backed_instance check * Add support for volume swap * Fix policy failure on image_metadata calls * Sync models for AgentBuild, Aggregates, AggregateHost tables * Imported Translations from Transifex * Make ServerXMLSerializationTest DRYer * Port migrations extension to v3 API part 2 * Port migrations extension to v3 API part 1 * xenapi: Fix console rotate script * Sync some of Instance* models with migrations * Fix extension rescue follow API v3 rules * Per-project-user-quotas for more granularity * Add unique constraint to InstanceTypeExtraSpecs * Remove instance_metadata_get_all* from db api * Merged flavorextradata extension (ephemeral disk size) into core API * Fixed tests for flavor swap extension after merging in core API * Remove hostname param from XenApi after first boot * Cell Scheduler support for hypervisor versions * Fix flavor v3 follow API v3 rules * Sync sample config file generator with Oslo * Allow exceptions to propagate through stevedore map * Create vmware section * Sync latest rpc changes from oslo-incubator * Check instance on dest once during block migration * Revert "Add requests requirement capped <1.2.1." 
* Unit-ify compute_api delete tests * Convert network API to use InfoCache object * Make InfoCache.network_info be the network model * Make shelve pass old-world instance object to conductor * Make admin API state resets use Instance.save() * Deduplicate data in TestAddressesXMLSerialization * Move _validate_int_value controller func to utils * Correct the action name for admin_actions API v3 * Fixing dnsdomain_get call in nova.network.manager * Raise exception if both port and fixed-ip are in requested networks * Sync eventlet_backdoor from oslo-incubator * Fix up trivial license mismatches * Implements host uptime API call for cell setup * Ensure dates are dates, not strings * Use timeutils.utcnow() throughout the code * Add indexes to sqlite * Fix iptables rules when metadata_host=127.0.0.1 * Sync gettextutils from oslo * Handle instance objects in conductor compute_stop * Config drive attached as cdrom * Change EC2 client tokens to use system_metadata * Check that the configuration file sample is up to date * Make servers::update() use Instance.save() to do the work * Make Instance.save() handle cells DB updates * Convert suspend/resume to use objects * Make compute_api.reboot() use objects * Fix HTTP response for PortInUse during boot * Fix DB access when refreshing the network cache * Use valid IP addresses values in tests * Add ability to factor in per-instance overheads * Send updated aggregate to compute on add/rm host * Fix inconsistency between Nova-Net and Neutron * Fix parse_transport_url when url has query string * xenapi: no glance upload retry on 401 error * Code dedup in test_libvirt_vif * Raise exceptions when Spice/VNC are unavailable * xenapi: Pass string arguments to popen * Add rpcapi tests for shelving calls * Create key manager interface * Remove duplicate cells_rpcapi test * ec2-api: Disable describing of instances using deleted tags as filter * Disable ssl layer compression for glance requests * Missed message -> msg_fmt conversion * 
Refresh network cache when reassigning a floating IP in Neutron * Remove unnecessary comments for instance rebuild tests * Add missing tests for console_* methods * Force reopening eventlet's hub after fork * Remove project_id from alternate image link path * Fixes wrong action comment 'lock' to 'unlock' * Add expected_errors for extension extended_volumes v3 * port BaremetalNodes API into v3 part2 * port baremetal_nodes API into v3 part1 * Add validation of available_zone during instance create * Move resource usage sync functions to db backend * Remove locals() from various places * Add expected_errors for extension evacuate v3 * Add expected_errors for extension deferred_delete v3 * Fix accessing to '/' of metadata server without any checks to work * Fix duplicate osapi_hide_server_address_states config option * API for shelving * Fix shelve's use of system_metadata * Fix Instance object handling of implied fields * Make Instance object properly update *metadata * Support Client Token for EC2 RunInstances * Change get_all_instance_metadata to use _get_instances_by_filters * Add a new GroupAffinityFilter * Move a migration test to MigrationTestCase * Use db.flavor_ instead of db.instance_type_ * Periodic task for offloading shelved instances * Shelve/unshelve an instance * Code dedup in class ImagesControllerTest * Assert backing_file should exist before attempting to create it * Add API-v3 merged core API into core API list * Don't ignore 'capabilities' flavor extra_spec * Support scoped keys in aggregate extra specs filter * Fix blocking issue when powervm calculate checksum * Avoid shadowing Exception 'message' attribute * Code dedup in class TestServerActionRequestXMLDeserializer * Fix mig 186 downgrade when using sqlalchemy >= 0.8 * Move test_stringified_ips to InstanceTestCase * Move *_ec2_* tests in test_db_api to own test case * Code dedup in class ImageXMLSerializationTest * Fix malformed format string * Fix EC2 DescribeTags filter * Code dedup in 
test_libvirt_volume * Port AttachInterfaces API to v3 Part 2 * Make ServersViewBuilderTest DRYer * Move test_security_group_update to SecurityGroupTestCase * Code dedup in class ServersControllerCreateTest * Code dedup in tests for server._action_rebuild * Moved tests for server._action_rebuild * Move bw_usage_* tests in test_db_api to own test case * Move dnsdomain_* tests in test_db_api to own test case * Remove redundant if statements in cells.state * Move special cells logic for start/stop * Port used limits extension to v3 API Part 2 * Avoid deleting user-provided Neutron ports if VM spawn fails * Fix nic order not correct after reboot * Porting os-aggregates extensions to API v3 Part 2 * Porting os-aggregates extensions to API v3 Part 1 * Porting server metadata core API to API v3 Part 2 * Porting server metadata core api to API v3 Part 1 * Port limits core API to API-v3 Part 2 * xenapi: Only coalesce VHDs if needed * Don't attach to multiple Quantum networks by default * Load cell data from a configuration file * Fix filtering aggregate metadata by key * remove python-glanceclient cap * Remove duplicated key_pair* tests from test_db_api * Porting limits core api to API v3 Part 1 * Add missing tests for db.api.instance_* methods * Fix IPAddress and CIDR type decorators * Complete deletion when compute manager start-up * Port user_data API to v3 Part 2 * Add legacy flag to get_instance_bdms * XenAPI: Refactor Fake to create pools, SRs and VIFs automatically * Port flavor_rxtx extension to v3 API Part 2 * Port flavor_rxtx extension to v3 API Part 1 * Fix aggregate_get_by_host host filtering * Fix v3 hypervisor extension servers action follow REST principles * xenapi:populating hypervisor version in host state * Port attach and detach of volume-attachment into os-extended-volume v3 * Port deferredDelete API to v3 Part 2 * Fix status code for coverage API v3 * Port instance_actions API to v3 Part 2 * port instance_actions API into v3 part1 * Prompt error message 
when creating aggregate without aggregate name * Port used limits extension to v3 API Part 1 * Makes _PATH_CELL_SEP a public global variable * port disk_config API into v3 part1 * Imported Translations from Transifex * Remove locals() from virt directory * Handle ImageNotAuthorized exception * Port AvailabilityZone API to v3 Part 2 * port AvailabilityZone API into v3 part1 * Port service API to v3 Part 2 * Imported Translations from Transifex * Unify duplicate code for powering on an instance * Port hide srvr addresses extension to v3 API Pt2 * Sync v2/v3 console_output API extensions * Port extended status extension to v3 API Part 2 * Port os-console-output extension to API v3 Part 2 * Changes select_destinations to return dicts instead of objects * Better start/stop handling for cells * Make notifications properly string-convert instance datetimes * Fix default argument values on get_all_by_filters() * Make db/api strip timezones for datetimes * Fix object_compat decorator for non-kwargs * Imported Translations from Transifex * Remove unused recreate-db options from run_test.sh * update Quantum usage to Neutron * Convert cells to use a transport URL * Fix aggregate update * Passing volume ID as id to InvalidBDMVolume exception * Handle instance being deleted while in filter scheduler * Port extended-availability-zone API into v3 part2 * Fix extensions os-remote-consoles to follow API v3 rules * Add unique constraints to AggregateHost * Unimplemented pause should not change vm state on PowerVM * Port server password extension to v3 API Part 2 * xenapi: Add disk config value to xenstore * Port hide srvr addresses extension to v3 API Pt1 * Add -U to the command line for pip * xenapi: support ephemeral disks bigger than 2TB * Cells: Make bandwidth_update_interval configurable * Add _set_instance_obj_error_state() to compute manager * Update v3 servers API with objects changes * xenapi: enable attach volumes to non-running VM * Change force_dhcp_release default to 
True * Revert "Sync latest rpc changes from oslo-incubator" * Sync 10 DB models and migrations * Make compute_api.get() use objects natively * port Host API into v3 part2 * Port admin-actions API into v3 part2 * Fix cells manager rpc api version * Allow ::/0 for IPv6 security group rules * Fix issue with pip installing oslo.config-1.2.0 * Sort output for unit tests in test_describe_tags before compare * Document rate limiting is per process * Properly pin pbr and d2to1 in setup.py * Add support for live_snapshot in compute * xenapi: Stub out _add_torrent_url for Vhd tests * Add Instance.get_by_id() query method * Fix duplicate fping_path config option * Port images metadata functionality to v3 API Part 2 * Add unique constraint to ConsolePool * Enable core API-v3 to be optional when unit testing * Clarify flavorid vs instance_type_id in db * Sync db.models.Security* and db.models.Volume* * Sync db.models.Instance* with migrations * Add "ExtendedVolumes" API extension * Fix misc issues with os-multinic v3 API extension * Port multinic extension to v3 API Part 2 * Port security groups extension to v3 API Part 2 * Port security groups extension to v3 API Part 1 * Add missing help messages for nova-manage command * Validate volume_size in block_device_mapping * Imported Translations from Transifex * Fix info_cache and bw_usage update race * xenapi: glance plugin should close connections * Change db.api.instance_type_ to db.api.flavor_ * Replace get_instance_metadata call in api.ec2.cloud._format_instances * Add unique constraint to AgentBuild * Ensure flake8 tests run on all api code * Sync notifier change from oslo-incubator * Sync harmless changes from oslo-incubator * Sync latest rpc changes from oslo-incubator * Add missing matchmaker_ring * Port extended-server-attributes API into v3 part2 * List migrations through Admin API * Add a VIF driver for IOVisor engine * port Service API into v3 part1 * Port admin-actions API into v3 part1 * Port fping extension to v3 
API Part 2 * Disassociate fixed IPs not known to dnsmasq * Imported Translations from Transifex * Allow filters to only run once per request if their data is static * Port extended-availability-zone API into v3 part1 * Update openstack.common.config * Export just the volume metadata for the database to be populated * port Deferred_delete API into v3 part1 * Misc fixes for v3 evacuate API extension * Imported Translations from Transifex * Baremetal ensures node is off before powering on * Remove references to deprecated DnsMasqFilter * Port user_data API to v3 Part 1 * Update instance.node on evacuate * Fix formatting errors in documentation * Use oslo.sphinx and remove local copy of doc theme * Remove doc references to distribute * Sync install_venv_common from oslo * Make EC2 API request objects instead of converting them * Make instance show and index use objects * Remove conductor usage from consoleauth service * xenapi: Stub out entry points for BitTorrent tests * Fix debug message for GroupAntiAffinityFilter * Add unique constraints to Service * Add unique constraint to FixedIp * Fixed columns list in indexes * Add cinder cleanup to migrations * Change unique constraint in VirtualInterface * Changes ComputeTaskManager class to inherit base.Base * Moves populate retry logic to the scheduler utils * Exceptions raised by quantum validate_networks result in 500 error * Fix and gate on E125 * Add object (de)serialization support to cells * Add cells get_cell_type() method * Add fill_faults() batch operation to InstanceList * Make api_samples reboot test use a plausible scenario * Fix compute_api object handling code in cells messaging * Fix power_state lookup in confirm_resize * Make flavors is_public option actually work * Imported Translations from Transifex * hyperv: Fix vmops.get_info raises InstanceNotFound KeyError * Make instance_update() string-convert IP addresses * Refactor compute_api reboot tests to be unit-y * Refactors select_destinations to return 
HostState objects * PowerVM resize and migrate test cases * Clear out service disabled reason on enable * Port agent API to v3 Part 2 * Fix v3 hypervisor extension search action follow REST principles * Fix resize ordering for COW VHD * Add inst_type parameter * Store volume metadata as key/value pairs * Fixes a typo on AggregateCoreFilter documentation * xenapi: Tidy up Popen calls to avoid command injection attacks * Remove notify_on_any_change option * Add unique constraints to Quota * Port images metadata functionality to v3 API Part 1 * Port scheduler hints extension to v3 API Part 2 * Adding action based authorization for keypairs * Port multinic extension to v3 API Part 1 * Port hypervisor API into v3 part2 * port Instance_usage_audit_log API into v3 part2 * port Instance_usage_audit_log API into v3 part1 * Fix metadata for create in child cell * update xen/vmware virt drivers not to hit db directly * Reduce nesting in instance_usage_audit * Port os-console-output extension to API v3 Part 1 * Fix to integer cast of length in console output extension * Imported Translations from Transifex * Add notifiers to both attach and detach volumes * Make test_deferred_delete() be deterministic * Added functionality for nova hooks pass functions * Fix compatibility with older confirm_resize() calls * Pass instance host-id to Quantum using port bindings extension * libvirt: Fix spurious backing file existence check * Add unique constraint for security groups * powervm: make get_host_uptime output consistent with other virt drivers * Remove locals() from virt/vmwareapi package * Add HACKING check for db session param * Select disk driver for libvirt+Xen according to the Xen version * Port coverage API into v3 part2 * Port coverage API into v3 part1 * Fix grizzly compat issue in conducor rpc api * Xenapi shutdown should return True if vm is shutdown * Break out Compute Manager unit tests * Break out compute API unit tests * port Host API into v3 part1 * Imported 
Translations from Transifex * Standardize use of nova.db * Check system_metadata type in _populate_instance_for_create * Clean up and make HACKING.rst DRYer * Sync db.models with migrations * Refactor ServerStatusTest class * Move tests db.api.instance_* to own class * Add tests for `db.console_pool_*()` functions * Fix binding of SQL query params in DB utils * Make db.fakes stub out API not sqlalchemy * Reassign MAC address for vm when resize_revert * test_xmlutil.py covers more code in xmlutil.py * Handle UnexpectedTaskState and InstanceNotFound exceptions * Port quota classes extension to v3 API Part 2 * Ports image_size extension to v3 API * xenapi: Add configurable BitTorrent URL fetcher * remove locals() from virt/hyperv package * Add resume state on host boot function to vmware Hyper * Port server_diagnostics extension to v3 API Part2 * Port images functionality to v3 API Part 2 * Port cells extension to v3 API Part 2 * Notification support for host aggregate related operation * Fix vol_usage_update() DB API tests * Port consoles extension API into v3 part2 * Port consoles extension API into v3 part1 * Imported Translations from Transifex * New select_destinations scheduler call * Session cleanup for db.security_group_* methods * fix invalid logging * Port scheduler hints extension to v3 API Part 1 * Port config_drive API to v3 Part 2 * Port config drive API to v3 Part 1 * Port images functionality to v3 API Part 1 * Moves scheduler.manager._set_vm_state_and_notify to scheduler.utils * VNC console does not work with VCDriver * Sane rest API rate limit defaults * Ignore lifecycle events for non-existent instances * Fix resizes with attached file-based volumes * Remove trivial cases of unused variables (3) * Remove locals() from compute directory * Hypervisor uptime fails if service is disabled * Fix metadata access in prep for instance objects * Sync to_primitive() IPAddress support from Oslo * Merged flavor_swap extension into core API * Fix typo for 
instance_get_all_by_filters() function * Implement get_host_uptime for powervm driver * Port flavor_disabled extension to v3 API Part 2 * Fix sqlalchemy utils * Port flavor_disabled extension to v3 API Part 1 * Port flavor_access extension to v3 API Part 2 * Port flavor_access extension to v3 API Part 1 * Fixes for quota_sets v3 extension * Port server password extension to v3 API Part 1 * Port Simple_tenant_usage API to v3 Part 2 * xenapi: Remove vestigial `compile_metrics` code * Add update() method to NovaObject for dict compatibility * Add obj_to_primitive() to recursively primitiveize objects * Make sure periodic instance reclaims continues on error * Remove broken config_drive image_href support * Report the az based on the value in the instance table * Allow retrying network allocations separately * Imported Translations from Transifex * Better default for my_ip if 8.8.8.8 is unreachable * Fix a couple typos in the nova.exception module * Make fake_network tolerant of objects * Prepare fake instance stubs for objects * Make info_cache handle when network_info is None * Fix instance object's use of a db query method parameter * Make NovaObject support the 'in' operator * Add Instance.fault * Add basic InstanceFault model * xenapi: Make BitTorrent url more flexible * xenapi: Improve cross-device linking error message * db.compute_node_update: ignore values['update_at'] * Make sure periodic cleanup of instances continues on error * Fix for failure of periodic instance cleanup * Update instance properties values in child cells to create instance * port Attach_interface API into v3 part1 * Sync models.Console* with migrations * Port quota API into v3 part2 * Stop creating folders in virt unit tests * Imported Translations from Transifex * Refresh volume connections when starting instances * Fix trivial mismatch of license header * Exeption message of 'live migration' is not appropriate * Sync rpc from oslo-incubator * Fix types in 
test_ec2_ids_not_found_are_printable * Port quota API into v3 part1 * Skip security group code when there is no network * Sync db.models and migrations * Update pyparsing to 1.5.7 * Make InstanceList filter non-column extra attributes * Add Instance.security_groups * Add basic SecurityGroup model * Revert XenApi virt driver should throw exception * Imported Translations from Transifex * Avoid redefining host to none in get_instance_nw_info(...) * Extract live-migration scheduler logic from the scheduler driver * Fix the filtered characters list from console-log * Add invalid number checking in flavor creation api * Port quota classes extension to v3 API Part 1 * Remove usage of locals() from powervm virt package * Fix xenstore-rm race condition * Refactor db.security_group_get() instance join behavior * Fix serialization of iterable types * Fix orphaned instance from get_by_uuid() and _from_db_object() * refactor security group api not to raise http exceptions * Perform additional check before live snapshotting * Do not raise NEW exceptions * Baremetal_deploy_helper error message formatting * Fix sys_meta access in prep for instance object * Cells: Pass object for start/stop * Clarify the compute API is_volume_backed_instance method * Add AggregateCoreFilter * Port extended-server-attributes into v3 part1 * Add AggregateRamFilter * Fix KeyError exception when scheduling to child cell * Port missing bits from httplib2 to requests * Revert "fixes nova resize bug when force_config_drive is set." 
* Port extended status extension to v3 API Part 1 * Fix quota logging on exceptions * XenApi virt driver should throw exception on failure * Retry quota_reserve on DBDeadlock * Handle NoMoreFixedIps in _shutdown_instance * Make sure instance_type has extra_specs * Remove locals() from nova/virt/libvirt package * Fix importing InstanceInfoCache during register_all() * Make _poll_unconfirmed_resizes() use objects * Revert "Add oslo-config-1.2.0a2 and pbr>=0.5.16 to requirements." * Preserve network order when using ConfigDrive * Revert "Initial scheduler support for instance_groups" * fixes nova resize bug when force_config_drive is set * Add troubleshoot to baremetal PXE template * Sync db.models.Quota* with migrations * Modify _assertEqualListsOfObjects() function * Port hypervisor API into v3 part1 * Remove a layer of nesting in _poll_unconfirmed_resizes() * Use InstanceList for _heal_instance_info_cache() * Remove straggling use of all-kwarg object methods * Allow scheduler manager NoValidHost exception to pass over RPC * Imported Translations from Transifex * Add oslo-config-1.2.0a2 and pbr>=0.5.16 to requirements * Remove usage of locals() for formatting from nova.scheduler.* * Libvirt driver: normalize variable names (part1) * xenapi: script to rotate the guest logs * Clean up scheduler tests * Drop unused _virtual_power_settings global * Remove junk file when ftp transfer failure * xenapi: revisit error handling around calls to agent * Remove the unused plugins framework * Added unit tests for vmware cluster driver * Adds expected_errors decorator for API v3 * Sync oslo-incubator gettextutils * port Simple_tenant_usage API into v3 part1 * Remove db session hack from conductor's vol_usage_update() * Converts scheduler.utils.build_request_spec return to json primitive * Revert "Delegate authentication to quantumclient" * Retry the sfdisk command up to 3 times * No support for double nested 64 bit guest using VCDriver * Fill context on objects in lists * Setting 
static ip= for baremetal PXE boot * Add tests for libvirt's reboot functionality * Check the instance ID before creating it * Add missing tests for nova.db.api.instance_system_metadata_* * Add err_msg param to baremetal_deploy_helper * Remove _is_precooked pre-cells Zones hacks * Raise max header size to accommodate large tokens * Make NovaObject support extra attributes in items() * Imported Translations from Transifex * Fix instance obj refresh() * Fix overzealous conductor test for vol_usage_update * Add missing tests for certificate_* methods * Log xml in libvirt _create_domain failures * Add unique constraints to Cell * Accept is_public=None when listing all flavors * Add missing tests for cell_* methods * Add missing tests for nova.db.api.instance_metadata_* * Don't deallocate network if destroy time out * Port server_diagnostics extension to v3 API Part1 * Add old display name to update notification * Port fping extension to v3 API Part 1 * libvirt fix resize/migrates with swap or ephemeral * Allow reboot or rebuild from vm_state=Error * Initial scheduler support for instance_groups * Fix the ServerPasswordController class doc string * Imported Translations from Transifex * Cleanup certificate API extension * Enforce sqlite-specific flow in drop_unique_constraint * Remove unused cert db method * Fix bad vm_state change in reboot_instance() * Add rpc client side version control * xenapi: ensure agent check respects image flags * Drop `bm_pxe_ips` table from baremetal database * Adding fixed_ip in create.end notification * Improved tests for instance_actions_* * Refactored tests for instance_actions_* * Add missing tests for provider_fw_rule_* methods * Session cleanup for db.security_group_rule_* methods * Add tests for nova.db.api.security_group_rule_* methods * Refactors qemu image info parsing logic * Port cells extension to v3 API Part 1 * Organize limits units and per-units constants * Fix flavor extra_specs filter doesn't work for number * Replace 
utils.to_bytes() with strutils.to_bytes() * Updates nova.conf.sample * Remove bin lookup in conf sample generator * Refactor conf sample generator script * Remove unused arg from make_class_properties.getter method * Fix obj_load() in NovaObject base class * Backup and restore object registry for tests * Fix the wrong reference by CONF * Port flavors core API to v3 tree * Remove usage of locals() from xenapi package * Remove trivial cases of unused variables (1) * Don't make nova-compute depend on iSCSI * Change resource links when url has no project id * Make sync_power_state routines use InstanceList * Enhance the validation of the quotas update * Add missing tests for compute_node_* methods * Fix VMware Hyper can't honor hw_vif_model image property * Remove use of locals() in db migrations * Don't advertise mute cells capabilities upwards * Allow confirm_resize if instance is in 'deleting' status * Port certificates API to v3 Part 2 * port agent API into v3 part1 * Port certificates API to v3 Part 1 * Naming instance directory by uuid in VMware Hyper * Revert "Fix local variable 'root_uuid' ref before assign" * Use Python 3.x compatible octal literals * Fix and enable H403 tests * Remove usage of locals() from manager.py * Fix local variable 'root_uuid' ref before assign * Improve the performance of migration 186 * Update to the latest stevedore * Quantum API _get_floating_ip_by_address mismatch with Nova-Net * xenapi: remove auto_disk_config check during resize * xenapi: implement get_console_output for XCP/XenServer * Check libvirt version earlier * update_dns() method optimization * Sync can_send_version() helper from oslo-incubator * Remove unused db api call * Quantumapi returns an empty network list * Add missing tests for nova.db.api.network_* * Cleanup overshadowing in test_evacuate.py * Give a way to save why a service has been disabled * Cells: Add support for global cinder * Fix race conditions with xenstore * Imported Translations from Transifex * 
Remove explicit distribute depend * Fix assumed port has port_security_enabled * Rename functions in nova.compute.flavors from instance_type * Remove redundant architecture property update in powervm snapshot * Use an inner join on aggregate_hosts in aggregate_get_by_host * xenapi: ensure instance metadata always injected into xenstore * Nova instance group DB support * Fix to disallow server name with all blank spaces * Replace functions in utils with oslo.fileutils * Refactors get_instance_security_groups to only use instance_uuid * Create an image BDM for every instance * DB migration to the new BDM data format * Fix dangling LUN issue under load with multipath * Imported Translations from Transifex * Add missing tests for s3_image_* methods * Register libvirt driver with closed connection callback * Enhance group handling in extract_opts * Removed code duplication in conductor.api * Refactored tests for instance_fault_* * Added verbose error message in tests helper mixin * Adds v3 API extension discovery filtering * Adds support for the Indigo Virtual Switch (IVS) * Some libvirt driver lookups lacks proper exception handling * Put VM UUID to live migration error notification * Fix db.models.Instance description * Fix db.models.Certificate description * Fix db.models.ComputeNodeStats description * Fix db.models.ComputeNode description * Fix db.models.Service description * BDM class and transformation functions * Remove unused method in VMware driver * Cleanup nova exception message conversion * Update analyze_opts to work with new nova.conf sample format * Remove unused methods from VirtAPI * Make xenapi use Instance object for host_maintenance_mode() * Make xenapi/host use instance objects for _uuid_find * Use InstanceList object for init_host * Add Instance.info_cache * Use Instance Objects for Start/Stop * Add lists of instance objects * Add base mixin class for object lists * Add deleted flag to NovaObject base * Export volume metadata to new instances * 
Sending volume IO usage broken * Rename unique constraints due to new convention * Replace openstack-common with oslo in HACKING.rst * Fixes test_config_drive unittest * Port evacuate API to v3 Part 2 * Port evacuate API to v3 Part 1 * Speeding up scheduler tests * Port rescue API to v3 Part 2 * Port rescue API to v3 Part 1 * Handle security group quota exceeded gracefully * Adds check that the core V3 API is loaded * Call virt.driver.destroy before deallocating network * More KeypairAPI cleanups * Improve Keypair error messages in osapi * Fix Keypair exception messages * Moving more tests to appropriate locations * Skip ipv6 tests on system without ipv6 support * Keypair API test cleanup * Alphabetize v3 API extension entry point list * Add missing exception to cell_update() * Refactors scheduler.chance.select_hosts to raise NoValidHost * Enhance unit test code coverage for availability zone * Converts 'image' to json primitive on compute.rpcapi.prep_resize * Import osapi_v3/enabled option in nova/test * Regenerate missing resized backing files * Moving `test_misc` tests to better locations * Allocate networks in the background * Make the datetime utility function coerce to UTC * API to get the Cell Capacity * Update rpc/impl_qpid.py from oslo * More detailed log in failing aggregate extra filter * xenapi: Added logging for sparse copy * Make object actions pass positional arguments * Don't snat all traffic when force_snat_range set * Add x-compute-request-id header when no response body * Call scheduler for run_instance from conductor * correctly set iface-id in vmware driver * Fix a race where a soft deleted instance might be removed by mistake * Fix quota checks while resizing up by admin * Refactor libvirt driver exception handling * Avoiding multiple code loops in filter scheduler * Don't log warn if v3 API is disabled * Link to explanation of --checksum-full rule * Imported Translations from Transifex * Stop libvirt errors from outputting to strerr * Delete 
unused bin directory * Make instance object tolerate isotime strings * Add fake_instance.py * Fix postgresql failures related to Data type * hardcode pbr and d2to1 versions * Silence exceptions from qpid connection.close() (from oslo) * Add Davanum to the mailmap * Fix VMwareVCdriver reporting incorrect stats * Adds ability to black/whitelist v3 API extensions * Clean up vmwareapi.network_util.get_network_with_the_name * Imported Translations from Transifex * Normalize path for finding api_samples dir * Add yolanda to the mailmap * Add notes about how doc generation works * python3: Add py33 to tox.ini * Improve Python 3.x compatibility * Ports consoles API to v3 API * Fix nova-compute fails to start if quantum is down * Handle instance directories correctly for migrates * Remove unused launch_time from instance * Launch_at and terminated_at on server(s) response * Fixed two minor docs niggles * Adds v3 API disable config option * Fix bug where consoleauth depended on remote conductor service * Only update cell capabilites once * Ports ips api to v3 API * Make pylint ignore nova/objects/ * Set resized instance back to original vm_state * Add power_on flag to virt driver finish/revert migration methods * Cosmetic fix to parameter name in DB API * compute.api call conductor ComputeTaskManager for live-migrate * Removed session from reservation_create() * Raise exception instances not exception classes * _s3_create handles image being deleted * Imported Translations from Transifex * Add instance object * Add base object model * Enhance multipath parsing * Don't delete sys_meta on instance delete * Fix volume IO usage notifications been sent too often * Add missing os.path.abspath around csrfile * Fix colorizier thowing exception when a test fails * Add db test that checks that shadow tables are up-to-date * Sync shadow table for 159 migration * Sync shadow table for 157 migration * Sync shadow table for 156 migration * Add missing tests for nova.db.api.quota_* methods 
* Add tests for some db.security_group_* methods * Fix _drop_unique_constraint_in_sqlite() function * Clean up failed image transfers in instance spawn * Make testr preserve existing OS_* env vars values * Fix msg version type sent to cells RPC API * Verify that CONF.compute_driver is defined * Fix EC2 RegisterImage ImageLocation starts with / * Support Cinder mount options for NFS/GlusterFS * Raise exception instances, not exception classes * Add update method of security group name and description * Cell weighing class to handle mute child cells * Add posargs support to flake8 call * Enumerate Flake8 E12x ignores * Fix and enable flake8 F823 * Fix and enable flake8 F812 * libvirt: improve the specification of network disks * Imported Translations from Transifex * In utils.tempdir, pass CONF.tempdir as an argument * Delegate authentication to quantumclient * Pull binary name from sys.argv[0] * Rename policy auth for V3 os-fixed-ips * Fix internationalization for some LOG messages * Enumerate Flake8 Fxxx ignores * Enable flake8 E721 * Removing misleading error message * No relevant message when stop a stopped VM * Cells: Add filtering and weight support * API Extensions framework for v3 API Part 2 * fix a misleading docstring * xenapi: make the xenapi agent optional per image * Fix config drive code logical error * Add missing conversion specifier to ServiceGroupUnavailable * Deprecate compute_api_class option in the config * Add node as instance attribute for notification * removes project_id/tenant_id from v3 api urls * Set up 'compute_task' conductor namespace * Removed superflous eval usage * Fix log message * Sync shadow table for 179 migration * Remove copy paste from 179 migration * Sync shadow table for 175 and 176 migration * Change db `deleted` column type utils * Fix tests for sqlalchemy utils * Add missing tests for nova.db.api.quota_class_* * Moved sample network creation out of unittest base class constructor * Add missing tests for 
db.api.reservation_* * add xml api sample tests to os-tenant-network * Remove locals() usage from nova.virt.libvirt.utils * IPMI driver sets bootdev option persistently * update mailmap * Imported Translations from Transifex * Remove tempest hack for create/rebuild checks * Better error message on malformed request url * virt: Move generic virt tests to nova/tests/virt/ * vmwareapi: Move tests under tests/virt/vmwareapi/ * hyperv: Move tests under nova/tests/virt/hyperv * Fix UnboundLocalError in powervm lvm cleanup code * Delete a quota through admin api * Remove locals() usage from nova.virt.libvirt.volume * Importing correlation_id middleware from oslo-incubator * Make a few places tolerant of sys_meta being a dict * Remove locals() from scheduler filters * Rename requires files to standard names * Imported Translations from Transifex * translates empty remote_ip_prefix to valid cidr for nova * Reset task_state when resetting vm_state to ACTIVE * xenapi: Moving tests under tests/virt/xenapi/ * xenapi: Disable VDI size check when root_gb=0 * Remove ImageTooLarge exception * Move ImageTooLarge check to Compute API * Share checks between create and rebuild * Remove path_exists from NFS/GlusterFS drivers * Removed session from fixed_ip_*() functions * Catch InstanceNotFound in instance_actions GET * Using unicode() to handle image's properties * Adds live migration support to cells API * Raise AgentBuildNotFound on updating/destroying deleted object * Add missing tests for nova.db.api.agent_build_* methods * Don't update API cell on get_nwinfo * Optimize SecurityGroupsOutputController by len(servers) * get_instance_security_groups() fails if no name on security group * libvirt: Moving tests under tests/virt/libvirt * Make it easier to add namespaced rpc APIs * baremetal: Move tests under tests/virt/baremetal * Disallow resize if image not available * powervm: Move tests under tests/virt/powervm * Sync RPC serializer changes from Oslo * Fix missing argument to 
logging warning call * set ERROR state when scheduler hits max attempts * Sync latest RPC changes from oslo * Add notification for live migration * Add requests requirement capped <1.2.1 * Adding tests for rebuild image checks * Add ImageNotActive check for instance rebuild * Fix error in instance_get_all_by_filters() use of soft_deleted filter * Fix resize when instance has no image * Fixes encoding issues for nova api req body * Update run_tests.sh to run flake8 too * Added validation for networks parameter value * Added attribute 'ip' to server search options * Make nova-api use servicegroup.API.service_is_up() * Add memorycache import into the oslo config * Fix require_context() decorators * Imported Translations from Transifex * Remove locals() from nova/cells/* * Update mailmap * Strip exec_dirs prefix from rootwrap filters * Clean up test_api_samples a bit * Remove unnecessary parens in test_volumes * Use strict=True instead of `is_valid_boolstr` * Editable default quota support * Remove usage of locals() for formatting from nova.api.* * Switch to flake8+hacking * Fix flake8 errors in anticipation of flake8 * Don't update DB records for unchanged stats * baremetal: drop 'prov_mac_address' column * The vm_state should not be modified until the task is complete * Return Customer's Quota Usage through Admin API * Use prettyxml output * Remove locals() from messages in virt/disk/api.py * 'm1.tiny' now has root_gb=1 * Cast `size` to int before comparison * Don't raise unnecessary stack traces in EC2 API * Mox should cleanup before stubs * Reverse compare arguments in filters tests * Don't inject settings for dynamic network * Add ca cert file support to cinder client requests * libvirt: Catch VIR_ERR_NO_DOMAIN in list_instances * Revert "Include list of attached volumes with instance info" * Sync rpc from oslo * Remove openstack.common.version * Fix for missing multipath device name * Add missing tests for db.fixed_ip_*(). 
functions * xenapi: ensure vdi is not too big when resizing down * Cells: Don't allow active -> build * Fix whitespace issue in indent * Pass the proper admin context to update_dhcp * Fix quantum security group driver to accept none for from/to_port * Reverse path SNAT for DNAT floating-ip * Use Oslo's `bool_from_string` * Handle IPMI transient failures better * Improve unit tests for DB archiving * Remove "#!/usr/bin/env python" from .py files under nova/cmd * Add missing unique constraint to KeyPair model * Refactored tests for db.key_pair_*() functions * Refactor nova.volume.cinder.API to reduce roundtrips with Cinder * Fix response from snapshot create stub * Hide lock_prefix argument using synchronized_with_prefix() * Cleanups for create-flavor * Cleanup create flavor tests * Imported Translations from Transifex * Test for remote directory creation before shutting down instance * Fix run_tests.sh usage of tools/colorizer.py * Move get_table() from test_migrations to sqlalchemy.utils * Convert Nova to use Oslo service infrastructure * Show the cause of virt driver error * Detach volume fails when using multipath iscsi * API extensions framework for v3 API * Sync service and threadgroup modules from oslo * Fix header issue for baremetal_deploy_helper.py * Extract getting instance's AZ into a helper module * Allow different paths for deploy-helper helpers * Show exception details for failed deploys * Imported Translations from Transifex * Check QCOW2 image size during root disk creation * Adds useful debug logging to filter_scheduler * fix non reporting of failures with floating IP assignment * Improve message and logging for corrupt VHD footers * Cleanup for test_create_server_with_deleted_image * Check cached SSH connection in PowerVM driver * Allow a floating IP to be associated to a specific fixed IP * Record smoketest dependency on gFlags * Make resize/migrated shared storage aware * Imported Translations from Transifex * Add pointer to compute driver matrix 
wiki page * xenapi: cleanup vdi when disk too big exception raised * Update rootwrap with code from oslo * Fixes typo in server-evacuate-req.xml * Fix variable referenced before assginment in vmwareapi code * Remove invalid block_device_mapping volume_size of '' * Architecture property updated in snapshot libvirt * Add sqlalchemy migration utils.create_shadow_table method * Add sqlalchemy migration utils.check_shadow_table method * Change type of cells.deleted from boolean to integer * Pass None to image if booted from volume in live migration * Raise InstanceInvalidState for double hard reboot * Removes duplicate assertEqual * Remove insecure default for signing_dir option * Removes unnecessary check for admin context in evacuate * Fix zookeeper import and tests * Make sure that hypervisor nodename is set correctly in FakeDriver * Optimize db.instance_floating_address_get_all method * Session cleanup for db.floating_ip_* methods * Optimize instance queries in compute manager * Remove duplicate gettext.install() calls * Include list of attached volumes with instance info * Catch volume create exception * Fixes KeyError bug with network api associate * Add unitests for VMware vif, and fix code logical error * Fix format error in claims * Fixes mock calls in Hyper-V test method * Adds instance root disk size checks during resize * Rename nova.compute.instance_types to flavors * Convert to using newly imported processutils * Import new additions to oslo's processutils * Imported Translations from Transifex * Enable live block migration when using iSCSI volumes * Nova evacuate failed when VM is in SHUTOFF status * Transition from openstack.common.setup to pbr * Remove random print statements * Remove security_group_handler * Add cpuset attr to vcpu conf in libvirt xml * Imported Translations from Transifex * Remove referances to LegacyFormatter in example logging.conf * libvirt: ignore NOSTATE in resume_state_on_host_boot() method * Sync oslo-incubator print statement 
changes * Fix stub_instance() to include missing attributes * Add an index to compute_node_stats * Convert to using oslo's execute() method * Import latest log module from oslo * Being more defensive around the use_ipv6 config option * Update hypervisor_hostname after live migration * Make nova-network support requested nic ordering * nova coverage creates lots of empty folders * fix broken WSDL logic * Remove race condition (in FloatingIps) * Add missing tests for db.floating_ip_* methods * Deprecate show_host_resources() in scheduler manager * Add force_nodes to filter properties * Adds --addn-hosts to the dnsmasq arg list * Update our import of oslo's processutils * Update oslo-incubator import * Delete InstanceSystemMetadata on instance deletion * vmwareapi: Add supported_instances to host state * xenapi: Always set other_config for VDIs * Copy the RHEL6 eventlet workaround from Oslo * Move db.fixed_ip_* tests from DbApiTestCase to FixedIpTestCase * Checks if volume can be attached * Call format_message on InstanceTypeNotFound exception * xenapi: Don't swallow missing SR exception * Prevent rescuing a VM with a partially mounted volume * Fix key error when create lpar instance failed * Reset migrating task state for MigrationError exceptions * Volume IO usage gets reset to 0 after a reboot / crash * Sync small and safe changes from oslo * Sync jsonutils from oslo * Fix EC2 instance bdm response * Rename _check_image_size to _get_and_check_image_metadata * Convert the cache key from unicode to a string * Catch glance image create exceptions * Update to using oslo periodic tasks implementation * Import oslo periodic tasks support * import and install gettext in vm_vdi_cleaner.py * Fix baremetal get_available_nodes * Fix attach when running as root without sysfsutils * Make _build_network_info_model testable * Fix building quantumapi network model with network list * Add the availability_zone to the volume.usage notifications * Add delete_net_interface function * 
Performance optimization for contrib.flavorextraspecs * Small whitespace tweak * Kill off usage of locals() in the filter_scheduler * Remove local variable only used in logging * Create instance with deleting image * Refactor work with db.instance_type_* methods * Fix flakey TestS3ImageService bug * Add missing snapshot image properties for VMware Hyper * Imported Translations from Transifex * Fix VMware Hyper console url parameter error * Update NovaBase model per changes on oslo.db.sqlalchemy * Send a instance create error notification * Refactor _run_instance() to unify control flow * set bdm['volume_id'] to None rather than delete it * Destroy conntrack table on source host during migration * Adds tests for isolated_hosts_filter * Fixes race condition of deleting floating ip * Imported Translations from Transifex * Wrong proxy port in nova.conf for Spice proxy * Fix missing kernel output via VNC/Spice on boot * Fix bug in db.instance_type_destroy * Move get_backdoor_port to base rpc API * Move db.instance_type_extra_specs_* to db.instance_type_* methods * Add missing test for db.instance_type_destroy method * Fix powervm driver resize instance error * Support FlatDHCP network for VMware Hyper * Imported Translations from Transifex * Deprecate conductor ping method * Add an rpc API common to all services * If rescue fails don't error the instance * Make os.services.update work with cells * Fix fixed_ip_count_by_project in DB API * Add unit tests for /db/api.py#fixed_ip_* * Add option to exclude joins from instance_get_by_uuid * Remove unnecessary method argument * Improve Python 3.x compatibility * ec2 CreateVolumes/DescribeVolumes status mapping * Can now reboot rescued instances in xenapi * Allows xenapi 'lookup' to look for rescue mode VMs * Adds tests to xenapi.vm_utils's 'lookup' method * Imported Translations from Transifex * Stop vm_state reset on reboot of rescued vm * Fix hyperv copy file error logged incorrect * Fix ec2 CreateVolumes/DescribeVolumes 
status * Imported Translations from Transifex * Don't swallow PolicyNotAuthorized for resize/reboot actions * Remove unused exception and variable from scheduler * Remove unnecessary full resource audits at the end of resizes * Update the log module from oslo-incubator * Translate NoMoreFloatingIps exception * Imported Translations from Transifex * Fix up regression tester * Delete extra space to api/volumes message * Map internal S3 image state to EC2 API values * removing unused variable from a test * Translate cinder NotFound exception * hypervisor tests more accurate db * Added comments to quantum api client * Cleanup and test volume usage on volume detach * Import and convert to oslo loopingcall * Remove orphaned db method instance_test_and_set * baremetal: VirtualPowerDriver uses mac addresses in bm_interfaces * Sync rpc from oslo-incubator * Correct disk's over committed size computing error * Imported Translations from Transifex * Allow listing fixed_ips for a given compute host * Imported Translations from Transifex * baremetal: Change input for sfdisk * Make sure confirm_resize finishes before setting vm_state to ACTIVE * Completes the power_state mapping from compute driver and manager * Make compute/manager use conductor for unrescue() * Add an extension to show the mac address of a ip in server(s) * Cleans up orphan compute_nodes not cleaned up by compute manager * Allow for the power state interval to be configured * Imported Translations from Transifex * Fix bug in os-availability-zone extension * Remove unnecessary db call in scheduler driver live-migration code * baremetal: Change node api related to prov_mac_address * Don't join metadata twice in instance_get_all() * Imported Translations from Transifex * Don't hide stacktraces for unexpected errors in rescue * Fix issues with check_instance_shared_storage * Remove "undefined name" pyflake errors * Optimize some of compute/manager's periodic tasks' DB queries * Optimize some of the periodic task 
database queries in n-cpu * Change DB API instance functions for selective metadata fetching * Replace metadata joins with another query * xenapi: Make _connect_volume exc handler eventlet safe * Fix typo: libvir => libvirt * Remove multi scheduler * Remove unnecessary LOG initialisation * Remove unnecessary parens * Simplify random host choice * Add NOVA_LOCALEDIR env variable * Imported Translations from Transifex * Clarify volume related exception message * Cleanup trailing whitespace in api samples * Add tenant/ user id to volume usage notifications * Security groups may be unavailable * Encode consoleauth token in utf-8 to make it a str * Catch NoValidHost exception during live-migration * Evacuated instance disk not deleted * Fix a bad tearDown method in test_quantumv2.py * Import eventlet in __init__.py * Raise correct exception for duplicate networks * Add an extension to show the network id of a virtual interface * Fix error message in pre_live_migration * Add reset function to nova coverage * Imported Translations from Transifex * nova-consoleauth start failed by consoleauth_manager option missing * set timeout for paramiko ssh connection * Define LOG globally in baremetal_deploy_helper * Allow describe_instances to use tags for searches * Correct network uuid field for os-network extension * Only call getLogger after configuring logging * Add SecurityGroups API sample tests * Cannot boot vm if quantum plugin does not support L3 api * Add missing tests for instance_type_extra_specs_* methods * Remove race condition (in InstanceTypeProjects) * Deprecate old vif drivers * Optimize resource tracker queries for instances * baremetal: Integrate provisioning and non-provisioning interfaces * Move console scripts to entrypoints * Remove deprecated Grizzly code * Fallback to conductor if types are not stashed * Imported Translations from Transifex * Resolve conflicting mac address in resize * Simplify and correct the bm partition sizes * Fix legacy_net_info guard 
* Fix SecurityGroups XML sample tests * Modify _verify_response to validate response codes * Fix a typo in attach_interface error path * After migrate, catch and remove deleted instances * Grab instance for migration before updating usage * Explain why the give methods are whitelisted * libvirt: Get driver type from base image type * Guard against content being None * Limit the checks for block device becoming available * Fix _error_out_instance exception handler * Raise rather than generating millions of IPs * Add unit tests for nova.volume.cinder.API * Update latest oslo.setup * baremetal: Drop unused columns in bm_nodes * Remove print statements * Imported Translations from Transifex * Fix the python version comparison * Remove gettext.install() from nova/__init__.py * Sync latest gettextutils from oslo-incubator * Return 409 on creating/importing same name keypair * Delete tests.baremetal.util.new_bm_deployment() * Return proper error message when network conflicts * Better iptables DROP removal * Query quantum once for instance's security groups * quantum security group driver nova list shows same group * Sync in matchmaker and qpid Conf changes from oslo * improve handling of an empty dnsmasq --domain * Fix automatic confirmation of resizes for no-db-compute * 'injected_files' should be base 64 encoded * Add missing unit tests for FlavorActionController * Set default fixed_ip quota to unlimited * Accepts aws-sdk-java timestamp format * Imported Translations from Transifex * get context from req rather than getting a new admin context * Use Cluster reference to reduce SDK calls * Fix missing punctuation in docstring * xenapi: fix support for iso boot * Ensure only pickle-able objects live in metadata * sync oslo db/sqlalchemy module * Convert host value from unicode to a string * always quote dhcp-domain, otherwise dnsmasq can fail to start * Fix typo in the XML serialization os-services API * Add CRUD methods for tags to the EC2 API * Fix migrating instance 
to the same host * Rework time handling in periodic tasks * Show quota 'in_use' and 'reserved' info * Imported Translations from Transifex * Fix quantum nic allocation when only portid is specified * Make tenant_usage fall back to instance_type_id * Use format_message on exceptions instead of str() * Add a format_message method to the Exceptions * List AZs fails if there are disabled services * Switch nova-baremetal-deploy-helper to use sfdisk * Bring back colorizer again with error results * Imported Translations from Transifex * Adds Tilera back-end for baremetal * Always store old instance_type during a migration * Make more readable error msg on quantum client authentication failure * Adding netmask to dnsmasq argument --dhcp-range * Add missing tests for db.instance_type_access_* methods * Remove race condition (in InstanceTypes) * Add missing tests for db.instance_type_* methods * Imported Translations from Transifex * set up FakeLogger for root logger * Fix /servers/os-security-groups using quantum * NoneType exception thrown if driver live-migration check returns None * Add missing info to docstring * Include Co-authored-by entries in AUTHORS * Do not test foreign keys with SQLite version < 3.7 * Avoid using whitespace in test_safe_parse_xml * xenapi: Retrieve VM uuid from xenstore * Reformat openstack-common.conf * Imported Translations from Transifex * Fixes Nova API /os-hosts missing element "zone" * disable colorizer as it swallows fails * Make iptables drop action configurable * Fixes argument order of quantumv2.api.get_instance_nw_info * Make _downsize_quota_delta() use stashed instance types * py2.6 doesn't support TextTestRunner resultclass * Reset ec2 image cache between S3 tests * Sync everything from oslo-incubator * Sync rpc from oslo-incubator * Don't log traceback on rpc timeout * Adds return-type in two functions' docstrings * Remove unnecessary checks in api.py * translate cinder BadRequest exception * Initialize compute manager before 
loading driver * Add a comment to placeholder migrations * xenapi: fix console for rescued instance * Fixes passing arbitrary conductor_api argument * Make nova.virt.fake.FakeDriver useable in integration testing * Remove unnecessary DB call to find EC2 AZs * Remove outdated try except block in ec2 code * nova-manage vm list fails looking 'instance_type' * Update instance network info cache to include vif_type * Bring back sexy colorized test results * Don't actually connect to libvirtd in unit tests * Add placeholder migrations to allow backports * Change arguments to volume_detach() * Change type of ssh_port option from Str to Int * xenapi: rpmbuild fixes * Set version to 2013.2 2013.1.rc1 ---------- * Fix Hyper V instance conflicts * Add caching for ec2 mapping ids * Imported Translations from Transifex * fix add-fixed-ip with quantum * Update the network info when using quantum * List InstanceNotFound as a client exception * Refactor db.service_destroy and db.service_update methods * Fix console support with cells * Fix missing argument to QemuImageInfo * Add missing tests for db.virtual_interface_* methods * Fix multiple fixed-ips with quantum * Add missing tests for db.service_* methods * Ensure that headers are returned as strings, not integers * Enable tox use of site-packages for libvirt * Require netaddr>=0.7.6 to avoid UnboundLocalError * Pass project id in quantum driver secgroup list * Fixes PowerVM spawn failed as missing attr supported_instances * Fix RequestContext crashes w/ no service catalog * Prevent volume-attach/detach from instances in rescue state * Fix XenAPI performance issue * xenapi: Adding logging for migration plugin * libvirt: Tolerate existing vm(s) with cdrom(s) * Remove dead code * Remove unused virt.disk.api methods bind/unbind * Imported Translations from Transifex * Revert "Remove the usage of instance['extra_specs' * Add standard methods to the Limits API * Store project_id for instance actions * rstrip() strips characters, not 
strings * Fix use of libvirt_disk_prefix * Revert 1154253 causes XenServer image compat issue * Reset migrating task state for more Exceptions * Fix db archiving bug with foreign key constraints * Imported Translations from Transifex * Update migration 153 for efficiency * Don't include traceback when wrapping exceptions * Fix exception message in Networks API extension * Make conductor's quota methods pass project_id properly * Fix: improve API error responses from os-hosts extension * Add missing API doc for networks-post-req * Make os-services API extensions consistent * Fix system_metadata "None" and created_at values * Add the serial to connection info for boot volumes * Do not accept invalid keys in quota-update * Add quotas for fixed ips * Makes safe xml data calls raise 400 http error instead of 500 * Fixes an iSCSI connector issue in the Hyper-V driver * Check keypair destroy result operation * Resize/Migrate refactoring fixes and test cases * Fixes Hyper-V live migration with attached volumes * Force nova to use keystone v2.0 for auth_token * Fix issues with cells and resize * Fix copyright - from LLC to Foundation * Don't log traceback on expected console error * Generalize console error handling during build * Remove sqlalchemy calling back to DB API * Make ssh key injection work with xenapi agent * Fix use of potentially-stale instance_type in tenant_usage * Drop gzip flag from tar command for OVF archives * Fix reconnecting to libvirt * List ComputeHostNotFound as a client exception * Fix: Nova aggregate API throws an uncaught exception on invalid host * Do cleaning up resource before rescheduling * nova-manage: remove unused import * Read instance resource quota info from "quota" namespace * LibvirtGenericVIFDriver update for stp * Switch to final 1.1.0 oslo.config release * Skip deleted fixed ip address for os-fixed-ips extension * Return error details to users in "dns-create-private-domain" * Lazy load CONF.quota_driver * Fix cells instance 
deletion * Don't load system_metadata when it isn't joined * List ConsoleTypeInvalid as a client exception * Make run_instance() bail quietly if instance has been deleted * Delete instance metadata when delete VM * Virtual Power Driver list running vms quoting error * Refactor work with session in db.block_device_mapping_* methods * Add missing tests for db.block_device_mapping_* methods * websockify 0.4 is busted * Sync rpc from oslo-incubator * Fix: nova-manage throws uncaught exception on invalid host/service * Fix more OS-DCF:diskConfig XML handling * Fix: Managers that incorrectly derive from SchedulerDependentManager * Fix nova-manage --version * Pin SQLAlchemy to 0.7.x * Deprecate CONF.fixed_range, do dynamic setup * Remove the usage of instance['extra_specs'] * Fix behaviour of split_cell_and_item * Fix quota issues with instance deletes * Fixes instance task_state being left as migrating * Force resource updates to update updated_at * Prepare services index method for use with cells * Handle vcpu counting failures gracefully * Return XML message with objectserver 404 * xenapi: Fix reboot with hung volumes * Rename LLC to Foundation * Pass migration_ref when when auto-confirming * Revert changing to FQDN for hostnames * Add numerous fixes to test_api_samples * Fixes instance action exception in "evacuate" API * Remove instance['instance_type'] relationship from db api * Refactor db tests to ensure that notdb driver is used * Rewrap two lines * Server create will only process "networks" if os-networks is loaded * Fixes nbd device can't be released error * Correct exception args in vfs/guestfs * Imported Translations from Transifex * Prevent nova services' coverage data from combining into nova-api's * Check if flavor id is an empty string * Simple syntax fix up * Fixes volume attach on Hyper-V with IPv6 * Add ability to control max utilization of a cell * Extended server attributes can show wrong hypervisor_hostname * Imported Translations from Transifex * 
Remove uses of instance['instance_type'] from nova/notifications * Libvirt driver create images even without meta * Prevent rescue for volume-backed instances * Fix OS-DCF:diskconfig XML handling * Imported Translations from Transifex * Compile BigInteger to INTEGER for sqlite * Add conductor to nova-all * Make bm model's deleted column match database * Update to Quantum Client 2.2.0 * Remove uses of instance['instance_type'] from nova/scheduler * Remove uses of instance['instance_type'] from nova/api * Remove uses of instance['instance_type'] from nova/network * Remove uses of instance['instance_type'] from nova/compute * Correct substring matching of baremetal VPD node names * Fix Wrong syntax for set:tag in dnsmasq startup option * Fix instance evacuate with shared storage * nova-manage: remove redundant 'dest' args * clear up method parameters for _modify_rules * Check CONF values *after* command line args are parsed * Make nova-manage db archive_deleted_rows more explicit * Fix for delete error in Hyper-V - missing CONF imports * add .idea folder to .gitignore pycharm creates this folder * Make 'os-hosts/node1' case sensitivity defer to DB * Fix access_ip_* race * Add MultipleCreate template and fix conflict with other templates * Update tox.ini to support RHEL 6.x * Fix instance type cleanup when doing a same-id migration * Tiny typo * Remove unnecessary setUp() and tearDown() methods * Remove duplicate API logging * Remove uses of instance['instance_type'] from libvirt driver * Remove uses of instance['instance_type'] from powervm driver * Remove uses of instance['instance_type'] from xenapi driver * Fixed image filter support for vmware * Switch to oslo.config * Fix instance_system_metadata deleted columns * Remove parameters containing passwords from Notifications * Add missing action_start if deleting resized inst * Fix issues with re-raising exceptions * Don't traceback in the API on invalid keypair * delete deleted image 500 bug * Moves Hyper-V options 
to the hyperv section * Fix 'to integer' conversion of max and min count values * Standarize ip validation along the code * Adjusts reclaim instance interval of deferred delete tests * Fix Network object encoding issue when using qpid * Rename VMWare to VMware * Put options in a list * Bump instance updated_at on network change * Catching InstanceNotFound exception during reboot instance * Imported Translations from Transifex * Remove completed FIXME * quantum security_group driver queries db regression * Prevent reboot of rescued instance * Baremetal deploy helper sets ODIRECT * Read baremetal images from extra_specs namespace * Rename source_(group_id/ip_prefix) to remote_(group_id/ip_prefix) * docs should indicate proper git commit limit * Imporove db.sqlalchemy.api._validate_unique_server_name method * Remove unused db calls from nova.db.api * Fixes oslo-config update for deprecated_group * fix postgresql drop race * Compute manager should remove dead resources * Fix an error in compute api snapshot_volume_backed bdm code * Fixes disk size issue during image boot on Hyper-V * Updating powervm driver snapshot with update_task_state flow * Imported Translations from Transifex * Add ssh port and key based auth to VPD * Make ComputeManager _running_deleted_instances query by uuid * Refactor compute manager _get_instances_by_driver * Fix target host variable from being overwritten * Imported Translations from Transifex * Fixes live migration with attached volumes issue * Don't LOG.error on max_depth (by default) * Set vm_state to ERROR on net deallocate failure * validate security_groups on server create * Fix IBM copyright strings * Implement rules_exist method for quantum security group driver * Switch to using memorycache from oslo * Remove pylint errors for undefined GroupException members * Sync timeutils and memorycache from oslo * instance_info_cache_update creates wrongly * Tone down logging while waiting for conductor * Add os-volumes extension to api 
samples * Regenerate nova.conf.sample * Fix ephemeral devices on LVM don't get mkfs'd * don't stack trace if long ints are passed to db * Pep8/pyflakes cleanup of deprecated_api * Fix deprecated network api * Fixes the Hyper-V driver's method signature * Imported Translations from Transifex * Fixes a Hyper-V live migration issue * Don't use instance['instance_type'] for scheduler filters in migration * Fallback coverage backdoor telnet connection to lo * Add instance_type_get() to virt api * Make compute manager revert crashed migrations on init_host() * Adds API Sample tests for Volume Attachments * Ensure that FORWARD rule also supports DHCP * Remove duplicate options(joinedload) from aggregates db code * Shrink size of aggregate_metadata_get_by_host sql query * Remove old commented out code in sqlalchemy models * Return proper error messages while disassociating floating IP * Don't blindly skip first migration * Imported Translations from Transifex * Suppress retries on UnexpectedTaskStateErrors * Fix `with_data` handling in test-migrations * BM Migration 004: Actually drop column * Actually run baremetal migration tests * Adds retry on upload_vhd for xapi glance plugin * ec2 _format_security_group() accesses db when using quantum_driver * Remove un-needed methods * Prevent hacking.py from crashing on unexpected import exception * Bump python-quantumclient version to 2.1.2 * Improve output msgs for _compare_result * Add a 'hw_' namespace to glance hardware config properties * Makes sure required powervm config options are set * Update OpenStack LLC to Foundation * Improve hackings docstring detection * Make sure no duplicate forward rules can exist * Use min_ram of original image for snapshot, even with VHD * Revert IP Address column length to 39 * Additional tests for safe parsing with minidom * Make allocate_for_instance() return only info about ports allocated * Fix crash in quantumapi if no network or port id is specified * Unpin PasteDeploy dependency 
version * Unpin routes dependency version * Unpin suds dependency version * Unpin Cheetah dependency version * Allow zk driver be imported without zookeeper * Retry floating_ip_fixed_ip_associate on deadlock * Fix hacking.py to handle 'cannot import x' * Add missing import to fakelibvirt * Migration 148: Fix drop table dependency order * Minor code optimization in _compute_topic * Fix hacking.py to handle parenthesise in from import as * Fix redefinition of function test_get_host_uptime * Migration 147: Prevent duplicate aggregate_hosts * Rework instance actions to work with cells * Fix incorrect zookeeper group name * Sync nova with oslo DB exception cleanup * Fix broken baremetal migration tests * if reset fails, display the command that failed * Remove unused nova.db.api:instance_get_all_by_reservation * Add API Sample tests for Snapshots extension * Run libguestfs API calls in a thread pool * Change nova-dhcpbridge FLAGFILE to a list of files * Imported Translations from Transifex * Readd run_tests.sh --debug option * Clean unused kernels and ramdisks from image cache * Imported Translations from Transifex * Ensure macs can be serialized * Remove Print Statement * Prevent default security group deletion * libvirt: lxml behavior breaks version check * Add missing import_opt for flat_injected * Add processutils from oslo * Updates to OSAPI sizelimit middleware * Remove compat cfg wrapper * Fix exception handling in baremetal API * Make guestfs use same libvirt URI as Nova * Make LibvirtDriver.uri() a staticmethod * Enable VM DHCP request to reach DHCP agent * Don't set filter name if we use Noop driver * Removes unnecessary qemu-img dependency on powervm driver * Migration 146: Execute delete call * Add `post_downgrade` hook for migration tests * Fix migration snake-walk * BM Migrations 2 & 3: Fix drop_column statements * Migration 144: Fix drop index statement * Remove function redefinitions * Migration 135: Fix drop_column statement * Add missing ec2 security 
group quantum mixin * Fix baremetal migration skipping * Add module prefix to exception types * Flush tokens on instance delete * Fix launching libvirt instances with swap * Spelling: compatable=>compatible * import base_dir_name config option into vmwareapi * Fix ComputeAPI.get_host_uptime * Move DB thread pooling to DB API * Use a fake coverage module instead of real one * Standardize the coverage initializations * Sync eventlet_backdoor from oslo-incubator * Sync rpc from oslo-incubator * Fix message envelope keys * Remove race condition (in Networks) * Move some context checking code from sqlalchemy * Baremetal driver returns accurate list of instance * Identify baremetal nodes by UUID * Improve performance of baremetal list_instances * Better error handling in baremetal spawn & destroy * Wait for baremetal deploy inside driver.spawn * cfg should be imported from oslo.config * Add Nova quantum security group proxy * Add a volume driver in Nova for Scality SOFS * Make nova security groups more pluggable * libvirt: fix volume walk of /dev/disk/by-path * Add better status to baremetal deployments * Fix handling of source_groups with no-db-compute * Improve I/O performance for periodic tasks * Allow exit code 21 for 'iscsiadm -m session' * Removed duplicate spawn code in PowerVM driver * Add API Sample tests for Hypervisors extension * Log lifecycle events to log INFO (not ERROR) * Sync rpc from oslo-incubator * sync oslo log updates * Adding ability to specify the libvirt cache mode for disk devices * Sync latest install_venv_common.py * Make add-fixed-ip update nwfilter wth in libvirt * Refactor nwfilter parameters * ensure we run db tests in CI * More gracefully handle TimeoutException in test * Multi-tenancy isolation with aggregates * Fix pep8 issues with test_manager.py * Fix broken logging imports * Fix hacking test to handle namespace packages * Use oslo-config-2013.1b4 * support preallocated VM images * Fix instance directory path for lxc * Add snapshot 
methods to fakes.py * PowerVMDiskAdapter detach/cleanup refactoring * Make ComputeTestCase.test_state_revert faster * Add an extension to show image size * libvirt: Use uuid for instance directory name * Support running periodic tasks immediately at startup * Fix XMLMatcher error reporting * Fix XML config tests for disk/net/cpu tuning * Add support for network adapter hotplug * Handle lifecycle events in the compute manager * Add support for lifecycle events in the libvirt driver * Enhance IPAdresses migration tests * Add basic infrastructure for compute driver async events * Fix key check in instance actions formatter * Add a safe_minidom_parse_string function * Documentation cleanups for nova devref * Fix leak of loop/nbd devices in injection using localfs * Add support for instance CPU consumption control * Add support for instance disk IO control * Retry bw_usage_update() on innodb Deadlock * Change CIDR column size on migration version 149 * Provide way to pass rxtx factor to quantum * Fibre channel block storage support (nova changes) * Default SG rules for the Security Group "Default" * create new cidr type for data storage * Ensure rpc result is primitive types * Change all instances of the non-word "inteface" to "interface" * Remove unused nova.db.api:network_get_by_bridge * Fix a typo in two comments. 
networksa -> networks * Live migration with an auto selection of dest * Remove unused nova.db.api:network_get_by_instance * Fix network list and show with quantum * Remove unused db calls from nova.db.sqlalchemy.api * Remove unused db calls * Small spelling fix in sqlalchemy utils * Fix _get_instance_volume_block_device_info call parameter * Do not use abbreviated config group names (zookeeper) * Prevent the unexpected with nova-manage network modify * Fix hacking tests on osx * Enable multipath for libvirt iSCSI Volume Driver * Add select_hosts to scheduler manager rpc * Add and check data functions for test_migrations 141 * fix incorrectly defined ints as strs * Remove race condition (in TaskLog) * Add generic dropper for duplicate rows * Imported Translations from Transifex * Fix typo/bug in generic UC dropper * remove intermediate libvirt downloaded images * Add support for instance vif traffic control * Add libvirt XML schema support for resource tuning parameters * Fix instance can not be deleted after soft reboot * Correct spelling of quantum * Make pep8 tests run inside virtualenv * Remove tests for non-existing SimpleScheduler * libvirt: Fix LXC container creation * Rename 'connection' to 'driver' in libvirt HostState * Ensure there is only one instance of LibvirtDriver * Stop unit test for prompting for a sudo password * clean up missing whitespace after ':' * Push 'Error' result from event to instance action * Speedup the revert_state test * Add image to request_spec during resize * Ensure start time is earlier than end time in simple_tenant_usage * Split out body of loop in _sync_power_states in compute manager * Remove dead variable assignment in compute manager * Assign unique names with os-multiple-create * Nova network needs to take care of existing alias * Delete baremetal interfaces when their parent node is deleted * Harmonize PEP8 checking between tox and run_tests.sh * VirtualPowerDriver catches ProcessExecutionError * [xenapi] Cooperatively 
yield during sparse copy * Allow archiving deleted rows to shadow tables, for performance * Adds API Sample tests for FlavorAccess extension * Add an update option to run_tests.sh * filter_scheduler: Select from a subset of hosts * use nova-conductor for live-migration * Fix script argument parsing * Add option to allow cross AZ attach configurable * relocatable roots doesn't handle testr args/opts * Remove a log message in test code * add config drive to api_samples * Don't modify injected_files inside PXE driver * Synchronize code from oslo * Canonizes IPv6 before insert it into the db * Only dhcp the first ip for each mac address * Use connection_info on resize * Fix add-fixed-ip and remove-fixed-ip * API extension for accessing instance_actions * Use joinedload for system_metadata in db * Add migration with data test for migration 151 * Correct misspelling in PowerVM comment * Add GlusterFS libvirt volume connector * Module import style checking changes * Stub additional FloatingIP methods in FlatManager * Resize/Migrate functions for PowerVM driver * Added a service heartbeat driver using Memcached * Use a more specific error reporting invalid disk hardware * Allow VIF model to be chosen per image * Check the length of flavor name in "flavor-create" * Add API sample tests to Services extension * VMWare driver to use current nova.network.model * Add "is not" test to hacking.py * Update tools/regression_tester * Fix passing conductor to get_instance_nw_info() * Imported Translations from Transifex * Make compute manager use conductor for stopping instances * Move allowvssprovider=false to vm-data field * Allow aggregate create to have None as the az * Forces flavorRef to be string in servers resize api * xenapi: Remove unecessary exception handling * Sync jsonutils from openstack-common * Simplify and optimize az server output extension * Add an extension to show the type of an ip * Ensure that only one IP address is allocated * Make the metadata paths use 
conductor * Fix nova-compute use of missing DBError * Adding support for AoE block storage SANs * Update docs about testing * Allow generic rules in context_is_admin rule in policy * Implements resize / cold migration on Hyper-V * test_(dis)associate_by_non_existing_security_group_name missing stub * Make scheduler remove dead nodes from its cache * More conductor support for resizes * Allow fixed to float ping with external gateway * Add generic UC dropper * Remove locking declarator in ServiceGroup __new__() * Use ServiceGroup API to show node liveness * Refine PowerVM MAC address generation algorithm * Fixes a bug in attaching volumes on Hyper-V * Fix unconsumed column name warning in test_migrations * Fix regression in non-admin simple_usage:show * Ensure 'subunit2pyunit' is run in venv from run_tests.sh * Fix inaccuracies in the development environment doc * preserve order of pre-existing iptables chains * Adds API Sample tests for FloatingIPDNS extension * Don't call 'vif.plug' twice during VM startup * Disallow setting /0 for network other than 0.0.0.0 * Fix spelling in comment * Imported Translations from Transifex * make vmwareapi driver pass quantum port-id to ESX * Add control-M to list of characters to strip out * Update to simplified common oslo version code * Libvirt: Implement snapshots for LVM-backed roots * Properly write non-raw LVM images on creation * Changes GA code for tracking cross-domain * Return dest_check_data as expected by the Scheduler * Simplify libvirt snapshot code path * fix VM power state to be NOSTATE when instance not found * Fix missing key error in libvirt.driver * Update jsonutils from oslo-incubator * Update nova/compute/api to handle instance as dict * Use joined version of db.api calls * l3.py,add_floating_ip: setup NAT before binding * Regenerate nova.conf.sample * Fixes a race condition on updating security group rules * Ensure that LB VIF drivers creates the bridge if necessary * Remove nova.db call from baremetal PXE 
driver * Support for scheduler hints for VM groups * Fixed FlavorAccess serializer * Add a virtual PowerDriver for Baremetal testing * Optimize rpc handling for allocate and deallocate * Move floating ip db access to calling side * Implement ZooKeeper driver for ServiceGroup API * Added the build directory to the tox.ini list pep8 ignores * support reloctable venv roots in testing framework * Change to support custom nw filters * Allow multiple dns servers when starting dnsmasq * Clean up extended server output samples * maint: remove unused imports from bin/nova-* * xenapi: Cleanup detach_volume code * Access DB as dict not as attributes part 5 * Introduce support for 802.1qbg and 802.1qbh to Nova VIF model * Adds _(prerun|check)_134 functions to test_migrations * Extension for rebuild-for-ha * Support hypervisor supplied macs in nova-network * Recache or rebuild missing images on hard_reboot * Cells: Add cells support to hypervisors extension * Cells: Add cells support to instance_usage_audit_log api extension * Update modules from common required for rpc with lock detection * Fix lazy load 'system_metadata' failed problem * Ban database access in nova-compute * Move security_groups refreshes to conductor * Fix inject_files for storing binary file * Add regression testing tool * Change forward_bridge_interface to MultiStrOpt * Imported Translations from Transifex * hypervisor-supplied-nics support in PowerVM * Default the last parameter (state) in task_log_get to None * Sync latest install_venv_common from oslo * Remove strcmp_const_time * Adds original copyright notice to refactored files * Update .coveragerc * Allow disk driver to be chosen per image * Refactor code for setting up libvirt disk mappings * Refactor instance usage notifications for compute manager * Flavor Extra Specs should require admin privileges * Remove unused methods * Return to skipping filters when using force_hosts * Refactor server password metadata to avoid direct db usage * lxc: Clean 
up namespace mounts * Move libvirt volume driver tests to separate test case * Move libvirt NFS volume driver impl into volume.py * replace ssh-keygen -m with a python equivalent * Allow connecting to self-signed quantum endpoints * Sync latest db and importutils from oslo * Use oslo database code * Fix check instance host for instance action * Make get_dev_name_for_instance() use stashed instance_type info * Added Postgres CI opportunistic test case * Remove remaining instance_types query from compute/manager * Make cells_api fetch stashed instance_type info * Teach resource tracker about stashed instance types * Fix up instance types in sys meta for resizes * lxc: virDomainGetVcpus is not supported by driver * Fix incorrect device name being raised * VMware VC Compute Driver * Default value of monkey_patch_modules is broken * Adds evacuate method to compute.api * Fix import for install_venv.py * allow disabling file injection completely * separate libvirt injection and configdrive config variables * Add API sample tests to os-network * Fix incorrect logs in network * Update HACKING.rst per recent changes * Allow for specifying nfs mount options * Add REST API to show availability_zone of instance * Make NFS mount hashes consistent with Cinder * Parse testr output through subunit2pyunit * Imported Translations from Transifex * Optimize floating ip list to make one db query * Remove hardcoded topic strings in network manager * Reimplement is_valid_ipv4() * Tweakify is_valid_boolstr() * Fix update quota with invalid value * Make system_metadata update in place * Mark password config options with secret * Record instance actions and events * Postgres does not like empty strings for type inet * Add 'not in' test to tools/hacking.py * Split floating ip functionality into new file * Optimize network calls by moving them to api * Fixes unhandled exception in detach_volume * Fixes FloatingIPDNS extension 'show' method * import tools/flakes from oslo * Use conductor for 
instance_info_cache_update * Quantum metadata handler now uses X-Forwarded-For * instance.update notifications don't always identify the service * Handle compute node not available for live migration * Fixes 'not in' operator usage * Fixes "is not" usage * Make scheduler modules pass conductor to add_instance_fault * Condense multiple authorizers into a single one * Extend extension_authorizer to enable cleaner code * Remove unnecessary deserializer test * Added sample tests to FlavorExtraSpecs API * Fix rebuild with volumes attached * DRYing up volume_in_mapping code * Use _prep_block_device in rebuild * xenapi: Ax unecessary `block_device_info` params * Code cleanup for rebuild block device mapping * Fix eventlet/mysql db pooling code * Add support for compressing qcow2 snapshots * Remove deprecation notice in LibvirtBridgeDriver * Fix boto capabilities check * Add api samples to fping extension * Fix SQL Error with fixed ips under devstack/postgresql * Pass testropts in to setup.py in run_tests.sh * Nova Hyper-V driver refactoring * Fixed grammar problems and typos in doc strings * Add option to control where bridges forward * xenapi: Add support for different image upload drivers * Removed print stmts in test cases * Fix get and update in FlavorExtraSpecs * Libvirt: Add support for live snapshots * Move task_log functions to conductor * erase outdated comment * Keep flavor information in system_metadata * Add instance_fault_create() to conductor * Adds API Sample tests for os-instance_usage_audit_log extension * validate specified volumes to boot from at the API layer * Refactor libvirt volume driver classes to reduce duplication * Change ''' to """ in bin/nova-{novncproxy,spicehtml5proxy} * Pass parameter 'filter' back to model layer * Fix boot with image not active * refactored data upgrade tests in test_migrations * Fix authorized_keys file permissions * Finer access control in os-volume_attachments * Stop including full service catalog in each RPC msg * 
Make sure there are no unused import * Fix missing wrap_db_error for Session.execute() method * Use install_venv_common.py from oslo * Add Region name to quantum client * Removes retry of set_admin_password * fix nova-baremetal-manage version printing * Refactoring/cleanup of compute and db apis * Fix an error in affinity filters * Fix a typo of log message in _poll_unconfirmed_resizes * Allow users to specify a tmp location via config * Avoid hard dependency on python-coverage * iptables-restore error when table not loaded * Don't warn up front about libvirt loading issues in NWFilterFirewall * Relax API restrictions around the use of reboot * Strip out Traceback from HTTP response * VMware Compute Driver OVF Support * VMware Compute Driver Host Ops * VMware Compute Driver Networking * Move policy checks to calling side of rpc * Add api-samples to multinic extension * Add system_metadata to db.instance_get_active_by_window_joined * Enable N302: Import modules only * clean up api_samples documentation * Fix bad imports that cause nova-novncproxy to fail * populate dnsmasq lease db with valid leases * Support optional 4 arg for nova-dhcpbridge * Add debug log when call out to glance * Increase maximum URI size for EC2 API to 16k * VMware Compute Driver Glance improvement * Refactored run_command for better naming * Fix rendering of FixedIpNotFoundForNetworkHost * Fix hacking N302 import only modules * Avoid db lookup in info_from_instance() * Fixes task_log_get and task_log_get_all signatures * Make failures in the periodic tests more detailed * Clearer debug when test_terminate_sigterm fails * Skip backup files when running pep8 * Added sample tests to floating-ip-pools API * _sync_compute_node should log host and nodename * Don't pass the entire list of instances to compute * VMware Compute Driver Volume Management * Bump the base rpc version of the network api to 1.7 * Remove compute api from scheduler driver * Remove network manager from compute manager * Adds 
SSL support for API server * Provide creating real unique constraints for columns * Add version constraint for coverage * Correct a format string in virt/baremetal/ipmi.py * Add REST api to manage bare-metal nodes * Adding REST API to show all availability zones of an region * Fixed nova-manage argument parsing error * xenapi: Add cleanup_sm_locks script * Fix double reboot during resume_state_on_host_boot * Add support for memory overcommit in live-migration * Adds conductor support for instance_get_active_by_window_joined * Make compare_result show the difference in lists * Don't limit SSH keys generation to 1024 bits * Ensure service's servicegroup API is created first * Drop volume API * Fix for typo in xml API doc sample in nova * Avoid stuck task_state on snapshot image failure * ensure failure to inject user files results in startup error * List servers having non-existent flavor should return empty list * Add version constraint for cinder * Remove duplicated tapdev creation code from libvirt VIF * Move helper APIs for OVS ports into linux_net * Add 'ovs_interfaceid' to nova network VIF model * Replace use of mkdtemp with fixtures.TempDir * Add trust level cache to trusted_filter * Fix the wrong datatype in task_log table * Cleanup of extract_opts.py * Baremetal/utils should not log certain exceptions * Use setup.py testr to run testr in run_tests.sh * Fix nova coverage * PXE driver should rmtree directories it created * Fix floating ips with external gateway * Add support for Option Groups in LazyPluggable * Fix incorrect use of context object * Unpin testtools * fix misspellings in logs, comments and tests * fix mysql race in tests * Fix get Floating ip pools action name to match with its policy * Generate coverage even if tests failed * Allow snapshots of paused and suspended instances * Update en_US message translations * Sync latest cfg from oslo-incubator * Avoid testtools 0.9.25 * Cells: Add support for compute HostAPI() * Refactor compute_utils to 
avoid db lookup * ensure zeros are written out when clearing volumes * fix service_ref undefined problem * Add rootwrap filters for password injection with localfs * fix floating ip test that wasn't running * Prevent metadata updates until instance is active * More consistent libvirt XML handling and cleanup * pick up eventlet backdoor fix from oslo * Run_as_root to ensure resize2fs succeed for all image backends * libvirt: Fix typo in configdrive implementation * Refactor EC2 keypairs exception * Directly copy a file URL from glance * Remove restoring soft deleted entries part 2 * Remove restoring soft deleted entries part 1 * Use conductor in the servicegroup db driver * Add service_update to conductor * Remove some db calls from db servicegroup driver * XenAPI: Fix volume detach * Refactor: extract method: driver_dict_from_config * Cells: Fix for relaying instance info_cache updates * Fix wrong quota reservation when deleting resizing instance * Go back to the original branch after pylint check * Ignore auto-generated files by lintstack * Add host to instance_faults table * Clean up db network db calls for fixed and float * Remove obsolete baremetal override of MAC addresses * Fix multi line docstring tests in hacking.py * PXE driver should not accept empty kernel UUID * Use common rootwrap from oslo-incubator * Remove network_host config option * Better instance fault message when rescheduling * libvirt: Optimize test_connection and capabilities * don't allow crs in the code * enforce server_id can only be uuid or int * Allow nova to use insecure cinderclient * Makes sure compute doesn't crash on failed resume * Fix fallback when Quantum doesn't provide a 'vif_type' * Move compute node operations to conductor * correcting for proper use of the word 'an' * Correcting improper use of the word 'an' * Save password set through xen agent * Add encryption method using an ssh public key * Make resource tracker use conductor for listing instances * Make resource 
tracker use conductor for listing compute nodes * Updates prerequisite packages for fedora * Expose a get_spice_console RPC API method * Add a get_spice_console method to nova.virt.ComputeDriver API * Add nova-spicehtml5proxy helper * Pull NovaWebSocketProxy class out of nova-novncproxy binary * Add support for configuring SPICE graphics with libvirt * Add support for setting up elements in libvirt config * Add common config options for SPICE graphics * Create ports in quantum matching hypervisor MAC addresses * Make nova-api logs more useful * Override floating interface on callee side * Reject user ports that have MACs the hypervisor cannot use * Remove unused import * Reduce number of iptable-save restore loops * Clean up get_instance_id_by_floating_address * Move migration_get_..._by_host_and_node to conductor * Make resource tracker use conductor for migration updates * minor improvements to nova/tests/test_metadata.py * Cells: Add some cells support to admin_actions extension * Populate service list with availability zone and correct unit test * Correct misspelling of fake_service_get_all * Add 'devname' to nova.network.model.VIF class * Use testrepository setuptools support * Cleaning up exception handling * libvirt: use tap for non-blockdevice images on Xen * Export the MAC addresses of nodes for bare-metal * Cells: Add cells API extension * More HostAPI() cleanup for cells * Break out a helper function for working with bare metal nodes * Renames the new os-networks extension * Define a hypervisor driver method for getting MAC addresses * enables admin to view instance fault "details" * Revert "Use testr setuptools commands." 
* Revert "Populate service list with availability zone" * Fix typos in docstring * Fix problem with ipv6 link-local address(es) * Adds support for Quantum networking in Hyper-V * enable hacking.py self tests * Correct docstring on sizelimit middleware * sync latest log and lockutils from oslo * Fix addition of CPU features when running against legacy libvirt * Fix nova.availability_zones docstring * Fix uses of service_get_all_compute_by_host * VMware Compute Driver Rename * use postgresql INET datatype for storing IPs * Extract validation and provision code to separate method * Implement Quantum support for addition and removal of fixed IPs * Keep self and context out of error notification payload * Populate service list with availability zone * Add Compute API validations for block device map * Cells: Commit resize quota reservations immediately * Cells: Reduce the create_image call depth for cells * Clean up compute API image_create * Fix logic error in periodic task wait code * Centralize instance directory logic * Chown doesn't work on mounted vfat * instances_path is now defined here * Convert ConfigDriveHelper to being a context manager itself * Use testr setuptools commands * Move migration_create() to conductor * Move network call from compute API to the manager * Fix incorrect comment, and move a variable close to use * Make sure reboot_instance uses updated instance * Cleanup reboot_instance tests * Fix use of stale instance data in compute manager * Implements getPasswordData for ec2 * Add service_destroy to conductor * Make nova.service get service through conductor * Add service_create to conductor * Handle waiting for conductor in nova.service * Allow forcing local conductor * Make pinging conductor a part of conductor API * Fix some conductor manager return values * Handle directory conflicts with html output * Fix error in NovaBase.save() method * Skip domains on libvirt errors in get_vcpu_used() * Fix state sync logic related to the PAUSED VM 
state * Remove more unused opts from nova.scheduler.driver * Fix quota updating when admin deletes common user's instance * Tests for PXE bare-metal provisioning helper server * Correct the calculating of disk size when using lvm disk backend * Adding configdrive to xenapi * Validated device_name value in block device map * Fix libvirt resume function call to get_domain_xml * Make it clearer that network.api.API is nova-network specific * Access instance as dict, not object in xenapi * Expand quota logging * Move logic from os-api-host into compute * Create a directory for servicegroup drivers * Move update_instance_info_cache to conductor * Change ComputerDriver.legacy_nwinfo to raise by default * Cleanup pyflakes in nova-manage * Add user/tenant shim to RequestContext * make runtests -p act more like tox * fix new N402 errors * Add host name to log message for _local_delete * Try out a new nova.conf.sample format * Regenerate nova.conf.sample * Make Quantum plugin fill in the 'bridge' name * Make nova network manager fill in vif_type * Add some constants to the network model for drivers to use * Move libvirt VIF XML config into designer.py * Remove bogus 'unplug' calls from libvirt VIF test * Fix bash syntax error in run_tests.sh * Update instance's cell_name in API cell * Fix init_host checking moved instances * Fix test cases in integrated.test_multiprocess_api * Map libvirt error to InstanceNotFound in get_instance_disk_info * Fixed comment typo * Added sample tests to FlavorSwap API * Remove unused baremetal PXE options * Remove unused opt import in scheduler.driver * Move global service networking opts to new module * Move memcached_servers opt into common.memorycache * Move service_down_time to nova.service * Move vpn_key_suffix into pipelib * fix N402 on tools/ * fix N402 for nova-manage * fix N402 for rest of nova * fix N402 for nova/c* * fix N402 for nova/db * don't clear the database dicts in the tearDown method * Fixed typos in doc strings * Enhance 
wsgi to listen on ipv6 address * Adds a flag to allow configuring a region * Fix double reboot issue during soft reboot * Remove baremetal-compute-pxe.filters * Fix pyflakes issues in integrated tests * Adds option to rebuild instance with existing disk * Move common virt driver options to virt.driver * Move vpn_image_id to pipelib * Move enabled_apis option into nova.service * Move default_instance_type into nova.compute * Move osapi_compute_unique_server_name_scope to db * Move api_class options to where they are used * Move manager options into nova.service * Move compute_topic into nova.compute.rpcapi * fix N402 for nova/network * fix N402 for nova/scheduler * fix N402 for nova/tests * Fix N402 for nova/virt * Fix N402 for nova/api * New instance_actions and events table, model, and api * Cope better with out of sync bm data * Import latest timeutils from oslo-incubator * Remove availability_zones from service table * Enable Aggregate based availability zones * Sync log from oslo-incubator * Clarify the DBApi object in cells fakes * Fix lintstack check for multi-patch reviews * Adds to manager init_host validation for instances location * Add to libvirt driver instance_on_disk method * add to driver option to keep disks when instance destroyed * Fix serialization in impl_zmq * Added sample tests to FlavorRxtx API * Refresh instance metadata in-place * xenapi: Remove dead code, moves, tests * Fix baremetal VIFDriver * Adds a new tenant-centric network extension * CLI for bare-metal database sync * Move scheduler_topic into nova.scheduler.rpcapi * Move console_topic into nova.console.rpcapi * Move network_topic into nova.network.rpcapi * Move cert_topic into nova.cert.rpcapi * Move global s3 opts into nova.image.s3 * Move global glance opts into nova.image.glance * Remove unused osapi_path option * attach/detach_volume() take instance as a parameter * fix N401 errors, stop ignoring all N4* errors * Add api extension to get and reset password * powervm: Implement 
snapshot for local volumes * Add exception handler for previous deleted flavor * Add NoopQuotaDriver * Conductor instance_get_all replaces _by_filters * Support cinderclient http retries * Sync rpc and notifier from oslo-incubator * PXE bare-metal provisioning helper server * Added sample tests to QuotaClasses API * Changed 'OpenStack, LLC' message to 'OpenStack Foundation' * Convert short doc strings to be on one line * Get instances from conductor in init_host * Invert test stream capture logic for debugging * Upgrade WebOb to 1.2.3 * Make WebOb version specification more flexible * Refactor work with TaskLog in sqlalchemy.api * Check admin context in bm_interface_get_all() * Provide a PXE NodeDriver for the Baremetal driver * Handle compute node records with no timestamp * config_drive is missing in xml deserializer * Imported Translations from Transifex * NovaBase.delete() rename to NovaBase.soft_delete() * livbirt: have a single source of console log file naming * Remove the global DATA * Add ping to conductor * Add two tests for resize action in ServerActionsControllerTest * Move service_get_all operations to conductor * Move migration_get_unconfirmed_by_dest_compute to conductor * Move vol_usage methods to conductor * Add test for resize server in ComputeAPITestCase * Allow pinging own float when using fixed gateway * Use full instance in virt driver volume usage * Imported Translations from Transifex * Refactor periodic tasks * Cells: Add periodic instance healing * Timeout individual tests after one minute * Fix regression in RetryFilter * Cells: Add the main code * Adding two snapshot related task states * update version urls to working v2 urls * Add helper methods to nova.paths * Move global path opts in nova.paths * Remove unused aws access key opts * Move fake_network opt to nova.network.manager * Allow larger encrypted password posts to metadata * Move instance_type_get() to conductor * Move instance_info_cache_delete() to conductor * Move 
instance_destroy() to conductor * Move instance_get_*() to conductor * Sync timeutils changes from Oslo * Remove system_metadata db calls from compute manager * Move block_device_mapping destroy operations to conductor * Clean up setting of control_exchange default * fix floating-ip in multihost case * Invalid EC2 ids should make the entire request fail * improve libguestfs exception handling * fix resize of unpartitioned images with libguestfs * xenapi: Avoid hotplugging volumes on resize * Remove unused VMWare VIF driver abstraction * Delete pointless nova.virt.VIFDriver class * Clarify & fix docs for nova-novncproxy * Removes unused imports * Imported Translations from Transifex * Fix spelling mistakes in nova.virt * Cells: Add cells commands to nova-manage * Add remaining get_backdoor_port() rpc calls to coverage * Fix race in resource tracker * Move block_device_mapping get operations to conductor * Move block_device_mapping update operations to conductor * Improve baremetal driver error handling * Add unit test to update server metadata * Add unit test to revert resize server action * Add compute build/resize errors to instance faults * Add unit test for too long metadata for server rebuild action * Adds os-volume_attachments 'volume_id' validation * Raise BadRequest when updating 'personality' * Imported Translations from Transifex * Ensure that Quantum uses configured fixed IP * Add conditions in compute APIRouter * Imported Translations from Transifex * CRUD on flavor extra spec extension should be admin-only * Report failures to mount in localfs correctly * Add API sample tests to FixedIPs extension * baremetal power driver takes **kwargs * Implement IPMI sub-driver for baremetal compute * Fix tests/baremetal/test_driver.py * Move baremetal options to [BAREMETAL] OptGroup * Adds test for HTTPUnprocessableEntity when rebooting * Make sure the loadables path is the absolute path * Fix bug and remove update lock in db.instance_test_and_set() * Periodic 
update of DNS entries * Fix error in test_get_all_by_multiple_options_at_once() * Remove session.flush() and session.query() monkey patching * Update nova-cert man page * Allow new XML API sample file generation * Remove unused imports * spelling in test_migrations * Imported Translations from Transifex * Check for image_meta in libvirt.driver.spawn * Adds test for 'itemNotFound' errors in 'Delete server' * Remove improper NotFound except block in list servers * Spelling: Compatability=>Compatibility * Imported Translations from Transifex * Ensure we add a new line when appending to rc.local * Verify the disk file exists before running qemu-img on it * Remove lxc attaching/detaching of volumes * Teardown container rootfs in host namespace for lxc * Fix cloudpipe instances query * Ensure datetimes can be properly serialized * Imported Translations from Transifex * Database metadata performance optimizations * db.network_delete_safe() method performance optimization * db.security_group_rule_destroy() method performance optimization * Import missing exception * Ignore double messages to associate the same ip * Imported Translations from Transifex * Database reservations methods performance optimization * Using query.soft_delete() method insead of soft deleting by hand * Create and use subclass of sqlalchemy Query with soft_delete() method * Remove inconsistent usage of variable from hyperv * Log last compute error when rescheduling * Removed unused imports * Make libvirt driver default to virtio for KVM/QEMU NICs * Refactor libvirt VIF classes to reduce duplicate code * Makes sure to call crypto scripts with abspath * Enable nova exception format checking in tests * Eliminate race conditions in floating association * Imported Translations from Transifex * Provide a configdrive helper which uses contextlib * Add extension to allow hiding of addresses * Add html reports to report action in coverage extension * Add API samples tests for the coverage extension * Fix 
_find_ports() for when backdoor_port is None * Parameterize database connection in test.py * fixing the typo of the error message from nbd * add 'random_seed' entry to instance metadata * Baremetal VIF and Volume sub-drivers * Fix revert resize failure with disk.local not found * Fix a test isolation error in compute.test_compute * New Baremetal provisioning framework * Move baremetal database tests to fixtures * address uuid overwriting * Add get_backdoor_port to cert * Add get_backdoor_port to scheduler * Add get_backdoor_port to console * Make libvirt driver.listinstances return defined * Add get_backdoor_port to consoleauth * Export custom SMBIOS info to QEMU/KVM guests * Make configdrive.py use version.product_string() * Allow loading of product/vendor/package info from external file * Remove obsolete VCS version info completely * Trap exception when trying to write csr * Define a product, vendor & package strings in version.py * Extract image metadata from Cinder * Add expected exception to aggregate_metadata_delete() * Move aggregate_get() to conductor * Add .testrepository/ directory to gitginore * Make load_network_driver load passed in driver * Fix race condition of resize confirmation * libvirt: Make vif_driver.plug() returns None * Add an iptables mangle rule per-bridge for DHCP * Make NBD retry logic more generic, add retry to loop * Reliably include OS type in ephemeral filenames * Allow specification of libvirt guest interface backend driver * Fix "image_meta" data passed in libvirt test case * Fix typos in vncserver_listen config param help description * Traceback when user doesn't have permission * removed duplicate function definitions * network/api add_fixed_ip correctly passes uuid * Import cfg module in extract_opts.py * Raise old exception instance instead of new one * Update exceptions to pass correct kwargs * Add option to make exception format errors fatal * allow for the ability to run partial coverage * Remove fake_tests opt from test.py 
* Execute pygrub using nova-rootwrap in xenapi * Add DBDuplicateEntry exception for unique constraint violations * Fix stack trace on incorrect nova-manage args * Use service fixture in DB servicegroup tests * fix instance rescue without cmdline params in xml.rescue * Added sample tests to FlavorDisabled API * Reset the IPv6 API backend when resetting the conf stack * libvirt: Skip intermediate base files with qcow2 * fix test_nbd using stubs * Imported Translations from Transifex * Properly remove the time override in quota tests * Fix API samples generation * Move TimeOverride to the general reusable-test-helper place * Added conf support for security groups * Add accounting for orphans to resource tracker * Add more association support to network API * Remove the WillNotSchedule exception * Replace fixtures.DetailStream with fixtures.StringStream * Move network_driver into new nova.network.driver * Move DNS manager options into network.manager * Refactor xvp console * Move agent_build_get_by_triple to conductor * Move provider_fw_rule_get_all to conductor * Move security_group operations in VirtAPI to conductor * Retry NBD device allocation * Use testr to run nova unittests * Add a developer trap for api samples * Update command on devref doc * Fixed deleting instance booted from invalid vol * Add general mechanism for testing api coverage * Add the missing replacement text in devref doc * Allow xenapi to work with empty image metadata * Imported Translations from Transifex * Fix for broken switch for config_drive * Fix use of osapi_compute_extension option in api_samples * Remove sleep in test_consoleauth * Fix errors in used_limits extension * Fix poll_rescued_instances periodic task * Add syslogging to nova-rootwrap * Clean up run_tests.sh * Ensure that sql_dbpool_enable is a boolean value * Stop nbd leaks, remove pid race * Fixes KeyError: 'sr_uuid' when booting from volume on xenapi * Add VirtAPI tests * Move remaining aggregate operations to conductor * 
remove session param from instance_get * remove session param from instance_get_by_uuid * Use nova.test.TestCase as the base test class * Ensure datetimes can be properly serialized * Fixes string formatting error * Adds API Sample tests for DiskConfig extension * Fix for correctly parsing snapshot uuid in ec2api * Autodetect nbd devices * Add Jian Wen to .mailmap * Move metadata_{host,port} to network.linux_net * Move API extension opts to api.openstack.compute * Move osapi_max_limit into api.openstack.common * Move link_prefix options into api.openstack.common * Move some opts into nova.utils * Properly scope password options * Remove the deprecated quantum v1 code and directory * add and removed fixed ip now refresh cache * Implement an XML matcher * Add support for parsing the from libvirt host capabilities * Add support for libvirt domain XML config * Add support for libvirt domain XML config * Add coverage extension to nova API * Allow rpc-silent FloatingIP exceptions in n-net * Allow conductor exceptions to pass over RPC silently * Don't leak info from libvirt LVM backed instances * Add get_backdoor_port to nova-conductor * Properly scope isolated hosts config opts * Move monkey patch config opts into nova.utils * Move zombie_instance_updated_at_window option * Move some options into nova.image.glance * Move cache_images to nova.virt.xenapi.vm_utils * Move api_rate_limit and auth_strategy to nova.api * Move api_paste_config option into nova.wsgi * Port to argparse based cfg * Cleanup the test DNS managers * Move all temporary files into a single /tmp subdir * Modified sample tests to FlavorExtraData API * Fix KeyError of log message in virt/libvirt/utils.py * Allows an instance to post encrypted password * Make nova/virt use aggregate['metadetails'] * Revert "Simplify how ephemeral disks are created and named." 
* Fix bw_usage_update issue with conductor * Correctly init XenAPIDriver in vm_vdi_cleaner.py * Set instance_ref['node'] in _set_instance_host_and_node * Consider reserved count in os-user-limits extension * Make DNS drivers inherit interface * Map cinder snapshot statuses to ec2 * i18n raise Exception messages * Set default DNS driver to No-op * Access DB values as dict not as attributes. Part 4 * Use conductor for bw_usage operations * libvirt: enable apic setting for Xen or KVM guest * Improve virt/disk/mount/nbd test coverage * Add NFS to the libvirt volume driver list * Use admin user to read Quantum port * Add vif_type to the VIF model * Make the nbd mounter respect CONF.max_nbd_devices * Imported Translations from Transifex * Raise NotImplementedError in dns_driver.DNSDriver * Unpin lxml requirements * Added sample tests to FlavorManage API * Use fixtures library for nova test fixtures * Catch ProcessExecutionError when building config drives * Fix fname concurrency tests * Imported Translations from Transifex * Make ignore_hosts and force_hosts work again * Run test objectstore server on arbitrary free port * Fix network manager ipv6 tests * Prevent creation of extraneous resource trackers * Remove unused bridge interfaces * Use conductor for migration_get() * Reset node to source in finish_revert_resize() * Simplify how ephemeral disks are created and named * Order instance faults by created_at and id * Sync RPC logging-related bits from oslo * Fix bugs in test_migrations.py * Fix regression allowing quotas to be applied to projects * Improve nova-manage usability * Add new cliutils code from oslo-incubator * Update tools/flakes to work with pydoc * Fix pep8 exclude logic for 1.3.3 * Avoid vm instance shutdown when power state is NOSTATE * Fix handling of unimplemented host actions * Fix positional arg swallow decorator * Fix minidns delete_entry to work for hostname with mixed case chars * powervm: Refactored run_command for better naming * Sync latest 
openstack.common.rpc * Ensure prep_resize arguments can be serialized * Add host to get_backdoor_port() for network api * Add agent build API support for list/create/delete/modify agent build * Added sample tests to extended status API * Imported Translations from Transifex * Make policy.json not filesystem location specific * Use conductor for resourcetracker instance_update * network managers: Pass elevated cxtx to update_dhcp * Volume backed live migration w/o shared storage * Add pyflakes option to tox * Adds API Sample tests for Quotas extension * Boot from volume without image supplied * Implements volume usage metering * Configurable exec_dirs to find rootwrap commands * Allow newer boto library versions * Add notifications when libvirtd goes down * Make update_service_capabilities() accept a list of capabilities * update mailmap to add my perferred mail * Fix test suite to use MiniDNS * Add support for new WMI iSCSI initiator API * Added sample tests to deferred delete API * On confirm_resize, update correct resource tracker * Renaming xml test class in sample tests of consoles API * remove session param from certificate_get * improve sessions for key_pair_(create,destroy) * powervm: add DiskAdapter for local volumes * Access DB values as dict not as attributes. Part 3 * Patch fake_libvirt_utils with fixtures.MonkeyPatch * Open test xenapi/vm_rrd.xml relative to tests * Reset notifier_api before each test * Reset volume_api before cinder cloud tests * Fix rpc control_exchange regression * Add generic customization hooks via decorator * add metadata support for overlapping networks * Split out part of compute's init_host * Use elevated cxtx in resource_tracker.resize_claim * Fix test_migrations for postgres * Add vpn ip/port setting support for CloudPipe * Access DB values as dict not as attributes. 
Part 2 * Enable debug in run_tests using pdb * Add POWERVM_STARTING state to powervm driver * Fix test_inject_admin_password for OSX * Multi host DHCP networking and local DNS resolving * use file instead of tap for non-blockdevice images on Xen * use libvirt getInfo() to receive number of physical CPUs * Don't run the periodic task if ticks_between_runs is below zero * Fix args to AggregateError exception * Fix typo in inherit_properties_from_image * Access DB values as dict not as attributes * Fix KeyError of log message in compute/api.py * Fix import problem in test_virt_disk_vfs_localfs * Remove start_guests_on_host_boot config option * Add aggregate_host_add and _delete to conductor * Imported Translations from Transifex * Call plug_vifs() for all instances in init_host * Make compute manager use conductor for instance_gets * Fixes HyperV compute "resume" tests * Convert datetimes for conductor instance_update * Update migration so it supports PostgreSQL * Include 'hosts' and 'metadetails' in aggregate * Verify doc/api_samples files along with the templates * Remove default_image config option * Move ec2 config opts to nova.api.ec2.cloud * Move imagecache code from nova.virt.libvirt.utils * Use flags() helper method to override config in tests * RetryFilter checks 'node' as well as 'host' * Make resize and multi-node work properly together * Migration model update for multi-node resize fix * Add version to conductor migration_update message * Validate rxtx_factor as a float * Display errors when running nosetests * Respect the base_dir_name flag in imagebackend * Add exceptions to baremetal/db/api * Clean up unused methods in scheduler/driver * Provide better error message for aggregate-create * Imported Translations from Transifex * Allow multi_host compute nodes to share dhcp ip * Add blank nova/virt/baremetal/__init__.py * Add migration_update to conductor * Remove unnecessary topic argument * Add pluggable ServiceGroup monitoring APIs * Add SSL support to 
utils.generate_glance_url() * Add eventlet db_pool use for mysql * Make compute manager use nova-conductor for instance_update * Missing instance_uuid in floating_ip notifications * Make nova-dhcpbridge use CONFIG_FILE over FLAGFILE * Rename instance_info_cache unique key constraints * Cleanup compute multi-node assignment of node * Imported Translations from Transifex * maint: remove an unused import from libvirt.utils * Encode consoleauth token in utf-8 to make it a str * nova-dhcpbridge should require the FLAGFILE is set * Added cpu_info report to HyperV Compute driver * Remove stale flags unit tests * Truncate large console logs in libvirt * Move global fixture setup into nova/test.py * Complete API samples for Hosts extension * Fix HostDeserializer to enable multiple line xml * adjust rootwrap filters for recent file injection changes * Don't hard code the xen hvmloader path * Don't update arch twice when create server * remove db access in xen driver * Imported Translations from Transifex * Move compute_driver into nova.virt.driver * Re-organize compute opts a bit * Move compute opts from nova.config * Add a CONTRIBUTING file * Compute doesn't set the 'host' field in instance * Xenapi: Don't resize down if not auto_disk_config * Cells: Re-add DB model and calls * Use more specific SecurityGroupHandler calls * Fix wait_for_deleted function in SmokeTests * Wrap log messages with _() * Add methods to Host operations to fake hypervisor * Move sql options to nova.db.sqlalchemy.session * Add debug logging to disk mount modules * Remove the libguestfs disk mount API implementation * Remove img_handlers config parameter usage * Convert file injection code to use the VFS APIs * Introduce a VFS implementation backed by the libguestfs APIs * Introduce a VFS implementation mapped to the host filesystem * Adds API for bulk creation/deletion of floating IPs * Remove obsolete config drive init.d example * Imported Translations from Transifex * Rename sql_pool_size to 
sql_max_pool_size * Detect shared storage; handle base cleanup better * Allow VMs to be resumed after a hypervisor reboot * Fix non-primitive uses of instance in compute/manager * Remove extra space in exception * Adds missing index migrations by instance/status * Convert migrations.instance_uuid to String(36) * Add missing binary * Change all tenants servers listing as policy-based * Fixes a bug in get_info in the Hyper-V Driver * refactor: extract method: connect_volume * Handle instances not being found in EC2 API responses * Pin pep8 to 1.3.3 * Return an error response if the specified flavor does not exists. (v4) * Send block device mappings to rebuild_instance * Move db lookup for block device mappings * Use CONF.import_opt() for nova.config opts * Imported Translations from Transifex * Remove nova.config.CONF * Add keystoneclient to pip-requires * Pass rpc connection to pre_start_hook * Fix typo: hpervisor=> hypervisor * Fix reversed args to call to _reschedule * Add the beginnings of the nova-conductor service * remove old baremetal driver * Remove useless function quota_usage_create * Fix calls to private method in linux_net * Drop unused PostgreSQL sequences from Folsom * Compact pre-Grizzly database migrations * Fix os-hosts extension can't return xml response correctly * Set node_availability_zone in XenAPIAggregateTestCase * Ignore editor backup files * Imported Translations from Transifex * Remove nova.flags * Remove FLAGS * Make fping extension use CONF * Use disk image path to setup lxc container * Use the auth_token middleware from keystoneclient * improve session handling around instance_ methods * add index to fixed_ips * add instance_type_extra_specs to instances * Change a toplevel function comment to a docstring * Ensure cat process is terminated * Add some sqlalchemy tweakables * Fixes an error reporting bug on Hyper-V * update api_samples add os-server-start-stop * update api_samples add os-services module * Switch to using eventlet_backdoor 
from oslo * Sync eventlet_backdoor from oslo * Added sample tests to consoles API * Fix use of 'volume' variable name * Ditch unused import and variable * Make ec2_instance_create db method consistant across db apis * Adds documentation for Hyper-V testing * Adds support for ConfigDriveV2 in Hyper-V * don't explode if a 413 didn't set Retry-After * Fix a couple uses of FLAGS * Remove nova.flags imports from scheduler code * Remove some unused imports from compute/* * Remove importing of flags from compute/* * Remove nova.flags imports from bin/* * Move nova shared config options to nova.config * Fix use_single_default_gateway * Update api_samples README.rst to use tox * Do not alias stdlib uuid module as uuidutils, since nova has uuidutils * Allow group='foo' in self.flags() for tests * updated api_samples with real hypervisor_hostname * Issue a hard shutdown if clean fails on resize up * Introduce a VFS api abstraction for manipulating disk images * Fix network RPC API backwards compat * create_db_entry_for_new_instance did not call sgh for default * Add support for backdoor_port to be returned with a rpc call * Refactor scheduling filters * Unpin amqplib and kombu requirements * Add module for loading specific classes * Make sure instance data is always refreshed * Move all mount classes into a subdirectory * Add support for resizes to resource tracker * Fixes create instance *without* config drive test * Update db entry before upate the DHCP host file * Remove gen_uuid() * Enhance compute capability filter to check multi-level * API extension for fpinging instances * Allow controller extensions to extend update/show * Isolate tests from the environment variable http_proxy * Handle image cache hashing on shared storage * fix flag type define error * Simplify libvirt volume testing code * Migrate floating ip addresses in multi_host live_migration * Add DB query to get in-progress migrations * Try hard shutdown if clean fails on resize down * Restore 
self.test_instance at LibvirtConnTestCase.setUp() * Fixes usage of migrate_instance_start * added getter methods for quantumv2 api * fix LVM backed VM logial volumes can't be deleted * Clean up __main__ execution from two tests for consistency * Imported Translations from Transifex * Update uuidutils from openstack common * Remove volume.driver and volume.iscsi * Use base image for rescue instance * Make xenapi shutdown mode explicit * Fix a bug in XenAPISession's use of virtapi * Ban db import from nova/virt * Update vol mount smoketest to wait for volume * Add missing webob to exc * Add missing exception NetworkDuplicated * Fix misuse of exists() * Rename config to vconfig * Move agent_build_get_by_triple to VirtAPI * Fix _setup_routes() signature in APIRouter * Move libvirt specific cgroups setup code out of nova.virt.disk.api * make libvirt with Xen more workable * script for configuring a vif in Xen in non-bridged mode * Upgrade pylint version to 0.26.0 * Removes fixed_ip_get_network * improve session handling around virtual_interfaces * improve sessions for reservation * improve session handling around quotas * Remove custom test assertions * Add nova option osapi_compute_unique_server_name_scope * Add REST API support for list/enable/disable nova services * Switch from FLAGS to CONF in nova.compute * Switch from FLAGS to CONF in tests * Get rid of pylint E0203 in filter_scheduler.py * Updated scheduler and compute for multiple capabilities * Switch from FLAGS to CONF in nova.db * Removed two unused imports * Remove unused functions * Fixes a bug in api.metadata.base.lookup() on Windows * Fixes a bug in nova.utils, due to Windows compatibility issues * improve session handling of dnsdomain_list * Make tox.ini run pep8/hacking checks on bin * Fix import ordering in /bin scripts * add missing opts to test_db_api.py * clean up dnsdomain_unregister * Make utils.mkfs() set label when fs=swap * Another case of dictionary access * Remove generic topic support from 
filter scheduler * Clarify server_name, hostname, host * Refactor scheduling weights * update nova.conf.sample * Check instance_type in compute capability filter * Sync latest code from oslo-incubator * Adds REST API support for Fixed IPs * Added separate bare-metal MySQL DB * Added bare-metal host manager * Remove unused volume exceptions * Adds a conf option for custom configdrive mkisofs * Fixed HyperV to get disk stats of instances drive * powervm: failed spawn should raise exception * Enable Quantum linux bridge VIF driver to use "bridge" type * Remove nova-volume DB * make diagnostics workable for libvirt with Xen * Avoid unnecessary system_metadata db lookup * Make instance_system_metadata load with instance * Add some xenapi Bittorrent tests * Move security groups and firewall ops to VirtAPI * Move host aggregate operations to VirtAPI * Simplify topic handling in network rpcapi * Sync rpc from openstack-common * Send instance_type to resize_instance * Remove instance_type db lookup in prep_resize * Send all aggregate data to remove_aggregate_host * Fix incorrect LOG.error usage in _compare_cpu * Limit formatting routes when adding resources * Removes unnecessary db query for instance type * Fix verification in test_api_samples.py * Yield in between hash runs for the image cache manager * Remove unused function require_instance_exists * Refactor resource tracker claims and test logic * Remove out-of-date comment * Make HostManager.get_all_host_states() return an iterator * Switch from FLAGS to CONF in nova.virt * 'BackupCreate' rotation parameter >= 0 * Corrects usage of db.api.network_get * Switch from FLAGS to CONF in nova.console * Map NotAuthorized to 403 in floating ips extension * Decouple EC2 API from using instance id * libvirt: Regenerates xml instead of using on-disk * Imported Translations from Transifex * Fix to include error message in instance faults * Include hostname in notification payloads * Fix quota updating during soft delete and restore 
* Fix warnings found with pyflakes * make utils.mkfs() more general * Fixes snapshot instance failure on libvirt * Make ComputeDrivers send hypervisor_hostname * Fixed instance deletion issue from Nova API * De-duplicate option: console_public_hostname * Don't verify image hashes if checksumming is disabled * Imported Translations from Transifex * Look up stuck-in-rebooting instances in manager * Use chance scheduler in EC2 tests * Send all aggregate data to add_aggregate_host * Send all migration data to finish_revert_resize * Send all migration data to revert_resize * Fix migrations when not using multi-host network * Fix bandwidth polling exception * Fixes volume attach issue on Hyper-V * Shorten self.compute.resource_tracker in test_compute.py * Cleanup nova.db.sqlalchemy.api import * Use uuidutils.is_uuid_like for uuid validation * Add uuidutils module * Imported Translations from Transifex * Switch from FLAGS to CONF in nova.scheduler * Switch from FLAGS to CONF in nova.network * Switch from FLAGS to CONF in misc modules * Switch from FLAGS to CONF in nova.api * Switch from FLAGS to CONF in bin * Remove flags.DECLARE * Move parse_args to nova.config * Forbid resizing instance to deleted instance types * Imported Translations from Transifex * Fix unused variables and wrong indent in test_compute * Remove unnecessary db call from xenapi/vmops * xenapi: place boot lock when doing soft delete * Detangle soft delete and power off * Fix signing_dir option for auth_token middleware * Fix no attribute 'STD_OUT_HANDLE' on windows * Use elevated context in disassociate_floating_ip * Remove db.instance_get* from nova/virt * sync deprecated log method from openstack-common * move python-cinderclient to pip-requires * Tiny resource tracker cleanup * Fix Quantum v2 API method signatures * add doc to standardize session usage * improve sessions around floating_ip_get_by_address * Bump the base rpc version of the network api * Eliminates simultaneous schedule race * 
Introduce VirtAPI to nova/virt * Add some hooks for managers when service starts * Fix backwards compat of rpc to compute manager * xenapi: Make agent optional * Add xenapi host_maintenance_mode() test * refactor: extract _attach_mapped_block_devices * Make bdms primitive in rpcapi.terminate_instance * Ability to specify a host restricted to admin * Improve EC2 describe_security_groups performance * Increased MAC address range to reduce conflicts * Move to a more canonicalized output from qemu-img info * Read deleted flavors when using to_xml() * Fix copy-paste bug in block_device_info_generation * Remove nova-volume scheduling support * Remove duplicate api_paste_config setting * Fixes hypervisor based image filtering on Hyper-V * make QuantumV2 support requested nic ordering * Add rxtx_factor to network migration logic * Add scheduler retries for prep_resize operations * Add call to reset quota usage * Make session.py reusable * Remove redundant code from PowerVM driver * Force earlier version of sqlalchemy * refactor: extract method vm_ref_or_raise * Use env to set environ when starting dnsmasq * pep8 fixes for nova-manage * Fix VM deletion from down compute node * Remove database usage from libvirt check_can_live_migrate_destination * Clean up xenapi VM records on failed disk attaches * Remove nose detailed error reporting * Validate is-public parameter to flavor creation * refactor: extract _terminate_volume_connections * improve sessions around compute_node_* * Fix typo in xenapi/host.py * Remove extra print line in hacking.py * Ensures compute_driver flag can be used by bdm * Add call to trigger_instance[add/remove]_security_group_refresh quantum * Validates Timestamp or Expiry time in EC2 requests * Add API samples to Admin Actions * Add live migration helper methods to fake hypervisor driver * Use testtools as the base testcase class * Clean up quantumv2.get_client * Fix getattr usage * Imported Translations from Transifex * removes the nova-volume code 
from nova * Don't elevate context when calling run_instance * remove session parameter from fixed_ip_get * Make instance_get_all() not require admin context * Fix compute tests abusing admin context * Fix use of elevated context for resize methods * Fix check for memory_mb * Imported Translations from Transifex * Fix nova-network MAC collision logic * Fix rpcapi version for new methods * Remove useless return * Change hacking.py N306 to use logical_lines * Add missing live migration methods to ComputeDriver base class * Fix hacking.py naivete regarding lines that look like imports * details the reboot behavior that a virt driver should follow * xenapi: refactor: Agent class * Send usage event on revert_resize * Fix config-file overrides for nova-dhcpbridge * Make nova-rootwrap optional * Remove duplicated definition of is_loaded() * Let scheduler know services' capabilities at startup * fetch_images() method no more needed * Fix hardcoded topic strings with constants * Save exceptions earlier in finish_resize * Correct _extract_query_params in image.glance * Fix Broken XML Namespace Handling * More robust checking for empty requested_networks * Imported Translations from Transifex * Rehydrate NetworkInfo in reboot_instance() * Update common * Use cat instead of sleep for rootwrap test * Addtional 2 packages for dev environment on ubuntu * Let VlanManager keep network's DNS settings * Improve the performance of quantum detection * Support for nova client list hosts with specific zone * Remove unused imports in setup.py * Fixes fake for testing without qemu-img * libvirt: persist volume attachments into config * Extend IPv6 subnets to /64 if network_size is set smaller than /64 * Send full migration data to finish_resize * Send full migration to confirm_resize * Send full migration to resize_instance * Migrate to fileutils and lockutils * update sample for common logging * Add availability zone extension to API samples test * Refactor: config drive related functions 
* Fix live migration volume assignment * Remove unused table options dicts * Add better log line for undefined compute_driver * Remove database usage from libvirt imagecache module * Return empty list when listing servers with bad status value * Consistent Rollback for instance creation failures * Refactor: move find_guest_agent to xenapi.agent * Fix Incorrect Exception when metadata is over 255 characters * Speed up volume and routing tests * Speed up api.openstack.compute.contrib tests * Allow loading only selected extensions * Migrate network of an instance * Don't require quantumclient when running nova-api * Handle the case where we encounter a snap shot correctly * Remove deprecated root_helper config * More specific exception handling in migration 091 * Add virt driver capabilities definition * Remove is_admin_context from sqlalchemy.api * Remove duplicate methods from network/rpcapi.py * SanISCSIDriver SSH execution fixes * Fix bad Log statement in nova-manage * Move mkfs from libvirt.utils to utils * Fixes bug Snapshotting LXC instance fails * Fix bug in a test for the scheduler DiskFilter * Remove mountpoint from parse_volume_info * limit the usage of connection_info * Sync with latest version of openstack.common.timeutils * nova-compute sends its capabilities to schedulers ASAP * Enable custom eventlet.wsgi.server log_format * Fix the fail-on-zero-tests case so that it is tolerant of no output * add port support when QuantumV2 subclass is used * Add trove classifiers for PyPI * Fix and enable pep8 E502, E712 * Declare vpn client option in pipelib * Fix nova-volume-usage-audit * Fix error on invalid delete_on_termination value * Add Server diagnostics extension api samples * Add meaningful server diagnostic information to fake hypervisor * Use instance_exists to check existence * Fix nova-volume-usage-audit * Imported Translations from Transifex * Avoid leaking BDMs for deleted instances * Deallocate network if instance is deleted in spawn * Create 
Flavors without Optional Arguments * Update policies * Add DNS records on IP allocation in VlanManager * update kwargs with args in wrap_instance_fault * Remove ComputeDriver.update_host_status() * Do not call directly vmops.attach_volume * xenapi: fix bfv behavior when SR is not attached * Use consoleauth rpcapi in nova-novncproxy * Change install_venv to use setup.py develop * Fixes syntax error in nova.tools.esx.guest_tools.py * Allow local rbd user and secret_uuid configuration * Set host prior to allocating network information * Remove db access for block devices and network info on reboot * Remove db access for block devices on terminate_instance * Check parameter 'marker' before make request to glance * Imported Translations from Transifex * Internationalize nova-manage * Imported Translations from Transifex * Fixes live_migration missing migrate_data parameter in Hyper-V driver * handles empty dhcp_domain with hostname in metadata * xenapi: Tag volumes in boot from volume case * Stops compute api import at import time * Fix imports in openstack compute tests * Make run_tests.sh fail if no tests are actually run * Implement snapshots for raw backend * Used instance uuid rather than id in remove-fixed-ip * Migrate DHCP host info during resize * read_deleted snapshot and volume id mappings * Make sure sleep can be found * Pass correct task_state on snapshot * Update run_tests.sh pep8 ignore list for pep8 1.2 * Clean up imports in test_servers * Revert "Tell SQLite to enforce foreign keys." 
* Add api samples to simple tenant usage extension * Avoid RPC calls while holding iptables lock * Add util for image conversion * Add util for disk type retrieval * Fixes test_libvirtr spawn_with_network_info test * Remove unneeded temp variable * Add version to network rpc API * Remove cast_to_network from scheduler * Tell SQLite to enforce foreign keys * Use paramiko.AutoAddPolicy for the smoketests * nova-manage doesn't validate key to update the quota * Dis-associate an auto-assigned floating IP should return proper warning * Proxy floating IP calls to quantum * Handle invalid xml request to return BadRequest * Add api-samples to Used limits extension * handle IPv6 race condition due to hairpin mode * Imported Translations from Transifex * XenAPI should only snapshot root disk * Clarify trusted_filter conf options * Fix pep8 error in bin/nova-manage * Set instance host field after resource claim * powervm: add polling timeout for LPAR stop command * Drop claim timeouts from resource tracker * Update kernel_id and ramdisk_id while rebuilding instance * Add Multiple Create extension to API sample tests * Fix typo in policy docstring * Fix reserve_block_device_name while attach volume * Always use bdm in instance_block_mapping on Xen * Centralize sent_meta definition * Move snapshot image property inheritance * Set read_deleted='yes' for instance_id_mappings * Fix XML response for return_reservation_id * Stop network.api import on network import * libvirt: ignore deleted domain while get block dev * xenapi: Refactor snapshots during resize * powervm: remove broken instance filtering * Add ability to download images via BitTorrent * powervm: exception handling improvements * Return proper error messages while associating floating IP * Create util for root device path retrieval * Remove dependency on python-ldap for tests * Add api samples to Certificates extension * Add nova-cert service to integrated_helpers * Compare lists in api samples against all matches * 
ip_protocol for ec2 security groups * Remove unneeded lines from aggregates extension API sample tests * Remove deprecated Folsom code: config convert * Make resource tracker uses faster DB query * Remove deprecated Folsom code: bandwith_poll_interval * Add TestCase.stub_module to make stubbing modules easier * Imported Translations from Transifex * Update tools hacking for pep8 1.2 and beyond * Remove outdated moduleauthor tags * remove deprecated connection_type flag * Add aggregates extension to API samples test * Update RPM SPEC to include new bandwidth plugin * Remove TestCase.assertNotRaises * Imported Translations from Transifex * Imported Translations from Transifex * Use self.flags() instead of manipulating FLAGS by hand * Use test.TestCase provided self.mox and self.stubs * Remove unnecessary setUp, tearDown and __init__ in tests * xenapi: implement resume_state_on_host_boot * Revert "Add full test environment." * Synchronize docstring with actual implementation * Num instances scheduler filter * Add api samples to cloudpipe extension * Fix CloudPipe extension XML serialization * Max I/O ops per host scheduler filter * libvirt: continue detach if instance not found * libvirt: allows attach and detach from all domains * Fixes csv list required for qemu-img create * Added compute node stats to HostState * libvirt: Improve the idempotency of iscsi detach * Pass block_device_info to destroy in revert_resize * Enable list with no dict objects to be sorted in api samples * Fixes error message for flavor-create duplicate ID * Loosen anyjson dependency to avoid clash with ceilometer * xenapi: make it easier to recover from failed migrations * Remove unnecessary check if migration_ref is not None * Bump the version of SQLAlchemy in pip-requires * optimize slightly device lookup with LXC umounts * Support for several HA RabbitMQ servers * xenapi: Removing legacy swap-in-image * xenapi: increase timeout for resetnetwork agent request * Replaced default hostname 
function from gethostname to getfqdn * Fix issues deleting instances in RESIZED state * Modified 404 error response to show specific message * Updated code to update attach_time of a volume while detaching * Check that an image is active before spawning instances * Fix issues with device autoassignment in xenapi * Deleting security group does not mark rules as deleted * Collect more accurate bandwidth data for XenServer * Zmq register opts fix in receiver * Revert explicit usage of tgt-adm --conf option * Fix booting a raw image on XenServer * Add servers/ips api_samples tests * LOG.exception() should only be used in exception handler * Fix XenServer's ability to boot xen type images * all_extensions api_samples testing for server actions * Fixes remove_export for IetAdm * libvirt: Fix _cleanup_resize * Imported Translations from Transifex * xenapi: fix undefined variable in logging message * Spelling: ownz=>owns * Fix NetAppCmodeISCSIDriver._get_lun_handle() method * Integration tests virtual interfaces API extension * Allow deletion of instance with failed vol cleanup * Fixes snapshotting of instances booted from volume * Move fakeldap.py from auth dir to tests * Remove refs to ATAoE from nova docs * Imported Translations from Transifex * Set volume status to error if scheduling fails * Update volume detach smoke test to check status * Fix config opts for Storwize/SVC volume driver * Ensure hybrid driver creates veth pair only once * Cleanup exception handling * Imported Translations from Transifex * Add lun number (0) to model_update in HpSanDriver * libvirt: return after soft reboot successfully completes * Fixes to the SolarisISCSI Driver * Fix live migration when volumes are attached * Clarify dangerous use of exceptions in unit tests * Cleanup test_api_samples:_compare_result * Fix testContextClaimWithException * Fix solidfire unit tests * Stop double logging to the console * Recreate nw_info after auto assigning floating ip * Re-generate sample config file 
* Use test.TestingException instead of duplicating it * Fix startup with DELETED instances * Fix solidfire option declaration * Restore SIGPIPE default action for subprocesses * Raise NotFound for non-existent volume snapshot create * Catch NotFound exception in FloatingIP add/remove * Adds API sample testing for rescue API extension * Fix bugs in resource tracker and cleanup * Replace builtin hash with MD5 to solve 32/64-bit issues * Properly create and delete Aggregates * No stack trace on bad nova aggregate-* command * Clean up test_state_revert * Fix aggregate_hosts.host migration for sqlite * Call compute manager methods with instance as keyword argument * Adds deserialization for block_device_mapping * Fix marker pagination for /servers * Send api.fault notification on API service faults * Always yield to other greenthreads after database calls * fix unused import * Don't include auto_assigned ips in usage * Correct IetAdm remove_iscsi_target * Cleanup unused import in manager.py * xapi: fix create hypervisor pool * Bump version to 2013.1 * Add Keypairs extension to API samples test * sample api testing for os-floating-ips extension * Update quota when deleting volume that failed to be scheduled * Update scheduler rpc API version * Added script to find unused config options * Make sure to return an empty subnet list for a network without sunbet * Fix race condition in CacheConcurrencyTestCase * Makes scheduler hints and disk config xml correct * Add lookup by ip via Quantum for metadata service * Fix over rate limit error response * Add deserialization for multiple create and az * Fix doc/README.rst to render properly * Add user-data extension to API samples tests * Adds API sample testing for Extended server attributes extension * Inherit the base images qcow2 properties * Correct db migration 91 * make ensure_default_security_group() call sgh * add ability to clone images * add get_location method for images * Adds new volume API extensions * Add console 
output extension to API samples test * Raise BadRequest while creating server with invalid personality * Update 'unlimited' quota value to '-1' in db * Modified 404 error response for server actions * Fix volume id conversion in nova-manage volume * Improve error handling of scheduler * Fixes error handling during schedule_run_instance * Include volume_metadata with object on vol create * Reset the task state after backup done * Allows waiting timers in libvirt to raise NotFound * Improve entity validation in volumes APIs * Fix volume deletion when device mapper is used * Add man pages * Make DeregisterImage respect AWS EC2 specification * Deserialize user_data in xml servers request * Add api samples to Scheduler hints extension * Include Schedule Hints deserialization to XML API * Add admin actions extension * Allow older versions of libvirt to delete vms * Add security groups extension to API samples test * Sync a change to rpc from openstack-common * Add api_samples tests for servers actions * Fix XML deserialization of rebuild parameters * All security groups not returned to admins by default * libvirt: Cleanup L2 and L3 rules when confirm vm resize * Corrects use of instance_uuid for fixed ip * Clean up handling of project_only in network_get * Add README for doc folder * Correct typo in memory_mb_limit filter property * Add more useful logging around the unmount fail case * Imported Translations from Transifex * Make compute/manager.py use self.host instead of FLAGS.host * Add a resume delete on volume manager startup * Remove useless _get_key_name() in servers API * Add entity body validation helper * Add 422 test unit test for servers API * Use tmpdir and avoid leaving test files behind * Includes sec group quota details in limits API response * Fixes import issue on Windows * Overload comment in generated SSH keys * Validate keypair create request body * Add reservations parameter when cast "create_volume" to volume manager * Return 400 if create volume 
snapshot force parameter is invalid * Fix FLAGS.volumes_dir help message * Adds more servers list and servers details samples * Makes key_name show in details view of servers * Avoid VM task state revert on instance termination * Avoid live migrate overwriting the other task_state * Backport changes from Cinder to Nova-Volume * Check flavor id on resize * Rename _unplug_vifs to unplug_vifs * PowerVM: Establish SSH connection at use time * libvirt: Fix live block migration * Change comment for function _destroy * Stop fetch_ca from throwing IOError exceptions * Add 'detaching' to volume status * Reset task state before rescheduling * workaround lack of quantum/nova floatingip integration * fix rpcapi version * Added description of operators for extra_specs * Convert to ints in VlanManager.create_networks * Remove unused AddressAlreadyAllocated exception * Remove an unused import * Make ip block splitting a bit more self documenting * Prevent Partial terminations in EC2 * Add flag cinder_endpoint_template to volume.cinder * Handle missing network_size in nova-manage * Adds API sample test for Flavors Extra Data extension * More specific lxml versions in tools/pip-requires * Fixes snat rules in complex networking configs * Fix flavor deletion when there is a deleted flavor * Make size optional when creating a volume from a snapshot * Add documentation for scheduler filters scope * Add and fix tests for attaching volumes * Fix auth parameter passed to libvirt openAuth() method * xapi: Fix live block migration * Add a criteria to sort a list of dict in api samples * delete a module never used * Update SolidFire volume driver * Adds get_available_resource to hyperv driver * Create image of volume-backed instance via native API * Improve floating IP delete speed * Have device mapping use autocreated device nodes * remove a never used import * fix unmounting of LXC containers in the presence of symlinks * Execute attach_time query earlier in migration 98 * Add 
ServerStartStop extension API test * Set install_requires in setup.py * Add Server Detail and Metadata tests * xenapi: Make dom0 serialization consistent * Refer to correct column names in migration 98 * Correct ephemeral disk cache filename * Stop lock decorator from leaving tempdirs in tests * Handle missing 'provider_location' in rm_export * Nail the pip requirement at 1.1 * Fix typo in tgtadm LOG.error() call * Call driver for attach/detach_volume * rbd: implement create_volume_from_snapshot * Use volume driver specific exceptions * Fake requests in tests should be to v1 * Implement paginate query use marker in nova-api * Simplify setting up test notifier * Specify the conf file when creating a volume * Generate a flavorid if needed at flavor creation * Fix EC2 cinder volume creation as an admin user * Allow cinder catalog match values to be configured * Fix synchronized decorator path cleanup * Fix and cleanup compute node stat tracking * avoid the buffer cache when copying volumes * Add missing argument to novncproxy websockify call * Use lvs instead of os.listdir in _cleanup_lvm * Fixing call to hasManagedSaveImage * Fix typo in simple_tenant_usage tests * Move api_samples to doc dir * Add a tunable to control how many ARPs are sent * Get the extension alias to compose the path to save the api samples * Add scope to extra_specs entries * Use bare container format by default * Sync some updates from openstack-common * Fix simple_tenant_usage's handing of future end times * Yield to another greenthread when some time-consuming task finished * Automatically convert device names * Fix creation of iscsi targets * Makes sure new flavors default to is_public=True * Optimizes flavor_access to not make a db request * Escape ec2 XML error responses * Skip tests in OSX due to readlink compat * Allow admins to de-allocate any floating IPs * Fix xml metadata for volumes api in nova-volume * Re-attach volumes after instance resize * Speed up creating floating ips * Adds 
API sample test for limits * Fix vmwareapi driver spawn() signature * Fix hyperv driver spawn() signature * Add API samples to images api * Add method to manage 'put' requests in api-sample tests * Add full python path to test stubbing modules for libvirt * Rename imagebackend arguments * Fixes sqlalchemy.api.compute_node_get_by_host * Fix instances query for compute stats * Allow hard reboot of a soft rebooting instance * On rebuild, the compute.instance.exists * Fix quota reservation expiration * Add api sample tests for flavors endpoint * Add extensions for flavor swap and rxtx_factor * Address race condition from concurrent task state update * Makes sample testing handle out of order output * Avoid leaking security group quota reservations * Save the original base image ref for snapshots * Fixed boot from snapshot failure * Update zmq context cleanup to use term * Fix deallocate_fixed_ip invocation * fix issues with Nova security groups and Quantum * Clear up the .gitignore file * Allow for deleting VMs from down compute nodes * Update nova-rpc-zmq-receiver to load nova.conf * FLAG rename: bandwith_poll_*=>bandwidth_poll_* * Spelling: Persistant=>Persistent * Fix xml metadata for volumes extension * delete unused valiables * Clean up non-spec output in flavor extensions * Adds api sample testing for extensions endpoint * Makes api extension names consistent * Fixes spawn method signature for PowerVM driver * Spelling fix Retrive=> Retrieve * Update requires to glanceclient >=0.5.0 * Sort API extensions by alias * Remove scheduler RPC API version 1.x * Add version 2.0 of the scheduler RPC API * Remove some remnants of VSA support * hacking: Add driver prefix recommendation * Implements PowerVM get_available_resource method * Add a new exception for live migration * Assume virt disk size is consumed by instances * External locking for image caching * Stop using scheduler RPC API magic * Adds api sample testing for versions * Do not run pylint by default * Remove 
compute RPC API version 1.x * Add version 2.0 of compute RPC API * Accept role list from either X-Roles or X-Role * Fix PEP8 issues * Fix KeyError when test_servers_get fails * Update nova.conf.sample * Fixes backwards compatible rpc schedule_run * Include launch-index in openstack style metadata * Port existing code to utils.ensure_tree * Correct utils.execute() to check 0 in check_exit_code * Add the self parameter to NoopFirewallDriver methods * request_spec['instance_uuids'] as list in resize * Fix column variable typo * Add ops to aggregate_instance_extra_specs filter * Implement project specific flavors API * Correct live_migration rpc call in test * Allow connecting to a ssl-based glance * Move ensure_tree to utils * Define default mode and device_id_string in Mount * Update .mailmap * Fix path to example extension implementation * Remove test_keypair_create_quota_limit() * Remove duplicated test_migrate_disk_and_power_off() * Add missing import webob.exc * Fix broken SimpleScheduler.schedule_run_instance() * Add missing user_id in revoke_certs_by_user_and_project() * Rename class_name to project_id * Use the compute_rpcapi instance not the module * Remove duplicated method VM_migrate_send * Add missing context argument to start_transfer calls * Remove unused permitted_instance_types * Add lintstack error checker based on pylint * Make pre block migration create correct disk files * Remove unused and old methods in hyperv and powervm driver * Trap iscsiadm error * Check volume status before detaching * Simplify network create logic * Clean up network create exception handling * Adding indexes to frequently joined database columns * Ensure hairpin_mode is set whenever vifs is added to bridge * Returns hypervisor_hostname in xml of extension * Adds integration testing for api samples * Fix deallocate_fixed_ip() call by unifying signature * Make instance_update_and_get_original() atomic * Remove unused flags * Remove test_instance_update_with_instance_id test * 
Remove unused instance id-to-uuid function * Re-work the handling of firewall_driver default * Include CommonConfigOpts options in sample config * Re-generate nova.conf.sample * Ensure log formats are quoted in sample conf * Don't include hostname and IP in generated sample conf * Allow generate_sample.sh to be run from toplevel dir * Let admin list instances in vm_states.DELETED * Return actual availability zones * Provide a hint for missing EC2 image ids * Check association when removing floating ip * Add public network support when launching an instance * Re-define libvirt domain on "not found" exception * Add two prereq pkgs to nova devref env guide * Fix hyperv Cfgs: StrOpt to IntOpt * continue deleting instance even if quantum port delete fails * Typo fix: existant => existent * Fix hacking.py git checks to propagate errors * Don't show user-data when its not sent * Clarify nwfilter not found error message * Remove unused _create_network_filters() * Adds missing assertion to FloatingIP tests * Restore imagebackend in test_virt_drivers.py * Add nosehtmloutput as a test dependency * Remove unused exceptions from nova/exception.py * Cleanup pip dependencies * Make glance image service check base exception classes * Add deprecated warning to SimpleScheduler * Have compute_node_get() join 'service' * XCP-XAPI version fix * add availability_zone to openstack metadata * Remove stub_network flag * Implements sending notification on metadata change * Code clean up * Implement network creation in compute API * Debugged extra_specs_ops.py * Fix typo in call in cinder.API unreserve_volume * xenapi: Tag nova volumes during attach_volume * Allow network to call get_fixed_ip_by_address * Add key_name attribute in XML servers API * Fix is_admin check via policy * Keep the ComputeNode model updated with usage * Remove hard-coded 'admin' role checking and use policy instead * Introduce ImagePropertiesFilter scheduler filter * Return HTTP 422 on bad server update PUT request * 
Makes sure instance deletion ok with deleted data * OpenStack capitalization added to HACKING.rst * Fix get_vnc_console race * Fix a TypeError that occurs in _reschedule * Make missing imports flag in hacking settable * Makes sure tests don't leave lockfiles around * Update FilterScheduler doc * Disable I18N in Nova's test suites * Remove logging in volume tests * Refactor extra specs matching into a new module * Fix regression in compute_capabilities filter * Refactor ComputeCapabilitiesFilter test cases * Revert per-user-quotas * Remove unused imports * Fix PEP8 issues * Sync changes from openstack common * Implement GET (show) in OS API keypairs extension * Fix spelling typos * Ignoring *.sw[op] files * xenapi: attach root disk during rescue before boot * Allows libvirt to set a serial number for a volume * Adds support for serial to libvirt config disks * Remove unused variables * Always create the run_instance records locally * Fix use of non-existant var pool * Adds Hyper-V support in nova-compute (with new network_info model), including unit tests * Update sqlite to use PoolEvents for regexp * Remove unused function in console api * Allow nova to guess device if not passed to attach * Update disk config to check for 'server' in req * Changes default behavior of ec2 * Make ComputeFilter verify compute-related instance properties * Collect instance capabilities from compute nodes * Move volume size validation to api layer * Change IPtablesManager to preserve packet:byte counts * Add get_key_pair to compute API * Defined IMPL in global ipv6 namespace * xenapi: remove unnecessary json decoding of injected_files * Remove unnecessary try/finally from snapshot * Port pre_block_migration to new image caching * Adding port attribute in network parameter of boot * Add support for NFS-based virtual block devices * Remove assigned, but unused variables from nova/db/sqlalchemy/api.py * xenapi: Support live migration without pools * Restore libvirt block storage 
connections on reboot * Added several operators on instance_type_extra_specs * Revert to prior method of executing a libvirt hard_reboot * Set task_state=None when finished snapshotting * Implement get_host_uptime in libvirt driver * continue config-drive-v2, add openstack metadata api * Return values from wrapped functions in decorators * Allow XML payload for volume creation * Add PowerVM compute driver and unit tests * Revert task_state on failed instance actions * Fix uuid related bug in console/api * Validate that min_count & max_count parameters are numeric * Allow stop API to be called in Error * Enforce quota limitations for instance resize * Fix rpc error with live_migration * Simple checks for instance user data * Change time.sleep to greenthread.sleep * Add missing self. for parent * Rewrite image code to use python-glanceclient * Fix rpc error with live_migration * volumes: fix check_for_export() in non-exporting volume drivers * Avoid {} and [] as default arguments * Improve bw_usage_update() performance * Update extra specs calls to use deleted: False * Don't stuff non-db data into instance dict * Fix type error in state comparison * update python-quantumclient dependency to >=2.0 * Key auto_disk_config in create server off of ext * Implement network association in OS API * Fix TypeError conversion in API layer * Key requested_networks off of network extension * Key config_drive off of config-drive extension * Make sure reservations is initialized * import module, not type * Config drive v2 * Don't accept key_name if not enabled * Fix HTTP 500 on bad server create * Default behavior should restrict admins to tenant for volumes * remove nova code related to Quantum v1 API * Make sure ec2 mapping raises proper exceptions * Send host not ComputeNode into uptime RPC call * Making security group refresh more specific * Sync with latest version of openstack.common.cfg * Sync some cleanups from openstack.common * maint: compare singletons with 'is' not '==' 
* Compute restart causes period of network 'blackout' * Revert "Remove unused add_network_to_project() method" * Add error log for live migration * Make FaultWrapper handle exception code = None * Don't accept scheduler_hints if not enabled * Avoid double-reduction of quota for repeated delete * Traceback when over allocating IP addresses * xenapi: ensure all calls to agent get logged * Make update_db an opt arg in scheduler manager * Key min_count, max_count, ret_res_id off of ext * Key availability_zone in create server off of ext * Fix the inject_metadata_into_fs in the disk API * Send updated instance model to schedule_prep_resize * Create unique volumes_dir for testing * Fix stale instances being sent over rpc * Fix setting admin_pass in rescue command * Key user_data in create server off of extension * Key block_device_mapping off of volume extension * Moves security group functionality into extension * Adds ability to inherit wsgi extensions * Fixes KeyError when trying to rescue an instance * Make TerminateInstances compatible with EC2 api * Uniqueness checks for floating ip addresses * Driver for IBM Storwize and SVC storage * scheduler prep_resize should not update instance['host'] * Add a 50 char git title limit test to hacking * Fix a bug on remove_volume_connection in compute/manager.py * Fix a bug on db.instance_get_by_uuid in compute/manager.py * Make libvirt_use_virtio_for_bridges flag works for all drivers * xenapi: reduce polling interval for agent * xenapi: wait for agent resetnetwork response * Fix invalid exception format strings * General host aggregates part 2 * Update devref for general host aggregates * Cleanup consoles test cases * Return 409 error if get_vnc_console is called before VM is created * Move results filtering to db * Prohibit file injection writing to host filesystem * Added updated locations for iscsiadm * Check against unexpected method call * Remove deprecated use Exception.message * Remove temporary hack from 
checks_instance_lock * Remove temporary hack from wrap_instance_fault * Fix up some instance_uuid usage * Update vmops to access metadata as dict * Improve external locking on Windows * Fix traceback when detaching volumes via EC2 * Update RPC code from common * Fixes parameter passing to tgt-admin for iscsi * Solve possible race in semaphor creation * Rename private methods of compute manager * Send full instance to compute live_migration * Add underscore in front of post_live_migration * Send full instance to scheduler live_migration * Send full instance to run_instance * Use dict style access for image_ref * Use explicit arguments in compute manager run_instance * Remove topic from scheduler run_instance * Use explicit args in run_instance scheduler code * Update args to _set_vm_state_and_notify * Reduce db access in prep_resize in the compute manager * Remove instance_id fallback from cast_to_compute_host() * Remove unused InstanceInfo class * Adds per-user-quotas support for more detailed quotas management * Remove list_instances_detail from compute drivers * Move root_helper deprecation warning into execute * Flavor extra specs extension use instance_type id * Fix test_resize_xcp testcase - it never ran * tests: avoid traceback warning in test_live_migration * ensure_tree calls mkdir -p * Only log deprecated config warnings once * Handle NetworkNotFound in _shutdown_instance * Drop AES functions and pycrypto dependency * Simplify file hashing * Allow loaded extensions to be checked from servers * Make extension aliases consistent * Remove old exception type * Fix test classes collision * Remove unused variables * Fix notification logic * Improve external lock implementation * maint: remove an unused import in libvirt.driver * Require eventlet >= 0.9.17 * Remove **kwargs from prep_resize in compute manager * Updates to the prep_resize scheduler rpc call * Migrate a notifier patch from common: * Update list_instances to catch libvirtError * Audit log messages 
in nova/compute/api.py * Rename _self to self according to Python convention * import missing module time * Remove unused variables * Handle InstanceNotFound in libvirt list_instances * Fix broken pep8 exclude processing * Update reset_db to call setup if _DB is None * Migrate a logging change from common: * Send 'create volume from snapshot' to the proper host * Fix regression with nova-manage floating list * Remove unused imports * Simple refactor of some db api tests * fix unmounting of LXC containers * Update usage of 'ip' to handle more return codes * Use function registration for policy checks * Check instance lock in compute/api * Fix a comment typo in db api * Audit log messages in nova/compute/manager.py * XenAPI: Add script to destroy cached images * Fix typo in db test * Fix issue with filtering where a value is unicode * Avoid using logging in signal handler * Fix traceback when using s3 * Don't pass kernel args to Xen HVM instances * Sync w/ latest openstack common log.py * Pass a full instance to rotate_backups() * Remove agent_update from the compute manager * Move tests.test_compute_utils into tests.compute * Send a full instance in terminate_instance * maint: don't require write access when reading files * Fix get_diagnostics RPC arg ordering * Fix failed iscsi tgt delete errors with new tgtadm * Deprecate root_helper in favor of rootwrap_config * Use instance_get instead of instance_by * Clarify TooManyInstances exception message * Setting root passwd no longer fails silently * XenAPI: Fix race-condition with cached images * Prevent instance_info_cache from being altered post instance * Update targets information when creating target * Avoid recursion from @refresh_cache * Send a full instance in change_instance_metadata * Send a full instance in unrescue_instance * Add check exit codes for vlans * Compute: Error out instance on rebuild and resize * Partially revert "Remove unused scheduler functions" * Use event.listen() instead of deprecated 
listeners kwarg * Avoid associating floating IP with two instances * Tidy up nova.image.glance * Fix arg to get_instance_volume_block_device_info() * Send a full instance in snapshot_instance * Send a full instance in set_admin_password * Send a full instance in revert_resize * Send a full instance in rescue_instance * Send a full instance in remove_volume_connection * Send a full instance in rollback_live_migration_at_destination * Send a full instance in resume_instance * Send a full instance in resize_instance * Send a full instance in reset_network * Convert virtual_interfaces to using instance_uuid * Compute: VM-Mode should use instance dict * Fix image_type=base after snapshot * Send a full instance in remove_fixed_ip_from_instance * Send a full instance in rebuild_instance * Reverts fix lp1031004 * sync openstack-common log changes with nova * Set default keystone auth_token signing_dir loc * Resize.end now includes the correct instance_type * Fix rootwrapper with tgt-admin * Use common parse_isotime in GlanceImageService * Xen: VHD sequence validation should handle swap * Revert "Check for selinux before setting up selinux." 
* reduce debugging from utils.trycmd() * Avoid error during snapshot of ISO booted instance * Add a link from HACKING to wiki GitCommitMessages page * Instance cleanups from detach_volumes * Check for selinux before setting up selinux * Prefer instance in reboot_instance * maint: libvirt imagecache: remove redundant interpreter spec * Support external gateways in VLAN mode * Turn on base image cleanup by default * Make compute only auto-confirm its own instances * Fix state logic for auto-confirm resizes * Explicitly send primitive instances via rpc * Allow _destroy_vdis if a mapping has no VDI * Correct host count in instance_usage_audit_log extension * Return location header on volume creation * Add persistent volumes for tgtd * xenapi: Use instance uuid when calling DB API * Fix HACKING violation in nova/api/openstack/volume/types.py * Remove ugly instance._rescue hack * Convert to using dict style key lookups in XenAPI * Implements notifications for more instance changes * Fix ip6tables support in xenapi bug 934603 * Moving where the fixed ip deallocation happens * Sanitize xenstore keys for metadata injection * Don't store system_metadata in xenstore * use REDIRECT to forward local metadata request * Only enforce valid uuids if a uuid is passed * Send a full instance in pre_live_migration * Send a full instance in power_on_instance and start_instance * Send a full instance in power_off_instance and stop_instance * Make instance_uuid backwards compat actually work * Send a full instance via rpc for post_live_migration_at_destination * Send a full instance via rpc for inject_network_info * Send a full instance via rpc for inject_file * Send a full instance via rpc for get_vnc_console * Remove get_instance_disk_info from compute rpcapi * Send a full instance via rpc for get_diagnostics * Send a full instance via rpc for finish_revert_resize * Ensure instance is moved to ERROR on suspend failure * Avoid using 'is' operator when comparing strings * Revert "Add 
additional capabilities for computes" * Allow power_off when instance doesn't exist * Fix resizing VDIs on XenServer >= 6 * Refactor glance image service code * Don't import libvirt_utils in disk api * Call correct implementation for quota_destroy_all_by_project * Remove return values from some compute RPC methods * Reinstate instance locked error logging * Send a full instance via rpc for finish_resize * Fix exception handling in libvirt attach_volume() * Convert fixed_ips to using instance_uuid * Trim volume type representation * Fix a couple of PEP8 nits * Replace subprocess.check_output with Popen * libvirt driver: set os_type to support xen hvm/pv * Include architecture in instance base options passed to the scheduler * Fix typo of localhost's IP * Enhance nova-manage to set flavor extra specs * Send a full instance via rpc for detach_volume * Remove unused methods from compute rpcapi * Send a full instance via rpc for confirm_resize * Send a full instance via rpc for check_can_live_migrate_source * Send a full instance via rpc for check_can_live_migrate_destination * Remove unused scheduler functions * Send a full instance via rpc for attach_volume * Send a full instance via rpc for add_fixed_ip_to_instance * Send a full instance via rpc for get_console_output * Send a full instance via rpc for suspend_instance * Send a full instance via rpc for (un)pause_instance * Don't use rpc to lock/unlock an instance * Convert reboot_instance to take a full instance * Update decorators in compute manager * Include name in a primitive Instance * Shrink Simple Scheduler * Allow soft deletes from any state * Handle NULL deleted_at in migration 112 * Add support for snapshots and volume types to netapp driver * Inject instance metadata into xenstore * Add missing tempfile import to libvirt driver * Fix docstring for SecurityGroupHandlerBase * Don't log debug auth token when using cinder * Remove temporary variable * Define cross-driver standardized vm_mode values * Check 
for exception codes in openstack API results * Add missing parameters to novas cinder api * libvirt driver: set driver name consistently * Allow floating IP pools to be deleted * Fixes console/vmrc_manager.py import error * EC2 DescribeImageAttribute by kernel/ramdisk * Xen: Add race-condition troubleshooting script * Return 400 in get_console_output for bad length * update compute_fill_first_cost_fn docstring * Xen: Validate VHD footer timestamps * Xen: Ensure snapshot is torn down on error * Provide rootwrap filters for nova-api-metadata * Fix a bug in compute_node_statistics * refactor all uses of the `qemu-img info` command * Xen: Fix snapshots when use_cow=True * tests: remove misleading docstrings on libvirt tests * Update NovaKeystoneContext to use jsonutils * Use compute_driver in vmware driver help messages * Use compute_driver in xenapi driver help messages * Add call to get hypervisor statistics * Adds xcp disk resize support * Log snapshot UUID and not OpaqueRef * Remove unused user_id and project_id arguments * Fix wrong regex in cleanup_file_locks * Update jsonutils from openstack-common * Return 404 when attempting to remove a non-existent floating ip * Implements config_drive as extension * use boto's HTTPResponse class for versions of boto >=2.5.2 * Migrations for deleted data for previously deleted instances * Add image_name to create and rebuild notifications * Make it clear subnet_bits is unused in ipam case * Remove unused add_network_to_project() method * Adding networking rules to vm's on compute service startup * Avoid unrecognized content-type message * Updates migration 111 to work w/ Postgres * fixes for nova-manage not returning a full list of fixed IPs * Adds non_inheritable_image_properties flag * Add git commit message validation to hacking.py * Remove unnecessary use of with_lockmode * Improve VDI chain logging * Remove profane words * Adds logging for renaming and hardlinking * Don't create volumes if an incorrect size was given * 
set correct SELinux context for injected ssh keys * Fixes nova-manage fixed list with deleted networks * Move libvirt disk config setup out of main get_guest_config method * Refactor libvirt imagebackend module to reduce code duplication * Move more libvirt disk setup into the imagebackend module * Don't hardcode use of 'virtio' for root disk in libvirt driver * Ensure to use 'hdN' for IDE disk device in libvirt driver * Don't set device='cdrom' for all disks in libvirt driver * Move setup of libvirt disk cachemode into imagebackend module * Get rid of pointless 'suffix' parameter in libvirt imagebackend * Revert "Attach ISO as separate disk if given proper instruction" * Ensure VHDs in staging area are sequenced properly * Fix error in error handler in instance_usage_audit task * Fix SQL deadlock in quota reservations * Ensure 413 response for security group over-quota * fixes for nova-manage network list if network has been deleted * Allow NoMoreFloatingIps to bubble up to FaultWrapper * Fix cloudpipe keypair creation. 
Add pipelib tests * Don't let failure to delete filesystem block deletion of instances in libvirt * Static FaultWrapper status_to_type map * Make flavorextradata ignore deleted flavors * Tidy up handling of exceptions in floating_ip_dns * Raise NotImplementedError, not NotImplemented singleton * Fix the mis-use of NotImplemented * Update FilterSchedulerTestCase docstring * Remove unused testing.fake * Make snapshot work for stopped VMs * Split ComputeFilter up * Show all absolute quota limits in /limits * Info log to see which compute driver has loaded * Rename get_lock() to _get_lock() * Remove obsolete line in host_manager * improve efficiency of image transfer during migration * Remove unused get_version_from_href() * Add debug output to RamFilter * Fixes bare-metal spawn error * Adds generic retries for build failures * Fix docstring typo * Fixes XenAPI driver import in vm_vdi_cleaner * Display key_name only if keypairs extension is used * Fix EC2 CreateImage no_reboot logic * Reject EC2 CreateImage for instance-store * EC2 DescribeImages reports correct rootDeviceType * Support EC2 CreateImage API for boot-from-volume * remove unused clauses[] variable * Partially implements blueprint xenapi-live-migration * Improved VM detection for bandwidth polling (XAPI) * Sync jsonutils from openstack-common * Adding granularity for quotas to list and update * Remove VDI chain limit for migrations * Refactoring required for blueprint xenapi-live-migration * Add the plugin framework from common; use and test * Catch rpc up to the common state-of-the-art * Support requested_networks with quantum v2 * Return 413 status on over-quota in the native API * Fix venv wrapper to clean *.pyc * Use all deps for tools/hacking.py tests in tox * bug 1024557 * General-host-aggregates part 1 * Attach ISO as separate disk if given proper instruction * Extension to show usage of limited resources in /limits response * Fix SADeprecationWarning: useexisting is deprecated * Fix spelling in 
docstrings * Fix RuntimeWarning nova_manage not found * Exclude openstack-common from pep8 checks * Use explicit destination user in xenapi rsync call * Sync gettextutils fixes from openstack-common * Sync importutils from openstack-common * Sync cfg from openstack-common * Add SKIP_WRITE_GIT_CHANGELOG to setup.py * Remove unnecessary logging from API * Sync a commit from openstack-common * Fix typo in docstring * Remove VDI chain limit for snapshots * Adds snapshot_attached_here contextmanager * Change base rpc version to 1.0 in compute rpcapi * Use _lookup_by_name instead of _conn.lookupByName * Use the dict syntax instead of attribute to access db objects * Raise HTTP 500 if service catalog is not json * Floating_ip create /31,32 shouldn't silent error * Convert remaining network API casts to calls * network manager returns empty list, not raise an exception * add network creation call to network.api.API * overriden VlanManager.create_networks must return a result * When over quota for floating ips, return HTTPRequestEntityTooLarge * Remove deprecated auth-related db code * Fix .mailmap to generate unique AUTHORS list * Imports base64 to fix xen file injection * Remove deprecated auth from GlanceImageService * Adds bootlocking to the xenserver suspend and resume * ensure libguestfs mounts are cleaned up * Making docs pretty! * allows setting accessIPvs to null via update call * Re-add nova.virt.driver import to xenapi driver * Always attempt to delete entire floating IP range * Adds network labels to the fixed ips in usages * only mount guest image once when injecting files * Remove unused find_data_files function in setup.py * Use compute_api.get_all in affinity filters * Refactors more snapshot code into vm_utils * Clarifying which vm_utils functions are private * Refactor instance_usage_audit. 
Add audit tasklog * Fixes api fails to unpack metadata using cinder * Remove deprecated auth docs * Raise Failure exception when setting duplicate other_config key * Split xenapi agent code out to nova.virt.xenapi.agent * ensure libguestfs has completed before proceeding * flags documentation to deprecate connection_type * refactor baremetal/proxy => baremetal/driver * refactor xenapi/connection => xenapi/driver * refactor vmwareapi_conn => vmwareapi/driver * Don't block instance delete on missing block device volume * Adds diagnostics command for the libvirt driver * associate_floating_ip an ip already in use * When deleting an instance, avoid freakout if iscsi device is gone * Expose over-quota exceptions via native API * Fix snapshots tests failing bug 1022670 * Remove deprecated auth code * Remove deprecated auth-related api extensions * Make pep8 test work on Mac * Avoid lazy-loading errors on instance_type * Fetch kernel/ramdisk images directly * Ignore failure to delete kernel/ramdisk in xenapi driver * Boot from volume for Xen * Fix 'instance %s: snapshotting' log message * Fix KeyError 'key_name' when KeyPairExists raised * Propagate setup.py change from common * Properly name openstack.common.exception * Janitorial: Catch rpc up with a change in common * Make reboot work for halted xenapi instances * Removed a bunch of cruft files * Update common setup code to latest * fix metadata file injection with xen * Switch to common notifiers * Implements updating complete bw usage data * Fix rpc import path in nova-novncproxy * This patch stops metadata from being deleted when an instance is deleted * Set the default CPU mode to 'host-model' for Libvirt KVM/QEMU guests * Fallback to fakelibvirt in test_libvirt.py test suite * Properly track VBD and VDI connections in xenapi fake * modify hacking.py to not choke on the def of _() * sort .gitignore for readability * ignore project files for eclipse/pydev * Add checks for retrieving deleted instance metadata for 
notification events * Allow network_uuids that begin with a prefix * Correct typo in tools/hacking.py l18n -> i18n * Add *.egg* to .gitignore * Remove auth-related nova-manage commands * Remove unnecessary target_host flag in xenapi driver tests * Remove unnecessary setUp() method in xenapi driver tests * Finish AUTHORS transition * Don't catch & ignore exceptions when setting up LXC container filesystems * Ensure system metadata is sent on new image creation * Distinguish over-quota for volume size and number * Assign service_catalog in NovaKeystoneContext * Fix some hacking violations in the quantum tests folsom-2 -------- * Fix missing nova.log change to nova.openstack.common.log * Add Cinder Volume API to Nova * Modifies ec2/cloud to be able to use Cinder * Fix nova-rpc-zmq-receiver * Drop xenapi session.get_imported_xenapi() * Fix assertRaises(Exception, ...) HACKING violation * Make possible to store snapshots not in /tmp directory * Prevent file injection writing to host filesystem * Implement nova network API for quantum API 2.0 * Expand HACKING with commit message guidelines * Add ServiceCatalog entries to enable Cinder usage * Pass vdi_ref to fake.create_vbd() not a string * Switch to common logging * use import_object_ns for compute_driver loading * Add compatibility for CPU model config with libvirt < 0.9.10 * Sync rpc from openstack-common * Redefine the domain's XML on volume attach/detach * Sync jsonutils from openstack-common * Sync iniparser from openstack-common * Sync latest importutils from openstack-common * Sync excutils from openstack-common * Sync cfg from openstack-common * Add missing gettextutils from openstack-common * Run hacking tests as part of the gate * Remove duplicate volume_id * Make metadata content match the requested version of the metadata API * Create instance in DB before block device mapping * Get hypervisor uptime * Refactoring code to kernel Dom0 plugin * Ability to read deleted system metadata records * Add check for no 
domains in libvirt driver * Remove passing superfluous read_deleted argument * Flesh out the README file with a little more useful information * Remove unused 'get_open_port' method from libvirt utils * deallocate_fixed_ip attempts to update deleted ip * Dom0 plugin now returns data in proper format * Add PEP8 checks back for Dom0 plugins * Add missing utils declaration to RPM spec * Fixes bug 1014194, metadata keys are incorrect for kernel-id and ramdisk-id * Clean up cruft in nova.image.glance * Retry against different Glance hosts * Fix some import ordering HACKING violations * Deal with unknown instance status * OS API should return SHUTOFF, not STOPPED * Implement blueprint ec2-id-compatibilty * Add multi-process support for API services * Allow specification of the libvirt guest CPU model per host * Refactor Dom0 Glance plugin * Switch libvirt get_cpu_info method over to use config APIs * Remove tpool stub in xenapi tests * Use setuptools-git plugin for MANIFEST * Remove duplicate check of server_dict['name'] * Add missing nova-novncproxy to tarballs * Add libvirt config classes for handling capabilities XML doc * Refactor libvirt config classes for representing CPU models/features * Fix regression in test_connection_to_primitive libvirt testcase * Rename the instance_id column in instance_info_caches * Rename GlanceImageService.get to download * Use LOG.exception instead of logging.exception * Align run_tests.py pep8 with tox * Add hypervisor information extension * Remove GlanceImageService.index in favor of detail * Swap VDI now uses correct name label * Remove image service show_by_name method * Cleanup of image service code * Adds default fall-through to the multi scheduler. 
Fixes bug 1009681 * Add missing netaddr import * Make nova list/show behave nicely on instance_type deletion * refactor libvirt from connection -> driver * Switch to using new config parsing for vm_vdi_cleaner.py * Adds missing 'owner' attribute to image * Ignore floatingIpNotAssociated during disassociation * Avoid casts in network manager to prevent races * Stop nova_ipam_lib from changing the timeout setting * Remove extra DB calls for instances from OS API extensions * Allow single uuid to be specified for affinity * Fix invalid variable reference * Avoid reset on hard reboot if not supported * Fix several PEP-8 issues * Allow access to metadata server '/' without IP check * Fix db calls for snaphsot and volume mapping * Removes utils.logging_error (no longer used) * Removes utils.fetch_file (no longer used) * Improve filter_scheduler performance * Remove unnecessary queries for network info in notifications * Re-factor instance DB creation * Fix hacking.py failures.. * fix libvirt get_memory_mb_total() with xen * Migrate existing routes from flat_interface * Add full test environment * Another killfilter test fix for Fedora 17 * Remove unknown shutdown kwarg in call to vmops._destroy * Refactor vm_vdi_cleaner.py connection use * Remove direct access to glance client * Fix import order of openstack.common * metadata: cleanup pubkey representation * Make tgtadm the default iscsi user-land helper * Move rootwrap filters definition to config files * Fixes ram_allocation_ratio based over subscription * Call libvirt_volume_driver with right mountpoint * XenAPI: Fixes Bug 1012878 * update refresh_cache on compute calls to get_instance_nw_info * vm state and task state management * Update pylint/pep8 issues jenkins job link * Addtional CommandFilters to fix rootwrap on SLES * Tidy up exception handling in contrib api consoles * do sync before fusermount to avoid busyness * Fix bug 1010581 * xenapi tests: changes size='0' to size=0 * fixes a bug in xenapi tests where a 
string should be int * Minor HACKING.rst exception fix * Make libvirt LoopingCalls actually wait() * Add instance_id in Usage API response * Set libvirt_nonblocking to true by default for Folsom * Admin action to reset states * Use rpc from openstack-common * add nova-manage bash completion script * Spelling fixes * Fix bug 1014925: fix os-hosts * Adjust the libvirt config classes' API contract for parsing * Move libvirt version comparison code into separate function helper * Remove two obsolete libvirt cheetah templates from MANIFEST.in * Propose nova-novncproxy back into nove core * Fix missing import in compute/utils.py * Add instance details to notifications * Xen Storage Manager: tests for xensm volume driver * SM volume driver: DB changes and tests * moved update cache functionality to the network api * Handle missing server when getting security groups * Imports cleanup * added deprecated.warn helper method * Enforce an instance uuid for instance_test_and_set * Replaces functions in utils.py with openstack/common/timeutils.py * Add CPU arch filter scheduler support * Present correct ec2id format for volumes and snaps * xensm: Fix xensm volume driver after uuid changes * Cleanup instance_update so it only takes a UUID * Updates the cache * Add libvirt min version check * Ensure dnsmasq accept rules are preset at startup * Re-add private _compute_node_get call to sql api * bug #996880 change HostNotFound in hosts to HTTPNotFound * Unwrap httplib.HTTPConnection after WsgiLimiterProxyTest * Log warnings instead of full exceptions for AMQP reconnects * Add missing ack to impl_qpid * blueprint lvm-disk-images * Remove unused DB calls * Update default policies for KVM guest PIT & RTC timers * Add support for configuring libvirt VM clock and timers * Dedupe native and EC2 security group APIs * Add two missing indexes for instance_uuid columns * Revert "Fix nova-manage backend_add with sr_uuid" * Adds property to selectively enable image caching * Remove 
utils.deprecated functions * Log connection_type deprecation message as WARNING * add unit tests for new virt driver loader * Do not attempt to kill already-dead dnsmasq * Only invoke .lower() on non-None protocols * Add indexes to new instance_uuid columns * instance_destroy now only takes a uuid * Do not always query deleted instance_types * Rename image to image_id * Avoid partially finished cache files * Fix power_state mis-use bug 1010586 * Resolve unittest error in rpc/impl_zmq * Fix whitespace in sqlite steps * Make eventlet backdoor play nicer with gettext * Add user_name project_name and color option to log * fixes bug 1010200 * Fixes affinity filters when hints is None * implement sql-comment-string stack traces * Finalize tox config * Fixes bug lp:999928 * Convert consoles to use instance uuid * Use OSError instead of ProcessExecutionError * Replace standard json module with openstack.common.jsonutils * Don't query nova-network on startup * Cleans up power_off and power_on semantics * Refactor libvirt create calls * Fix whitespace in sqlite steps * Update libvirt imagecache to support resizes * separate Metadata logic away from the web service * Fix bug 1006664: describe non existent ec2 keypair * Make live_migration a first-class compute API * Add zeromq driver. 
Implements blueprint zeromq-rpc-driver * Fix up protocol case handling for security groups * Prefix all nova binaries with 'nova-' * Migrate security_group_instance_association to use a uuid to refer to instances * Migrate instance_metadata to use a uuid to refer to instances * Adds `disabled` field for instance-types * More meaningful help messages for libvirt migration options * fix the instance quota overlimit message * fix bug lp:1009041,add option "-F" to make mkfs non-interactive * Finally ack consumed message * Revert "blueprint " * Use openstack-common's policy module * Use openstack.common.cfg.CONF * bug #1006094 correct typo in addmethod.openstackapi.rst * Correct use of uuid in _get_instance_volume_bdm * Unused imports cleanup (folsom-2) * Quantum Manager disassociate floating-ips on instance delete * defensive coding against None inside bdm resolves bug 1007615 * Add missing import to quantum manager * Add a comment to rpc.queue_get_for() * Add shared_storage_test methods to compute rpcapi * Add get_instance_disk_info to the compute rpcapi * Add remove_volume_connection to the compute rpcapi * blueprint * Implements resume_state_on_host_boot for libvirt * Fix libvirt rescue to work with whole disk images * Finish removing xenapi.HelperBase class * Remove network_util.NetworkHelper class * Remove volume_util.VolumeHelper class * Remove vm_utils.VMHelper class * Start removing unnecessary classes from XenAPI driver * XenAPI: Don't hardcode userdevice for VBDs * convert virt drivers to fully dynamic loading * Add compare_cpu to the compute rpcapi * Add get_console_topic() to the compute rpcapi * Add refresh_provider_fw_rules() to compute rpcapi * Use compute rpcapi in nova-manage * Add post_live_migration_at_destination() to compute rpcapi * Add pre_live_migration() to the compute rpcapi * Add rollback_live_migration_at_destination() to compute rpcapi * Add finish_resize() to the compute rpcapi * Add resize_instance() to the compute rpcapi * Add 
finish_revert_resize() to the compute rpcapi * Add get_console_pool_info() to the compute rpcapi * Fix destination host for remove_volume_connection * Don't deepcopy RpcContext * Remove resize function from virt driver * Cleans up extraneous volume_api calls * Remove list_disks/list_interfaces from virt driver * Remove duplicate words in comments * Implement blueprint host-topic-matchmaking * Remove unnecessary setting of XenAPI module attribute * Prevent task_state changes during VERIFY_RESIZE * Eliminate a race condition on instance deletes * Make sure an exception is logged when config file isn't found * Removing double quotes from sample config file * Backslash continuation removal (Nova folsom-2) * Update .gitignore * Add a note on why quota classes are unused in Nova * Move queue_get_for() from db to rpc * Sample config file tool updates * Fix instance update notification publisher id * Use cfg's new global CONF object * Make xenapi fake match real xenapi a bit closer * Align ApiEc2TestCase to closer match api-paste.ini * Add attach_time for EC2 Volumes * fixing issue with db.volume_update not returning the volume_ref * New RPC tests, docstring fixes * Fix reservation_commit so it works w/ PostgreSQL * remove dead file nova/tests/db/nova.austin.sqlite * Fix the conf argument to get_connection_pool() * Remove Deprecated auth from EC2 * Revert "API users should not see deleted flavors." 
* Grammar fixes * Record instance architecture types * Grammar / spelling corrections * cleanup power state (partially implements bp task-management) * [PATCH] Allow [:print:] chars for security group names * Add scheduler filter for trustedness of a host * Remove nova.log usage from nova.rpc * Remove nova.context dependency from nova.rpc * _s3_create update only pertinent metadata * Allow adding fixed IPs by network UUID * Fix a minor spelling error * Run coverage tests via xcover for jenkins * Localize rpc options to rpc code * clean-up of the bare-metal framework * Use utils.utcnow rather than datetime.utcnow * update xen to use network_model * fixes bug 1004153 * Bugfix in simple_tenant_usage API detail view * removed a dead db function register_models() * add queue name argument to TopicConsumer * Cleanup tools/hacking using flake8 * Expose a limited networks API for users * Added an instance state update notification * Remove deprecated quota code * Update pep8 dependency to v1.1 * Nail pep8 dependencies to 1.0.1 * API users should not see deleted flavors * Add scheduler filter: TypeAffinityFilter * Add help string to option 'osapi_max_request_body_size' * Permit deleted instance types to be queried for active instances * Make validate_compacted_migration into general diff tool * Remove unused tools/rfc.sh * Finish quota refactor * Use utils.parse_strtime rather than datetime.strptime folsom-1 -------- * Add version to compute rpc API * Add version to scheduler rpc API * Add version to console rpc API * Remove wsgiref from requirements * More accurate rescue mode testing for XenAPI * Add tenant id in self link in /servers call for images * Add migration compaction validation tool * Enable checking for imports in alphabetical order * Include volume-usage-audit in tarballs * Fix XenServer diagnostics to provide correct details * Use cfg's new behavior of reset() clearing overrides * Sync with latest version of openstack.common.cfg * Only permit alpha-numerics 
and ._- for instance type names * Use memcache to store consoleauth tokens * cert/manager.py not using crypto.fetch_crl * Cleanup LOG.getLoggers to use __name__ * Imported Translations from Launchpad * Alphabetize imports in nova/tests/ * Fix Multi_Scheduler to process host capabilities * fixed_ip_get_by_address read_deleted from context * Fix for Quantum LinuxBridge Intf driver plug call * Add additional logging to compute filter * use a RequestContext object instead of context module * make get_all_bw_usage() signature match for fake virt driver * Add unit test coverage for bug 1000261 * Moving network tests into the network folder * Add version to consoleauth rpc API * Add version to the cert rpc API * Add base support for rpc API versioning * fixes typo that completely broken Quantum/Nova integration * Make Iptables FW Driver handle dhcp_server None * Add aliases to .mailmap for comstud and belliott * Add eventlet backdoor to facilitate troubleshooting * Update nova's copy of image metadata on rebuild * Optional timeout for servers stuck in build * Add configurable timeout to Quantum HTTP connections * Modify vm_vdi_cleaner to handle `-orig` * Add __repr__ to least_cost scheduler * Bump XenServer plugin version * handle updated qemu-img info output * Rearchitect quota checking to partially fix bug 938317 * Add s3_listen and s3_listen_port options * Misused and not used config options * Remove XenAPI use of eventlet tpool * Fixed compute periodic task. 
Fixes bug 973331 * get instance details results in volumes key error * Fix bug 988034 - Quantum Network Manager - not clearing ips * Stop using nova.exception from nova.rpc * Make use of openstack.common.jsonutils * Alphabetize imports in nova/api/ * Remove unused _get_target code from xenapi * Implement get_hypervisor_hostname for libvirt * Alphabetize imports * Alphabetize imports in nova/virt/ * Adding notifications for volumes * Pass 'nova' project into ConfigOpts * fixes bug 999206 * Create an internal key pair API * Make allocation failure a bit more friendly * Avoid setting up DHCP firewall rules with FlatManager * Migrate missing license info * Imported Translations from Launchpad * Fix libvirt Connection.get_disks method * Create a utf8 version of the dns_domains table * Setup logging, particularly for keystone middleware * Use default qemu-img cluster size in libvirt connection driver * Added img metadata validation. Fixes bug 962117 * Remove unnecessary stubout_loopingcall_start * Actually use xenapi fake setter * Provide a transition to new .info files * Store image properties with instance system_metadata * Destroy system metadata when destroying instance * Fix XenServer windows agent issue * Use ConfigOpts.find_file() to find paste config * Remove instance Foreign Key in volumes table, replace with instance_uuid * Remove old flagfile support * Removed unused snapshot_instance method * Report memory correctly on Xen. 
Fixes bug 997014 * Added image metadata to compute.instance.exists * Update PostgreSQL sequence names for zones/quotas * Minor help text related changes * API does need new image_ref on rebuild immediately * Avoid unnecessary inst lookup in vmops _shutdown * implement blueprint floating-ip-notification * Defer image_ref update to manager on rebuild * fix bug 977007,make nova create correct size of qcow2 disk file * Remove unnecessary shutdown argument to _destroy() * Do not fail on notify when quantum and melange are out of sync * Remove instance action logging mechanism * httplib throw "TypeError: an integer is required" when run quantum * fix bug 992008, we should config public interface on compute * A previous patch decoupled the RPC drivers from the nova.flags, breaking instance audit usage in the process. This configures the xvpvncproxy to configure the RPC drivers properly with FLAGS so that xvpvncproxy can run * Fix bug 983206 : _try_convert parsing string * pylint cleanup * Fix devref docs * Remove Deprecated AuthMiddleware * Allow sitepackages on jenkins * Replaces exceptions.Error with NovaException * Docs for vm/task state transitions * Fix a race with rpc.register_opts in service.py * Mistake with the documentation about cost function's weight corrected * Remove state altering in live-migration code * Register fake flags with rpc init function * Generate a Changelog for Nova * Find context arg by type rather than by name * Default auto-increment for int primary key columns * Adds missing copyright to migration 082 * Add instance_system_metadata modeling * Use fake_libvirt_utils for libvirt console tests * Fix semantics for migration test environment var * Clean up weighted_sum logic * Use ConfigOpts.find_file() to locate policy.json * Sync to newer openstack.common.cfg * Fix test_mysql_innodb * Implement key pair quotas * Ensure that the dom0 we're connected to is the right one * Run ip link show in linux_net._device_exists as root * Compact pre-Folsom 
database migrations * Remove unused import * Pass context to notification drivers when we can * Use save_and_reraise_exception() from common * Fix innodb tests again * Convert Volume and Snapshot IDs to use UUID * Remove unused images * Adding 'host' info to volume-compute connection information * Update common.importutils from openstack-common * Provide better quota error messages * Make kombu support optional for running unit tests * Fix nova.tests.test_nova_rootwrap on Fedora 17 * Xen has to create its own tap device if using libvirt and QuantumLinuxBridgeVIFDriver * Fix test_migrations to work with python 2.6 * Update api-paste.ini to remove unused settings * Fix test_launcher_app to ensure service actually got started * Minor refactor of servers viewbuilder * A previous patch decoupled the RPC drivers from the nova.flags, breaking instance audit usage in the process. This configures the instance audit usage to configure the RPC drivers properly with FLAGS so that the job can run * Allow blank passwords in changePassword action * Allow blank adminPass on server create * Return a BadRequest on bad flavors param values * adjust logging levels for utils.py * Update integration tests to listen on 127.0.0.1 * Log instance consistently * Create name_label local variable for logging message * Remove hack for xenapi driver tests * Migrate block_device_mapping to use instance uuids * Remove unnecessary return statements * Clean up ElementTree usage * Adds better bookending and robustness around the instance audit usage generation * Pass instance to resize_disk() to fix exception * Minor spelling fix * Removes RST documentation and moves it to openstack-manuals * Trivial spelling fix * Remove workaround for sqlalchemy-migration < 0.6.4 * Remove unnecessary references to resize_confirm_window flag * Fix InnoDB migration bug in migrate script 86 * Use openstack.common.importutils * Ignore common code in coverage calculations * Use additional task states during resize * Add 
libvirt get_console_output tests: pty and file * Keep uuid with bandwidth usage tracking to handle the case where a MAC address could be recycled between instances * Added the validation for name check for rebuild of a server * Make KillFilter to handle 'deleted' w/o rstrip * Fix instance delete notifications * Disconnect stale instance VDIs when starting nova-compute * Fix timeout in EC2 CloudController.create_image() * Add additional capabilities for computes * Move image checksums into a generic file * Add instance to several log messages * Imports to human alphabetical order * Fixes bug 989271, fixes launched_at date on notifications * Enable InnoDB checking * make all mysql tables explicitly innodb * Use instance_get_by_uuid since we're looking up a UUID * Use nova_uuid attribute instead of trying to parse out name_label * Add a force_config_drive flag * Fix 986922 * Improvement for the correct query extraction * Fixes bug 983024 * Make updating hostId raises BadRequest * Disallow network creation when label > 255. Fixes bug 965008 * Introduced _atomic_restart_dhcp() Fixes Bug 977875 * Make the filename that image hashes are written to configurable * Xen: Pass session to destroy_vdi * Add instance logging to vmware_images.py * Add instance logging to vmops.py * fix bug #980452 set net.ipv4.ip_forward=1 on network * Log instance * Log instance information for baremetal * Include instance in log message * Log instance * Ensure all messages include instance * Add instance to log messages * Include instance in log message * Refactor nova.rpc config handling * Don't leak RPC connections on timeouts or other exceptions * Small cleanup to attach_volume logging * Implements EC2 DescribeAddresses by specific PublicIp * Introduced flag base_dir_name. 
Fixes bug 973194 * Set a more reasonable default RPC thread pool size * Number of missing imports should always be shown * Typo fix in bin/instance-usage-audit * Improved tools/hacking.py * Scope coverage report generation to nova module * Removes unnecessary code in _run_instance * Validate min_ram/min_disk on rebuild * Adding context to usage notifications * Making `usage_from_instance` private * Remove __init__.py from locale dir * Fixes bug 987335 * allow power state "BLOCKED" for live migrations if using Xen by libvirt * Exclude xenapi plugins from pep8/hacking checks * Imported Translations from Launchpad * Remove unnecessary power state translation messages * Add instance logging * Use utils.save_and_reraise_exception * Removing XenAPI class variable, use session instead * Log instance consistently * Keep nova-manage commands sorted * Log instances consistently * Moves `usage_from_instance` into nova.compute.utils * Log instance * nova.virt.xenapi_conn -> nova.virt.xenapi.connection * Remove unused time keyword arg * Remove unused variable * support a configurable libvirt injection partition * Refactor instance image property inheritance out to a method * Refactor availability zone handling out to a method * Include name being searched for in exception message * Be more tolerant of deleting failed builds * Logging updates in IptablesFirewallDriver * Implement security group quotas * Do not allow blank adminPass attribute on set password * Make rebuilds with an empty name raise BadRequest * Updates launched_at in the finish and revert_migration calls * Updated instance state on resize error * Reformat docstrings in n/c/a/o/servers as per HACKING * fix bug 982360, multi ip block for dmz_cidr * Refactor checking instance count quota * Small code cleanup for config_disk handling * Refactors kernel and ramdisk handling into their own method * Improve instance logging in compute/manager * Add deleted_at to instance usage notification * Simplify _get_vm_opaque_ref 
in xenapi driver * Test unrescue works as well * Remove unused variable * Port types and extra specs to volume api * Make exposed methods clearer in xenapi.vmops * Fix error message to report correct operation * Make run_tests.sh just a little bit less verbose * Log more information when sending notifications * xenapi_conn -> xenapi.connection * Renamed current_audit_period function to last_completed_audit_period to clarify its purpose * QuantumManager will start dnsmasq during startup. Fixes bug 977759 * Fixed metadata validation err. Fixes bug 965102 * Remove python-novaclient dependency from nova * Extend instance UUID logging * Remove references to RemoteError in os-networks * Fix errors in os-networks extension * Removes dead code around start_tcp in Server * Improve grammar throughout nova * Improved localization testing * Log kwargs on a failed String Format Operation * Standardize quota flag format * Remove nova Direct API * migration_get_all_unconfirmed() now uses lowercase "finished" Fixes bug 977719 * Run tools/hacking.py instead of pep8 mandatory * Delete fixed_ips when network is deleted * Remove unnecessary --repeat option for pep8 * Create compute.api.BaseAPI for compute APIs to use * Give all VDIs a reasonable name-label and name-description * Remove last two remaining hyperV references * bug 968452 * Add index to fixed_ips.address * Use 'root' instead of 'os' in XenAPI driver * Information about DifferentHostFilter and SameHostFilter added * HACKING fixes, sqlalchemy fix * Add test to check extension timestamps * Fixes bug 952176 * Update doc to mention nova tool for type creation * Change Diablo document reference to trunk * Imported Translations from Launchpad * Cloudpipe tap vpn not always working * Allow instance logging to use just a UUID * Add the serialization of exceptions for RPC calls * Cleanup xenapi driver logging messages to include instance * Stop libvirt test from deleting instances dir * Move product_version to XenAPISession * glance 
plugin no longer takes num_retries parameter * Remove unused user_id and project_id parameters to fetch_image() * Cleanup _make_plugin_call() * Push id generation into _make_agent_call() * Remove unused path argument for _make_agent_call() * Remove unused xenstore methods * Combine call_xenapi and call_xenapi_request * Fixed bug 962840, added a test case * Use -1 end-to-end for unlimited quotas * fix bug where nova ignores glance host in imageref * Remove unused _parse_xmlrpc_value * Fix traceback in image cache manager * Fixes regression in release_dhcp * Use thread local storage from openstack.common * Extend FilterScheduler documentation * Add validation on quota limits (negative numbers) * Get unit tests functional in OS X * Make sure cloudpipe extension can retrieve network * Treat -1 quotas as unlimited * Auto-confirming resizes would bail on exceptions * Grab the vif directly on release instead of lookup * Corrects an AttributeError in the quota API * Allow unprivileged RADOS users to access rbd volumes * Remove nova.rpc.impl_carrot * Sync openstack.common.cfg from openstack-common * add libvirt_inject_key flag fix bug #971640 * Do not fail to build a snapshot if base image is not found * fix TypeError with unstarted threads in nova-network * remove unused flag: baremetal_injected_network_template baremetal_uri baremetal_allow_project_net_traffic * Imported Translations from Launchpad * fixed postgresql flavor-create * Add rootwrap for touch * Ensure floating ips are recreated on reboot * Handle instances being missing while listing floating IPs * Allow snapshots in error state to be deleted * Ensure a functional database connection * Add a faq to vnc docs * adjust logging levels for linux_net * Handle not found in check for disk availability * Accept metadata ip so packets aren't snatted * bug 965335 * Export user id as password to keystone when using noauth * Check that DescribeInstance works with deleted image * Check that volume has no snapshots before 
deletion * Fix libvirt rescue * Check vif exists before releasing ip * Make kombu failures retry on IOError * Adds middleware to limit request body sizes * Add validation for OSAPI server name length * adjust logging levels for libvirt error conditions * Fix exception type in _get_minram_mindisk_params * fixed bug lp:968019, fix network manager init floating ip problem * When dnsmasq fails to HUP log an error * Update KillFilter to handle 'deleted' exe's * Fix disassociate query to remove foreign keys * Touch in use image files when they're checked * Base image signature files are not images * Support timestamps as prefixes for traceback log lines * get_instance_uuids_by_ip_filter to QM * Updated docstrings in /tools as per HACKING * Minor xenapi driver cleanups * Continue on the next tenant_id on 400 codes * Fix marker behavior for flavors * Remove auth_uri, already have auth_host, auth_port * A missing checksum does not mean the image is corrupt * Default scheduler to spread-first * Reduce the image cache manager periodic interval * Handle Forbidden and NotAuthenticated glance exc * Destroy src and dest instances when deleting in RESIZE_VERIFY * Allow self-referential groups to be created * Fix unrescue in invalid state * Clean up the shared storage check (#891756) * Don't set instance ACTIVE until it's really active * Fix traceback when sending invalid data * Support sql_connection_debug to get SQL diagnostic information * Improve performance of safe_log() * Fix 'nova-manage config convert' * Add another libvirt get_guest_config() test case * Fix libvirt global name 'xml_info' is not defined * Clean up read_deleted support in host aggregates code * ensure atomic manipulation of libvirt disk images * Import recent openstack-common changes * makes volume versions display properly * Reordered the alphabet * Add periodic_fuzzy_delay option * Add a test case for generation of libvirt guest config * Convert libvirt connection class to use config APIs for CPU 
comparisons * Introduce a class for storing libvirt CPU configuration * Convert libvirt connection class to use config APIs for guests * Convert libvirt connection class to use config APIs for filesystem devices * Introduce a class for storing libvirt snapshot configuration * Move NIC devices back after disk devices * Convert libvirt connection class to use config APIs for disk devices * Convert libvirt connection class to use config APIs for input devices * Convert libvirt connection class to use config APIs for serial/console devices * Convert libvirt connection class to use config APIs for graphics * Convert libvirt vif classes over to use config API * Convert libvirt volume classes over to use config API * Delete the test_preparing_xml_info libvirt test * Introduce a set of classes for storing libvirt guest configuration * Send a more appropriate error response for 403 in osapi * Use key in locals() that actually exists * Fix launching of guests where instances_path is on GlusterFS * Volumes API now uses underscores for attrs * Remove unused certificate SQL calls * Assume migrate module missing __version__ is old * Remove tools/nova-debug * Inlining some single-use methods in XenAPI vmops * Change mycloud.com to example.com (RFC2606) * Remove useless dhcp_domain flags in EC2 * Handle correctly QuotaError in EC2 API * Avoid unplugging VBDs for rescue instances * Imported Translations from Launchpad * Rollback create_disks handles StorageError exception * Capture SIGTERM and Shut down python services cleanly * Fixed status validation. 
Fixes bug 960884 * Clarify HACKING's shadow built-in guidance * Strip auth token from log output * Fail-fast for invalid read_deleted values * Only shutdown rescue instance if it's not already shutdown * Modify nova.wsgi.start() should check backlog parameter * Fix unplug_vbd to retry a configurable number of times * Don't send snapshot requests through the scheduler * Implement quota classes * Fixes bug 949038 * Open Folsom * Fixes bug 957708 * Improvements/corrections to vnc docs * Allow rate limiting to be disabled via flag * Improve performance of generating dhcp leases * Fix lxc console regression * Strip out characters that should be escaped from console output * Remove unnecessary data from xenapi test * Correct accessIPv6 error message * Stop notifications from old leases * Fix typo in server diagnostics extension * Stub-implement floating-ip functions on FlatManager * Update etc/nova.conf.sample for ship * Make sqlite in-memory-db usable to unittest * Fix run/terminate race conditions * Workaround issue with greenthreads and lockfiles * allow the compute service to start with missing libvirt disks * Destroy rescue instance if main instance is destroyed * Tweak security port validation for ICMP * Debug messages for host filters * various cleanups * Remove Virtual Storage Array (VSA) code * Re-instate security group delete test case * db api: Remove check for security groups reference * Allow proper instance cleanup if state == SHUTOFF * Use getLogger for nova-all * Stop setting promisc on bridge * Fix OpenStack Capitalization * Remove improper use of redirect for hairpin mode * Fix OpenStack Capitalization * HACKING fixes, TODO authors * Keep context for logging intact in greenthreads * fix timestamps to match documented ec2 api * Include babel.cfg in tarballs * Fix LXC volume attach issue * Make extended status not admin-only by default * Add ssl and option to pass tenant to s3 register * Remove broken bin/*spool* tools * Allow errored volumes to be 
deleted * Fix up docstring * libvirt/connection.py: Set console.log permissions * nonblocking libvirt mode using tpool * metadata speed - revert logic changes, just caching * Refix mac change to work around libvirt issue * Update transfer_vhd to handle unicode correctly * Fixes bug 954833 By adding the execute bit to the xenhost xenapi plugin * Cleanup flags * fix bug 954488 * Fix backing file cp/resize race condition * Use a FixedIp subquery to find networks by host * Changes remove_fixed_ip to pass the instance host * Map image ids to ec2 ids in metadata service * Remove date_dhcp_on_disassociate comment and docs * Make fixed_ip_disassociate_all_by_timeout work * Refactor glance id<->internal id conversion for s3 * Sort results from describe_instances in EC2 API * virt/firewall: NoopFirewallDriver::instance_filter_exists must return True * fix nova-manage floating delete * fixed list warn when ip allocated to missing inst * Removes default use of obsolete ec2 authorizor * Additional extensions no longer break unit-tests * Use cPickle and not just pickle * Move (cast|call)_compute_message methods back into compute API class * Fix libvirt get_console_output for Python < 2.7 * doc/source/conf.py: Fix man page building * Update floating auto assignment to use the model * Make nova-manage syslog check /var/log/messages * improve speed of metadata * Fix linux_net.py interface-driver loading * Change default of running_deleted_instance_action * Nuke some unused SQL api calls * Avoid nova-manage floating create /32 * Add a serializer for os-quota-sets/defaults * Import nova.exception so exception can be used * refactoring code, check connection in Listener. 
refer to Bug #943031 * Fix live-migration in multi_host network * add convert_unicode to sqlalchemy connection arguments * Fixes xml representation of ext_srv_attr extension * Sub in InstanceLimitExceeded in overLimit message * Remove update lockmode from compute_node_get_by_host * Set 'dhcp_server' in _teardown_network_on_host * Bug #922356 QuantumManager does not initiate unplug on the linux_net driver * Clean up setup and teardown for dhcp managers * Display owner in ec2 describe images * EC2 KeyName validation * Fix issues with security group auths without ports * Replaced use of webob.Request.str_GET * Allow soft_reboot to work from more states: * Make snapshots with qemu-img instead of libvirt * Use utils.temporary_chown to ensure permissions get reset * Add VDI chain cleanup script * Reduce duplicated code in xenapi * Since 'net' is of nova.network.model.VIF class and 'ips' is an empty list, net needs to be pulled from hydrated nw_info.fixed_ips(), and appended to ips * Fix nova-manage backend_add with sr_uuid * Update values in test_flagfile to be different * Switch all xenapi async plugin calls to be sync * Hack to fixup absolute pybasedir in nova.conf.sample * fixup ldapdns default config * Use cache='none' for all disks * Update cfg from openstack-common * Add pybasedir and bindir options * Simply & unify console handling for libvirt drivers * Cleanup XenAPI tests * fix up nova-manage man page * Don't use glance when verifying images * Fixes os-volume/snapshot delete * Use a high number for our default mac addresses * Simplify unnecessary XenAPI Async calls to be synchronous * Remove an obsolete FIXME comment * Fixing image snapshots server links * Wait for rescue VM shutdown to complete before destroying it * Renaming user friendly fault name for HTTP 409 * Moving nova/network tests to more logical home * Change a fake classes variable to something other than id * Increase logging for xenapi plugin glance uploads * Deprecate carrot rpc code * Improve 
vnc proxy docs * Require a more recent version of glance * Make EC2 API a bit more user friendly * Add kwargs to RequestContext __init__ * info_cache is related to deleted instance * Handle kwargs in deallocate_fixed_ip for FlatDHCP * Add a few missing tests regarding exception codes * Checks image virtual size before qemu-img resize * Set logdir to a tempdir in test_network * Set lock_path to a tempdir in TestLockCleanup * Exceptions unpacking rpc messages shouldn't hang the daemon * Use sqlalchemy reflection in migration 080 * Late load rabbit_notifier in test_notifier * boto shouldn't be required for production deploys * Don't use ec2 IDs in scheduler driver * pyflakes cleanups on libvirt/connection.py * Validate VDI chain before moving into SR * Fix racey snapshots * Don't swallow snapshot exceptions * allow block migration to talk to glance/keystone * Remove cruft and broken code from nova-manage * Update paste file to use service tenant * Further cleanup of XenAPI * Fix XML namespaces for limits extensions and versions * Remove the feature from UML/LXC guests * setup.py: Fix doc building * Add adjustable offset to audit_period * nova-manage: allow use of /32 IP range * Clear created attributes when tearing down tests * Fix multi_host column name in setup_networks.. * HACKING fixes, all but sqlalchemy * Remove trailing whitespaces in regular file * remove undocumented, unused mpi 'extension' to ec2 metadata * Minor clarifications for the help strings in nova config options * Don't use _ for variable name * Make test_compute console tests more robust * test_compute stubs same thing multiple times * Ignore InstanceNotFound when trying to set instance to ERROR * Cleans up the create_conf tool * Fix bug 948611. 
Fix 'nova-manage logs errors' * api-paste.ini: Add /1.0 to default urlmap * Adds nova-manage command to convert a flagfile * bug 944145: race condition causes VM's state to be SHUTOFF * Cleanup some test docstrings * Cleans up a bunch of unused variables in XenAPI * Shorten FLAGS.rpc_response_timeout * Reset instance to ACTIVE when no hosts found * Replaces pipelines with flag for auth strategy * Setup and teardown networks during migration * Better glance exception handling * Distinguish rootwrap Authorization vs Not found errors * Bug #943178: aggregate extension lacks documentation * Rename files/dirs from 'rabbit' to 'rpc' * Change references to RabbitMQ to include Qpid * Avoid running code that uses logging in a thread * No longer ignoring man/novamanage * Fixing incorrect use of instance keyword in logging * Fix rst formatting and cross-references * Provide a provider for boto.utils * Only pass image uuids to compute api rebuild * Finally fix the docs venv bug * Get rid of all of the autodoc import errors * Rename DistributedScheduler as FilterScheduler * Allows new style config to be used for --flagfile * Add support for lxc consoles * Fix references to novncproxy_base_url in docs * Add assertRaises check to tools/hacking.py as N202 * fix restructuredtext formatting in docstrings that show up in the developer guide * Raise 409 when rescuing instance in RESCUE mode * Log a certain rare instance termination exception * Update fixed_ip_associate to not use relationships * Remove unnecessary code in test setUp/tearDown * Imported Translations from Launchpad * Only raw string literals should be used with _() * assertRaises(Exception, ...) 
considered harmful * Added docs on MySQL queries blocking main thread * Fix test_attach_volume_raise_exception * Fix test_unrescue to actually test unrescue * bug #941794 VIF and intf drivers for Quantum Linux Bridge plugin * Ensures that we don't exceed iptables chain max * Allows --flat_interface flag to override db * Use self.mox instead of create a new self.mocker * Fix test_migrate_disk_and_power_off_exception * fakes.fake_data_store doesn't exist, so don't reset it * populate glance 'name' field through ec2-register * Remove unused _setup_other_managers method from test case * Remove unused test_obj parameter to setUp() * Use stubout instead of manually stubbing out os.path.exists * Remove superfluous __init__ from test case * Use test.TestCase instead of manually managing stubout * Handle InstanceNotFound during server update * Use stubout instead of manually stubbing out versions.VERSIONS * Remove unused session variable in test setup * Cleanup swap in _create_vm undo * Do not invoke kill dnsmasq if no pid file was found * Fixes for ec2 images * Retry download_vhd with different glance host each time * Display error for invalid CIDR * Remove empty setUp/tearDown methods * Call super class tearDown correctly * Fixes bug 942556 and bug 944105 * update copyright, add version information to footer * Refactor spawn to use UndoManager * Fail gracefully when the db doesn't speak unicode * Remove unnecessary setting up and down of mox and stubout * Remove unnecessary variables from tests * Ensure image status filter matches glance format * fix for bug 821252. 
Smarter default scheduler * blueprint sphinx-doc-cleanup bug 944381 * Adds soft-reboot support to libvirt * Minor cleanup based on HACKING * libvirt driver calls unplug() twice on vm reboot * Add missing format string type on some exception messages * Fixing a request-id header bug * Test creating a server with metadata key too long * Fixes lp931801 and a key_error * notifications for delete, snapshot and resize * Ensure that context read_deleted is only one of 'no', 'yes' or 'only' * register Cell model, not Zone model * Option expose IP instead of dnshost in ec2 desc' * Fix _sync_power_states to obtain correct 'state' * Ensures that keypair names are only AlphaNumeric * Cast vcpu_weight to string before calling xen api * Add missing filters for new root commands * Destroy VM before VDIs during spawn cleanup * Include hypervisor_hostname in the extended server attributes * Remove old ratelimiting code * Perform image show early in the resize process * Adds netapp volume driver * Fixes bug 943188 * Remove unused imports and variables from OS API * Return empty list when volume not attached * Be consistent with disabling periodic tasks * Cast volume-related ids to str * Fix for bug 942896: Make sure network['host'] is set * Allow xvd* to be supplied for volume in xenapi * Initialize progress to 0 for build and resize * Fix issue starting nova-compute w/ XenServer * Provide retry-after guidance on throttled requests * Use constant time string comparisons for auth * Rename zones table to cells and Instance.zone_name to cell_name * Ensure temporary file gets cleaned up after test * Fixes bug 942549 * Use assertDictMatch to keep 2.6 unit tests passing * Handle case where instance['info_cache'] is None * sm volume driver: fix backend adding failure * sm vol driver: Fix regression in sm_backend_conf_update * TypeError API exceptions get logged incorrectly * Add NoopFirewallDriver * Add utils.tempdir() context manager for easy temp dirs * Check all migrations have 
downgrade in test_misc * Remove monkey patching in carrot RPC driver * Call detach_volume when attach fails * Do not hit the network_api every poll * OS X Support fixed, bug 942352 * Make scheduler filters more pluggable * Adds temporary chown to sparse_copy * make nova-network usable with Python < 2.6.5 * Re-adds ssl to kombu configuration and adds flags that are needed to pass through to kombu * Remove unused import * Make sure detail view works for volume snaphots * Imported Translations from Launchpad * Decode nova-manage args into unicode * Cleanup .rescue files in libvirt driver unrescue * Fixes cloudpipe extension to work with keystone * Add missing directive to tox.ini * Update EC2KeystoneAuth to grab tenant 'id' * Monkey patch migrate < 0.7.3 * Fixes bug lp#940734 - Adding manager import so AuthMiddleware works * Clean stale lockfiles on service startup : fixes bug 785955 * Fix nova-manage floating create docs * Fix MANIFEST.in to include missing files * Example config_drive init script, label the config drive * fix unicode triggered failure in AuthManager * Fix bug 900864 Quantum Manager flag for IP injection * Include launch_index when creating instances * Copy data when migration dst is on a different FS * bigger-than-unit test for cleanup_running_deleted_instances * Nova options tool enhancements * Add hypervisor_hostname to compute_nodes table and use it in XenServer * Fixes error if Melange returns no networks * Print error if nova-manage should be run as root * Don't delete security group in use from OS API * nova-network can't deallocate ips from deleted instances * Making link prefixes support https * Prevent infinite loop in PublishErrorsHandler * blueprint host-aggregates: host maintenance - xenapi implementation * bug 939480 * libvirt vif-plugging fixes. 
Fixes bug 939252 , bug 939254 * Speeding up resize down with sparse_copy * Remove network_api fallback for info_cache from APIs * Improve unit test coverage per bug/934566 * Return 40x for flavor.create duplicate * refactor a conditional for testing and understanding * Disable usb tablet support for LXC * Add Nexenta volume driver * Improve unit test coverage per bug/934566 * nova-manage: Fix 'fixed list' * Add lun number to provider_location in create_volume * Fixes bug 938876 * Fix WeightedHost * Fix instance stop in EC2 create_image * blueprint host-aggregates: improvements and clean-up * Move get_info to taking an instance * Support fixed_ip range that is a subnet of the network block * xenapi: nova-volume support for multiple luns * Fix error that causes 400 in flavor create * Makes HTTP Location Header return as utf-8 as opposed to Unicode * blueprint host-aggregates: host maintenance * blueprint host-aggregates: xenapi implementation * Rework base file checksums * Avoid copying file if dst is a directory * Add 'nova-manage export auth' * Alter output format of volume types resources * Scheduler notifications added * Don't store connection pool in RpcContext * Fix vnc docs: novaclient now supports vnc consoles * Clarify use of Use of deprecated md5 library * Extract get_network in quantum manager * Add exception SnapshotIsBusy to be handled as VolumeIsBusy * Exception cleanup * Stop ignoring E202 * Support tox-based unittests * Add attaching state for Volumes * Fix quantum get_all_networks() signature (lp#936797) * Escape apostrophe in utils.xhtml_escape() (lp#872450) * Backslash continuations (nova.api.openstack) * Fix broken method signiture * Handle OSError which can be thrown when removing tmpdir. 
Fixes bug 883326 * Update api-paste.ini with new auth_token settings * Imported Translations from Launchpad * Don't tell Qpid to reconnect in a busy loop * Don't inherit controllers from each other, we don't want the methods of our parent * Improve unit test coverage per bug/934566 * Setting access ip values on server create * nova.conf sample tool * Imported Translations from Launchpad * Add support for admin_password to LibVirt * Add ephemeral storage to flavors api * Resolve bug/934566 * Partial fix for bug 919051 * fix pre_block_migration() interaction with libvirt cache * Query directly for just the ip * bug 929462: compile_diagnostics in xenapi erronously catch XenAPI.Failure * Use new style instance logging in compute api * Fix traceback running instance-usage-audit * Actual fix for bug 931608 * Support non-UTC timestamps in changes-since filter * Add additional information to servers output * Adding traceback to async faults * Pulls the main components out of deallocate * Add JSONFormatter * Allow file logging config * LOG.exception does not take an exc_info keyword * InstanceNotFound exceptions for terminate_intance now Log warning instead of throwing exeptions * bug 933620: Error during ComputeManager._poll_bandwidth_usage * Make database downgrade works * Run ovs-ofctl as root * 077_convert_to_utf8: Convert *all* FK tables early * Fix bug 933147 Security group trigger notifications * Fixes nova-volume support for multiple luns * Normalize odd date formats * Remove all uniqueness constraints in migration 76 * Add RPC serialization checking, fix exposed problems * Don't send a SQLAlchemy model over rpc * Adds back e2fsck exit code checking * Syncs vncviewer mouse cursor when connected to Windows VMs * Backslash continuations (nova.tests) * The security_group name should be an XML attribute * Core modifications for future zones service * Remove instance_get stubs from server action tests * removed unused method and added another test * Enables hairpin_mode 
for virtual bridge ports, allowing NAT reflection * Removed zones from api and distributed scheduler * Fix bug 929427 * Tests for a melange_ipam_lib, who is missing tests * Create a flag for force_to_raw for images * Resolve bug/927714 -- get instance names from db * Fix API extensions documentation, bug 931516 * misc networking fixes * Print friendly message if no floating IPs exist * Catch httplib.HTTPException as well * Expand Quantum Manager Unit Tests + Associated Fixes * bw_usage takes a MAC address now * Adding tests for NovaException printing * fix a syntax error in libvirt.attach_volume() with lxc * Prevent Duplicate VLAN IDs * tests: fix LdapDNS to allow running test_network in isolation * Fix the description of the --vnc_enabled option * Different exit code in new versions of iscsiadm * improve injection diagnostics when nbd unavailable. Bug 755854 * remove unused nwfilter methods and tests * LOG.exception only works while in an exception handler * _() works best with string literals * Remove unnecessary constructors for exceptions * Don't allow EC2 removal of security group in use * improve stale libvirt images handling fix. 
Bug 801412 * Added resize support for Libvirt/KVM * Update migration 076 so it supports PostgreSQL * Replace ApiError with new exceptions * Simple way of returning per-server security groups * Declare deprecated auth flag before its used * e2fsck needs -y * Standardize logging delaration and use * Changing nova-manage error message * Fix WADL/PDF docs referenced in describedby links * bug 931604: improve how xenapi RRD records are retrieved * Resolve bug/931794 -- add uuid to fake * Use new style instance logging in compute manager * clean pyc files before running unit tests * Adding logging for 500 errors * typo fix * run_tests.sh fix * get_user behavior in ldapdriver * Fsck disk before removing journal * Don't query database with an empty list for IN clause * Use stubs in libvirt/utils get_fs_info test * Adding (-x | --stop) option back to runner.py * Remove duplicate variable * Fixing a unicode related metadata bug * bug 931356: nova-manage prints libvirt related warnings if libvirt isn't installed * Make melange_port an integer * remove a private duplicate function * Changes for supporting fast cloning on Xenserver. Implements blueprint fast-cloning-for-xenserver 1. use_cow_images flag is reused for xenserver to check if copy on write images should be used. 2. image-id is used to tag an image which has already been streamed from glance. 3. If cow is true, when an instance of an image is created for the first time on a given xenserver, the image is streamed from glance and copy on write disk is created for the instance. 4. For subsequent instance creation requests (of the same image), a copy on write disk is created from the base image that is already present on the host. 5. If cow is false, when an instance of an image is created for the first time on a host, the image is streamed from glance and its copy is made to create a virtual disk for the instance. 6. 
For subsequent instance creation requests, a copy of disk is made for creating the disk for the instance. 7. Snapshot creation code was updated to handle cow=true. Now there can be upto 3 disks in the chain. The base disk needs to be uploaded too. 8. Also added a cache_images flag. Depending on whether the flag is turned on on not, images will be cached on the host * Completes fix for LP #928910 - libvirt performance * Add some more comments to _get_my_ip() * remove unused and buggy function from S3ImageService * Fix minor typo in runner.py * Remove relative imports from scheduler/filters * Converting db tables to utf8 * remove all instance_type db lookups from network * Remedies LP Bug #928910 - Use libvirt lookupByName() to check existence * Force imageRef to be a string * Retry on network failure for melange GET requests * Handle network api failures more gracefully * Automatic confirmation of resizes on libvirt * Fix exception by passing timeout as None * Extend glance retries to show() as well * Disable ConfigParser interpolation (lp#930270) * fix FlatNetworkTestCase.test_get_instance_nw_info * remove unused and buggy function from baremetal proxy * Remove unused compute_service from images controller * Backslash continuations (nova.virt.baremetal) * fixed bug 928749 * Log instance id consistently inside the firewall code * Remove the last of the gflags shim layer * Fix disk_config typo * Pass instance to log messages * Fix logging in xenapi vmops * Ensures that hostId's are unique * Fix confirm_resize policy handling * optimize libvirt image cache usage * bug 929428: pep8 validation on all xapi plugins * Move translations to babel locations * Get rid of distutils.extra * Backslash continuations (network, scheduler) * Remove unnecessary use of LoopingCall in nova/virt/xenapi/vm_utils.py * Stop using LoopingCall in nova.virt.xenapi_conn:wait_for_task() * Handle refactoring of libvirt image caching * linux_net: Also ignore shell error 2 from ip addr * 
Consistently update instance in nova/compute/manager.py * Use named logger when available * Fix deprecated warning * Add support for LXC volumes * Added ability to load specific extensions * Add flag to include link local in port security * Allow e2fsck to exit with 1 * Removes constraints from instance and volume types * Handle service failures during finish_resize gracefully * Set port security for all allocated ips * Move connection pool back into impl_kombu/qpid * pep8 check on api-paste.ini when using devstack * Allows test_virt_drivers to work when run alone * Add an alias to the ServerStartStop extension * tests.integrated fails with devstack * Backslash continuations (nova.virt) * Require newer versions of SA and SA-Migrate * Optimizes ec2 keystone usage and handles errors * Makes sure killfilter doesn't raise ValueError * Fixes volume snapshotting issues and tests * Backslash continuations (misc.) * nova-rootwrap: wait() for return code before exit * Fix bug 921814 changes handling of adminPass in API * Send image properties to Glance * Check return code instead of output for iscsiadm * Make swap default to vdb if there is no ephemeral * Handle --flagfile by converting to .ini style * Update cfg from openstack-common * Fix xvpvncproxy error in nova-all (lp#928489) * Update MANIFEST.in to account for moved schemas * Remove ajaxterm from Nova * Adding the request id to response headers. Again * Update migration to work when data already exists * Fix support for --flagfile argument * Implements blueprint heterogeneous-tilera-architecture-support * Add nova/tests/policy.json to tarball * Fix quantum client filters * Store the correct tenant_id/project_id * dont show blank endpoint headers * Pass in project_id in ext. 
authorizer * Fix _poll_bandwidth_usage if no network on vif * Fix nova.virt.firewall debugging message to use UUID * Fix debugging log message to print instance UUID * mkfs takes vfat, not fat32 * Pass partition into libvirt file injection * bug 924266: connection_type and firewall_driver flags mismatch * bug 927507: fix quantum manager get_port_by_attachment * Fix broken flag in test_imagecache * Don't write a dns directive if there are no dns records in /etc/network/interfaces * Imported Translations from Launchpad * Backslash continuations (nova.db) * Add initiator to initialize_connection * Allows nova to read files as root * Re-run nova-manage under sudo if unable to read conffile * Fix status transition when reverting resize * Adds flags for href prefixes * X_USER is deprecated in favor of X_USER_ID * Move cfg to nova.openstack.common * Use Keystone Extension Syntax for EC2 Creds * Remove duplicate instances_path option * Delete swap VDI if not used * Raise ApiError in response to InstanceTypeNotFound * Rename inst in _create_image, and pass instance to log msgs * Fix bug #924093 * Make sure tenant_id is populated * Fix for bug 883310 * Increased coverage of nova/auth/dbdriver.py to 100%. Fixes 828609 * Make crypto use absolute imports * Remove duplicate logging_debug_format option * blueprint nova-image-cache-management phase1 * Set rescue instance hostnames appropriately * Throw an user error on creating duplicate keypairs Fixes bug 902162 * Fixes uuid lookup in virtual interfaces extension * Add comments to injected keys and network config * Remove hard coded m1.tiny behavior * Fix disassociation of fixed IPs when using FlatManager * Provides flag override for vlan interface * remove auto fsck feature from file injection. Bug 826794 * DRYing up Volume/Compute APIRouters * Excise M2Crypto! * Add missing dev. 
Fixes LP: #925607 * Capture bandwidth usage data before resize * Get rid of DeprecationWarning during db migration * Don't block forever for rpc.(multi)call response * Optionally disable file locking * Avoid weird test error when mox is missing * fix stale libvirt images on download failure. Bug 801412 * cleanup test case to use integers not strings * Respect availability_zone parameter in nova api * Fix admin password skip check * Add support for pluggable l3 backends * Improve dom0 and template VM avoidance * Remove Hyper-V support * Fix logging to log correct filename and line numbers * Support custom routes for extensions * Make parsing of usage stats from XS more robust * lockfile.FileLock already appends .lock * Ties quantum, melange, and nova network model * Make sure multiple calls to _get_session() aren't nested * bug 921087: i18n-key and local-storage hard-coded in xenapi * optimize libvirt raw image handling. Bug 924970 * Boto 2.2.x failes. Capping pip-requires at 2.1.1 * fixed bug 920856 * Expand policies for admin_actions extension * Correct checking existence of security group rule * Optionally pass a instance uuid to log methods * remove unsupported ec2 extensions * Fix VPN ping packet length * Use single call in ExtendedStatus extension * Add mkswap to rootwrap * Use "display_name" in "nova-manage vm list" * Fix broken devref docs * Allow for auditing of API calls * Use os.path.basename() instead of string splitting * Remove utils.runthis() * Empty connection pool after test_kombu * Clear out RPC connection pool before exit * Be more explicit about emptying connection pool * fixes melange ipam lib * bug 923798: On XenServer the DomU firewall driver fails with NotImplementedError * Return instancesSet in TerminateInstances ec2 api * Fix multinode libvirt volume attachment lp #922232 * Bug #923865: (xenapi driver)instance creation fails if no guest agent is avaiable for admin password configuration * Implementation of new Nova Volume driver for 
SolidFire ISCSI SAN * Handle kepair delete when not found * Add 'all_tenants' filter to GET /servers * Use name filter in GlanceImageService show_by_name * Raise 400 if bad kepair data is provided * Support file injection on boot w/ Libvirt * Refactor away the flags.DEFINE_* helpers * Instances to be created with a bookmark link * fix `nova-manage image convert` exception * Added validation of name when creating a new keypair * Ignore case in policy role checks * Remove session arg from sm_backend_conf_update * Remove session arguments from db.api * Add a note explaining why unhandled exceptions shouldn't be returned to users * Remove fetching of networks that weren't created via nova-manage * uses the instance uuid in libvirt by introducing a new variable 'uuid' for the used template instead of using a random uuid in libvirt * Fixing a rebuild race condition bug * Fixes bug 914418 * Remove LazySerializationMiddleware * Bug #921730: plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore no longer in use * Adding live migration server actions * bug 921931: fix Quantum Manager VM launch race condition * Fix authorization checks for simple_usage.show * Simplify somewhat complicated reduce() into sum() * Ignore connection_type when no instances exist * Add authorization checks to flavormanage extension * rootwrap: Fix KillFilter matching * Fix uptime calculation in simple_usage * Fixing rebuilds on libvirt, seriously * Don't pass filter_properites to managers * Fixing rebuilds on libvirt * Fix bug 921715 - 'nova x509-create-cert' fails * Return 403 instead of 401 when policies reject * blueprint host-aggregates: OSAPI extensions * blueprint host-aggregates: OSAPI/virt integration, via nova.compute.api * Fixes bug 921265 - i'nova-manage flavor create|list' * Remove unused flags.Help*Flag * Convert vmwareapi code to UNIX style line endings * Blueprint xenapi-provider-firewall and Bug #915403 * Adds extension for retrieving certificates * Add os-start/os-stop server 
actions to OSAPI * Create nova cert worker for x509 support * Bug #916312: nova-manage network modify --network flag is inconsistent * Remove unused nova/api/mapper.py * Add nova.exception.InvalidRPCConnectionReuse * Add support for Qpid to nova.rpc * Add HACKING compliance testing to run_test.sh * Remove admin_only ext attr in favor of authz * usage: Fix time filtering * Add an API extension for creating+deleting flavors * extensions: Allow registering actions for create + delete * Explicitly encode string to utf8 before passing to ldap * Make a bunch of dcs into single-entry lists * Abstract out _exact_match_filter() * Adds a bandwidth filter DB call * KVM and XEN Disk Management Parity * Tweak api-paste.ini to prepare for a devstack change * Remove deprecated serialization code * Add affinity filters updated to use scheduler_hints and have non-douchey names * Do not output admin_password in debug logs * Handle error in associate floating IP (bug 845507) * Brings back keystone middleware * Remove sensitive info from rpc logging * Error out instance on set password failure * Fixed limiting for flavors * Adds availability zone filter * Fixes nova-manage fixed list * API version check cleanups * ComputeNode Capacity support * blueprint host-aggregates: maintenance operations to host OSAPI exts * Add a specific filter for kill commands * Fix environment passing in DnsmasqFilter * Cleanups for rootwrap module * Fix 'nova-manage config list' * Add context and request spec to filter_properties * Allow compute manager prep_resize to accept kwargs * Adds isolated hosts filter * Make start_instance cast directly to compute host * Refactor compute api messaging calls to compute manager * Refactor test_scheduler into unit tests * Forgot to update chance scheduler for ignore_hosts change * Add SchedulerHints compute extension * Add floating IP support to Quantum Manager * Support filter based on CPU core (over)allocation * bug 917397 * Add option to force hosts to scheduler * 
Change the logic for deleting a record dns_domains * Handle FlavorNotFound on server list w/ filter * ERROR out instance if unrescue fails * Fix xenapi rescue without swap * Pull out ram_filter into a separate filter * pass filter_properties into scheduling requests for run_instance * Fixes bug #919390 - Block Migration fails when keystone is un use * Fix nova-manage floating list (fixes bug 918804) * Imported Translations from Launchpad * scheduler host_manager needs service for filters * Allow Quantum Manager to run in "Flat" mode * aws/ec2 api validation * Fix for bug 918502 * Remove deprecated extension code * Validating image id for rebuild * More cleanup of Imports to match HACKING * chmod nova-logspool * nova/network: pass network_uuid to linuxnet_interface_driver and vif driver * Clean up crypto.py * Fix missing imports and bad call caught by pyflakes * Clarify error messages for admin passwords * Log uuid when instances fail to spawn * Removed references to FLAGS.floating_ip_dns_domains * Removed some vestigial default args from DNS drivers * Allow config of vncserver_proxyclient_address * Rename 'zone' to 'domain.' * disk_config extension now uses OS prefix * Do not write passwords to verbose logs. 
bug 916167 * Automatically clean up DNS when a floating IP is deallocated * Fix disassociating of auto assigned floating ips * Cleanup Imports to match HACKING guidelines * Added an LDAP/PowerDNS driver * Add dns domain manipulation to nova * fixes bug lp914962 * Fixed bug 912701 * Fix bug #917615 * Separate scheduler host management * Set instance_ref property when creating snapshots * Implements blueprint vnc-console-cleanup * Rebuild/Resize support for disk-config * Allow instances in 'BUILD' state to be deleted * Stop allowing blank image names on snapshot/backup * Only update if there are networks to update * Drop FK constraint if it exists in migration 064 * Fix an error that prevents message from getting substituted * blueprint host-aggregates * Add missing scripts to setup.py (lp#917676) * Fixes bug 917128 * Clean up generate fingerprint * Add policy checking to nova.network.api.API * Add default policy rule * Super is not so super * Fixed the log line * Add tests for volume list and detail through new volume api, and fix error that the tests caught * Typofix for impl_kombu * Refactoring logging _log function * Update some extensions (1) * DECLARE osapi_compute_listen_port for auth manager * Increase robustness of image filtering by server * Update some extensions (2) * Implement BP untie-nova-network-models * Add ipv4 and ipv6 validation * greenlet version inconsistency * Add policy checks to Volume.API * Remove unused extension decorator require_admin * Fix volume api typo * Convert nova.volume.api.API to use volume objects * Remove a whole bunch of unused imports * have all quota errors return an http 413 * This import is not used * Refactor request and action extensions * Prefixing the request id with 'req-' to decrease confusion when looking at logs * Fixing a bug that was causing the logging to display the context info for the wrong user. 
bug: 915608 * Modify the fake ldap driver to fix compatibility * Create an instance DNS record based on instance UUID * Implements blueprint separate-nova-volumeapi * Implement more complete kombu reconnecting * First implementation of bp/live-migration-resource-calc * Remove 'status' from default snapshot properties * Clean up disk_format mapping in xenapi.vm_utils * Remove skipping of 2 tests * Make authz failures use proper response code * Remove compute.api.API.add_network_to_project * Adds test for local.py * Fix policy import in nova.compute.api * Remove network_api from Servers Controller * minor fix in comment * Updates linux_net to ignore some shell errors * Add policy checks to Compute.API * Ensure nova is compatible with WebOb 1.2+ * improve handling of the img_handlers config list * Unbreak start instance and fixes bug 905270 * catch InstanceInvalidState in more places * Fix some cfg test case naming conflicts * Remove 'location' from GlanceImageService * Makes common/cfg.py raise AttributeError * Call to instance_info_cache_delete to use uuid * Bug #914907: register_models in db/sqlalchemy/models.py references non-existent ExportDevice * Update logging in compute manager to use uuids * Do not overwrite project_id from request params * Add optional revision field to version number * Imported Translations from Launchpad * nova-manage floating ip fixes * Add a modify function to the floating ip dns api * Adding the request id to response headers * Add @utils.deprecated() * Blueprint xenapi-security-groups * Fix call to compute_api.resize from _migrate * Fix metadata mapping in s3._s3_parse_manifest * Fix libguestfs operation with specified partitions * fix reboot_instance typo * Fix bad test cases in smoketest * fix bug 914049: private key in log * Don't overwrite local context on elevated * Bug 885267: Fix GET /servers during instance delete * Adds support for floating ip pools * Adds simple policy engine support * Refactors utils.load_cached_file * 
Serialization, deserialization, and response code decorators * Isolate certain images on certain hosts * Workaround bug 852095 without importing mox * Bug #894683: nova.service does not handle attribute specific exceptions and client hangs * Bug #912858: test_authors_up_to_date does not deal with capitalized names properly * Adds workaround check for mox in to_primitive * preload cache table and keep it up to date * Use instance_properties in resize * Ensure tests are python 2.6 compatible * Return 409s instead of 500s when deleting certain instances * Update HACKING.rst * Tell users what is about to be installed via sudo * Fix LP912092 * Remove small unneeded code from impl_kombu * Add missing space between XML attributes * Fix except format to match HACKING * Set VLAN MTU size when creating the vlan interface * Add instance_name field to console detail command which will give the caller the necessary information to actually connect * Fix spelling of variable * Remove install_requires processing * Send event notifications for suspend and resume * Call mkfs with the correct order of arguments * Fix bug 901899 * Fix typo in nova/rootwrap/compute.py. Fixes LP: #911880 * Make quantum_use_dhcp falsifiable * Fixing name not defined * PEP8 type comparison cleanup * Add cloudpipe/vpn api to openstack api contrib * Every string does not need to be internationalized * Adds running_deleted_instance_reaper task * libvirt: implements boot from ISO images * Unused db.api cleanup * use name gateway_v6 instead of gateway6 * PEP8 remove direct type comparisons * Install a good version of pip in the venv * Bug #910045: UnboundLocalError when failing to get metrics from XenAPI hosts * re-raising exceptions fix * use dhcp_lease_time for dnsmasq. 
Fix bug 894218 * Clean up pylint errors in top-level files * Ensure generated passwords meet minimum complexity * Fixing novaclient_converter NameError * Bug 820059: bin/nova-manage.py VpnCommands.spawn calls non-existant method VpnCommands._vpn_for - fixed * Bug 751229: Floating address range fixed * Brings some more files up to HACKING standards * Ensure queue is declared durable so messages aren't dropped * Create notification queues as durable * Adding index to instances project_id column * Add an API for associating floating IPs with DNS entries * 'except:' to 'except Exception:' as per HACKING * Adds EC2 ImportKeyPair API support * Take the availability zone from the instance if available * Update glance Xen plugin w/ purge props header * Converting zones into true extension * Convering /users to admin extension * Add a DECLARE for dhcp_doamin flag to metadata handler * Support local target for Solaris, use 'safe' command-line processing * Add 'os-networks' extension * Converting accounts resource to admin extension * Add exit_code, stdout, stderr etc to ProcessExecutionException * Fixes LP bug #907898 * Switch extension namespace * Refactor Xen Vif drivers. Fixes LP907850 * Remove code in migration 064 to drop an fkey that does not exist. Fixes LP bug #907878 * Help clarify rpc API with docs and a bit of code * Use SQLAlchemy to drop foreign key in DB migrate * Move createBackup server action into extension * Bug#898257 support handling images with libguestfs * Bug#898257 abstract out disk image access methods * Move 'actions' subresource into extension * Make os-server-diagnostics extension admin-only * Remove unneeded broken test case * Fix spelling typos in comments * Allow accessIPv4 and accessIPv6 on rebuild action * Move 'diagnostics' subresource to admin extension * Cleaning up imports in compute and virt * Cleaning up imports in nova.api * Make reroute_compute use functools.wraps. 
Fixes LP bug #906945 * Removing extra code from servers controller * Generate instance faults when instance errors * Clarify NoValidHost messages * Fix one last bug in os-console-output extension * Fix os-console-output extension integration * Set Location header in server create and rebuild actions * Consistently use REBUILDING vm_state * Improve the minidns tests to handle zone matching * Remove unused FLAGS.block_size * Make UUID format checking more correct * Set min_ram and min_disk on snapshot * Add support for port security to QuantumManager * Add a console output action to servers * Creating mechanism that loads Admin API extensions * Document return type from utils.execute() * Renamed the instance_dns_driver to dns_driver for more general use * Specify -t rsa when calling ssh-keygen * create_export and ensure_export should pass up the return value, to update the database * Imported Translations from Launchpad * avoid error and trace on dom.vcpus() in lxc * Properly passes arg to run_iscsiadm to fix logout * Makes disassociate by timeout work with multi-host * Call get_instance_nw_info with elevated context, as documented in nova/network/manager.py * Adds missing joinedload for vif loading * missing comments about extensions to ec2 * Pull resource extensions into APIRouter * IPAM drivers aren't homogenous bug 903230 * use env to find 'false'. Fix for OS X * Fix scheduler error handler * Starting work on exposing service functionality * Bugfix for lp904932 * Ensure fkey is dropped before removing instance_id * Fixes bug 723235 * nova.virt.libvirt.firewall: set static methods * Expose Asynchronous Fault entity in the OSAPI * Fix nova-manage flags declaration * Remove useless flags declaration * Remove useless input_chain flags * Make XenAPI agent configuration synchronous * Switch disk_config extension to use one DB query * Update utils.execute so that check_exit_code handles booleans. 
Fixes LP bug #904560 * Rename libvirt_uri to uri * Make libvirt_uri a property * Making pep8 output less verbose * Refactors handling of detach volume * Fixes bug 887402 * Bug 902626 * Make various methods static * Pass additional information from nova to Quantum * Refactor vm_state and task_state checking * Updates OVS rules applied to IPv4 VIFs * Follow-on to I665f402f to convert rxtx_quota to rxtx_factor in nova-manage and a couple of tests * Make sure the rxtx_cap is used to set qos info * Fix some errors found by pychecker * Fix tgtadm off by one error. Fixes bug #871278 * Sanitize EC2 manifests and image tarballs * floating-ip: return UUID of instance rather than ID * Renaming instance_actions.instance_id column to instance_uuid. blueprint: internal-uuids * Fix for bug 902175 * fixed typos. removed an unused import * Vm state management and error states * Added support for creating nova volume snapshots using OS API * Fix error when subnet doesn't have a cidr set * bug 899767: fix vif-plugging with live migration * Fixing snapshot failure task_state * Imported Translations from Launchpad * Moves find config to utils because it is useful * fixed_ips by vif does not raise * Add templates for selected resource extensions * Fix network forwarding rule initialization in QuantumManager * _check_image_size returns are consistent * Fixed the perms on the linux test case file so that nose will run it * Add preparation for asynchronous instance faults * Add templates for selected resource extensions * Use more informative message when violating quota * Log it when we get a lock * removing TODO as we support Windows+XenServer and have no plans to support quiesce or VSS at the moment * Adds network model and network info cache * Rename .nova-venv to .venv * revert using git for novaclient * Port nova.flags to cfg * Make cfg work on python 2.6 * Relax novaclient and remove redis dependency * Relax dependency on boto 1.9b and nova-adminclient * Make QuantumManager no 
longer depend on the projects table * Imported Translations from Launchpad * Fix for bug 901459 * Updated the test runner module with a sys.path insert so that tests run in and outside a virtual environment * Add ability to see deleted and active records * Set instance['host'] to the original host value on revert resize * Fix race condition in XenAPI when using .get_all * Clean up snapshot metadata * Handle the 'instance' half of blueprint public-and-private-dns * Refactors periodic tasks to use a decorator * Add new cfg module * Remove extra_context support in Flags * A more secure root-wrapper alternative * Remove bzr related code in tests/test_misc * Change cloudServersFault to computeFault * Update associate_floating_ip to use instance objs * vm_state:=error on driver exceptions during resize * Use system M2Crypto package on Oneiric, bug 892271 * Update compute manager so that finish_revert_resize runs on the source compute host. Fixes bug #900849 * First steps towards consolidating testing infrastructure * Remove some remnants of ChangeLog and vcsversion.py generation * Pass '-r' option to 'collie cluster status' * Remove remnants of babel i18n infrastructure * Fixes a typo preventing attaching RBD volumes * Remove autogenerated pot file * Make admin_password keyword in compute manager run_instance method match what we send in the compute API. 
Fixes bug #900591 * remove duplicate netaddr in nova/utils * cleanup: remove .bzrignore * add index to instance_uuid column in instances * Add missing documentation for shared folder issue with unit tests and Python lock file * Updated nova-manage to work with uuid images Fixes bug 899299 * Add availabity_zone to the refresh list * Document nova-tarball Jenkins job * Adds extension documentation for some but not all extensions * Add templates for selected resource extensions * EC2 rescue/unrescue is broken, bug 899225 * Better exception handling during run_instance * Implement resize down for XenAPI * Fix for EC2 API part of bug 897164 * Remove some unused imports from db * Replacing instance id's in in xenapi.vmops and the xen plugin with instance uuids. The only references to instance id's left are calls to the wait_for_task() method. I will address that in another branch. blueprint: internal-uuids * Convert get_lock in compute to use uuids * Fix to correctly report memory on Linux 3.X * Replace more cases of instance ids with uuids * Make run_instance only support instance uuids * Updates simple scheduler to allow strict availability_zone scheduling * Remove VIF<->Network FK dependancy * Adds missing image_meta to rescue's spawn() calls * Bug #898290: iSCSI volume backend treats FLAGS.host as a hostname * split rxtx_factor into network and instance_type * Fixing get_info method implementations in virt drivers to accept instance_name instead of instance_id. The abstract class virt.ComputeDriver defines get_info as: def get_info(self, instance_name). 
blueprint: internal-uuids * Fixes bug 767947 * Remove unused ec2.action_args * Fix typo: priviledges -> privileges * Bug #896997: nova-vncproxy's flash socket policy port is not configurable * Convert compute manager delete methods to objects * Removing line dos line endings in vmwareapi_conn.py * reboot & rebuild to use uuids in compute manager * Fix for bug 887712 * Add NAT/gateway support to QuantumManager * Fix QuantumManager update_dhcp calls * Fix RPC responses to allow None response correctly * Use uuids for compute manager agent update * power_on/power_off in compute manager to use uuids * Use uuids for file injection * removed logic of throwing exception if no floating ip * Adding an install_requires to the setup call. Now you can pip install nova on a naked machine * Removing obsolete bzr-related clauses in setup.py * Makes rpc_allocate_fixed_ip return properly * Templatize extension handling * start/stop in compute manager to use uuids * Updating {add,remove}_fixed_ip_from_instance in compute.api and compute.manager to use instance uuid instead of instance id. blueprint internal-uuids * Use instance uuids for consoles and diagnostics * Fixes bug 888649 * Fix Bug #891718 * Bug #897091: "nova actions" fails with HTTP 400 / TypeError if a server action has been performed * Bug #897054: stack crashes with AttributeError on e.reason if the server returns an error * Refactor a few things inside the xenapi unit tests * New docs: unit tests, Launchpad, Gerrit, Jenkins * Fix trivial fourth quote in docstring * Fix deprecation warnings * Fix for bug 894431 * Remove boot-from-volume unreachable code path (#894172) * reset/inject network info in compute to use uuid * Updating set_admin_password in compute.api and compute.manager to use instance uuids instead of instance ids. 
Blueprint internal-uuids * rescue/unrescue in compute manager to use uuids * Updated development environment docs * Call df with -k instead of -B1 * Make fakelibvirt python2.6 compatible * Clean up compute api * Updating attach/detach in compute.api and compute.manager to use instance uuid instead of instance id. blueprint internal-uuids * Change compute API.update() to take object+params * Use XMLDictSerializer for resource extensions * Updating {add,remove}_security_group in compute.api to use instance uuids instead of instance ids. blueprint internal-uuids * Extend test_virt_driver to also test libvirt driver * poll_rebooting_instances passes an instance now * Revert "Fixes bug 757033" * Put instances in ERROR state when scheduler fails * Converted README to RST format * Workaround xenstore race conditions * Fix a minor memory leak * Implement schedule_prep_resize() * Fixes bug 886263 * snapshot/backup in compute manager to use uuids * Fixes bug 757033 * Converting tests to use v2 * lock/unlock in compute manager to use uuids * suspend/resume in compute manager to use uuids * Refactor metadata code out of ec2/cloud.py * pause/unpause in compute manager to use uuids * Creating new v2 namespace in nova.api.openstack * Add a "libvirt_disk_prefix" flag to libvirt driver * Added RST docs on how to use gettext * Refactoring/cleanup of some view builders * Convert remaining calls to use instance objects * Make run instances respect availability zone * Replacing disk config extension to match spec * Makes sure gateways forward properly * Convert security_group calls to use instance objs * Remove hostname update() logic in compute.API * Fixes bug 890206 * Follow hostname RFCs * Reference Ron Pedde's cleanup script for DevStack * Remove contrib/nova.sh and other stale docs * Separate metadata api into its own service * Add logging, error handling to the xenstore lib * Converting lock/unlock to use instance objects * Deepcopy optparse defaults to avoid re-appending 
multistrings (#890489) * install_venv: apply eventlet patch correctly with python 2.7 (#890461) * Fix multistring flags default handling (#890489) * Fixing image create in S3ImageService * Defining volumes table to allow FK constraint * Converting network methods to use instance objects * Handle null ramdisk/kernel in euca-describe-images * Bind engine to metadata in migration 054 * Adding downgrade for migration 57 plus test * Log the URL to an image_ref and not just the ID * Converting attach_volume to use instance object * Converting rescue/unrescue to use instance objects * Converting inject_file to use instance objects * Bug #888719: openvswitch-nova runs after firstboot scripts * Bug #888730: vmwareapi suds debug logging very verbose * Converting consoles calls to use instance objects * Converting fixed ip calls to use instance objects * Convert pause/unpause, sus/res to use instance obj * fix rebuild sha1 not string error * Verify security group parameters * Converting set password to use instance objects * Converting snapshot/backup to use instance objects * Refactor of QuotaError * Fix a notification bug when creating instances * Converting metadata calls to use instance objects * nova-manage: exit with status 1 if an image registration fails * Converting start and stop to use instance objects * Converting delete to use instance objects * Capture exceptions happening in API layer * Removed some old cruft * Add more error handling to glance xenapi plugin * Fixes bug 871877 * Replace libvirt driver's use of libxml2 with ElementTree * Extend fake image service to let it hold image data * Bug #887805 Error during report_driver_status(): 'LibvirtConnection' object has no attribute '_host_state' * More spelling fixes inside of nova * Fixes LP878319 * Fix exception reraising in volume manager * Adding Chuck Short to .mailmap * Undefine libvirt saved instances * Split compute api/manager tests within module * Workaround for eventlet bug with unit tests in RHEL6.1 
* Apply M2Crypto fix for all Fedora-based distributions * Fix failing libvirt test (bug 888250) * Spelling fixes in nova/api comments * Get MAC addresses from Melange * Refactor logging_error into utils * Converting rebuild to use instance objects * Converting resize to use instance objects * Converting reboot to use instance objects * Reducing the number of compute calls to Glance essex-1 ------- * Remove duplicate method (2) * Move tests for extensions to contrib directory * Remove duplicate method * Remove debugging print * Adds extended status information via the Admin API to the servers calls * Wait until the instance is booted before setting VCPU_params * changes logging reference in zone_manager.py * Exception cleanup in scheduler * Fixing create_vbd call per VolumeHelper refactoring * Switch glance XenAPI plugin to use urllib2 * Blueprint lasterror * Move failed instances to error state * Adding task_states.REBOOTING_HARD * Set task state to UPDATING_PASSWORD when needed * Clean up docstrings for faults.Fault and it's usage * Fix typo in docstring * Add DHCP support to the QuantumManager and break apart dhcp/gateway * Change network delete to delete by uuid or cidr * Bug #886353: Faults raised by OpenStack API Resource handlers fail to be reported properly * Define faults.Fault.__str__ * Speed up tests a further 35 seconds * Removing duplicate kernel/ramdisk check in OSAPI * Remove unnecessary image list in OSAPI * Add auto-reloading JSON config file support to scheduler * Change floating-snat to float-snat * Allows non-admin users to use simple scheduler * Skip libvirt tests when libvirt not present * Correcting libvirt tests that were failing * Fix for launchpad bug #882568 * Gracefully handle Xen resize failure * Don't update database before resize * fix bug 816630 * Set nova-manage to executable Fixes LP885778 * Fixing immediate delete after boot on Libvirt * exception.KeypairNotFound usage correction * Add local storage of context for logging * Reserve 
memory/disk for dom0/host OS * Speed up tests yet another 45 seconds * APIs should not wait on scheduler for builds in single zone deployment * Added some documentation to db.api module docstring * Updated rst docs to include threading model * Adds documentation for Xen Storage Manager * Xen Storage Manager Volume Driver * Drop extra XML tests and remove _json suffix from names * Fix empty group_id to be considered invalid * Stop nova-ajax-console-proxy configuring its own logging * Bug 884863: nova logs everything to syslog twice * Log the exception when we get one * Use fat32 for Windows, linux-swap for Linux swap partitions * Fix KeyError when passed unknown format of time * flatten distributed scheduler * Bug #884534: nova-ajax-console-proxy crashes on shutdown * Bug 884527: ajax_console_proxy_port needs to be an integer * Too much information is returned from POST /servers * Disable SQLite synchronous mode during tests * Creating uuid -> id mapping for S3 Image Service * Fix 'begining' typo in system usage data bug 884307 * Fixes lp883279 * Log original dropped exception when a new exception occurs * Fix lp:861160 -- newly created network has no uuid * Bug #884018: "stack help" prints stacktrace if it cannot connect to the server * Optional --no-site-packages in venv * fixes bug 883233. Added to Authors fix typo in scheduler/driver.py assert_compute_node_has_enough_memory * Updated NoAuth to account for requests ending in / * Retry failed SQL connections (LP #876663) * Removed autogenerated API .rst files * Fix to a documentation generation script * Added code to libvirt backend to report state info * Adding bulk create fixed ips. The true issue here is the creation of IPs in the DB that are not currently used(we are building the entire block). 
This fix is just a bandaid, but it does cut ~25 seconds off of the quantum tests on my laptop * Fix overzealous use of faults.Fault() wrapper * Revert how APIs get IP address info for instances * Support server uuids with security groups * Support using server uuids when accessing consoles * Adding support for retrying glance image downloads * Fix deletion of instances without fixed ips * Speed up test suite by 20 seconds * Removed callback concept on VM driver methods: * Fix file injection for OSAPI rebuilds. Fixes 881649 * Replaces all references to nova.db.api with nova.db * venv: update distribute as well as pip * Fix undefined glance_host in get_glance_client * Fix concurrency of XenAPI sessions * Server metadata must support server uuids * Add .gitreview config file for gerrit * Convert instancetype.flavorid to string * Make sure networks returned from get_instance_nw_info have a label * Use UUIDs instead of IDs for OSAPI servers * Improve the liveness checking for services * Refactoring of extensions * Moves a-zone scheduling into simple scheduler * Adds ext4 and reiserfs to _mount_filesystem() * Remove nova dependency on vconfig on Linux * Upgrade pip in the venv when we build it * Fixes bug 872459 * Repartition and resize disk when marked as managed * Remove dead DB API call * Only log instance actions once if instance action logging is enabled (now disabled by default) * Start switching from gflags to optparse * Don't leak exceptions out to users * Fix EC2 test_cloud timing issues * Redirects requests from /v#.# to /v#.#/ * Chain up to superclass tearDown in ServerActionsTest * Updated RST docs: bzr/launchpad -> git/github * Refactoring nova.tests.api.openstack.test_flavors * Refactoring image and server metadata api tests * Refactoring nova.tests.api.openstack.test_servers * Refactoring nova.tests.api.openstack.test_images * Utility script that makes enforcing PEP8 within git's pre-commit hook as easy as possible * Add XML templates * Remove OSAPI v1.0 * 
Remove unused flag_overrides from TestCase * Cancel any clean_reboot tasks before issuing the hard_reboot * Makes snapshots work for amis. Fixes bug 873156 * Xenapi driver can now generate swap from instance_type * Adds the ability to automatically issue a hard reboot to instances that have been stuck in a 'rebooting' state for longer than a specified window * Remove redundant, dead code * Added vcpu_weight to models * Updated links in the README that were out of date * Add INPUT chain rule for EC2 metadata requests (lp:856385) * Allow the user to choose either ietadm or tgtadm (lp:819997) * Remove VolumeDriver.sync_exec method (lp:819997) * Adds more usage data to Nova's usage notifications * Fixes bug 862637 -- make instance_name_template more flexible * Update EC2 get_metadata calls to search 'deleted': False. Fixes nova smoke_tests!!! * Use new ip addr del syntax * Updating HACKING to make split up imports into three blocks * Remove RateLimitingMiddlewareTest * Remove AoE, Clean up volume code * Adds vcpu_weight column to instance_types table and uses this value when building XenServer instances * Further changes to the cleaner * Remove duplicated functions * Reference orphaned_instance instead of instance * Continue to the next iteration of the loop if an instance is not found * Explicit errors on confirm/revertResize failures * Include original exception in ClassNotFound exception * Enable admin access to EC2 API server * Make sure unknown extensions return 404 * Handle pidfile exception for dnsmasq * Stop returning correct password on api calls * Restructure host filtering to be easier to use * Add support for header version parameter to specify API version * Set error state on spawn error + integration test * Allow db schema downgrades * moved floating ip db access and sanity checking from network api into network manager added floating ip get by fixed address added fixed_ip_get moved floating ip testing from osapi into the network tests where they belong * 
Adds a script that can automatically delete orphaned VDIs. Also had to move some flags around to avoid circular imports * Improve access check on images * Updating image progress to be more granular. Before, the image progress had only 2 states, 0 and 100. Now it can be 0, 25, 50 or 100 * Deallocate ip if build fails * Ensure non-default FLAGS.logfile_mode is properly converted to an octet * Moving admin actions to extension * Fixes bug 862633 -- OS api consoles create() broken * Adds the tenant id to the create images response Location header Fixes bug 862672 * Fixes bug 862658 -- ec2 metadata issue getting IPs * Added ==1.0.4 version specifier to kombu in pip-requires to ensure tests pass in a clean venv * bug lp845714 * install_venv: pip install M2Crypto doesn't work on Fedora * install_venv: add support for distro specific code * install_venv: remove versioned M2Crypto dependency * install_venv: don't use --no-site-packages with virtualenv * install_venv: pass the --upgrade argument to pip install * install_venv: refactor out pip_install helper * Replace socat with netcat * api.ec2.admin unit tests * Fixes Bug #861293 nova.auth.signer.Signer now honors the SignatureMethod parameter for SHA1 when creating signatures * Enforce snapshot cleanup * bug 861310 * Change 'recurse_zones' to 'local_zone_only' * Fixes euca-describe-instances failing or not showing IPs * Fixes a test failure in master * Fixed bug lp850602. Adding backing file copy operation on kvm block migration * Add nova-all to run all services * Snapshots/backups can no longer happen simultaneously. Tests included * Accept message as sole argument to NovaException * Use latest version of SQLAlchemy * Fix 047 migration with SQLAlchemy 0.7.2 * Beef up nova/api/direct.py tests * Signer no longer fails if hashlib.sha256 is not available. 
test_signer unit test added * Make snapshots private by default * use git config's review.username for rfc.sh * Raise InsufficientFreeMemory * Adding run_test.sh artifacts to .gitignore * Make sure options is set before checking managed_disk setting. Fixes bug 860520 * compute_api create*() and schedulers refactoring * Removed db_pool complexities from nova.db.sqlalchemy.session. Fixes bug 838581 * Ensure minRam and minDisk are always integers * Call endheaders when auth_token is None. Fixes bug 856721 * Catch ImageNotFound on image delete in OSAPI * Fix the grantee group loading for source groups * Add next links to images requests * put fully qualified domain name in local-hostname * Removing old code that snuck back in * Makes sure to recreate gateway for moved ip * Fix some minor issues due to premature merge of original code * * Rework osapi to use network API not FK backref * Fixes lp854585 * Allow tenant networks to be shared with domain 0 * Use ovs-vsctl iface-to-br to look up the bridge associated with the given VIF. This avoids assuming that vifX.Y is attached to xenbrY, which is untrue in the general case * Made jenkins email pruning more resilient * Fixing bug 857712 * Adds disk config * Adding xml schema validation for /versions resource * Fix bug 856664 overLimit errors now return 413 * Don't use GitPython for authors check * Fix outstanding pep8 errors for a clean trunk * Add minDisk and minRam to OSAPI image details * Fix rfc.sh's check for the project * Add rfc.sh to help with gerrit workflow * This patch adds flavor filtering, specifically the ability to flavor on minRam, minDisk, or both, per the 1.1 OSAPI spec * Add next links for server lists in OSAPI 1.1. 
This adds servers_links to the json responses, and an extra atom:link element to the servers node in the xml response * Update exception.wrap_exception so that all exceptions (not just Error and NovaException types) get logged correctly * Merging trunk * Adding OSAPI tests for flavor filtering * This patch adds instance progress which is used by the OpenStack API to indicate how far along the current executing action is (BUILD/REBUILD, MIGRATION/RESIZE) * Merging trunk * Fixes lp:855115 -- issue with disassociating floating ips * Renumbering instance progress migration * Fixing tests * Keystone support in Nova across Zones * trunk merge fixup * Fix keys in ec2 conversion to make sure not to use unicode * Adds an 'alternate' link to image views per 3.10 and 3.11 of http://docs.openstack.org/cactus/openstack-compute/developer/openstack-compute-api-1.1/content/LinksReferences.html * Typo * Fixing tests * Fixing tests * make sure kwargs are strings and not unicode * Merging trunk * Adding flavor filtering * Instance deletions in Openstack are immediate. 
This can cause data to be lost accidentally * Makes sure ips are moved on the bridge for nodes running dnsmasq so that the gateway ip is always first * pep8 * add tests and fix bug when no ip was set * fix diverged branch * migration conflict fixed * clean up based on cerberus review * clean up based on cerberus review * Remove keystone middlewares * fix moving of ips on flatdhcp bridge * Merged trunk * merged trunk * update floating ips tests * floating ip could have no project and we should allow access * actions on floating IPs in other projects for non-admins should not be allowed * floating_ip_get_by_address should check user's project_id * Pep8 fixes * Merging trunk * Refactoring instance_type_get_all * remove keystone url flag * merge trunk, fix conflicts * remove keystone * Include 'type' in XML output * Minor cleanup * Added another unit test * Fixed unit tests with some minor refactoring * Fix the display of swap units in nova manage * Refactored alternate link generation * pep8 fixes * Added function to construct a glance URL and unit test * merge from trunk * convert images that are not 'raw' to 'raw' during caching to node * show swap in Mb in nova manage * Address Soren's comments: * clean up temp files if an ImageUnacceptable is going to be raised Note, a qemu-img execution error will not clean up the image, but I think thats reasonable. We leave the image on disk so the user can easily investigate. * Change final 2 arguments to fetch_to_raw to not start with an _ * use 'env' utility to change environment variables LC_ALL and LANG so that qemu-img output parsing is not locale dependent. 
Note, I considered the following, but found using 'env' more readable out, err = utils.execute('sh', '-c', 'export LC_ALL=C LANG=C && exec "$@"', 'qemu-img', 'info', path) * Add iptables filter rules for dnsmasq (lp:844935) * create disk.local the same way ephemerals are created (LP: #851145) * merge with trunk r1601 * fix call to gettext * Fixed --uuid network command in nova-manage to desc to "uuid" instead of "net_uuid" * removes warning set forth in d3 for deprecated setting of bridge automagically * Update migration 047 to dynamically lookup the name of the instance_id fkey before dropping it. We can't hard code the name of the fkey since we didn't name it explicitly on create * added to authors cuz trey said I cant patch otherwise! * Fixed --uuid network command in nova-manage to desc to "uuid" instead of "net_uuid" * merged with trunk * Update migration 047 to dynamically lookup the name of the instance_id fkey before dropping it. We can't hard code the name of the fkey since we didn't name it explicitly on create * oops, add project_id and 'servers' to next links * Fixes migration for Mysql to drop the FK on the right table * Reverted some changes to instance_get_all_by_filters() that was added in rev 1594. An additional argument for filtering on instance uuids is not needed, as you can add 'uuid: uuid_list' into the filters dictionary. Just needed to add 'uuid' as an exact_match_filter. This restores the filtering to do a single DB query * fix syntax error in exception, remove "Dangerous!" comment * merged trunk and resolved conflict * run the alter on the right table * fix unrelated pep8 issue in trunk * use dictionary format for exception message * fix a test where list order was assumed * Removed the extra code added to support filtering instances by instance uuids. Instead, added 'uuid' to the list of exact_filter_match names. Updated the caller to add 'uuid: uuid_list' to the filters dictionary, instead of passing it in as another argument. 
Updated the ID to UUID mapping code to return a dictionary, which allows the caller to be more efficient... It removes an extra loop there. A couple of typo fixes * Reworked the export command to be nova-manage shell export --filename=somefile * Adds the ability to automatically confirm resizes after the `resize_confirm_window` (0/disabled by default) * use '_(' for exception messages * PEP8 cleanup * convert images that are not 'raw' to 'raw' during caching to node * now raising instead of setting bridge to br100 and warning as was noted * * Remove the foreign key and backrefs tying vif<->instance * Update instance filtering to pass ip related filters to the network manager * move/update tests * Adds an optional flag to force dhcp releases on instance termination. This allows ips to be reused without having to wait for the lease to timeout * remove urllib import * Fixing case where OSAPI server create would return 500 on malformed body * Fix the issue with the new dnsmasq where it tries and fails to bind to ipv6 addresses * Merging trunk * Renaming progress migration to 47 * merge with trunk * Added unit test * Corrected the status in DB call * don't try to listen on ipv6 addresses, or new dnsmasq goes boom * make our own function instead of using urllib.urlencode since we apparently don't suppor urlencoded strings yet * Merged trunk * remove unused import * merge the sknurt * remove the polymorph * Fix typo in comment * Fixes the handling of snapshotting in libvirt driver to actually use the proper image type instead of using raw for everything. Also cleans up an unneeded flag. Based on doude's initial work * merge with trunk * removing extra newline * catching AttributeError and adding tests * Remove vestigial db call for fixed_ips * Fixes the user credentials for installing a config-drive from imageRef * Some Linux systems can also be slow to start the guest agent. 
This branch extends the windows agent timeout to apply to all systems * remove extra line * get the interface using the network and instance * flag typo * add an optional flag to force dhcp release using dnsmasq-utils * Fix user_id, project_id reference for config_drive with imageRefs * Fix a bug that would make spawning new instances fail if no port/protocol is given (for rules granting access for other security groups) * When swap is specified as block device mapping, its size becomes 0 wrongly. This patch makes it set to the correct size according to instance_type * Fix pep8 issues * fixed grant user, added stdout support * This changes the interpretation of 'swap' for an instance-type to be in MB rather than GB * Fixing list prepend * Merging trunk * create disk.local the same way ephemerals are created (LP: #851145) * Fix failing test * Authorize to start a LXC instance without key, network file to inject, or metadata * Update the v1.0 rescue admin action and the v1.1 rescue extension to generate 'adminPass'. Fixes an issue where rescue commands were broken on XenServer. lp#838518 * pep8 * merge the trunks * update tests to return fake_nw_info that is valid for the pre_live_migrate * make sure to raise since the tests require it * Pep8 Fix * Update test_volumes to use FLAGS.password_length * Zero out the progress when beginning a resize * Adding migration progress * Only log migration info if they exist * remove getting fixed_ips directly from the db * removed unused import * Fixes libvirt rescue to use the same strategy as xen. Use a new copy of the base image as the rescue image. It leaves the original rescue image flags in, so a hand picked rescue image can still be used if desired * Fixing tests, PEP8 failures * fix permissions * Add a FakeVirDomainSnapshot and return it from snapshotCreateXML. Fixes libvirt snapshot tests * merge the trunks * Merged trunk * I am using iputils-arping package to send arping command. 
You will need to install this package on the network nodes using apt-get command apt-get install iputils-arping * Removed sudo from the arguments * Add a FakeVirDomainSnapshot and return it from snapshotCreateXML. Fixes libvirt snapshot tests * merge from trunk * Make sure grantee_group is eagerly loaded * Merged trunk * compute/api: swap size issue * Update exception.wrap_exception so that all exceptions (not just Error and NovaException types) get logged correctly * Removes the on-disk internal libvirt snapshot after it has been uploaded to glance * cleaned up * remove debugging * Merging trunk * Allowing resizes to the same machine * trunk merge * updates Exception.NoMoreFixedIps to subclass NovaException instead of Error * NoMoreFixedIps now subclasses NovaException instead of Error * merge trunk * was trying to create the FK when Should have been dropping * pep8 * well since sqlalchemy-migrate and sqlalchemy can't agree on what the FK is called, we fall back on just manually dropping it * tests working again * the table is the table for the reason its a table * uhh dialect doesn't exist, beavis * update comment * if no public-key is given (--key), do not show public-keys in metadata service * it merges the trunk; or else it gets the conflicts again * exceptions properly passed around now * merge with trunk at revno 1573 * add the fake_network Manager to prevent rpc calls * This makes the OS api extension for booting from volumes work. 
The _get_view_builder method was replaced in the parent class, but the BootFromVolume controller was not updated to use the new method * remove undedded imports and skips * pep8 fixes * Added a unit test * pass-through all other parameters in next links as well * update for the id->uuid flip * Merged trunk * Adding flavor extra data extension * Merged trunk * fix test * revert last change * Added virt-level support for polling unconfirmed resizes * build the query with the query builder * Removing toprettyxml from OSAPI xml serialization in favor of toxml * use uuids everywhere possible * make sure to use the uuid * update db api for split filterings searches * update tests * delete the internal libvirt snapshot after it is saved to glance * cleanup prints in tests * cleanup prints in tests * Add a simple test for the OS boot from volume api * get rid of debugs * Merged from trunk and resolved conflicts * Execute arping command using run_as_root=True instead of sudo * Return three rules for describe_security_groups if a rule refers to a foreign group, but does not specify protocol/port * pep8 issues * added xml support for servers_list in response with tests * Merged trunk * added servers_links in v1.1 with tests * added build_list to servers controllers and view builder and kept all old tests passing * The 1.1 API specifies that two vendor content types are allowed in addition to the standard JSON and XML content types * pep8 * tests are back * PEP8 fix * Adding progress * In the unlikely case of an instance losing a host, make sure we still delete the instance when a forceDelete is done * 0 for the instance id is False ;) * Cleanup state management to use vm_state instead of task_state Add schedule_delete() method so delete() actually does what it says it does * merge trunk * write out xml for rescue * fix up the filtering so it does not return duplicates if both the network and the db filters match * fix rescue to use the base image, reset firewall rules, and 
accept network_info * make sure to pass in the context * move the FakeNetworkManager into fake_network * Fix issue where floating ips don't get recreated when a network host reboots * ip tests were moved to networking * add tests * fix typo * allow matching on fixed_ip without regex and don't break so all results are reported * add case where vif may not have an instance_id associated with it * fix typo * Initial pass at automatically confirming resizes after a given window * Use the correct method to get a builder * merge trunks * pep8 * move ip filtering over to the network side * fix pep8 whitespace error * add necessary fields to flavor.rng schema * get all the vifs * get all the vifs * make sure we are grabbing out just the ids * flavor_elem.setAttribute -> flavor_elem.set, flavor -> flavor_dict * minor changes to credentials for the correct format * Don't report the wrong content type if a mapped type doesn't exist * add stubs for future tests that need to be written * Test both content types for JSON and XML * Remove unnecessary vendor content types now that they are mapped to standard content types automatically * Add copyright * Map vendor content types to their standard content type before serializing or deserializing. This is so we don't have to litter the code with both types when they are treated identically * exporting auth to keystone (users, projects/tenants, roles, credentials) * make xml-api tests pass * update variable name after merge: flavor_node -> flavor_elem * resolve conflicts / merge with trunk revno 1569 * Fixes an issue where 'invalid literal for int' would occur when listing images after making a v1.1 server snapshot (with a UUID) * fixed tests * removing toprettyxml * add attributes to xml api * Remove debugging * Update test_libvirt so that flags and fakes are used instead of mocks for utils.import_class and utils.import_object. 
Fixes #lp849329 * fix the test so that it fakes out the network * fix white space for pep8 * fix test_extensions test to know of new extension FlavorExtraData * add extension description for FlavorExtraData * Adding migration for instance progress * Make tests pass * no need for the instance at all or compute * bump the migration * remove unused import, make call to network api to get vifs for the instance * merge the trunk * skip a bunch of tests for the moment since we will need to rework them * remove the vif joins, some dead code, and the ability to take in some instances for filtering * allow passing in of instances already * run the instances filter through the network api first, then through the db * add get_vifs_by_instance and stub get_instance_ids_by_ip_filter * change vifs to rpc call and add instance ids by ip * Multi-NIC support for vmwareapi virt driver in nova. Does injection of Multi-NIC information to instances with Operating system flavors Ubuntu, Windows and RHEL. vmwareapi virt driver now relies on calls to network manager instead of nova db calls for network configuration information of instance. Re-oranized VMWareVlanBridgeDriver and added session parmeter to methods to use existing session. Also removed session creation code as session comes as argument. Added check for flat_inject flag before attempting an inject operation * last of the api.openstack.test_images merge fixes * pep8 fixes * trunk merge * makes sure floating addresses are associated with host on associate so they come back * Deprecate aoe in preperation for removal in essex * Only allow up to 15 chars for a Windows hostname * pep8 * deprecate aoe * Fix instance rebooting (lp847604) by correcting a malformed cast in compute.api and an incorrect method signature in the libvirt driver * Fix mismerge * make tests pass * This patch teaches virt/libvirt how to format filesystem on ephemeral device depending on os_type so that the behaviour matches with EC2's. 
Such behaviour isn't explicitly described in the documentation, but it is confirmed by checking realy EC2 instances. This patch introduces options virt_mkfs as multistring. Its format is --virt_mkfs== When creating ephemeral device, format it according to the option depending on os_type. This addresses the bugs, https://bugs.launchpad.net/nova/+bug/827598 https://bugs.launchpad.net/nova/+bug/828357 * Test new vendor content types as well * Only allow up to 15 chars for a Windows hostname * Split accept tests to better match the name of the test * Remove debugging print * Inject hostname to xenstore upon creation * Update test_libvirt so that flags and fakes are used instead of mocks for utils.import_class and utils.import_object. Fixes #lp849329 * interpret 'swap' to be in MB, not in GB * Actually test expected matches received * Test new content-types * This branch changes XML Serializers and their tests to use lxml.etree instead of minidom * add additional data to flavor's ViewBuilder * Inject hostname to xenstore upon creation * drop the virtual_interfaces key back to instances * - remove translation of non-recognized attributes to user metadata, now just ignored - ensure all keys are defined in image dictionaries, defaulting to None if glance client doesn't provide one - remove BaseImageService - reorganize some GlanceImageService tests * And again * Update MANIFEST.in to match directory moves from rev1559 * we're back * Update MANIFEST.in to match directory moves from rev1559 * Moving tests/test_cloud.py to tests/api/ec2/test_cloud.py. 
They are EC2-specific tests, so this makes sense * Same as last time * Made tests for version links more robust * PEP8 cleanup * PEP8 cleanup * PEP8 cleanups * zone manager tests working * fixing import * working on getting tests back * relocating ec2 tests * merging trunk; resolving conflicts * Correctly map image statuses from Glance to OSAPI v1.1 * pep8 fixes in nova/db/sqlalchemy/api.py and nova/virt/disk.py * Add support for vendor content types * pep8 fixes * merging trunk; resolving conflicts * Update GlanceClient, GlanceImageService, and Glance Xen plugin to work with Glance keystone * Fix typo (woops) * pep8 fix * Some arches don't have dmidecode, check to see if libvirt is capable of running rather than getInfo of the arch it's running on * merging parent branch lp:~rackspace-titan/nova/glance-client-keystone * adding tests for deleted and pending_delete statuses * Fixes rogue usage of sudo that crept in * fixups * remove unused dep * add test for method sig * parent merge * migration move * bug fixes * merging trunk * Fixes shutdown of lxc containers * Make quoting consistent * Fix rogue usage of 'sudo' bypassing the run_as_root=True method * trunk merge * region name * tweaks * fix for lp847604 to unbreak instance rebooting * use 'qemu-img resize' rather than 'truncate' to grow image files * When vpn=true in allocate ip, it attempts to allocate the ip that is reserved in the network. Unfortunately fixed_ip_associate attempts to ignore reserved ips. 
This fix allows to filter reserved ip address only when vpn=True * Do not require --bridge_interface for FlatDHCPManager (lp:844944) * Makes nova-vncproxy listen for requests on the queue like it did before the bin files were refactored * Update GlanceClient, GlanceImageService, and Glance Xen plugin to work with Glance keystone * api/ec2/ebs: make metadata returns correct swap and ephemeral0 * api/ec2: make get_metadata() return correct mappings * virt/libvirt: format ephemeral device and add fs label when formating ext3 fs * Fix spelling mistake * Stock zones follows a fill-first methodology—the current zone is filled with instances before other zones are considered. This adds a flag to nova to select a spread-first methodology. The implementation is simply adding a random.shuffle() prior to sorting the list of potential compute hosts by weights * Pass reboot_type (either HARD or SOFT) to the virt layers from the API * merging trunk * fixing image status mapping * don't need random in abstract_scheduler.py anymore.. * pull-up from trunk; move spread_first into base_scheduler.py * trunk merge * adding auth tokens to child zone calls * Add comment to document why random.shuffle() works * Merged trunk * Make whitespace consistent * Use triple quotes for docstrings to be consistent * Remove the unnecessary sudo from qemu-img as it is unneeded and doesn't work with our current packaging * Remove chanes_since and key_name from basic server entity * Merged trunk * remove extra line for pep8 * remove unnecessary qemu-img flag, use base image type by default * shorten comment to < 79 chars * merged rbp * remove sudo from qemu-img commands * adds a fake_network module to tests to generate sensible network info for tests. 
It does not require using the db * Adding a can_read_deleted filter back to db.api.instance_get_all_by_filters that was removed in a recent merge * removing key_name and config_drive from non-detailed server entity * Authorize to start a LXC instance withour, key, network file to inject or metadata * Open Essex (switch version to 2012.1) * Last Diablo translations for Nova * Open Essex (switch version to 2012.1) * Last Diablo translations * pep 8 * Fixing security groups stuff * put key into meta-data, not top level 'data' * metadata key is 'public-keys', not 'keys' * fix for lp844364: fix check for fixed_ip association in os-floating-ips * if no public-key is given (--key), do not show public-keys in metadata service * NetworkManager's add_fixed_ip_to_instance calls _allocate_fixed_ips without vpn or requested_networks parameters. If vpn or requested_networks is not provided to the _allocate_fixed_ips method, it throws an exception. This issue is fixed now * Merged trunk * First pass at adding reboot_type to reboot codepath * child zone queries working with keystone now * Added docstring to explain usage of reserved keyword argument * One more bug fix to make zones work in trunk. Basic problem is that in novaclient using the 1.0 OSAPI, servers.create() takes an ipgroups argument, but when using the 1.1 OSAPI, it doesn't, which means booting instances in child zones won't work with OSAPI v1.0. This fix works around that by using keyword arguments for all the arguments after the flavor, and dropping the unused ipgroups argument * Fixes the reroute_compute decorator in the scheduler API so that it properly: * make check for fixed_ip association more defensive * Fix lp:844155 * Changing a behavior of update_dhcp() to write out dhcp options file. This option file make dnsmasq offer a default gateway to only NICs of VM belonging to a network that the first NIC of VM belongs to. So, first NIC of VM must be connected to a network that a correct default gateway exists in. 
By means of this, VM will not get incorrect default gateways * merged trunk * merging trunk * merging trunk * merged trunk * Make weigh_hosts() return a host per instance, instead of just a list of hosts * converting fix to just address ec2; updating test * Do not attempt to mount the swap VDI for file injection * Add a NOTE() * Merged trunk * Use .get instead * Do not attempt to mount the swap VDI for file injection * pull-up from trunk * pull-up from trunk * pull-up from trunk * adding can_read_deleted back to db api * Clean up shutdown of lxc containers * Cleanup some more comments * Cleanup some comments * fixes vncproxy service listening on rabbit * added tests for failure cases talking with zones * This code contains contains a new NetworkManager class that can leverage Quantum + Melange * comment fix * typo trying to raise InstanceNotFound when all zones returned nothing * create a new exception ZoneRequestError to use for returning errors when zone requests couldn't complete * pep8 fix for tests/api/openstack/test_servers.py which is an issue in trunk * catch exceptions from novaclient when talking to child zones. store them and re-raise if no other child zones return any results. If no exceptions are raised but no results are returned, raise a NotFound exception * added test to cover case where no local hosts are available but child hosts are * remove the short circuit in abstract scheduler when no local hosts are available * fix for lp844364: improve check for fixed_ip association * Ensure restore and forceDelete don't do anything unless the server is waiting to be reclaimed * actually shuffle the weighted_hosts list.. 
* Check task_state for queued delete * spread-first strategy * Make sure instance is deleted before allowing restore or forceDelete * Add local hostname to fix Authors test * delete_instance_interval -> reclaim_instance_interval * PEP8 cleanup * Restart compute with a lower periodic_interval to make test run faster * merge trunk * properly handle the id resetters * removed vestige * pull-up from trunk * fix a couple of typos in the added unit test * modified unit tests, set use_single_default_gateway flag to True whereever needed instead of setting it in the init method * exclude net tag from host_dhcp if use_single_default_gateway flag is set to false * forgot _id * had used wrong variable * Fixes a case where if a VIF is returned with a NULL network it might not be able to be deleted. Added test case for that fix * Fix for LP Bug #837867 * weigh_hosts() needs to return a list of hosts for the instances, not just a list of hosts * Merged trunk * Set flat_injected to False by default * changed the fixed_ip_generator * PEP8 cleanup * Wait longer for all agents, not just Windows * merged trunk * updated floating_ip generation * Tests for deferred delete, restore and forceDelete * An AMI image without ramdisk image should start * Added use_single_default_gateway to switch from multiple default gateways to single default gateway * Fixed unit test * reverting change to GlanceImageService._is_image_available * At present, the os servers.detail api does not return server.user_id or server.tenant_id. 
This is problematic, since the servers.detail api defaults to returning all servers for all users of a tenant, which makes it impossible to tell which user is associated with which server * reverting xenapi change * Micro-fix; "exception" was misspelled as "exceptions" * Fix a misspelling of "exception" * revert changes to display description * merged trunk * novaclient v1_0 has an ipgroups argument, but novaclient v1_1 doesn't * Set flat_injected to False by default * Fixes an issue where 'invalid literal for int' would occur when listing images after making a v1.1 server snapshot (with a UUID) * further cleanup * Default to 0 seconds (off) * PEP8 cleanups * Include new extension * Implement deferred delete of instances * trunk merge * cleaning up tests * zone name not overwritten * Update the v1.0 rescue admin action and the v1.1 rescue extension to generate 'adminPass'. Fixes an issue where rescue commands were broken on XenServer. lp#838518 * fix a mistaking of dataset and expected values on small test * fix a mistaking of deletion in ensure_floating_forward * revert codes for db * correct a method to collect instances from db add interface data to test * added me to Authors * meeging trunk * format for pep8 * format for pep8 * implement unit test for linux_net * Adjust test_api to account to multiple rules getting returned for a single set rule * Clean up security groups after use * Make a security group rule that references another security group return ipPermission for each of tcp, udp, and icmp * Multi-NIC support for vmwareapi virt driver in nova. Does injection of Multi-NIC information to instances with Operating system flavors Ubuntu, Windows and RHEL. vmwareapi virt driver now relies on calls to network manager instead of nova db calls for network configuration information of instance. Ensure if port group is properly associated with vlan_interface specified in case of VLAN networking for instances. 
Re-organized VMWareVlanBridgeDriver and added session parameter to methods to use existing session. Also removed session creation code as session comes as argument. Added check for flat_inject flag before attempting an inject operation. Removed stale code from vmwareapi stubs. Also updated some comments to be more meaningful. Did pep8 and pylint checks. Tried to improve pylint score for newly added lines of code * Fix bug #835919 that outputs an option file for dnsmasq not to offer a default gateway on second vif * Accidentally added instance to security group twice in the test. Fixed * Minor cleanup * Fixing xml serialization of limits resource * correct floating ip id to increment in fake_network * Add iptables filter rules for dnsmasq * Merged trunk * Change non-ASCII character * Launchpad automatic translations update * Instance record is not inserted in db if the security group passed to the RunInstances API doesn't exist * Added unit tests to check instance record is not inserted in db when security groups passed to the instances are not existing * removed unneeded import * rick nits * alex meade issues * Added list of security groups to the newly added extension (Createserverext) for the Create Server and Get Server detail responses * default description to name * use 'qemu-img resize' rather than 'truncate' to grow image files * remove extra description stuff * fix pep8 violation * feedback from jk0's review, including removing a lot of spaces from docstrings * revert description changes, use metadata['description'] if it is set to populate field in db * merged trunk * change db migrate script again to match other similar scripts * Fix for LP Bug #839269 * move networks declarations within upgrade/downgrade methods * more review cleanup * remove import of 'fake' from nova manager, now that we've moved that to test_quantum.py * Fixes a small bug which causes filters to not work at all. 
Also reworks a bit of exception handling to allow the exception related to the bug to propagate up * Email error again. Tired * Email error * Fixed review comments * Add documentation comment * pull-up from trunk * Forgot to handle return value * Add tests for flags 'snapshot_image_format' * Update snapshot image metada 'disk_format' * Add flag 'snapshot_image_format' to select the disk format of the snapshot image generated with the libvirt driver * missing migration * Email contact error * Update Authors file * Merged trunk * Correct tests associated * Fix protocol-less security groups * Adding feedparser to pip-requires * Removing xml functions that are no longer called * Launchpad automatic translations update * Glance can now perform its own authentication/authorization checks when we're using keystone * import filters in scheduler/host_filter.py so default_host_filter gets added to FLAGS; rework SchedulerManager() to only catch missing 'schedule_' attribute and report other missing attributes * move content of quantum/fake.py to test_quantum.py in unit testing class (most original content has been removed anyway) * melange testing cleanup, localization cleanup * remove references to MelangeIPAMTest, as they cannot be used yet * Deleted debug messages * Resolved conflicts and fixed pep8 errors * Fix a few references to state_description that slipped through * added unit tests and cleanup of import statements * renamed fake_network_info.py * trunk merge * moved cidr_v6 back * Probably shouldn't leave that commented out * Added test for NULL network * Fixed lp835242 * Fixes for minor network manager issues centered around deleting/accessing instances which don't have network information set * remove extra references to state_description * pull-up from trunk * merge unit test from Chris MacGown * Adds test for image.glance.GlanceImageService._is_image_available * - implements changes-since for servers resource - default sort is now created_at desc for instances * 
undo change in setting q_tenant_id in quantum_manager.create_network * additional review cleanup * docstring cleanup * merging trunk * Fixes NotFound exceptions to show the proper instance id in the ec2 api * typo * more review cleanup * another commit from brad * add specific exceptions for quantum client. Fix doc-strings in client.py * merge brad's changes that address most review feedback * fix for lp838583 - fixes bug in os-floating-ips view code that prevents instance_id from being returned for associated addresses * Accept keypair when you launch a new server. These properties would be stored along with the other server properties in the database (like they are currently for ec2 api) * Launchpad automatic translations update * merge trunk, fix tests * fix for lp838583 - return instance_id for associated floating_ips, add test * removing unnecessary imports * remove BaseImageService * pep8 * move GlanceImageService tests to proper module; remove translation of non-standard image attributes to properties; ensure all image properties are available, defaulting to None if not provided * merge trunk * Add comment for an uncommon failure case that we need to fix * Fix for LP Bug #838466 * Correctly yield images from glance client through image service * Simple usage extension for nova. Uses db to calculate tenant_usage for specified time periods * Fix for LP Bug #838251 * merge trunk, fix conflict * Validates that user-data is b64 encoded * Updated VersionsAtomSerializer.index to use lxml.etree to generate atom feed * remove extra test * merged trunk * Fixed and improved the way instance "states" are set. 
Instead of relying on solely the power_state of a VM, there are now explicitly defined VM states and VM task states which respectively define the current state of the VM and the task which is currently being performed by the VM * Updating test for xml to use lxml * expect key_name attribute in 1.1 * change to use _get_key_name to retrieve the key * Implements lp:798876 which is 'switch carrot to kombu'. Leaves carrot as the default for now... decision will be made later to switch the default to kombu after further testing. There's a lot of code duplication between carrot and kombu, but I left it that way in preparation for ripping carrot out later and to keep minimal changes to carrot * Disassociated previously associated floating ips when calling network_api.associate_floating_ip. Also guard against double-association in the network.manager * adding support for limiting in image service; updating tests with fixture ids and marker support * trunk merge * merging trunk * fix keypairs stubs * add explicit message for NoMoreFloatingIps exception * fix for chris behrens' comment - move tenant_id => project_id mapping to compute.api.get_all * moved key_name per review * zone_add fixed to support zone name * kludge for kombu 1.1.3 memory transport bug * merged trunk * Removed extraneous import and s/vm_state.STOP/vm_states.STOPPED/ * Merged trunk * Code cleanup * Use feedparser to parse the generated atom feeds in the tests for the versions resource * add test to verify 400 response when out of addresses * switched default to kombu per vishy * use kombu.connection.BrokerConnection vs kombu.connection.Connection so that older versions of kombu (1.0.4) work as well as newer * fix FloatingIpAlreadyInUse to use correct string pattern, convert ApiErrors to 400 responses * Fix for LP Bug #782364 * Fix for LP Bug #782364 * more logging info to help identify bad payloads * Removed test_parallel_builds in the XenAPI tests due to it frequently hanging indefinitely * logging change 
when rpc pool creates new connection * pep8 fix * make default carrot again and delay the import in rpc/__init__.py * Removed debug messages * Fix for LP Bug #837534 * add kombu to pip-requires and contrib/nova.sh * restore old way FLAGS.rpc_backend worked.. no short name support for consistency * fix remaining tests * Update RequestContext so that it correctly sets self.is_admin from the roles array. Additionally add a bit of code to ignore case as well * pep8, fix fakes * fix a bunch of direct usages of db in compute api * make two functions instead of fast flag and add compute api commands instead of hitting db directly * fixing bug * fixing short-ciruit condition * yielding all the images * merged trunk * changing default sort to created_at * The exception 'RamdiskNotFoundForImage' is no longer used * With OS API, if the property 'ramdisk_id' isn't set on the AMI image, Nova can not instantiate it. With EC2 API, the AMI image can be instantiate * adding an assert * Use getCapabilities rather than getInfo() since some versions of libvirt dont provide dmi information * supporting changes-since * Fix a bad merge on my part, this fixes rebuilds\! * disassociate floating ips before re-associating, and prevent re-association of already associated floating ips in manager * Update RequestContext so that it correctly sets self.is_admin from the roles array. Additionally add a bit of code to ignore case as well * Merged trunk * remove unneeded connection= in carrot Consumer init * pep8 fix for test_rpc_common.py * fix ajax console proxy for new create_consumer method * doc string cleanup * created nova/tests/test_rpc_common.py which contains a rpc test base class so we can share tests between the rpc implementations * ditched rpc.create_consumer(conn) interface... instead you now do conn.create_consumer(. * Update the EC2 ToToken middleware to use eventlet.green.httplib instead of httplib2. 
Fixes issues where the JSON request body wasn't getting sent to Keystone * remove brackets from mailmap entry * access db directly in networkmanagers's delete_network method, so stubbed test call works correctly * more logging info to help identify bad payloads * In the XenAPI simulator, set VM.domid, when creating the instance initially, and when starting the VM * remove 'uuid' param for nova-manage network delete that I had add previously * add alias to mailmap * update file name for db migrate script after merge (again) * update file name for db migrate script after merge * merged trunk * Fixes this bug by removing the test. The test has no asserts and seems to be raising more problems than it could solve * Removed test_parallel_builds * Merged trunk * Increased migration number * Fixes lp:813864 by removing the broken assert. The assert was a check for isinstance of 'int' that should have been 'long'. But it doesn't appear this assert really belongs, anyway * Merged trunk * Adds assertIn and assertNotIn support to TestCase for compatibility with python 2.6 This is a very minimal addition which doesn't require unittest2 * support the extra optional arguments for msg to assertIn and assertNotIn * removed broken assert for abstract_scheduler * pep8 fixes * fix for assertIn and assertNotIn use which was added in python 2.7. this makes things work on 2.6 still * merge trunk * restore fixed_ip_associate_pool in nova/db/sqlalchemy.py to its original form before this branch. Figured out how to make unit tests pass without requiring that this function changes * remove unused rpc connections in test_cloud and test_adminapi * carrot consumer thread fix * add carrot/kombu tests... small thread fix for kombu * add doc-strings for all major modules * remove fake IPAM lib, since qmanager must now access nova DB directly * Update the EC2 ToToken middleware to use eventlet.green.httplib instead of httplib2. 
Fixes issues where the JSON request body wasn't getting sent to Keystone * fix nova/tests/test_test.py * fix nova-ajax-console-proxy * fix test_rpc and kombu stuff * always set network_id in virtual_interfaces table, otherwise API commands that show IP addresses get confused * start to rework some consumer stuff * update melange ipam lib to use network uuid, not bridge * fix issue with setting 'Active' caused by Quantum API changes. Other misc fixes * Bug #835952: pep8 failures do not cause the tests to fail * Start domid's at 1, not 0, to avoid any confusion with dom0 * use 'uuid' field in networks table rather than 'bridge'. Specify project_id when creating instance in unit test * Bug #835964: pep8 violations in IPv6 code * In the XenAPI simulator, set VM.domid, when creating the instance initially, and when starting the VM * Bug #835952: pep8 failures do not cause the tests to fail * Bug #835964: pep8 violations in IPv6 code * Virtual Storage Array (VSA) feature. - new Virtual Storage Array (VSA) objects / OS API extensions / APIs / CLIs - new schedulers for selecting nodes with particular volume capabilities - new special volume driver - report volume capabilities - some fixes for volume types * fix FALGS typo * changes a few double quotes to be single, as the rest in the vicinity are * Default rabbit max_retries to forever Modify carrot code to handle retry backoffs and obey max_retries = forever Fix some kombu issues from cut-n-paste Service should make sure to close the RPC connection * Updated VersionsXMLSerializer and corresponding tests to use lxml * v1.0 of server create injects first users keypair * add tests to verify NotFound exceptions are wrapped with the proper ids * use db layer for aggregation * merged trunk * flag for kombu connection backoff on retries * more fixes * more work done to restore original rpc interfaces * merge changes from brad due to recent quantum API changes * Minor changes based on recent quantum changes * start of kombu 
implementation, keeping the same RPC interfaces * doubles quotes to single * changed format string in nova-manage * removed self.test ip and _setup_networking from libvirt * updated libvirt test * merge trunk * stubbed some stuff in test_libvirt * removed create_volumes, added log & doc comment about experimental code * reverted CA files * couple of pep8s * Tiny tweaks to the migration script * updated fake values * updated fake values * Merged trunk and fixed conflicts * updated fake values * updated fake values * forgot ) * update libvirt tests * Update compute API and manager so that the image_ref is set before spawning the rebuilt instance. Fixes issue where rebuild didn't actually change the image_id * added debug prints for scheduler * update libvirt * updated instance type fake model * added vcpus to instance flavor test model * added memory_mb to instance flavor test model * forgot test print statements * misplaced comma.. * Update compute API and manager so that the image_ref is set before spawning the rebuilt instance. Fixes issue where rebuild didn't actually change the image_id * Add brad to Authors file * replace accidental deletion in nova-mange * rearrange imports * fix for quantum api changes, change nova-mange to have quantum_list command * merge brad's fixes * add priority for static networks * driver: added vsa_id parameter for SN call * merged with rev.1499 * cosmetic cleanup * Updated server and image XML serializers to take advantage of the addresses and metadata serializers * VSA code redesign. Drive types completely replaced by Volume types * merged trunk * Just a couple of small changes I needed to get the migrations working with SQLAlchemy 0.7.x on Fedora 16 * Minor fixes * check log file's mode prior to calling chmod * The fix for run_iscsiadm in rev 1489 changed the call to use a tuple because values were being passed as tuples. 
Unfortunately a few calls to the method were still passing strings * Add a set of generic tests for the virt drivers. Update a bit of documentation to match reality * updated LimitsXMLSerializer to use etree and supply the xml declaration * merge underlying fix for testing * merged trunk * updated additional limits test * pep8 * pass all commands to run_iscsiadm as a tuple * altered fake network model * Updated limits serialization tests to use etree and added limits schema * Test fixup after last review feedback commit * Fix glance image authorization check now that glance can do authorization checks on its own; use correct image service when looking for ramdisk, etc.; fix a couple of PEP8 errors * forget a return * review feedback * Fixed integrated.test_xml to be more robust * typo * fixed a couple of syntax errors * Add bug reference * updated tests * updated libvirt tests to use fake_network_info * Bumped migration number * Merged trunk * Review feedback * pep8 * DRYed up code by moving _to_xml into XMLDictSerializer * updated addresses serializer to use etree instead of minidom * Added addresses schema * updated addresses xml serialization tests to use etree instead of minidom * Updated ServerXMLSerializer to use etree instead of minidom * added unit tests to instance_types for rainy day paths * Reverted two mistakes when looking over full diff * Updated MetadataXMLSerializer to use etree instead of minidom * Added: - volume metadata - volume types - volume types extra_specs * Added schemas Updated metadata tests to use etree instead of minidom * Servers with metadata will now boot on xenserver with flat_injected==False * moved import up * Verify resize needs to be set * changing comment * fixing bug * merged trunk * Updated ImagesXMLSerializer to use etree instead of minidom * Set error state when migration prep fails * Removed invalid test * Removed RESIZE-CONFIRM hack * Set state to RESIZING during resizing.. 
* Merged trunk * Another attempt at fixing hanging test * Once a network is associated with project, I can’t delete this network with ‘nova-manage network delete’. As you know, I can delete network by scrubbing the project with ‘nova-manage project scrub’. However it is too much. The cause of this problem is there is no modify command of network attribute * Update paste config so that EC2 admin API defaults to noauth * merged with volume types (based on rev.1490). no code rework yet * merged with volume_types. no code refactoring yet * merged with nova 1490 * added new tables to list of DBs in migration.py * removes french spellings to satisfy american developers * added virtio flag; associate address for VSA; cosmetic changes. Prior to volume_types merge * stub_instance fix from merge conflict * moved import to the top * fixing inappropriate rubyism in test code * Added fix for parallel build test * Fixed silly ordering issue which was causing tons of test failures * merged trunk * change snapshot msg too * forgot to add new extension to test_extensions * Add me to Authors * added Openstack APIs for volume types & extradata * Add comments for associate/dissociate logic * Updated ImageXMLSerialization tests to use etree instead of minidom Fixed incorrect server entity ids in tests * Merged from trunk * Add names to placeholders of formatting * The notifiers API was changed to take a list of notifiers. 
Some people might want to use more than one notifier so hopefully this will be accepted into trunk * use dict.get for user_id, project_id, and display_description in servers view as suggested by ed leaf, so that not all tests require these fields * Updated flavors xml serialization to use lxml instead of minidom * merge trunk, fix tests * fix more tests * Removed unused imports * Updated FlavorsXMLSerialization tests to use etree and validation instead of minidom * Merged from trunk * split test_modify() into specific unit tests * Added DELETED status to OSAPI just in case * Fixes iscsiadm commands to run properly * Fixed issue where we were setting the state to DELETED before it's actually deleted * merged with rev.1488 * Merged trunk and fixed conflicts * added volume type search by extra_spec * Fix for trying rebuilds when instance is not active * Fixed rebuild naming issue and reverted other fix which didn't fix anythin * Attempt to fix issue when deleting an instance when it's still in BUILD * Fix default hostname generator so that it won't use underscores, and use minus signs instead * merged with 1487 * pep8 compliant * Merged from trunk * - rebuilds are functional again - OSAPI v1.1 rebuild will accept adminPass or generate a new one, returning it in a server entity - OSAPI v1.0 will generate a new password, but it doesn't communicate it back to the user * Fix flag override in unit test * merged with rev.1485 * add rainy day test to to_global fixed to_global to catch correct error from incorrect mac addresses * Let's be more elegant * similar to lp828614: add rainy day test and fix exception error catch to AddrFormatError * check log file mode prior to chmod * added unit tests for version.py * Merged trunk * Fix for migrations * Conversion to SQLAlchemy-style * dict formatting * Commit without test data in migration * Commit with test data in migration * Do not require --bridge_interface for FlatDHCPManager * Fix quotas migration failure * Fix flavorid 
migration failure * fixed indentation * adding xml serialization and handling instance not found * removing extraneous imports * pep8 * Thou shalt not use underscores in hostnames * Catch exception for instances that aren't there * pep8 fixes * Couple of fixes to the review feedback changes * Launchpad automatic translations update * Address code review feedback from Rick and Matt * removing print statement * added volume metadata APIs (OS & volume layers), search volume by metadata & other * Update paste config so that EC2 admin API defaults to noauth * cleanup * updating tests * fix iscsi adm command * Fix pep8 * Merged from trunk * added volume_types APIs * Fix not found exceptions to properly use ec2_ips for not found * Stub out the DB in unit test. Fix 'nova-manage network modify' to use db.network_update() * rebuilds are functional again * Adds a use_deprecated_auth flag to make sure creds generated using nova-manage commands will work with noauth * Merged from upstream * Fixed some pep8 and pylint issues * Forgot to set the flag for the test * I added notifications decorator for each API call using monkey_patching. By this merge, users can get API call notification from any modules * Fixes bug that causes 400 status code when an instance wasn't attached to a network * fix for rc generation using noauth * Fixed doc string * Merged from upstream * Switched list_notifier to log an exception each time notify is called, for each notification driver that failed to import * updating tests * merging trunk * Fixed some docstring Added default publisher_id flagw * Removed blank line * Merged with trunk * Fixed typo and docstring and example class name * Updated migration number * Move use_ipv6 into flags. Its used in multiple places (network manager and the OSAPI) and should be defined at the top level * Merged trunk * PEP8 fixes * 'use the ipv6' -- 'use ipv6' * Move use_ipv6 into flags. 
Its used in multiple places (network manager and the OSAPI) and should be defined at the top level * Refresh translations * This branch does the final tear out of AuthManager from the main code. The NoAuth middlewares (active by default) allow a user to specify any user and project id through headers (os_api) or access key (ec2_api) * Implements first-pass of config-drive that adds a vfat format drive to a vm when config_drive is True (or an image id) * Launchpad automatic translations update * pulling all qmanager changes into a branch based on trunk, as they were previously stacked on top of melange * Moved migration and fixed tests from upstream * Merged trunk * Added the fixes suggested by Eric Windisch from cloudscaling.. * removing unnecessary tthing * merge trunk, resolve conflicts, fix tests * unindented per review, added a note about auth v2 * Our goal is to add optional parameter to the Create server OS 1.0 and 1.1 API to achieve following objectives:- * fixing exception logging * Fixes bug 831627 where nova-manage does not exit when given a non-existent network address * Move documentation from nova.virt.fake into nova.virt.driver * initial cut on volume type APIs * fix pep8 issue * Change parameters of 'nova-manage network modify'. Move common test codes into private method * Merged from trunk,resolved conflicts and fixed broken unit tests due to changes in the extensions which now include ProjectMapper * xml deserialization, and test fixes * syntax * update test_network test_get_instance_nw_info() * remove extra spaces * Fixed conflict with branch * merged trunk * The FixedIpCommandsTestCase in test_nova_manage previously accessed the database. This branch stubs out the database for these tests, lowering their run time from 104 secs -> .02 secs total * some readability fixes per ja feedback * fix comment * Update a few doc strings. Address a few pep8 issues. 
Add nova.tests.utils which provides a couple of handy methods for testing stuff * Make snapshot raise InstanceNotRunning when the instance isn't running * change NoAuth to actually use a tenant and user * Added Test Code, doc string, and fixed pip-requiresw * Merged trunk * Ensure that reserve and unreserve exit when an address is not found * Simple usage extension for nova. Uses db to calculate tenant_usage for specified time periods * Stubbed out the database in order to improve tests * logging as exception rather than error * Merged from upstream * Changed list_notifier to call sys.exit if a notification driver could not be found * merged trunk * implemented tenant ids to be included in request uris * Add a generic set of tests for hypervisor drivers * Upstream merge * Added ability to detect import errors in list_notifier if one or more drivers could not be loaded * Fix pep8 * delete debug code * Fixes for a number of tests * Use 'vm_state' instead of 'state' in instance filters query * Merged with Dan to fix some EC2 cases * Add 'nova-manage network modify' command * Fixes/updates to make test_cloud pass * Fix scheduler and integrated tests * Update migration number * Merged with Dan * Merged task_state -> task_states and fixed test_servers test * Update virt/fake to correct power state issue * fix test_servers tests * update test_security_group tests that have been added * Merged trunk * Renamed task_state to task_states.. * Ec2 API updates * merge with trunk * Fixing merge conflicts * Launchpad automatic translations update * Adds accessIPv4 and accessIPv6 to servers requests and responses as per the current spec * adding import * Fixes utils.to_primitive (again) to handle modules, builtins and whatever other crap might be hiding in an object * fixing bug lp:830817 * added test for bad project_id ... 
although it may not be used * added exception catch and test for bad project_id * added exception catch for bad prefix and matching test * added exception catch and test for bad prefix * comment strings * added unit tests for versions.py * Added OS APIs to associate/disassociate security groups to/from instances * add/remove security groups to/from the servers as server actions * lp:828610 * removed leftover netaddr import * added rainy day test for ipv6 tests. fixed ipv6.to_global to trap correct exception * Merged from trunk * pep8 * improve test coverage for instance types / flavors * Launchpad automatic translations update * Assorted fixes to os-floating-ips to make it play nicely with an in-progress novaclient implementation, as well as some changes to make it more consistent with other os rest apis. Changes include: * finished fake network info, removed testing shims * updated a maths * updated a maths * Merged trunk * Lots of modifications surrounding the OSAPI to remove any mention of dealing with power states and exclusively using vm_states and task_state modules. Currently there are still a number of tests failing, but this is a stopping place for today * who cares * added return * Merged from trunk and fixed review comments * fixed formatting string * typo * typo * typo * typo * typo * typo * added fake network info * Fixed review comments * Fixed typo * better handle malformed input, and add associated tests * Fixed typo * initial committ * Fixed NoneType returned bugw * merged trunk * Updated accessIPv4 and accessIPv6 to always be in a servers response * Fixed mistake on mergew * tweak to comment * Merged with trunkw * a few tweaks - remove unused member functions, add comment * incorporate feedback from brian waldon and brian lamar. 
Move associate/disassociate to server actions * merge from trunk * pep8 * Finished changing ServerXMLSerializationTest to use XML validation and lxml * Added monkey patching notification code function w * Updated test_show in ServerXMLSerializationTest to use XML validation * vm_state --> vm_states * Next round of prep for keystone integration * merge from trunk * Removes the incorrect hard-coded filter path * Revert irrelevant changes that accidentally crept into this patch :( * add tenant_id to api. without tenant_id, admins can't tell which servers belong to which tenants when retrieving lists * Merged from trunk * Fixes primitive with builtins, modules, etc * fix test_virtual interfaces for tenant_id stuff * fix test_rescue tests for tenant_id changes * Fix unit test for the change of 'nova-manage network list' format * Add copyright notices * merged trunk * Define FLAGS.default_local_format. By default it's None, to match current expected _create_local * Fix config_drive migration, per Matt Dietz * updated migration number * merge with trunk * Bump migration number * pep8 * Start improving documentation * Added uuid column in virtual_interfaces table, and an OpenStack extension API for virtual interfaces to expose these IDs. Also set this UUID as one of the external IDs in the OVS vif driver * Move documentation from nova.virt.fake to nova.virt.driver * add key_name/data support to server stub * add user_id and description. without user_id, there is no way for a tenant to tell which user created the server. description should be added for ec2 parity * merge * Bugfix for lp 828429. Its still not clear to me exactly how this code path is actually invoked when nova is used, so I'm looking for input on whether we should be adding a test case for this, removing the code as unused, etc. Thanks * remove security groups, improve exception handling, add tests * Merged trunk * merged trunk * Currently, rescue/unrescue is only available over the admin API. 
Non-admin tenants also need to be able to access this functionality. This patch adds rescue functionality over an API extension * Makes all of the binary services launch using the same strategy. * Removes helper methods from utils for loading flags and logging * Changes service.serve to use Launcher * Changes service.wait to actually wait for all the services to exit * Changes nova-api to explicitly load flags and logging and use service.serve * Fixes the annoying IOError when /etc/nova/nova.conf doesn't exist * tests pass * Fixes issue where ServersXMLSerializer was missing a method for update actions * follow same pattern as userdata (not metadata apporach) * rename the test method * Updated docs for the recent scheduler class changes * Passes empty string instead of None to MySQLdb driver if the DB password isn't set * merged trunk * added volume metadata. Fixed test_volume_types_extra_specs * declare the use_forwarded_for flag * merge trunk * Fixes lp828207 * Added unit test * allow specification of key pair/security group info via metadata * Fixed bug in which DescribeInstances was returning deleted instances. Added tests for pertinent api methods * Accept binary user_data in radix-64 format when you launch a new server using OSAPI. This user_data would be stored along with the other server properties in the database. 
Once the VM instance boots you can query for the user-data to do any custom installation of applications/servers or do some specific job like setting up networking route table * added unittests for volume_extra_data * Removed extra parameter from the call to _provision_resource_locally() * resolve conflicts after upstream merge * Change the call name * Cleanup the '_base' directory in libvirt tests * Oops * Review feedback * Added 'update' method to ServersXMLSerializer * Added more unit testcases for userdata functionality * Remove instances.admin_pass column * merged trunk * Merged with trunk * typo * updated PUT to severs/id to handle accessIPv4 and accessIPv6 * DB password should be an empty string for MySQLdb * first cut on types & extra-data (only DB work, no tests) * merge from trunk * Better docstring for _unrescue() * Review feedback * Need to pass the action * Updated the distributed scheduler docs with the latest changes to the classes * Syntax error * Moved compute calls to their own handler * Remove old comment * Don't send 'injected_files' and 'admin_pass' to db.update * fix docstrings in new api bins * one more * fix typo * remove signal handling and clean up service.serve * add separate api binaries * more cleanup of binaries per review * Changed the filter specified in _ask_scheduler_to_create_instance() to None, since the value isn't used when creating an instance * Minor housecleaning * Fix to return 413 for over limit exceptions with instances, metadata and personality * Refactored a little and updated unit test * minor cleanup * dhcpbridge: add better error if NETWORK_ID is not set, convert locals() to static dict * Added the fix for the missing parameter for the call to create_db_entry_for_new_instance() * Updated a number of items to pave the way for new states * Corrected the hardcoded filter path. 
Also simplified the filter matching code in host_filter.py * Added rescue mode extension * Fixed issue where accessIP was added in none detail responses * Updated ServersXMLSerializer to allow accessIPv4 and accessIPv6 in XML responses * Merged trunk * Added accessIPv4 and accessIPv6 to servers view builder Updated compute api to handle accessIPv4 and 6 * Fixed several logical errors in the scheduling process. Renamed the 'ZoneAwareScheduler' to 'AbstractScheduler', since the zone-specific designation is no longer relevant. Created a BaseScheduler class that has basic filter_hosts() and weigh_hosts() capabilities. Moved the filters out of one large file and into a 'filters' subdirectory of nova/scheduler * Merged trunk * Adds the enabled status of a host when XenServer reports its host's capabilities. This allows the scheduler to ignore hosts whose enabled is False when considering where to place a new instance * merge trunk and fix unit test errors * in dhcpbridge, only grab network id from env if needed * bug #828429: remove references to interface in nova-dhcpbridge * pep8 * remove extra reference in pipelib * clean up fake auth from server actions test * fix integration tests * make admin context the default, clean up pipelib * merged trunk * Merged with trunk and fixed broken testcases * merged with nova-1450 * nova-manage VSA print & forced update_cap changes; fixed bug with report capabilities; added IP address to VSA APIs; added instances to APIs * Make all services use the same launching strategy * Updated compute manager/API to use vm/task states. 
Updated vm/task states to cover a few more cases I encountered * Updated server create XML deserializer to account for accessIPv4 and accessIPv6 * Added the host 'enabled' status to the host_data returned by the plugin * Added accessip to models pep8 * Added migration for accessIPv4 and accessIPv6 * Fixed broken unit testcases * Initial instance states migration * pep8 fix * fix some naming inconsistencies, make associate/disassociate PUTs * Add NetworkCommandsTestCase into unit test of nova-manage * very minor cleanup * Undo an unecessary change * Merged trunk * Pep8 fixes * Split set state into vm, task, and power state functions * Add modules for task and vm states * Updated tests to correctly use the tenant id * DB object was being casted to dict() in API code. This did not work as intended and logic has been updated to reflect a more accurate way of getting information out of DB objects * merge from trunk * Cleaned up the extension metadata API data * Updated get_updated time * Cleaned up the file * Fixed vif test to match the JSON key change * Added XML support and changed JSON output keys * Added virtual interfaces API test * Removed serverId from the response * Merged trunk * Merged Dan's branch to add VIF uuid to VIF drivers for Quantum * Removed a change from faults.py that was not required." * Changed return code to 413 for metadata, personality and instance quota issues * Append the project_id to the SERVER-MANAGEMENT-URL header for v1.1 requests. 
Also, ensure that the project_id is correctly parsed from the request * add new vif uuid for OVS vifplug for libvirt + xenserver * Remove instances.admin_pass column * merge trunk * all tests passing * fix unit tests * Resolved conflicts and merged with trunk * Added uuid for networks and made changes to the Create server API format to accept network as uuid instead of id * I'm taking Thierry at his word that I should merge early and merge often :) * Fixes issue with exceptions getting eaten in image/s3.py if there is a failure during register. The variables referenced with locals() were actually out of scope * Allow local_gb size to be 0. libvirt uses local_gb as a secondary drive, but XenServer uses it as the root partition's size. Now we support both * Merged trunk * merge from trunk * make project_id authorization work properly, with test * Use netaddr's subnet features to calculate subnets * make delete more consistant * Review feedback * Updated note * Allow local_gb to be 0; PEP8 fixes * Updated ViewBuilderV10 as per feedback * * Added search instance by metadata. * instance_get_all_by_filters should filter deleted * This branch implements a nova api extension which allows you to manage and update tenant/project quotas * test improvements per peer review * fixing pep8 issue * defaults now is referred to using a tenant * fixing up the show quotas tests, and extension * making get project quotas require context which has access to the project/tenant) * fixing pep8 issues again * fixing spacing issues * cleaning up a few things from pyflakes * fixing pep8 errors * refactoring tests to not use authmanager, and now returning 403 when non admin user tries to update quotas * removed index, and separated out defaults into its own action * merging test_extensions.py * another trunk merge * another trunk merge... 
a new change made it into nova before the code was merged * Cleanup the '_base' directory in libvirt tests * Small bug fix...don't cast DB objects to dicts * merge from trunk * Updated the EC2 metadata controller so that it returns the correct value for instance-type metadata * Fix test_metadata tests * merge the trunk * Merged with upstream * Added list_notifier, a driver for the notifer api which calls a list of other drivers * merge with trunk * Refactored the HostFilterScheduler and LeastCostScheduler classes so that they can be combined into a single class that can do both host filtering and host weighting, allowing subclasses to override those processes as needed. Also renamed the ZoneAwareScheduler to AbstractScheduler, for two reasons: one, the 'zone-aware' designation was necessary when the zone code was being developed; now that it is part of nova, it is not an important distinction. Second, the 'Abstract' part clearly indicates that this is a class that is not designed to be used directly, but rather as the basis for specific scheduler subclasses * cosmetic change in test_extensions. 
Avoids constant merge conflicts between proposals with new extensions * Validate the size of VHD files in OVF containers * Include vif UUID in the network info dictionary * Added uuid to allocate_mac_address * Fixed the naming of the extension * redux of floating ip api * Merged trunk * Merged trunk * log the full exception so we don't lose traceback through eventlet * fix error logging in s3.py * pep8 cleanup * Merged trunk * Removed newly added userdatarequesthandler for OS API, there is no need to add this handler since the existing Ec2 API metadatarequesthandler does the same job * got tests passing with logic changes * pep8 * pep8 * add note * have the tests call create_networks directly * allow for finding a network that fits the size, also format string correctly * adding sqlalchemi api tests for test_instance_get_all_by_filter to ensure doesn't return deleted instances * added cloud unit test for describe_instances to ensure doesn't return deleted instances * return the created networks * pep8 fix * merge trunk * Adding kvm-block-migration feature * i hate these exceptions where it should just return an empty list * fix typo where I forgot a comma * merge trunk, remove _validate_cidrs and replace functionality with a double for loop * fix bug which DescribeInstances in EC2 api was returning deleted instances * We don't have source for open-wrt in the source tree, so we shouldn't use the images. 
Since the images are only there for uploading smoketests, They are now replaced with random images * Make response structure for list floating ips conform with rest of openstack api * put tenant_id back in places where it was * This branch allows the standard inclusion of a body param which most http clients will send along with a POST request * Libvirt has some autogenerated network info that is breaking ha network * making body default to none * pep8 fix * Adding standard inclusion of a body param which most http clients will send along with a POST request * Fixed merging issue * Merged with trunk * Updated rate limiting tests to use tenants * Corrected names in TODO/FIXME * remove openwrt image * Fix the tests when libvirt actually exists * Merged trunk * Add durable flag for rabbit queues * Fixed merge conflict * merged trunk * Merged trunk * Dryed up contructors * make list response for floating ip match other apis * fix missing 'run_as_root' from bad merge * Added ability too boot VM from install ISO. System detects an image of type iso. Images is streamed to a VDI and mounted to the VM. 
Blank disk allocated to VM based on instance type * Add source-group filtering * added logic to make the creation of networks (IPv4 only) validation a bit smarter: - detects if the cidr is already in use - detects if any existing smaller networks are within the range of requested cidr(s) - detects if splitting a supernet into # of num_networks && network_size will fit - detects if requested cidr(s) are within range of already existing supernet (larger cidr) * fix InvalidPortRange exception shows up in euca2ools instead of UnknownError when euca-authorize is specified w/ invalid port # * Changes requests with an invalid server action to return an HTTP 400 instead of a 501 * Currently OS API doesn't accept availability zone parameter so there is no way to instruct scheduler (SimpleScheduler) to launch VM instance on specific host of specified zone * typo fix * Fix v1.1 /servers/ PUT request to match API documentation by returning 200 code and the server data in the body * Allow different schedulers for compute and volume * have NetworkManager generate MAC address and pass it to the driver for plugging. 
Sets the stage for being able to do duplicate checks on those MACs as well * make sure security groups come back on restart of nova-compute * fix all of the tests * rename project_net to same_net * use dhcp server instead of gateway for filter exception * get rid of network_info hack and pass it everywhere * fix issue introduced in merge * merge trunk, fix conflict frim dprince's branch to remove hostname from bin/nova-dhcpbridge * merge in trunk, resolving conflicts with ttx's branch to switch from using sudo to run_as_root=True * remerge trunk * Added durable option for nova rabbit queues added queueu delete script for admin/debug purposes * Added add securitygroup to instance and remove securitygroup from instance functionality * Fix ugly little violations before someone says anything * Merged trunk * Updated logging * end of day * Check uncompressed VHD size * reworked test_extensions code to avoid constant merge conflicts with newly added ext * nova-manage: fixed instance type in vsa creation * Stub out instance_get as well so we can show the results of the name change * removed VSA/drive_type code from EC2 cloud. changed nova-manage not to use cloud APIs * Merged with trunk and fixed broken unit testcases * merged rev1418 and fixed code so that less than 1G image can be migrated * Created the filters directory in nova/scheduler * removed admincontext middleware * updates from review * merge from trunk * fix merges from trunk * Nuke hostname from nova-dhcpbridge. We don't use it * merge the trunk * need to actually assign the v4 network * Fixes to the OSAPI floating API extension DELETE. Updated to use correct args for self.disassociate (don't sweep exceptions which should cause test cases to fail under the rug). 
Additionally updated to pass network_api.release_floating_ip the address instead of a dict * Merged trunk * Fixed unit tests * only run if the subnet and cidr exist * only run if the subnet and cidr exist * merge from trunk * make sure network_size gets set * merge from trunk * don't require ipv4 * forgot the closing paren * use subnet iteration from netaddr for subnet calculation * Fix a typo that causes ami images to launch with a kernel as ramdisk when using xen * Fixing a 500 error when -1 is supplied for flavorRef on server create * rewriting parsing * fix typo that causes ami instances to launch with a kernel as ramdisk * Merged trunk * Allows for a tunable number of SQL connections to be maintained between services and the SQL server using new configuration flags. Only applies when using the MySQLdb dialect in SQLAlchemy * Merged trunk * Fixes pep8 issues in test_keypairs.py * Merged trunk * start of day * Fixes to the OSAPI floating API extension DELETE. Updated to use correct args for self.disassociate (don't sweep exceptions which should cause test cases to fail under the rug). Additionally updated to pass network_api.release_floating_ip the address instead of a dict * API needs virtual_interfaces.instance joined when pulling instances from the DB. Updated instance_get_all() to match instance_get_all_by_filters() even though the former is only used by nova-manage now. (The latter is used by the API) * remove extra log statements * join virtual_interfaces.instance for DB queries for instances. 
updates instance_get_all to match instance_get_all_by_filters * remove accidentally duplicated flag * merged trunk * add keystone middlewares for ec2 api * Merged with trunk * added userdata entry in the api paste ini * Initial version * Accidentally added inject_files to merge * Support for management of security groups in OS API as a new extension * Updates to libvirt, write metadata, net, and key to the config drive * prefixed with os- for the newly added extensions * Merged with trunk * Author added * allow scheduling topics to multiple drivers * Check compressed image size and PEP8 cleanup * v1.1 API also requires the server be returned in the body * capabilities fix, run_as_root fix * lp824780: fixed typo in update_service_capabilities * fix pep8 * spacing fixes * fixed pep8 issue * merge from trunk * fixed v1.0 stuff with X-Auth-Project-Id header, and fixed broken integrated tests * merged with 1416 * fixing id parsing * moved vsa_id to metadata. Added search my meta * Refactored the scheduler classes without changing functionality. Removed all 'zone-aware' naming references, as these were only useful during the zone development process. Also fixed some PEP8 problems in trunk code * Added search instance by metadata. get_all_by_filters should filter deleted * got rid of tenant_id everywhere, got rid of X-Auth-Project-Id header support (not in the spec), and updated tests * Silly fixes * v1.0 and v1.1 API differs for PUT, so split them out Update tests to match API * Removed postgres, bug in current ubuntu package which won't allow it to work easily. 
Will add a bug in LP * minor cleanup * Added availability zone support to the Create Server API * Make PUT /servers/ follow the API specs and return a 200 status * More logging * removed extra paren * Logging for SQLAlchemy type * merged trunk * Fixed per HACKING * * Removes rogue direct usage of subprocess module by proper utils.execute calls * Adds a run_as_root parameter to utils.execute, that prefixes your command with FLAG.root_helper (which defaults to 'sudo') * Turns all sudo calls into run_as_root=True calls * Update fakes accordingly * Replaces usage of "sudo -E" and "addl_env" parameter into passing environment in the command (allows it to be compatible with alternative sudo_helpers) * Additionally, forces close_fds=True on all utils.execute calls, since it's a more secure default * Remove doublequotes from env variable setting since they are literally passed * Changed bad server actions requests to raise an HTTP 400 * removed typos, end of line chars * Fixed broken unit testcases * Support for postgresql * merge from trunk * tenant_id -> project_id * Adding keypair support to the openstack contribute api * elif and FLAG feedback * Removed un-needed log line * Make sure to not use MySQLdb if you don't have it * get last extension-based tests to pass * Allows multiple MySQL connections to be maintained using eventlet's db_pool * Removed verbose debugging output when capabilities are reported. 
This was clogging up the logs with kbytes of useless data, preventing actual helpful information from being retrieved easily * Removed verbose debugging output when capabilities are reported * Updated extensions to use the TenantMapper * fix pep8 issues * Fixed metadata PUT routing * These fixes are the result of trolling the pylint violations here * Pass py_modules=[] to setup to avoid installing run_tests.py as a top-level module * Add bug reference * Pass py_modules=[] to setup to avoid installing run_tests.py as a top-level module * fix servers test issues and add a test * added project_id for flavors requests links * added project_id for images requests * merge trunk * fix so that the exception shows up in euca2ools instead of UnknownError * Dropped vsa_id from instances * import formatting - thx * List security groups project wise for admin users same as other users * Merged with trunk * merge with nova-1411. fixed * pep8 fix * use correct variable name * adding project_id to flavor, server, and image links for /servers requests * Merged with trunk * tests pass * merge from trunk * merged with nova-1411 * This branch makes sure to detach fixed ips when their associated floating ip is deallocated from a project/tenant * adding other emails to mailmap * add Keypairs to test_extensions * adding myself to authors * This adds the servers search capabilities defined in the OS API v1.1 spec.. and more for admins * Be more tolerant of agent failures. It is often the case there is only a problem with the agent, not with the instance, so don't claim it failed to boot so quickly * Updated the EC2 metadata controller so that it returns the correct value for instance-type metadata * added tests - list doesn't pass due to unicode issues * initial port * merged trunk * Be more tolerant of agent failures. 
The instance still booted (most likely) so don't treat it like it didn't * Updated extensions to expect tenant ids Updated extensions tests to use tenant ids * Update the OSAPI v1.1 server 'createImage' and 'createBackup' actions to limit the number of image metadata items based on the configured quota.allowed_metadata_items that is set * Fix pep8 error * fixing one pep8 failure * I think this restores the functionality .. * Adds missing nova/api/openstack/schemas to tarball * Instance metadata now functionally works (completely to spec) through OSAPI * updated v1.1 flavors tests to use tenant id * making usage of 'delete' argument more clear * Fix the two pep8 issues that sneaked in while the test was disabled * Fix remaining two pep8 violations * Updated TenantMapper to handle resources with parent resources * updating tests; fixing create output; review fixes * OSAPI v1.1 POST /servers now returns a 202 rather than a 200 * Include missing nova/api/openstack/schemas * Rename sudo_helper FLAG into root_helper * Minor fix to reduce diff * Initial validation for ec2 security groups name * Remove old commented line * Command args can be a tuple, convert them to list * Fix usage of sudo -E and addl_env in dnsmasq/radvd calls, remove addl_env support, fix fake_execute allowed kwargs * Use close_fds by default since it's good for you * Fix ajaxterm's use of shell=True, prevent vmops.py from running its own version of utils.execute * With this branch, boot-from-volume can be marked as completed in some sense. The remaining is minor if any and will be addressed as bug fixes * Update the curl command in the __public_instance_is_accessible function of test_netadmin to return an error code which we can then check for and handle properly. This should allow calling functions to properly retry and timeout if an actual test failure happens * updating more test cases * changing server create response to 202 * Added xml schema validation for extensions resources. 
Added corresponding xml schemas. Added lxml dep, which is needed for doing xml schema validation * Fixing a bug in nova.utils.novadir() * Adds the ability to read/write to a local xenhost config. No changes to the nova codebase; this will be used only by admin tools that have yet to be created * fixed conditional because jk0 is very picky :) * Fixed typo found in review * removing log lines * added --purge optparse for flavor delete * making server metadata work functionally * cleaning up instance metadata api code * Updated servers tests to use tenant id * Set image progress to 100 if the image is active * Cleaned up merge messes * Merged trunk * cleaned up unneeded line * nova.exception.wrap_exception will re-raise some exceptions, but in the process of possibly notifying that an exception has occurred, it may clobber the current exception information. nova.utils.to_primitive in particular (used by the notifier code) will catch and handle an exception clobbering the current exception being handled in wrap_exception. Eventually when using the bare 'raise', it will attempt to raise None resulting a completely different and unhelpful exception * remove obsolete script from setup.py * assert that vmops.revert_migration is called * Import sys as well * Resolve conflicts and fixed broken unit testcases * This branch adds additional capability to the hosts API extension. The new options allow an admin to reboot or shutdown a host. 
I also added code to hide this extension if the --allow-admin-api is False, as regular users should have no access to host API calls * adding forgotten import for logging * Adds OS API 1.1 support * Updated test_images to use tenant ids * Don't do anything with tenant_id for now * Review fixes * fixed wrong syntax * Assign tenant id in nova.context * another trunk merge * Merged trunk * Merged trunk * Cleaned up some old code added by the last merge * Fixed some typos from the last refactoring * Moved the restriction on host startup to the xenapi layer.: * Remove nova/tests/network, which was accidentally included in commit * upper() is even better * merged with 1383 * Updated with code changes on LP * Merged trunk * Save exception and re-raise that instead of depending on thread local exception that may have been clobbered by intermediate processing * Adding __init__.py files * Adds ability to disable snapshots in the Openstack API * Sync trunk * Set image progress to 100 if the image is active * Sync trunk * Update the curl command in the __public_instance_is_accessible function of test_netadmin to return an error code which we can then check for and handle properly. This should allow calling functions to properly retry and timeout if an actual test failure happens * ZoneAwareScheduler classes couldn't build local instances due to an additional argument ('image') being added to compute_api.create_db_entry_for_new_instance() at some point * simplified test cases further, thanks to trunk changes * Added possibility to mark fixed ip like reserved and unreserved * Update the OSAPI v1.1 server 'createImage' and 'createBackup' actions to limit the number of image metadata items based on the configured quota.allowed_metadata_items that is set * Pep8 fix * zone_aware_scheduler classes couldn't build instances due to a change to compute api's create_db_entry_for_new_instance call. now passing image argument down to the scheduler and through to the call. 
updated a existing test to cover this * Adding check to stub method * moving try/except block, and changing syntax of except statement * Fixes broken image_convert. The context being passed to glance image service was not a real context * Using decorator for snapshots enabled check * Disable flag for V1 Openstack API * adding logging to exception in delete method * Pass a real context object into image service calls * Adding flag around image-create for v1.0 * Refactored code to reduce lines of code and changed method signature * If ip is deallocated from project, but attached to a fixed ip, it is now detached * Glance Image Service now understands how to use glance client to paginate through images * Allow actions queries by UUID and PEP8 fixes * Fixed localization review comment * Allow actions queries by UUID and PEP8 fixes * Fixed review comments * fixing filters get * fixed per peer review * fixed per peer review * re-enabling sort_key/sort_dir and fixing filters line * Make sure mapping['dns'] is formatted correctly before injecting via template into images. mapping['dns'] is retrieved from the network manager via info['dns'], which is a list constructed of multiple DNS servers * Add a generic image service test and run it against the fake image service * Implemented @test.skip_unless and @test.skip_if functionality in nova/test.py * merged with 1382 * Updates v1.1 servers/id/action requests to comply with the 1.1 spec * fix typo * Moving from assertDictEqual to assertDictMatch * merging trunk * merging trunk * Add exception logging for instance IDs in the __public_instance_is_accessible smoke test function. This should help troubleshoot an intermittent failure * adding --fixes * glance image service pagination * Pass tenant ids through on on requests * methods renamed * Add exception logging for instance IDs in the __public_instance_is_accessible smoke test function. 
This should help troubleshoot an intermittent failure * Removed most direct sudo calls, make them use run_as_root=True instead * pep8 violations sneaking into trunk? * pep8 violations sneaking into trunk? * trunk merge * Fixes lp821144 * Make disk_format and container_format optional for libvirt's snapshot implementation * pep8 * fixed up zones controller to properly work with 1.1 * Add generic image service tests * Add run_as_root parameter to utils.execute, uses new sudo_helper FLAG to prefix command * Remove spurious direct use of subprocess * Added virtual interfaces REST API extension controller * Trunk contained PEP8 errors. Fixed * Trunk merge * fix mismerge * Added migration to add uuid to virtual interfaces. Added uuid column to models * merged trunk * merged with nova trunk * Launchpad automatic translations update * fixed pep8 issue * utilized functools.wraps * added missing tests * tests and merge with trunk * removed redundant logic * merged trunk * For nova-manage network create cmd, added warning when size of subnet(s) being created are larger than FLAG.network_size, in attempt to alleviate confusion. For example, currently when 'nova-manage network create foo 192.168.0.0/16', the result is that it creates a 192.168.0.0/24 instead without any indication to why * Remove instances of the "diaper pattern" * Read response to reset the connection state-machine for the next request/response cycle * Added explanations to exceptions and cleaned up reboot types * fix pep8 issues * fixed bug , when logic searched for next avail cidr it would return cidrs that were out of range of original requested cidr block. 
added test for it * Adding missing module xmlutil * fixed bug, wasn't detecting smaller subnet conflict properly added test for it * Properly format mapping['dns'] before handing off to template for injection (Fixes LP Bug #821203) * Read response to reset HTTPConnection state machine * removed unnecessary context from test I had left there from prior * move ensure_vlan_bridge,ensure_bridge,ensure_vlan to the bridge/vlan specific vif-plugging driver * re-integrated my changes after merging trunk. fixed some pep8 issues. sorting the list of cidrs to create, so that it will create x.x.0.0 with a lower 'id' than x.x.1.0 (as an example). <- was causing libvirtd test to fail * Revert migration now finishes * The OSAPI v1.0 image create POST request should store the instance_id as a Glance property * There was a recent change to how we should flip FLAGS in tests, but not all tests were fixed. This covers the rest of them. I also added a method to test.UnitTest so that FLAGS.verbose can be set. This removes the need for flags to be imported from a lot of tests * Bad method call * Forgot the instance_id parameter in the finish call * Merged in the power action changes * Removed test show() method * Fixed rescue/unrescue since the swap changes landed in trunk. 
Minor refactoring (renaming callback to _callback since it's not used here) * Updates to the XenServer glance plugin so that it obtains the set of existing headers and sends them along with the request to PUT a snapshotted image into glance * Added admin-only decorator * This updates nova-ajax-console-proxy to correctly use the new syntax introduced last week by Zed Shaw * Merged trunk * Changed all references to 'power state' to 'power action' as requested by review * Added missing tests for server actions Updated reboot to verify the reboot type is HARD or SOFT Fixed case of having an empty flavorref on resize * Added more informative docstring * Added XML serialization for server actions * Removed debugging code * Updated create image server action to respect 1.1 * Fixes lp819397 * Fixed rescue unit tests * Nuke hostname. We don't use it * Split serverXMLDeserializers into v1.0 and v1.1 * another merge * Removed temporary debugging raise * Merged trunk * modify _setup_network for flatDHCP as well * Merged trunk * Added xenhost config get/setting * fix syntax error * Fixed rescue and unrescue * remove storing original flags verbosity * remove set_flags_verbosity.. it's not needed * Merged trunk * OS v1.1 is now the default into novarc * added NOVA_VERSION to novarc * remove unused reference to exception object * Add a test for empty dns list in network_info * Fix comments * uses 2.6.0 novaclient (OS API 1.1 support) * Fix to nova-ajax-console-proxy to use the new syntax * Update the OS API servers metadata resource to match the current v1.1 specification - move /servers//meta to /servers//metadata - add PUT /servers//metadata * fix pep8 issues that are in trunk * test_host_filter setUp needs to call its super * fix up new test_server_actions.py file for flags verbosity change * merged trunk * fixing typo * Sync with latest tests * The logic for confirming and reverting resizes was flipped. 
As a result, reverting a resize would end up deleting the source (instead of the destination) instance, and confirming would end up deleting the destination (instead of the source) instance * Found a case where an UnboundLocalError would be raised in xenapi_conn.py's wait_for_task() method. This fixes the problem by moving the definition of the unbound name outside of the conditional * Moves code restarting instances after compute node reboot from libvirt driver to compute manager; makes start_guests_on_host_boot flag global * Moved server actions tests to their own test file. Updated stubbing and how flags are set to be in line with how they're supposed to be set in tests * merging trunk * add test for spawning a xenapi instance with an empty dns list * Nova uses instance_type_id and flavor_id interchangeably when they almost always different values. This can often lead to an instance changing instance_type during migration because the values passed around internally are wrong. This branch changes nova to use instance_type_id internally and flavor_id in the API. 
This will hopefully avoid confusion in the future * The OSAPI v1.0 image create POST request should store the instance_id as a Glance property * Linked to bug * Changed the definition of the 'action' dict to always occur * Updates to the XenServer glance plugin so that it obtains the set of existing headers and sends them along with the request to PUT a snapshotted image into glance * Fixed rescue and unrescue * Added in tests that verify tests are skipped appropriately * Merged trunk * Merged dietz' branch * Update HACKING: - Make imports more explicit - Add some dict/list formatting guidelines - Add some long method signature/call guidelines - Add explanation of i18n * Pep8 cleanup * Defaults `dns` to '' if not present, just as we do with the other network info data * Removes extraneous bodies from certain actions in the OSAPI servers controller * Revert should be sent to destination node and confirm should be sent to source node * Conditionals were not actually running the tests when they were supposed to. Renamed example testcases * fix pylint W0102 errors * Remove whitespaces from name and description before creating security group * Remove instances of the "diaper pattern" * Fixes lp819397 * Initial version * Load instance_types in downgrade method too * Fix trailing whitespace (PEP8) * fix test_cloud FLAGS setting * dist scheduler flag setting fixes * fix scheduler tests that set FLAGS * fix more tests that use FLAGS setting * all subclasses of ComputeDriver should fully implement the interface of the destroy method * align multi-line string * fix test_s3 FLAGS uses * switch FLAGS.* = in tests to self.flags(...) 
remove unused cases of FLAGS from tests modified test.TestCase's flags() to allow multiple overrides added missing license to test_rpc_amqp.py * follow convention when raising exceptions * pep8 fixes * use an existing exception * use correct exception name * fix duplicate function name * fix undefined variable error * fix potential runtime exception * remove unused imports * remove bit-rotted code * more cleanup of API tests regarding FLAGS * fix use of FLAGS in openstack API servers tests to use the new way * Removes extraneous body argument from server controller methods * Merged trunk * Merged trunk * Default dns to '' if not present * replaced raise Exception with self.fail() * Removed dependancy on os.getenv. Test cases now raise Exception if they are not properly skipped * PEP8 issue * whoops, got a little comma crazy * Merged trunk and fixed conflicts to make tests pass * fumigate non-pep8 code * Use flavorid only at the API level and use instance_type_id internally * Yet another conflict resolved * forgot to remove comment * updated to work w/ changes after merged trunk fixing var renaming. the logic which forces default to FLAGS.network_size if requested cidr was larger, was also applying to requested cidrs smaller than FLAGS.network_size. 
Requested cidrs smaller than FLAGS.network_size should be ignored and not overriden * merged from trunk * merged from trunk * merge trunk * Launchpad automatic translations update * Resolved pep8 errors * renaming test_skip_unless_env_foo_exists() * merging trunk * Removed trailing whitespace that somehow made it into trunk * Merged trunk * Removed duplicate methods created by previous merge * Fixes lp819523 * Fix for bug #798298 * fix for lp816713: In instance creation, when nova-api is passed imageRefs generated by itself, strip the url down to an id so that default glance connection params are used * Added check for --allow-admin-api to the host API extension code * Another unittest * Merged trunk * Add support for 300 Multiple Choice responses when no version identifier is used in the URI (or no version header is present) * Merged trunk * Glance has been updated for integration with keystone. That means that nova needs to forward the user's credentials (the auth token) when it uses the glance API. 
This patch, combined with a forth-coming patch for nova_auth_token.py in keystone, establishes that for nova itself and for xenapi; other hypervisors will need to set up the appropriate hooks for their use of glance * Added changes from mini server * raise correct error * Minor test fixes * fix failing tests * fix pep8 complaints * merge from trunk * Fixed a missing space * Bad merge res * merge the trunk * fix missing method call and add failing test * Removed duplicate xattr from pip-requires * Fixed merge issues * Merged trunk * merged trunk * remove unused parameter * Merged trunk * Merged from lab * fix pylint errors * fix pylint errors * merge from trunk * Moves image creation from POST /images to POST /servers//action * Fixed several typos * Changed migration to be an admin only method and updated the tests * - Remove Twisted dependency from pip-requires - Remove Twisted patch from tools/install_venv.py - Remove eventlet patch from tools/install_venv.py - Remove tools/eventlet-patch - Remove nova/twistd.py - Remove nova/tests/test_twistd.py - Remove bin/nova-instancemonitor - Remove nova/compute/monitor.py - Add xattr to pip-requires until glance setup.py installs it correctly - Remove references to removed files from docs/translations/code * Fix an error in fetch_image() * Get instance by UUID instead of id * Merged trunk * Added the powerstate changes to the plugin * pull-up from trunk/fix merge conflict * fixing typo * refactored tests * pull-up from trunk * Removing the xenapi_image_service flag in favor of image_service * cleanup * Merged trunk * abstraction of xml deserialization * fixing method naming problem * removing compute monitor * merge from trunk * code was checking for key in sqlalchemy instance and will ignore if value is None, but wasn't working if floating_ip was a non-sqlalchemy dict obj. 
Therefore, updated the error checking to work in both cases * While we currently trap JSON encoding exceptions and bail out, for error notification it's more important that *some* form of the message gets out. So, we take complex notification payloads and convert them to something we know can be expressed in JSON * Better error handling for resizing * Adds the auth token to nova's RequestContext. This will allow for delegation, i.e., use of a nova user's credentials when accessing other services such as glance, or perhaps for zones * merged trunk rev1348 * Launchpad automatic translations update * added some tests for network create & moved the ipv6 logic back into the function * merged with nova trunk * Added host shutdown/reboot conditioning * avoid explicit type checking, per brian waldon's comment * Added @test.skip_unless and @test.skip_if functionality. Also created nova/tests/test_skip_examples.py to show the skip cases usage * fix LinuxBridgeInterfaceDriver * merge trunk, resolve conflict in net/manager.py in favor of vif-plug * initial commit of vif-plugging for network-service interfaces * Merged trunk * pep8 fixes * Controller -> self * Added option for rebooting or shutting down a host * removed redundant logic * merged from trunk * adding a function with logic to make the creation of networks validation a bit smarter: - detects if the cidr is already in use - when specifying a supernet to be split into smaller subnets via num_networks && network_size, ensures none of the returned subnets are in use by either a subnet of the same size and range, nor a SMALLER size within the same range. - detects if splitting a supernet into # of num_networks && network_size will fit - detects if the supernet/cidr specified is conflicting with a network cidr that currently exists that may be a larger supernet already encompassing the specified cidr. 
" * Carry auth_token in nova's RequestContext * merge with trunk, resolve conflicts * Revert hasattr() check on 'set_auth_token' for clients * it makes the pep8, or else it gets the vim again * merge from trunk * Fixes this issue that I may have introduced * Update compute tests to use new exceptions * Resync to trunk * Remove copy/paste error * Launchpad automatic translations update * Launchpad automatic translations update * Fixed review comments: Put parsing logic of network information in create_instance_helper module and refactored unit testcases as per the changed code * pep8 * wow, someone whent all crazy with exceptions, why not just return an empty list? * Only call set_auth_token() on the glance client if there's one available * Make unit tests pass * merging * only attempt to get a fixed_up from a v4 subnet if there is a v4 subnet * FlavorNotFound already existed, no need to create another exception * Created exceptions for accepting in OSAPI, and handled them appropriately * only create fixed_ips if we have an ipv4 range * Revert to using context; to avoid conflict, we import context module as nova_context; add context to rescue * You see what happens Danny when you forget to close the parenthesis * Merged with trunk * Merged trunk * allow the manager to try to do the right thing * allow getting by the cidr_v6 * the netmask is implied by the cidr, so use that to display the v6 subnet * either v4 or v6 is required * merging trunk * pull-up from trunk and conflict resolution * merge trunk * stwart the switch to just fixed_range * typo * Round 1 of changes for keystone integration. * Modified request context to allow it to hold all of the relevant data from the auth component. 
* Pulled out access to AuthManager from as many places as possible * Massive cleanup of unit tests * Made the openstack api fakes use fake Authentication by default * require either v4 or v6 * pull-up from trunk * Fix various errors discovered by pylint and pyflakes * fixing underline * removing extra verbage * merged trunk * This change creates a minimalist API abstraction for the nova/rpc.py code so that it's possible to use other queue mechanisms besides Rabbit and/or AMQP, and even use other drivers for AMQP rather than Rabbit. The change is intended to give the least amount of interference with the rest of the code, fixes several bugs in the tests, and works with the current branch. I also have a small demo driver+server for using 0MQ which I'll submit after this patch is merged * removing dict() comment * adding more on return_type in docstrings * Fixes issue with OSAPI passing compute API a flavorid instead of an instance identifier. Added tests * made the whole instance handling thing optional * Reorganize the code to satisfy review comments * pull-up from trunk; fix problem obscuring context module with context param; fix conflicts and no-longer-skipped tests * remove unused import * --Stolen from https://code.launchpad.net/~cerberus/nova/lp809909/+merge/68602 * removing 'Defining Methods' paragraph * rewording * Use the util.import_object to import a module * rewording * one last change * upgrades * expanding * merged trunk and fix time call * updating HACKING * Fixing lxml version requirement * Oops, I wasn't actually being compatible with the spec here * bumping novaclient version * Fixes lp:818050 * Updated resize to call compute API with instance_type identifiers instead of flavor identifiers. 
Updated tests * fix run_tests.sh * merge trunk * Fixed changes missed in merge * fix more spacing issues, and removed self link from versions template data * merged trunk * added instance support to to_primitive and tests * merged trunk and fixed post_live_migratioin_at_destination to get nw_info * Removing unnecessary imports * Added xml schema validation for extensions resources. Added corresponding xml schemas. Added lxml dep, which is needed for doing xml schema validation * remove extra log statement * api/ec2: rename CloudController._get_instance_mapping into _format_instance_mapping * fixed typo * merge with trunk * fixed pep8 issues and removed unnecessary factory function * returned vsa_manager, nova-manage arg and print changes * Added the config values to the return of the host_data method * Adds XML serialization for servers responses that match the current v1.1 spec * Added methods to read/write values to a config file on the XenServer host * fix pep8 errors * minor cleanup * Removed unused Duplicate catch * Fix to_dict() and elevated() to preserve auth_token; revert an accidental change from context.get_admin_context() to simply context * Fixes bug 816604, which is the problem that timeformat in server responses for updated and created are incorrect. This fix just converts the datetime into the correct format * merging trunk * pep8 * moving server backup to /servers//action instead of POST /images * Simplified test cases * Rewrite ImageType enumeration to be more pythonic * refactoring and make self links correct (not hard coded) * Fix tests for checking pylint errors * Use utils.utcnow. 
Use True instead of literal 1 * Some tests for resolved pylint errors * simplify if statement * merge trunk * use wsgi XMLNS/ATOM vars * Updated deserialization of POST /servers in the OSAPI to match the latest v1.1 spec * Removed unused Duplicate catch * pull-up from trunk * Catch DBError for duplicate projects * Catch DBError for duplicate projects * Make network_info truly optional * trunk infected with non-pep8 code * unicode instead of str() * Add a flag to set the default file mode of logs * merge trunk * make payload json serializable * moved test * Removed v1_1 from individual tests * merge from trunk * merge to trunk * more commented code removed * some minor cosmetic work. addressed some dead code section * merged with nova-1336 * prior to nova-1336 merge * remove authman from images/s3.py and replace with flags * fix tests broken in the merge * merged trunk * fix undeclared name error * fix undeclared name error * fix undeclared name error * fix undeclared name errors * remove unused assignment which causes undeclared name error * fix undefined variable errors * fix call to nonexistant method to_global_ipv6. Add myself to authors file * Make network_info truly optional * updates handling of arguments in nova-manage network create. updates a few of the arguments to nova-manage and related help. updates nova-manage to raise proper exceptions * forgot a line * fixed create_networks ipv6 management * Fail silently * typo * --bridge defaults to br100 but with a deprecation warning and to be removed in d4 * Reverting to original code * use ATOM_XMLNS everywhere * merge trunk * added unit testcase to increase code coverage * stub out VERSIONS for the tests * put run_tests.sh back to how it was * Fixed conflict * Fail silently * Merged with trunk and fixed broken unit test cases * Fix the skipped tests in vmwareapi and misc spots. 
The vmware networking stuff is stubbed out, so the tests can be improved there by fixing the fakes * pep8 issue * refactoring MetadataXMLDeserializer in wsgi/common * move viewbuilder and serializer tests into their own test cases * Fix all of the skipped libvirt tests * fix typo * merged trunk * Fixes typo in attach volume * utilize _create_link_nodes base class function * default the paramater to None, not sure why it was required to begin with * pass None in for nw_info * added test for accept header of atom+xml on 300 responses to make sure it defaults back to json, and reworked some of the logic to make how this happens clearer * Drop FK before dropping instance_id column * moved rest of build logic into builder * Drop FK before dropping instance_id column * Removed FK import * Delete FK before dropping instance_id column * oops! moved ipv6 block back into the for loop in network manager create_networks * update everything to use global VERSIONS * merged trunk * change local variable name * updated handling of v6 in network manager create_networks to it can receive None for v6 args * added ipv6 requirements to nova-manage network create. changed --network to --fixed_range_v4 * remove unexpected parameter * fixed xmlns issue * updated the bridge arg requirements based on manager * this change will require that local urls be input with a properly constructed local url: http://localhost/v1.1/images/[id]. Such urls are translated to ids at the api layer. Previously, any url ending with and int was ok * make atom+xml accept header be ignored on 300 responses in the VersionsRequestDeserializer * Removed superfluous parameter * Use auth_token to set x-auth-token header in glance requests * Fixed the virt driver base * Some work on testing. 
Two cases related to lp816713 have some coverage already: using an id as an imageRef (test_create_instance_v1_1_local_href), and using a nova href as a url (test_create_instance_v1_1) * Remove xenapi_inject_image flag * Add a flag to set the default file mode of logs * fixed issue with factory for Versions Resource * Fix context argument in a test; add TODOs * improved the code per peer review * Add context argument a lot more places and make unit tests work * fix hidden breakage in test * Remove xenapi_inject_image flag * removed unused import * pep8 * pep8 * updated nova-manage create network. better help, handling of required args, and exceptions. Also updated FLAG flat_network_bridge to default to None * Re-enables and fixes test_cloud tests that broke from multi_nic * Fix for boto2 * Re-enables and fixes test_cloud tests that broke from multi_nic * add invalid device test and make sure NovaExceptions don't get wrapped * merge from trunk * pep8 * pep8 * updating common metadata xml serializer tests * Cleaned up test_servers * Moved server/actions tests to test_server_actions.py * updating servers metadata resource * pull-up from trunk * Address merge review concerns * Makes security group rules with the newer version of the ec2 api and correctly supports boto 2.0 * merging parent branch servers-xml-serialization * updating tests * updated serializer tests for multi choice * pep8 cleanup * multi choice XML responses with tests * merged recent trunk * merge with trunk * Cherry-pick of tr3buchet's fix for add_fixed_ip_to_instance * Resolved conflicts with trunk * fix typo in attach_volume * fix the last of them * fake plug for vif driver * couple more fixes * cleanup network create * code was checking for key in sqlalchemy instance but if floating_ip is a non-sqlalchemy dict instance instead, value=None will cause NoneType exception * fix more tests * fix the first round of missing data * fix the skipped tests in vmwareapi xenapi and quota * Add myself to authors 
* Implements a simplified messaging abstraction with the least amount of impact to the code base * fix for lp816713: In instance creation, when nova-api is passed imageRefs generated by itself, strip the url down to an id so that default glance connection params are used * cloud tests all passing again * added multi_choice test just to hit another resource * pep8 fixes * initial working 300 multiple choice stuff * cherry-pick tr3buchet's fix for milestone branch * cleanup * pep8 * pep8 * First pass at converting this stuff--pass context down into vmops. Still need to fix unit tests and actually use auth_token from the context.. * pep8 and simplify rule refresh logic * pep8 * merging parent branch lp:~rackspace-titan/nova/osapi-create-server * adding xml deserialization for createImage action * remove some logging, remove extra if * compute now appends self.host to the call to add an additional fixed ip to an instance * Update security gropu rules to properly support new format and boto 2.0 * Updated test stubs to contain the correct data Updated created and updated in responses to use correct time format * pep8 compliance * VSA volume creation/deletion changes * moved v1.1 image creation from /images to /servers//action * fixed per peer review * passing host from the compute manager for add_fixed_ip_to_instance() * adding assert to check for progress attribute * removing extra function * Remove debugging code * cleanup * fixed minor issues * reverting tests to use imageRef, flavorRef * updating imageRef and flavorRef parsing * Updates to the compute API and manager so that rebuild, reboot, snapshots, and password resets work with the most recent versions of novaclient * merging trunk; resolving conflicts * Add OpenStack API support for block_device_mapping * queries in the models.Instance context need to reference the table by name (fixed_ips) however queries in the models.FloatingIp context alias the tables out properly and return the data as fixed_ip (which is 
why you need to reference it by fixed_ip in that context) * added warning when size of subnet(s) being created are larger than FLAG.network_size in attempt to alleviate confusion. For example, currently when 'nova-manage network create foo 192.168.0.0/16', the result is that it creates a 192.168.0.0/24 instead without any indication to why * xml deserialization works now * merged from trunk * merged trunk * merging trunk * pull-up from trunk * got rid of print * got rid of more xml string comparisons * atom test updates * got rid of some prints * got rid of string comparisons in serializer tests * removing objectstore and image_service flag checking * Updates /servers requests to follow the v1.1 spec. Except for implementation of uuids replacing ids and access ips both of which are not yet implemented. Also, does not include serialized xml responses * fixed detail xml and json tests that got broken * updated atom tests * Updated ServerXMLSerializer to utilize the IPXMLSerializer * merged trunk * merge from trunk * fix pep8 issues * fix issue with failing test * merged trunk * I'm sorry, for my fail with rebasing. Any way previous branch grew to many other futures, so I supersede it. 1. Used optparse for parsing arg string 2. Added decorator for describe method params 3. Added option for assigning network to certain project. 4. Added field to "network list" for showing which project owns network * Moved the VIF network connectivity logic('ensure_bridge' and 'ensure_vlan_bridge') from the network managers to the virt layer. 
In addition, VIF driver class is added to allow customized VIF configurations for various types of VIFs and underlying network technologies * merge with trunk, resolve conflicts * fix pep8 * Launchpad automatic translations update * removing rogue print * removing xenapi_image_service flag * adding to authors * fixing merge conflict * merge from trunk * initial stuff to get away from string comparisons for XML, and use ElementTree * merged with 1320 * volume name change. some cleanup * - Updates /images//meta and /images//meta/ to respect the latest specification - Renames ../meta to ../metadata - Adds PUT on ../metadata to set entire container (controller action is called update_all) * Adds proper xml serialization for /servers//ips and /servers//ips/ * some cleanup. VSA flag status changes. returned some files * Pass on auth_token * Warn user instead of ignoring * Added ensuring filter rules for all VMs * atom and xml_detail working, with tests * Adds the -c|--coverage flag to run_tests.sh to generate a local code coverage report * Estetic fix * Fix boot from volume failure for network block devices * Bug #796813: vmwareapi does not support distributed vswitch * modified to conform to latest AWS EC2 API spec for authorize & revoke ingress params using the IpPermissions data structure, which nests lists of CIDR blocks (IpRanges) as well as lists of Group data * Fixes faults to use xml serializers based on api version. This fixed bug 814228 * Fixes a typo in rescue instance in ec2 api. 
This is mnaser's fix, I just added a test to verify the change * Fixes bug 797250 where a create server request with the body '{"name":"server1"}' results in a HTTP 500 instead of HTTP 422 * adding xml serialization for /servers//ips and /servers//ips/ * add a simple broken test to verify the bug * Fixed old libvirt semantics, added resume_guests_state_on_host_boot flag * xml version detail working with tests * adding testing to solidify handling of None in wsgi serialization * Added check to make sure there is a server entity in the create server request * Fixed some typos in log lines * removed prints, got versions detail tests passing, still need to do xml/atom * reverting some wsgi-related changes * merged trunk * removed print lines * This fixes the xml serialization of the /extensions and /extensions/foo resources. Add an ExtensionsXMLSerializer class and corresponding unit tests * added 1.0 detail test, added VersionRequestDeserializer to support Versions actions properly, started 300/multiple choice work * fix for reviews * Fixed bad test Fixed using wrong variable * Moved the exception handling of unplugging VIF from virt driver to VIF driver. Added better comments. Added OpenStack copyrights to libivrt vifs.py * pep8 + spelling fixes * Floating IP DB tests * Updated Faults controller to choose an xml serializer based on api version found in the request url * removing unnecessary assignments * Hotfix * Some estetic refactoring * Fixing PEP8 compliance issues * adding --fixes * fixing typos * add decorator for 'dns' params * merge with trunk, resolve conflicts * pep8 * Fixed logging * Fixed id * Fixed init_host context name * Removed driver-specific autostart code * fix 'version' command * Add bug reference * Use admin context when fetching instances * Use subscript rather than attribute * Make IP allocation test work again * Adjust and re-enable relevant unit tests * some file attrib changes * some cosmetic changes. 
Prior to merge proposal * Added test_serialize_extenstions to test ExtensionsXMLSerializer.index() * tests: unit tests for describe instance attribute * tests: an unit test for nova.compute.api.API._ephemeral_size() * tests: unit tests for nova.virt.libvirt.connection._volume_in_mapping() * tests/glance: unit tests for glance serializer * tests: unit tests for nova.virt * tests: unit tests for nova.block_device * db/api: fix network_get_by_cidr() * image/glance: teach glance block device mapping * tests/test_cloud:test_modify_image: make it pass * nova/tests/test_compute.py: make test_compute.test_update_block_device_mapping happy * test_metadata: make test_metadata pass * test_compute: make test_compute pass * test_libvirt: fix up for local_gb * virt/libvirt: teach libvirt driver swap/ephemeral device * virt/libvirt: teach libvirt driver root device name * compute/api: pass down ephemeral device info * compute/manager, virt: pass down root device name/swap/ephemeral to virt driver * ec2/get_metadata: teach block device mapping to get_metadata() * api/ec2: implement describe_instance_attribute() * db/api: block_device_mapping_update_or_create() * block_device: introduce helper function to check swap or ephemeral device * ec2utils: factor generic helper function into generic place * Launchpad automatic translations update * Config-Drive happiness, minus smoketest * merged with latest nova-1308 * more unittest changes * Last patch broke libvirt mapping of network info. This fixes it * Fixes an issue with out of order operations in setup_network for vlan mode in new ha-net code * Merged with 1306 + fix for dns change * update netutils in libvirt to match the 2 dns setup * merge * merge with 1305 * make sure dhcp_server is available in vlan mode * Adds ability to set DNS entries on network create. Also allows 2 dns servers per network to be specified * pep8-compliant. 
Prior to merge with 1305 * Reverted volume driver part * pep cleanup * remove auth manager from instance helper * docstring update * pass in the right argument * pull out auth manager from db * merge trunk * default to None in the method signature * merged trunk * remove some more stubouts and fakes * clean up fake auth manager in other places * same as: https://code.launchpad.net/~tr3buchet/nova/lp812489/+merge/68448 fixes: https://bugs.launchpad.net/nova/+bug/812489 but in a slightly different context * pep8 * updating images metadata resource * ...and this is me snapping back into reality removing all trace of ipsets. Go me * fixed networks not defined error when creating instances when no networks exist * fix test_access * This is me being all cocky, thinking I'll make it use ipsets.. * fix auth tests * Add i18n for logging, changed create_bridge/vlan to should_create_bridge/vlan, changed unfilter_instance's keyword param to positional, and added Dan's alternate ID to .mailmap * fix extensions tests * merge trunk * fix all tests * pep8 fixes * Updated the comments for VMWare VIF driver * initial test for v1.1 detail request * Moved restarting instances from libvirt driver to ComputeManager * Added network_info to unfilter_instance to avoid exceptions when shutting down instances * Removed unused exception object * Fixed the missing quotes for 802.1Qbh in libvirt template * add decorator for multi host option * Merged Dan's branch * Merged trunk * use new 'create_vlan' field in XenAPIBridgeDriver * merge with trunk, resolve conflicts * remove IPy * for libvirt OVS driver, do not make device if it exists already * refactor xenapi vif plug to combine plug + get_vif_rec, tested and fixed XenAPIBridgeDriver * Correctly add xml namespaces to extensions xml * Added xml serialization for GET => /extensions.
Added corresponding tests * merge ryu's branch * remove debugging * fix a whole bunch of tests * start removing references to AuthManager * change context to maintain exact time, store roles, use ids instead of objects and use a uuid for request_id * Resolved conflict with trunk * Adds an XML serializer for limits and adds tests for the Limits view builder * pep8 * add in the right number of fields * pep8 * updated next-available to use utc time * merge trunk * rename in preperation for trunk merge * only include dns entries if they are not None in the database * Updated the compute API so that has_finished_migration uses instance_uuid. Fixes some regressions with 1295-1296 * only use the flag if it evaluates true * Catch the FixedIpNotFoundForInstance exception when no fixed IP is mapped to instance * Updated time-available to be correct format Fixed old tests to respect this * This fixes issues with invalid flavorRef's being passed in returning a 500 instead of a 400, and adds tests to verify that two separate cases work * merge from trunk * Moving lp:~rackspace-titan/nova/extensions-xml-serialization to new branch based off of trunk. To remove dep on another branch * Perform fault wrapping in the openstack WSGI controller. This allows us to just raise webob Exceptions in OS API controllers with the appropriate explanations set. This resolves some inconsistencies with exception raising and returning that would cause HTML output to occur when faults weren't being handled correctly * pep8 and stuff * Some code was recently added to glance to allow the is_public filter to be overridden. This allows us to get all images and filter properly on the nova side until keystone support is in glance. This fixes the issue with private images and snapshots disappearing from the image list * pep8 * Merged with trunk which includes ha-net changes * Updated the compute API so that has_finished_migration uses instance_uuid. 
Fixes some regressions with 1295-1296 * Updating the /images and /images/detail OSAPI v1.1 endpoints to match spec w/ regards to query params * Ensure valid json/xml/atom responses for versions requests * Update OSAPI v1.1 /flavors, /flavors/detail, and /flavors/ to return correct xml responses * Renamed the virt driver resize methods to migration for marginally more understandable code * allow 2 dns servers to be specified on network create * allow 2 dns servers to be specified on network create * Fixes lp813006 * Fixes lp808949 - "resize doesn't work with recent novaclient" * minor fix * Some broken tests from my other merge * Fixed import issue * added tests, updated pep8 fixes * Changed test_live_migration_raises_exception to use mock for compte manager method * fixed another issue with invalid flavor_id parsing, and added tests * minor cleanup * pep8 issue * cleanup * merge with trunk * Fixed the localization unit test error in the vif driver logging * cleanup tests and fix pep8 issues * removed vif API extension * Fixed Xenapi unit test error of test_rescue * Slight indentation change * Merged Dan Wendlandt's branch and fixed pep8 errors * Added call to second coverage invocation * Fixed an issue where was invoked before it was defined in the case of a venv * - Add 'fixed_ipv6' property to VirtualInterface model - Expose ipv6 addresses in each network in OSAPI v1.1 * forgot to add xenapi/vif.py * Perform fault wrapping in the openstack WSGI controller. This allows us to just raise webob Exceptions in OS API controllers with the appropriate explanations set. 
This resolves some inconsistencies with exception raising and returning that could cause HTML output to occur when an exception was raised * Added LimitsXMLSerializer Added LimitsViewBuilderV11Test test case * Added create_vlan/bridge in network unit test * Add OpenStack API support for block_device_mapping * Changed the default of VIF driver * Fixed PEP8 issues * Combined bridge and vlan VIF driver to allow better transition for current Nova users * Merged trunk * Merged lp:~danwent/nova/network-refactoring * Adds HA networking (multi_host) option to networks * Changes based on feedback * Older Windows agents are very picky about the data sent to it. It also requires the public key for the password exchange to be in a string format and not an integer * adding flavors xml serialization * added versions list atom test and it passes * Set the status_int on fault wrapped exceptions. Fixes WSGI logging issues when faults are returned * Fix plus passing tests * remove debug prints * merge ryu's branch * update for ryu's naming changes, fix some bugs. tested with OVSDriver only so far * Fixes bug #807764. Please disregard previous proposal with incorrect bug # * Whoops * Added LP bug num to TODO * Split tests into 2 * Fix email address in Author * Make sure reset_network() call happens after we've determined the agent is running * pep8 * Merged trunk * Added Dan Wendlandt to Authors, and fixed failing network unit tests * merged trunk * Made all but one test pass for libvirt * Moved back allow_project_net_traffic to libvirt conn * Set the status_int on fault wrapped exceptions.
Fixes WSGI logging issues when faults are returned * lp812489: better handling of periodic network host setup to prevent exception * add smoketests to verify image listing * default image to private on register * correct broken logic for lxc and uml to avoid adding vnc arguments (LP: #812553) * Stupid merge and fixed broken test * Most of the XenServer plugin files need the execute bit set to run properly. However, they are inconsistent as it is, with one file having the execute bit set, but the another having it set when it is not needed * Made the compute unit tests to pass * Host fix * Created _get_instance_nw_info method to clean up duplicate code * initial changes for application/atom+xml for versions * Update Authors file * network api release_floating_ip method will now check to see if an instance is associated to it, prior to releasing * merge from lp:~midokura/nova/network-refactoring-l2 * Corrects a bad model lookup in nova-manage * correct indentation * Fixes lp809587 * Fix permissions for plugins * Ya! 
Apparently sleep helps me fix failing tests * Some older windows agents will crash if the public key for the keyinit command is not a string * added 'update' field to versions * First attempt at vmware API VIF driver integration * Removed unnecessary context parameter * Merged get_configurations and plug of VIF drivers * Moved ensure_vlan_bridge of vmware to VIF driver * Added network_info parameter to all the appropriate places in virt layers and compute manager * remove xenapi_net.py from network directory, as this functionality is now moved to virt layer * first cut of xenserver vif-plugging, some minor tweaks to libvirt plugging * Refactor device type checking * Modified alias and minor fixes * Merged with trunk * Reverted to original code, after network binding to project code is in integration code for testing new extension will be added * Fixed broken unit testcases after adding extension and minor code refactoring * Added a new extension instead of directly making changes to OS V1.1. API * have to use string 'none' and add a note * tell glance to not filter out private images * updated links to use proper atom:link per spec * Renamed setup_vif_network to plug_vif * Fixes lp813006 - inconsistent DB API naming * move import network to the top * Merged lp:~danwent/nova/network-refactoring-l2 * merged from trunk * network api release_floating_ip method checks if an instance associated to the floating prior to releasing. added test * Added destroy_vif_network * Functionality fixed and new test passing * Updates to the compute API and manager so that rebuild, reboot, snapshots, and password resets work with the most recent versions of novaclient * better handling of periodic network host setup * Merged trunk * Removed blank lines * Fix unchecked key reference to mappings['gateway6'].
Fixes LP #807764 * add downgrade * correct broken logic for lxc and uml to avoid adding vnc arguments (LP: #812553) * Beginnings of the patch * Fixed equality comparison bug in libvirt XML * Fixed bad parameters to setup_vif_networks * Zapped an extra newline * Merged with trunk * Add support for generating local code coverage report * respecting use_ipv6 flag if set to False * merged trunk * merged trunk * fixed reviewer's comment. 1. ctxt -> context, 2. erase unnecessary exception message from nova.scheduler.driver * cleanup * merge of ovs L2 branch * missed the vpn kwarg in rpc * fix bad merge * change migration number * merged trunk * This change adds the basic boot-from-volume support to the image service * Fixed the broken tests again * Merging from upstream * Some missed instance_id casts * pep8 cleanup * adding --fixes * adding fixed_ipv6 property to VirtualInterface model; exposing ipv6 in api * VSA schedulers reorg * Merged with trunk * fix issues that were breaking vlan mode * fixing bad lookup * Updates to the XenServer agent plugin to fix file injection: * Don't jsonify the inject_file response. It is already json * localization changes. Removed vsa params from volume cloud API. Alex changes * Added auth info to XML * returncode is an integer * - Fixed the conflict in vmops.py * Check returncode in get_agent_features * resolved pep8 issues * merged from trunk * Updated servers to choose XML serializer based on api version * pep8 * updated servers to use ServerXMLSerializer * added 'create' to server XML serializer * added 'detail' to server XML serializer * convert group_name to string, in case it's a long * nova/api/ec2/cloud.py: Rearranged imports to be alphabetical as per HACKING * pep8'd * Extended test to check for error specific error code and test cover for bad chars * Some basic validation for creating ec2 security groups.
(LP: #715443) * changed to avoid localization test failure * Initial test case proving we have a bug of, ec2 security group name can exceed 255 chars * added index to servers xml serializer * Change _agent_has_method to _get_agent_features. Update the inject files function so that it calls _get_agent_features only once per injected file * pep8 * Moved Metadata Serialization Test * Added ServerXMLSerializer with working 'show' method Factored out MetadataXMLSerializer from images and servers into common * added missing drive_types.py * added missing instance_get_all_by_vsa * merged with 1280 * VSA: first cut. merged with 1279 * Added some unit and integration tests for updating the server name via the openstack api * renamed priv method arg_to_dict since it's not just used for revoke. modified to conform to latest AWS EC2 API spec for authorize & revoke ingress params using the IpPermissions data structure, which nests lists of CIDR blocks (IpRanges) as well as lists of Group data * got rid of return_server_with_interfaces and added return_server_with_attributes * Added ServerXMLSerializationTest * take out print statements * Ensures a bookmark link is returned in GET /images. Before, it was only returned in GET /images/detail * One last nit * Tests passing again * put maxDiff in setUp * remove get_uuid_from_href and tests * stop using get_uuid_from_href for now * Updated with some changes from manual testing * Updates to the XenServer agent plugin to fix file injection: * merging trunk * use id in links instead of uuid * pep8 fixes * fix ServersViewBuilderV11Tests * Adds greater configuration flexibility to rate limiting via api-paste.ini. 
In particular: * return id and uuid for now * merge with trunk * Adds distributed scheduler and multinic docs to the Developer Reference page * Added more view builder tests * merged wills revisions * Added ViewBuilderV11 tests Fixed bug with build detail * fix issues with uuid and old tests * - Present ip addresses in their actual networks, not just a static public/private - Floating ip addresses are grouped into the networks with their associated fixed ips - Add addresses attribute to server entities * Update the agent plugin so that it gets 'b64_contents' from the args dict instead of 'b64_file' (which isn't what nova sends) * Adding unit and integration tests for updating the server name via the 1.1 api * merge with trunk, resolve conflicts * remove argument help from docstrings + minor fix * Fixes Bug #810149 that had an incomplete regex * Existing Windows agent behaves differently than the Unix agents and require some workarounds to operate properly. Fixes are going into the Windows agent to make it behave better, but workarounds are needed for compatibility with existing installed base * Add possibility to call commands without subcommands * fix redundency * Updated Authors * Fixed remove_version_from_href Added tests * mistakenly commited this code into my branch, reverting it to original from trunk * Merged with trunk and fixed pep errors * added integrated unit testcases and minor fixes * First pass * corrected catching NoNetworksDefined exception in host setup and getting networks for instance * catching the correct exception * Added ServersTestv1_1 test case Changed servers links to use uuid instead of id * pep8 * Updated old tests * add support to write to stdout rather than file if '-' is specified. 
see bug 810157 * merging trunk * removed self links from flavors * added commands * exposing floating ips * updated image entity for servers requests * Update the agent plugin so that it gets 'b64_contents' from the args dict instead of 'b64_file' (which isn't what nova sends) * Use assertRaises instead of try/except--stupid brain-o * Added progress attribute to servers responses * fixing bad merge * pull-up from trunk, while we're at it * Comment on parse_limits(); expand an exception message; add unit tests; fix a minor discovered bug * adding bookmark to images index * add updated and created to servers detail test, and make it work * removing mox object instantiation from each test; renaming _param to filter_name * add self to authors * use 'with' so that close is called on file handle * adding new query parameters * support '-' to indicate stdout in nova-manage project 'environment' and 'zip' * Improvements to nova-manage: 1. nova-manage network list now shows what belongs to what project, and what's the vlan id, simplifying management in case of several networks/projects 2. nova-manage server list [zone] - shows servers. 
Useful if you have many servers and want to list them in particular zone, instead of grep'ing nova-manage service list * Minor fixes * Merged with Trunk * updated to support and check for flavor links in server detail response * Updated responses for GET /images and GET /images/detail to respect the OSAPI v1.1 spec * merge * beginning server detail spec 1.1 fixup * Augment rate limiting to allow greater flexibility through the api-paste.ini configuration * merge from trunk * added unit testcases for validating the requested networks * Extends the exception.wrap_exception decorator to optionally send an update to the notification system in the event of a failure * trunk merge * merging trunk * updating testing; simplifying instance-level code * pep8 * adding test; casting instance to dict to prevent sqlalchemy errors * merged branch lp:~rackspace-titan/nova/images-response-formatting * Add multinic doc and distributed scheduler doc to developer guide front page * merged trunk * Don't pop 'vpn' on kwargs inside a loop in RPCAllocateFixedIP._allocate_fixed_ips (fixes KeyError) * Added Mohammed Naser to Authors file * merge with trunk * fix reviewer's comment * Starting part of multi-nic support in the guest. Adds the remove_fixed_ip code, but is incomplete as it needs the API extension that Vek is working on * Don't pop 'vpn' on kwargs inside a loop in RPCAllocateFixedIP._allocate_fixed_ips (fixes KeyError's) * added unit test cases and minor changes (localization fix and added fixed_ip validation) * Made sure the network manager accepts kwargs for FlatManager * Fix bug 809316. While attempting to launch cloudpipe instance via 'nova-manage vpn run' command, it comes up with IP from instances DHCP pool and not the second IP from the subnet, which break the forwarding rules that allow users to access the vpn. 
This is due 'allocate_fixed_ip' method in VlanManager doesn't receive 'vpn' as an argument from caller method and cloudpipe instances always considers as 'common' instances * cleanup * server create deserialization functional and tested * added xml deserialization unit test cases and fixe some pep errors * Updated some common.py functions to raise ValueErrors instead of HTTPBadRequests * Renamed 'nova-manage server list' -> 'nova-manage host list' to differentiate physical hosts from VMs * Allowed empty networks, handled RemoteError properly, implemented xml format for networks and fixed broken unit test cases * minor cleanup * Updated ImageXMLSerializer to serialize links in the server entity * Updated images viewbuilder to return links in server entity * updated images tests * merged trunk * pep8 * Updated remove_version_from_href to be more intelligent Added tests * Fix PEP8 for 809316 bugfix * Fix 809316 bug which prevent cloudpipe to get valid IP * fix reviewer's comment * stray debug * pep8 * fixed marshalling problem to cast_compute.. * fixed all failed unit test cases * This doesn't actually fix anything anymore, as the wsgi_refactor branch from Waldon took care of the issue. 
However, a couple rescue unit tests would have caught this originally, so I'm proposing this to include those * fixes an issue where network host fails to start because a NoNetworksFound exception wasn't being handled correctly * Bad test * unknowingly made these changes, reverting to original * catch raise for networks not found in network host and instance setup * Merged with Trunk * add optional parameter networks to the Create server OS API * Changed broken perms * Tests * Made xen plugins rpm noarch * Set the proper return code for server delete requests * Making the xen plugins rpm to be noarch * merging trunk * Expanding OSAPI wsgi module to allow handling of headers and status codes * Updates some of the extra scripts in contrib and tools to current versions * updating code to implement tests * merging parent wsgi-refactor * allowing controllers to return Nonew * adding headers serializer * pep8 * minor refactoring * minor tweaks * Adds an extension which makes add_fixed_ip() available through an OpenStack extension * Comment out these two asserts; Sandy will uncomment in his merge-prop * Fix the bug 800759 * merging wsgi-refactor * adding 204 response code * pre trunk merge * Missing Author updated * Allows for ports in serverRef in image create through the openstack api * Adds security groups to metadata server. Also adds some basic tests for metadata code * fix comments * fix conflict * Added vif OS API extension to get started on it * Moved 'setup_compute_network' logic into the virt layer * Added myself to authors file * Fixed two typos in rescue API command * flaw in ec2 cloud api, _get_image method , if doing a search for aki-0000009, yet that image name doesn't exist, it strips off aki- and looks for any image_id 0000009 and if there was an image match that happens to be an ami instead of aki, it will go ahead and deregister the ami instead. 
That behavior is unintended, so added logic to ensure that the original request image_id matches the type of image being returned from database by matching against container_format attr * Fixed up an incorrect key being used to check Zones * merged trunk * fix tests * make sure that old networks get the same dhcp ip so we don't break existing deployments * cleaned up on set network host to _setup_network and made networks allocate ips dynamically * Make the instance migration calls available via the API * Add a flag to disable ec2 or osapi * Add a flag to disable ec2 or osapi * refactor * easing up content-type restrictions * peer review fix - per vish: 'This method automatically converts unknown formats to ami, which is the same logic used to display unknown images in the ec2 api. This will allow you to properly deregister raw images, etc.' * Updated resize docstring * removing Content-Length requirement * Add docstrings for multinic extension * Add support for remove_fixed_ip() * Merged trunk * pull-up from trunk * Added unit tests * First take at migrations * Fixes bug #805604 "Multiprocess nova-api does not handles SIGTERM correctly." * image/fake: added teardown method * Updated mailmap due to wrong address in commit message * tests/test_cloud: make an unit test, test_create_image, happy * nova/compute/api.py: fixed mismerge * ec2 api _get_image method logic flaw that strips the hex16 digit off of the image name, and does a search against the db for it and ignores that it may not be the correct image, such as if doing a search for aki-0000009, yet that image name doesn't exist, it strips off aki- and looks for any image_id 0000009 and if there was an image match that happens to be an ami instead of aki, it will go ahead and deregister that. 
That behavior is unintended, so added logic to ensure that the original request image_id matches the type of image being returned from database by matching against container_format attr * sqlalchemy/migrate: resolved version conflict * merge with trunk * pull-up from trunk * unit test suite for the multinic extension * pull-up from trunk * Added server entity to images that only has id * Merging issues * Updated _create_link_nodes to be consistent with other create_*_nodes * Changed name of xml_string to to_xml_string * Merging issuse * Temporarily moved create server node functionality into images.py Temporarily changed image XML tests to expect server entities with only ids * Removed serverRef from some tests and viewbuilder * Comments for bugfix800759 and pep8 * Removed bookmark link from non detailed image viewbuilder * implemented clean-up logic when VM fails to spawn for xenapi back-end * Adds the os-hosts API extension for interacting with hosts while performing maintenance. This differs from the previous merge prop as it uses a RESTful design instead of GET-based actions * Added param to keep current things from breaking until we update all of the xml serializers and view builders to reflect the current spec * Fixes Bug #805083: "libvirtError: internal error cannot determine default video type" when using UML * Dried up images XML serialization * Dried up images XML serialization * stricter zone_id checking * trunk merge * cleanup * Added image index * pep8 fixes * Comments Incorporated for Bug800759 * Added API and supporting code for rebooting or shutting down XenServer hosts * fixed image create response test * Updated test_detail * Merged trunk * make server and image metadata optional * Updated the links container for flavors to be compliant with the current spec * pep8 * Renamed function * moved remove_version to common.py * unit tests * progress and server are optional * merged trunk * Add a socket server responding with an allowing flash socket 
policy for all requests from flash on port 843 to nova-vncproxy * pep8 compliance * Pull-up from trunk (post-multi_nic) * changed calling signature to be (instance_id, address) * correct test_show * first round * removed extra comment * Further test update and begin correcting serialization * Removed a typo error in libvirt connection.py * updated expected xml in images show test to represent current spec * pep8 fixes * Added VIF driver concept * Added the missing 'self' parameter * after trunk merge * Changed the exception type for invalid requests to webob.exc.HTTPBadRequest * Added net_attrs argument for ensure_bridge/vlan methods * Added a L2 network driver for bridge/vlan creation * wrap list comparison in test with set()s * slightly more fleshed out call path * merged trunk * merge code i'd split from instance_get_fixed_addresses_v6 that's no longer needed to be split * fix metadata test since fixed_ip searching now goes thru filters db api call instead of the get_by_fixed_ip call * clean up compute_api.get_all filter name remappings. ditch fixed_ip one-off code. fixed ec2 api call to this to compensate * clean up OS API servers getting * rename _check_servers_options, add some comments and small cleanup in the db get_by_filters call * pep8 fix * convert filter value to a string just in case before running re.compile * add comment for servers_search_options list in the OS API Controllers * pep8 fixes * fix ipv6 search test and add test for multiple options at once * test fixes.. one more to go * resolved conflict incorrectly from trunk merge * merged trunk * doc string fix * fix OS API tests * test fixes and typos * typos * cleanup checking of options in the API before calling compute_api's get_all() * a lot of major re-work.. 
still things to finish up * merged trunk * remove debug from failing test * remove faults.Fault wrapper on exceptions * rework OS API checking of search options * merged trunk * missing doc strings for fixed_ip calls I renamed * clarify a couple comments * test fixes after unknown option string changes * minor fixups * merged trunk * pep8 fixes * test fix for renamed get_by_fixed_ip call * ec2 fixes * added API tests for search options fixed a couple of bugs the tests caught * allow 'marker' and 'limit' in search options. fix log format error * another typo * merged trunk * missed power_state import in api fixed reversed compare in power_state * more typos * typos * flavor needs to be converted to int from query string value * add image and flavor searching to v1.0 api fixed missing updates from cut n paste in some doc strings * added searching by 'image', 'flavor', and 'status' reverted ip/ip6 searching to be admin only * compute's get_all should accept 'name' not 'display_name' for searching Instance.display_name. Removed 'server_name' searching.. Fixed DB calls for searching to filter results based on context * Refactored OS API code to allow checking of invalid query string paremeters and admin api/context to the index/detail calls. v1.0 still ignores unknown parameters, but v1.1 will return 400/BadRequest on unknown options. admin_api only commands are treated as unknown parameters if FLAGS.enable_admin_api is False. If enable_admin_api is True, non-admin context requests return 403/Forbidden * clean up checking for exclusive search options fix a cut n paste error with instance_get_all_by_name_regexp * merged trunk * python-novaclient 2.5.8 is required * fix bugs with fixed_ip returning a 404 instance searching needs to joinload more stuff * added searching by instance name added unit tests * pep8 fixes * Replace 'like' support with 'regexp' matching done in python. Since 'like' would result in a full table scan anyway, this is a bit more flexible. 
Make search options and matching a little more generic Return 404 when --fixed_ip doesn't match any instance, instead of a 500 only when the IP isn't in the FixedIps table * start of re-work of compute/api's 'get_all' to handle more search options * Silence warning in case tests.sqlite doesn't exist * fix libvirt test * update tests * don't set network host for multi_host networks * add ability to set multi_host in nova-manage and remove debugging issues * filter the dhcp to only respond to requests from this host * pass in dhcp server address, fix a bunch of bugs * PEP8 passed * Formatting fix * Proper Author section insertion (thx Eldar) * Signal handler cleanup, proper ^C handling * copy paste * make sure to filter out ips associated by host and add some sync for allocating ip to host * fixed zone id check * it is multi_host not multi_gateway * First round of changes for ha-flatdhcp * Updated the plugin to return the actual enabled status instead of just 'true' or 'false' * UML doesn't do vnc as well * fixed a bug which prevents suspend/resume after block-migration * Graceful shutdown of nova-api * properly displays addresses in each network, not just public/private; adding addresses attribute to server entities * Graceful shutdown of nova-api * Removing import of nova.test added to nova/__init.py__ as problem turned out to be somewhere else (not in nova source code tree) * Fixing weird error while running tests. Fix required patching nova/tests/___init__.py explicitly importing nova.test * Added missing extension file and tests. 
Also modified the get_host_list() docstring to be more accurate about the return value * Silence warning in case tests.sqlite doesn't exist * Fix boot from volume failure for network block devices * Improvements to nova-manage: network list now includes vlan and projectID, added servers list filtered by zone if needed * removed unneeded old commented code * removed more stray debug output * removed debugging output * after trunk merge * Updated unit tests * remove logging statement * Found some additional fixed_ip. entries in the Intance model contest that needed to be updated * use url parse instead of manually splitting * Changed fixed_ip.network to be fixed_ips.network, which is the correct DB field * Added the GroupId param to any pertinent security_group methods that support it in the official AWS API * Removes 'import IPy' introduced in recent commit * removing IPy import * trunk merge * Fixed the case where an exception was thrown when trying to get a list of flavors via the api yet there were no flavors to list * fix up tests * tweak * review fixes * completed api changes. still need plugin changes * Update the fixed_ip_disassociate_all_by_timeout in nova.db.api so that it supports Postgres. Fixes casting errors on postgres with this function * after trunk merge * Fixes MANIFEST.in so that migrate_repo/versions/*.sql files are now included in tarball * Include migrate_repo/versions/*.sql in tarball * Ensure auto-delete is false on Topic Queues * refactored the security_group tests a bit and broke up a few of them into smaller tests * Reverses the self.auto_delete = True that was added to TopicPublisher in the bugfix for lp804063. 
That bugfix should have only added auto_delete = True to FanoutPublisher to match the previous change to FanoutConsumer * Added 'self.auto_delete = True' to the two Publisher subclasses that lacked that setting * Added the '--fixes' tag to link to bug * Added self.auto_delete = True to the Publisher subclasses that did not have that set * added multi-nic support * osapi test_servers fixed_ip -> fixed_ips * updated osapi 1.0 addresses view to work with multiple fixed ips * trunk merge with migration renumbering * Allows subdirectory tests to run even if sqlite database doesn't exist * fix bug 800759 * Child Zone Weight adjustment available when adding Child Zones * trunk merge * blah * merge trunk * merged trunk * Windows instances will often take a few minutes setting up the image on first boot and then reboot. We should be more patient for those systems as well check if the domid changes so we can send agent requests to the current domid * These changes eliminate dependency between hostname and ec2-id. As I understand, there already was no such dependency, but still we had confusing names in code. 
Also I added more sophisticated generation of default hostname to give user possibility to set the custom one * updated images * updated servers * refactored flavors viewbuilder * fixes lp:803615 * added FlavorRef exception handling on create instance * refactored instance type code * Update the ec2 get_metadata handler so it works with the most recent version of the compute API get_all call which now returns a list if there is only a single record * - add metadata container to /images/detail and /images/ responses - update xml serialization to encode image entities properly * merging trunk * PEP8 fix * Adapt flash socket policy branch to new nova/wsgi.py refactoring * clean up * Update the ec2 get_metadata handler so it works with the most recent version of the compute API get_all call which now returns a list if there is only a single record * trunk merge * pep8 * pep8 * done and done * Update the fixed_ip_disassociate_all_by_timeout in nova.db.api so that it supports Postgres. Fixes casting errors on postgres with this function * phew ... working * compute_api.get_all should be able to recurse zones (bug 744217). Also, allow to build more than one instance at once with zone_aware_scheduler types. Other cleanups with regards to zone aware scheduler.. * Updated v1.1 links in flavors to represent the curret spec * fix issue of recurse_zones not being converted to bool properly add bool_from_str util call add test for bool_from_str slight rework of min/max_count check * fixed incorrect assumption that nullable defaults to false * removed port_id from virtual interfaces and set network_id to nullable * changes a few instance refs * merged trunk * Rename one use of timeout to expiration to make the purpose clearer * pulled in koelkers test changes * merge with trey * major reactor of the network tests for multi-nic * Merged trunk * Fixes Bug #803563 by changing how nova passes options in to glance. 
Before, if limit or marker were not set, we would pass limit=0 and marker=0 in to glance. However, marker is supposed to be an image id. With this change, if limit or marker are not set, they are simply not passed into glance. Glance is free then to choose the default behavior * Fixed indentation issues Fixed min/max_count checking issues Fixed a wrongly log message when zone aware scheduler finds no suitable hosts diablo-2 -------- * Fixes Bug #803563 by changing how nova passes options in to glance. Before, if limit or marker were not set, we would pass limit=0 and marker=0 in to glance. However, marker is supposed to be an image id. With this change, if limit or marker are not set, they are simply not passed into glance. Glance is free then to choose the default behavior * Sets 'exclusive=True' on Fanout amqp queues. We create the queues with uuids, so the consumer should have exclusive access and they should get removed when done (service stop). exclusive implies auto_delete. Fixes lp:803165 * don't pass zero in to glance image service if no limit or marker are present * more incorrect list type casting in create_network * removed the list type cast in create_network on the NETADDR projects * renumbered migrations again * Make sure test setup is run for subdirectories * merged trunk, fixed the floating_ip fixed_ip exception stupidity * trunk merge * "nova-manage vm list" was still referencing the old "image_id" column, which was renamed to "image_ref" at revision 1144 * Implement backup with rotation and expose this functionality in the OS API * Allow a port name in the server ref for image create * Fanout queues use unique queue names, so the consumer should have exclusive access. This means that they also get auto deleted when we're done with them, so they're not left around on a service restart. 
Fixes lp:803165 * pep8 fix * removed extra stubout, switched to isinstance and catching explicit exception * get latest branch * Deprecate -r for run_tests.sh and adds -n, switching the default back to recreate * check_domid_changes is superfluous right now since it's only used when timeout is used. So simplify code a little bit * updated pip-requires for novaclient * Merged trunk * pip requires * adopt merge * clean up logging for iso SR search * moved to wrap_exception approach * Fix 'undefined name 'e'' pylint error * change the default to recreate the db but allow -n for faster tests * Fix nova-manage vm list * Adding files for building an rpm for xenserver xenapi plugins * moved migration again & trunk merge * Brought back that encode under condition * Add test for hostname generation * Remove unnessesary (and possibly failing) encoding * Fix for bug 803186 that fixes the ability for nova-api to run from a source checkout * moved to wrap_exception decorator * Review feedback * Merged trunk * Put possible_topdir back in nova-api * Use milestone cut * Merged trunk * Let glance handle sorting * merging trunk * Review feedback * This adds system usage notifications using the notifications framework. 
These are designed to feed an external billing or similar system that subscribes to the nova feed and does the analysis * Refactored usage generation * pep8 * remove zombie file * remove unecessary cast to list * merge with trey * OOPS * Whoops * Review feedback * skipping another libvirt test * Fix merge issue in compute unittest * adding unicode support to image metadata * Fix thinko in previous fix :P * change variable names to remove future conflict with sandy's zone-offsets branch * Fix yet more merge-skew * merge with trey * This branch allows LdapDriver to reconnect to LDAP server if connection is lost * Fix issues due to renming of imange_id attrib * Re-worked some of the WSGI and WSGIService code to make launching WSGI services easier, less error prone, and more testable. Added tests for WSGI server, new WSGI loader, and modified integration tests where needed * Merged trunk * update a test docstring to make it clear we're testing multiple instance builds * log formatting typo pep8 fixes * Prevent test case from ruining other tests. Make it work in earlier python versions * pep8 fix * I accidently the whole unittest2 * Adds support for "extra specs", additional capability requirements associated with instance types * refactoring to compute from scheduler * remove network to project bind * resync with trunk * Add test for spawn from an ISO * Add fake SR with ISO content type * Revise key used to identify the SR used to store ISO images streamed from Glance * remerged trunk * Fix pep8 nits in audit script * Re-merging code for generating system-usages to get around bzr merge braindeadness * getting started * Added floating IP support in OS API * This speeds up multiple runs of tests to start up much faster because it only runs db migrations if the test db doesn't exist. 
It also adds the -r/--recreate-db option to run_tests.sh to delete the tests db so it will be recreated * small formatting change * breaking up into individual tests for security_groups * Proposing this because it is a critical fix before milestone. Suggestions on testing it are welcome * logging fixes * removed unneded mac parameter to lease and release fixed ip functions * Made _issue_novaclient_command() behave better. Fixed a bunch of tests * Review feedback * merge with trey * trunk merge, getting fierce. * Merged trunk * Added nova.version to utils.py * - Modified NOTE in vm_util.py - Changed gettext line to nova default in guest_tool.py * renaming tests * make sure basic filters are setup on instance restart * typo * changed extension alias to os-floating-ips * missed the bin line * Updating license to ASL 2.0 * update nova.sh * make nova-debug work with new style instances * Changed package name to openstack-xen-plugins per dprince's suggestion. All the files in /etc/xapi.d/plugins must be executable. Added dependency on parted. 
Renamed build.sh to build-rpm.sh * remove extra stuff from clean vlans * Clarify help verbiage * making key in images metadata xml serialization test null as well * making image metadata key in xml serialization test unicode * extracting images metadata xml serialization tests into specific class; adding unicode image metadata value test * merged blamar's simpler test * Pulled changes, passed the unit tests * Pulled trunk, merged boot from ISO changes * Removed now un-needed fake_connection * Use webob to test WSGI app * fixed pep style * review issues fixed * sqlalchemy/migration: resolved version conflict * merge with trunk * Adding files for building an rpm for xenserver xenapi plugins * Upstream merge * merging trunk; adding error handling around image xml serialization * adding xml serialization test of zero images * pep8 * add metadata tests * add fake connection object to wsgi app * add support to list security groups * only create the db if it doesn't exist, add an option -r to run_tests.py to delete it * Fix for bug #788265. Remove created_at, updated_at and deleted_at from instance_type dict returned by methods in sqlalchemy API * PEP8 fix * pep8 * Updated _dict_with_extra_specs docstring * Renamed _inst_type_query_to_dict -> _dict_with_extra_specs * Merged from trunk * Add api methods to delete provider firewall rules * This small change restores single quotes and double quotes as they were before in the filter expression for retrieving the PIF (physical interface) xenapi should use for creating VLAN interfaces * Remove the unnecessary insertion of whitespace. 
This happens to be enough to make this patch apply on recent versions of XenServer / Xen Cloud Platform * Removes the usage of the IPy module in favor of the netaddr module * - update glance image fixtures with expected checksum attribute - ensure checksum attribute is handled properly in image service * mailmap * mailmap * configure number of attempts to create unique mac address * merged * trunk merged. conflicts resolved * added disassociate method to tests * fixes * tests * PEP8 cleanup * parenthesis issue in the migration * merge * some tests and refactoring * Trunk merge fixes * Merging trunk * implement list test * some tests * fix tests for extensions * Fixed snapshot logic * PEP8 cleanup * Refactored backup rotate * conflict resolved * stub tests * add stubs for floating api os api testing * merge with kirill * associate disassociate untested, first attempt to test * Pep8 fix * Adding tests for backup no rotation, invalid image type * Fixed the default arguments to None instead of an empty list * Fixing PEP8 compliance issues * Trailing whitespace * Adding tests for snapshot no-name and backup no-name * Edited the host filter test case for extra specs * Removed an import * Merged from trunk * Remove extra debug line * Merged with trunk * Add reconnect test * Use simple_bind_s instead of bind_s * Add reconnect on server fail to LDAP driver * ec2/cloud: typo * image/s3: typo * same typo i made before! * on 2nd run through filter_hosts, we've already accounted for the topic memory needs converted to Bytes from MB * LeastCostScheduler wasn't checking for topic cost functions correctly. Added support so that --least_cost_scheduler_cost_functions only needs to have method names specified, instead of the full blown version with module and class name. 
Still works the old way, too * requested_mem typo * more typos * typo in least cost scheduler * Unwind last commit, force anyjson to use our serialization methods * debug logging of number of instances to build in scheduler * missed passing in min/max_count into the create/create_all_at_once calls * Dealing with cases where extra_specs wasn't defined * pep8 fixes * Renamed from flavor_extra_specs to extra_specs * All tests passing * missed passing an argument to consume_resources * Committing some broken code in advance of trying a different strategy for specifying args to extensions.ResoruceExtensions, using parent * Starting to transition instance type extra specs API to an extension API * Now automatically populates the instance_type dict with extra_specs upon being retrieved from the database * pep8 * Created Bootstrapper to handle Nova bootstrapping logic * alter test, alter some debug statements * altered some tests * freakin migration numbering * trunk merge * removing erroneous block, must've been a copy and paste fat finger * specify keyword, or direct_api proxy method blows up * updated the way vifs/fixed_ips are deallocated and their relationships, altered lease/release fixed_ip * Fixed syntax errors * This adds a way to create global firewall blocks that apply to all instances in your nova installation * Accept a full serverRef to OSAPI POST /images (snapshot) * Cast rotation to int * PEP8 cleanup * Fixed filter property and added logging * added tests * Implemented view and added tests * Adding missing import * Fixed issue with zero flavors returning HTTP 500 * Adding dict with single 'meta' key to /imgages//meta/ GET and PUT * fixing 500 error on v1.0 images xml * Small refactoring around getting params * libvirt test for deleting provider firewall rules * Make firewall rules tests idempotent, move IPy=>netaddr, add deltete test * merge from trunk * altho security_group authorize & revoke tests already exist in test_api, adding some direct ec2 api 
method tests. added group_id param support to the pertinent security group methods * Make sure there are actually rules to test against * Add test for listing provider firewall rules * pep8: remove newline at end of file * Add admin api test case (like cloud test case) with a test for fw rules * Move migration to newer version * an int() was missed being removed from UUID changes when zone rerouting kicks in * fixing 500 on None metadata value * proper xml serialization for images * "nova-manage checks if user is member of proj, prior to adding role for that project" * adding metadata container to /images/detail and /images/ calls * Add xml serialization for all /images//meta and /images//meta/ responses * trunk merge and migration bump * handle errors for listing an instance by IP address * Merged markwash's fixes * Merged list-zone-recurse * str_GET is a property * Fixed typo * Merged trunk * minor fixups * fixes for recurse_zones and None instances with compute's get_all * typo * add support for compute_api.get_all() recursing zones for more than just reservation_id * Change so that the flash socket policy server is using eventlet instead of twisted and is running in the same process as the main vnx proxy * ec2/cloud: address review * compute/api: an unit test for _update_{image_}bdm * ec2/cloud: unit tests for parser/formatter of block device mapping * ec2/cloud: an unit test for _format_instance_bdm() * ec2utils: an unit test for mapping_prepend_dev() * ec2: bundle block device mapping * ec2utils: introduce helper function to prepend '/dev/' in mappings * volume/api: an unit test for create_snapshot_force() * Add some resource checking for memory available when scheduling Various changes to d-sched to plan for scheduling on different topics, which cleans up some of the resource checking. 
Re-compute weights when building more than 1 instance, accounting for resources that would be consumed * Returned code to original location * Merged from trunk * run launcher first since it initializes global flags and logging * Now passing unit tests * Two tests passing * Now stubbing nova.db instead of nova.db.api * Bug fixing * Added flavor extra specs controller * Initial unit test (failing) * This catches the InstanceNotFound exception on create, and ignores it. This prevents errors in the compute log, and causes the server to not be built (it should only get InstanceNotFound if the server was deleted right after being created). This is a temporary fix that should be fixed correctly once no-db-messaging stuff is complete * allocate and release implementation * fixed pep8 issues * merge from trunk * image -> instance in comment * added virtual_interface_update method * Fixes issues with displaying exceptions regarding flavors in nova-manage * better debug statement around associating floating ips when multiple fixed_ips exist * pep8 fixes * merging trunk * added fixed ip filtering by null virtual interface_id to network get associated fixed ips * fixed ip gets now have floating IPs correctly loaded * reverting non-xml changes * Adding backup rotation * moving image show/update into 'meta' container * Check API request for min_count/max_count for number of instances to build * updated libvirt tests network_info to be correct * fixed error * skipping more ec2 tests * skipping more ec2 tests * skipping more ec2 tests * skipping test_run_with_snapshot * updated test_cloud to set stub_network to true * fixed incorrect exception * updating glance image fixtures with checksum attribute; fixing glance image service to use checksum attribute * Round 1 of backup with rotation * merge from trunk * fix some issues with flags and logging * Add a socket server responding with an allowing flash socket policy for all requests from flash on port 843 to nova-vncproxy * api/ec2: 
an unit test for create image * api/ec2, boot-from-volume: an unit test for describe instances * unittest: an unit test for ec2 describe image attribute * test_cloud: an unit test for describe image with block device mapping * ec2utils: an unit test for ec2utils.properties_root_defice_name * unittest, image/s3: unit tests for s3 image handler * image/s3: factor out _s3_create() for testability * ec2utils: unit tests for case insensitive true/false conversion * ec2utils: add an unit test for dict_from_dotted_str() * test_api: unit tests for ec2utils.id_to_ec2_{snap, vol}_id() * api/ec2: make CreateImage pass unit tests * volume/api: introduce create_snapshot_force() * api/ec2/image: make block device mapping pass unit tests * db/block_device_mapping/api: introduce update_or_create * db/migration: resolve version conflict * merge with trunk * ec2 api describe_security_groups allow group_id param , added tests for create/delete security group in test_cloud although also exists in test_api this tests directly the ec2 method * pip-requires * pep8 * fixed zone update * Stop trying to set a body for HTTP methods that do not allow it. It renders the unit tests useless (since they're testing a situation that can never arise) and webob 1.0.8 fails if you do this * fixed local db create * omg stop making new migrations.. * trunk merge * merge from trunk * added try except around floating ip get by host in host init * This branch adds support to the xenapi driver for updating the guest agent on creation of a new instance. 
This ensures that the guest agent is running the latest code before nova starts configuring networking, setting root password or injecting files * renamed migrations again * merge from trunk * if we get InstanceNotFound error on create, ignore (means it has been deleted before we got the create message) * some libvirt multi-nic just to get it to work, from tushar * Removed whitespace * Fixed objectstore test * merge with trey * Very small alterations, switched from using start() to pass host/port, to just defining them up front in init. Doesn't make sense to set them in start because we can't start more than once any way. Also, unbroke binaries * Bump WebOb requirement to 1.0.8 in pip-requires * Oops, I broke --help on nova-api, fixed now * pep8 fix * Monkey patching 'os' kills multiprocessing's .join() functionality. Also, messed up the name of the eventlet WSGI logger * Filter out datetime fields from instance_type * erase unnecessary TODO: statement * fixed reviewer's comment. 1. adding dest-instance-dir deleting operation to nova.compute.manager, 2. fix invalid raise statement * fix comment line * Stop trying to set a body for HTTP methods that do not allow it. It renders the unit tests useless (since they're testing a situation that can never arise) and webob 1.0.8 fails if you do this * log -> logging to keep with convention * Removed debugging and switched eventlet to monkey patch everything * Removed unneeded import * Tests for WSGI/Launcher * Remove the unnecessary insertion of whitespace. 
This happens to be enough to match this patch apply on recent versions of XenServer / Xen Cloud Platform * trunk merge * fix lp 798361 * Removed logging logic from __init__, added concept of Launcher...no tests for it yet * nova-manage checks if user is member of proj, prior to adding role for that project * Other migrations have been merged in before us, so renumber * Merged trunk * pep8 fixes * assert_ -> assertTrue since assert_ is deprecated * added adjust child zone test * tests working again * updated the exceptions around virtual interface creation, updated flatDHCP manager comment * more trunks * another trunk merge * This patch adds support for working with instances by UUID in addition to integer IDs * importing sqlalchemy IntegrityError * Moving add_uuid migration to 025 * Merging trunk, fixing conflicts * Enclosing tokens for xenapi filter in double quotes * working commit * Fix objectstore test * Cleanup and addition of tests for WSGI server * Merged trunk * Check that server exists when interacting with /v1.1/servers//meta resource * No, really. Added tests for WSGI loader * Added tests for WSGI loader * nova.virt.libvirt.connection._live_migration is changed * Cleanup * merged rev trunk 1198 * Introduced Loader concept, for paste decouple * fix pep8 check * fix comments at nova.virt.libvirt.connection * Cleanup of the cleanup * Further nova-api cleanup * Cleaned up nova-api binary and logging a bit * Removed debugging, made objectstore tests pass again * General cleanup and refactor of a lot of the API/WSGI service code * Adding tests for is_uuid_like * Using proper UUID format for uuids * Implements a portion of ec2 ebs boot. 
What's implemented - block_device_mapping option for run instance with volume (ephemeral device and no device isn't supported yet) - stop/start instance * updated fixed ip and floating ip exceptions * pep8: white space/blank lines * Merging trunk * renamed VirtualInterface exception and extend NovaException * moving instance existance logic down to api layer * Ensure os_type and architecture get set correctly * Make EC2 update_instance() only update updatable_fields, rather than all fields. Patch courtesy of Vladimir Popovski * Fixes two minor bugs (lp795123 and lp795126) in the extension mechanism. The first bug is that each extension has _check_extension() called twice on it; this is a minor cosmetic problem, but the second is that extensions which flunk _check_extension() are still added. The proposed fix is to make _check_extensions() return True or False, then make _add_extension() call it from the top and return immediately if _check_extensions() returns False * Fixes a bug where a misleading error message is outputted when there's a sqlalchemy-migrate version conflict * Result is already in JSON format from _wait_for_agent * Fix PEP8 * Fix for lp:796834 * Add new architecture attribute along with os_type * bunch of docstring changes * adding check for serverRef hostname matching app url * Fix for Bug lp:796813 * Fix the volumes extension resource to have a proper prefix - /os-volumes * Fixes lp797017, which is broken as a result of a fragile method in the xenapi drivers that assumed there would only ever be one VBD attached to an instance * adding extra image service properties to compute api snapshot; adding instance_ref property * Missed a pep8 fix * Remove thirdwheel.py and do the test with a now-public ExtensionManager.add_extension() * Removes nova/image/local.py (LocalImageService) * Add some documentation for cmp_version Add test cases for cmp_version * Increased error message readability for the OpenStack API * fixing test case * Updated 
"get_all_across_zones" in nova/compute/api.py to have "context = context.elevated()", allowing it to be run by non-admin users * merging trunk * more words * Cleaned up some pep8 issues in nova/api/openstack/create_instance_helper.py and nova/api/openstack/__init__.py * Pull-up from trunk * Add a test to ensure invalid extensions don't get added * Update xenapi/vm_utils.py so that it calls find_sr instead of get_sr. Remove the old get_sr function which by default looked for an SR named 'slices' * add vlan diagram and some text * Added context = context.elevated() to get_all_across_zones * auto load table schema instead of stubbing it out * Fixed migration per review feedback * Made hostname independent from ec2 id. Add generation of hostnames based on display name * Fix for a problem where run_tests.sh would output a seemingly unrelated error message when there was a sqlalchemy-migrate version number conflict * stub api methods * Missed a InstanceTypeMetadata -> InstanceTypeExtraSpecs rename in register_models * Fix unitttest so that it actually fails without the fix * Make $my_ip Glance's default host, not localhost * We don't check result in caller, so don't set variable to return value * Remove debugging statement * Fix lp795123 and lp795126 by making _check_extension() return True or False and checking the result only from the top of _add_extension() * Glance host defaults to rather than localhost * Upstream merge * add in dhcp drawing * Rename: intance_type_metadata -> instance_type_extra_specs * erroneous self in virtual_interface_delete_by_instance() sqlalchemy api * Fixes a bug where a unit test sometimes fails due to a race condition * remove the network-host fromt he flat diagram * add multinic diagram * add the actual image * Renaming to _build_instance_get * merged trunk * returned two files to their trunk versions, odd that they were altered in the first place * Added a new test for confirming failure when no primary VDI is present * Unit tests pass 
again * more doc (and by more I mean like 2 or 3 sentances) * Fix copyright date * PEP8 cleanup * Attempting to retrieve the correct VDI for snapshotting * Fixing another test * Fixing test_servers_by_uuid * floating_ips extension is loading to api now * initial commit of multinic doc * generated files should not be in source control * Fixed UUID migration * Added UUID migration * Clean up docstrings to match HACKING * merge with trey * Small tweaks * Merged reldan changes * First implementation of FloatingIpController * First implementation of FloatingIpController * compute/api: fix mismerge due to instance creation change * ec2/cloud.py: fix mismerge * fix conflict with rebasing * api/ec2: support CreateImage * api/ec2/image: support block device mapping * db/model: add root_device_name column to instances table * ec2utils: consolidate 'vol-%08x' and 'snap-%08x' * api/ec2: check user permission for start/stop instances * ec2utils: consolidate 'vol-%08x' and 'snap-%08x' * api/ec2: check user permission for start/stop instances * api/ec2: check user permission for start/stop instances * Adds 'joinedload' statements where they need to be to prevent access of a 'detached' object * novaclient changed to support projectID in authentication. Caused some minor issues with distributed scheduler. This fixes them up * Add trailing LF (\n) to password for compatibility with old agents * Workaround windows agent bugs where some responses have trailing \\r\\n * removed commented out shim on Instance class * Windows instances will often take a few minutes setting up the image on first boot and then reboot. We should be more patient for those systems as well check if the domid changes so we can send agent requests to the current domid * Split patch off to new branch instead * Add --fixes * First attempt to rewrite reroute_compute * syntax * Merged trunk * Windows instances will often take a few minutes setting up the image on first boot and then reboot. 
We should be more patient for those systems as well check if the domid changes so we can send agent requests to the current domid * Fixed bug * Added metadata joinedloads * Prep-work to begin on reroute_compute * specify mysql_engine for the virtual_interfaces table in the migration * Passed in explanation to 400 messages * Fixing case of volumes alias * The volumes resource extension should be prefixed by its alias - os-volumes * Adding uuid test * Pep8 Fixes * Fixing test_servers.py * pep8 * Fixing private-ips test * adding server existence check to server metadata resource * Fixing test_create_instance * made the test_xenapi work * test xenapi injected set to True * something else with tests * something with tests * i dont even care anymore * network_info has injected in xenapi tests * Adding UUID test * network_info passed in test_xenapi, mac_address no longer in instance values dict * added network injected to stub * added injected to network dict oportion of tuple returned by get_instance_nw_info * don't provision to all child zones * network info to _create_vm * fix mismerge * updated xenapi_conn finish_resize arguments * stubbed out get_instance_nw_info for compute_test * pip novaclient bump * merge with nova trunk * fixed up some little project_id things with new novaclient * typo * updated finish_resize to accept network_info, updated compute and tests in accordance * _setup_block_device_mapping: raise ApiError when db inconsistency found * db/block_device_mapping_get_all_by_instance: don't raise * Print list of agent builds a bit prettier * PEP8 cleanups * Rename to 024 since 023 was added already * pep8 * The Xen driver supports running instances in PV or HVM modes, but the method it uses to determine which to use is complicated and doesn't work in all cases. 
The result is that images that need to use HVM mode (such as FreeBSD 64-bit) end up setting a property named 'os' set to 'windows' * typo * None project_id now default * Adds code to run_tests.py which: * Fixing code to ensure unit tests for objectstore, vhd & snapshots pass * ec2utils: minor optimize _try_convert() * block_device_mapping: don't use [] as default argument * api/ec2: make the parameter parser an independent method * Show only if we have slow tests, elapsed only if test success * Showing elapsed time is now default * Ensuring pep8 runs even when nose optons are passed * network tests now teardown user * Removing seconds unit * network user only set if doesnt exist * net base project id now from context, removed incorrect floatnig ip host assignment * fixed instance[fixed_ip] in ec2 api, removed fixed_ip shim * various test fixes * Updated so that we use a 'tmp' subdirectory under the Xen SR when staging migrations. Fixes an issue where you would get a 'File exists' error because the directory under 'images' already existed (created via the rsync copy) * db fakes silly error fix * debug statements * updated db fakes * updated db fakes * Changed requests with malformed bodies to return a HTTP 400 Bad Request instead of a HTTP 500 error * updated db fakes and network base to work with virtual_interface instead of mac_address * Phew ... ok, this is the last dist-scheduler merge before we get into serious testing and minor tweaks. 
The heavy lifting is largely done * db fakes * db fakes * updated libvirt test * updated libvirt test * updated libvirt test * updated libvirt test * updated libvirt test * getting the test_host_filter.py file from trunk, mine is jacked somehow * removed extra init calls * fixed HACKING * Changed requests with malformed bodies to return a HTTP 400 Bad Request instead of a HTTP 500 error * duplicate routes moved to base class * fixed scary diff from trunk that shouldnt have been there * version passing cleanup * refactored out controller base class to use aggregation over inheritance * Move ipy commands to netaddr * merged trunk * mp fixes * Really PEP8? A tab is inferior to 2 spaces? * pep8 fix * upstream merge * Stub out the rpc call in a unit test to avoid a race condition * merged trunk rev 1178 * Making timing points stricter, only show slow/sluggish tests in summary * Improved errors * added kernel/ramdisk migrate support * Added faults wrapper * remove file that got ressurected * Cleaned up pep8 errors using the current version of pep8 located in pip-requires. This is to remove the cluttered output when using the virtualenv to run pep8 (as you should). 
This will make development easier until the virtualenv requires the latest version of pep8 (see bug 721867) * merge with trey * autoload with the appropriate engine during upgrade/downgrade * Created new exception for handling malformed requests Wrote tests Raise httpBadRequest on malformed request bodies * Fixed bug 796619 * Adds --show-elapsed option for run_tests * pep8 * Alias of volumes extension should be OS-VOLUMES * Illustrations now added to Distributed Scheduler documentation (and fixed up some formatting) * Load table schema automatically instead of stubbing out * Removed clocksource=jiffies from PV_args * Test now passes even if the rpc call does not complete on time * - fixes bug that prevented custom wsgi serialization * Removed clocksource=jiffies from PV_args * merging trunk, fixing pep8 * pep8 * Improved tests * removing unnecessary lines * wsgi can now handle dispatching action None more elegantly * This fixes the server_metadata create and update functions that were returning req.body (as a string) instead of body (deserialized body dictionary object). It also adds checks where appropriate to make sure that body is not empty (and return 400 if it is). Tests updated/added where appropriate * removed yucky None return types * merging trunk * trunk merge * zones image_id/image_href support for 1.0/1.1 * Update xenapi/vm_utils.py so that it calls find_sr instead of get_sr. Remove the old get_sr function which by default looked for an SR named 'slices' * fixed bug 796619 * merge trunk * check for none and empty string, this way empty dicts/lists will be ok * Updated so that we use a 'tmp' subdirectory under the Xen SR when staging migrations. 
Fixes an issue where you would get a 'File exists' error because the directory under 'images' already existed (created via the rsync copy) * fix method chaining in database layer to pass right parameters * Add a method to delete provider firewall rules * Add ability to list ip blocks * pep 8 whitespace fix * Move migration * block migration feature added * Reorder firewall rules so the common path is shorter * ec2 api method allocate_address ; raises exception.NoFloatingIpsDefined instead of UnknownError when there aren't any floating ips available * in XML Serialization of output, the toprettyxml() call would sometimes return a str() and sometimes unicode(), I've forced encoding to utf-8 to ensure that we always get str(). This fixes the related bug * A recent commit added a couple of directories that don't belong in version control. Remove them again * adding support for custom serialization methods * forgot a comma * floating ips can now move around the network hosts * A recent commit added a couple of directories that don't belong in version control. Remove them again * 'network list' prints project id * got rid of prints for debugging * small pep8 fixes * return body correctly as object instead of a string, with tests, also check for empty body on requests that need a body * adding xml support to /images//meta resource; moving show/update entities into meta container * removed posargs decorator, all methods decorated * Allows Nova to talk to multiple Glance APIs (without the need for an external load-balancer). 
Chooses a random Glance API for each request * forgot a comma * misc argument alterations * force utf-8 encoding on toprettyxml call for XMLDictSerializer * added new exception more descriptive of not having available floating addresses avail for allocation * raise instance instead of class * Fix copyright year * style change * Only update updateable fields * removing LocalImageService from nova-manage * rebase from trunk * decorators for action methods added * source illustrations added & spelling/grammar based on comstud's feedback * fixed reraise in trap_error * forgot some debugging statements * trunk merge and ec2 tests fixed * Add some docstrings for new agent build DB functions * Add test for agent update * Multiple position dependent formats and internationalization don't work well together * Adding caveat * Fixing code per review comments * removed fixed_ips virtual_interface_id foreignkey constraint from multi_nic migration, and added it as a standalone migration with special sqlite files * Record architecture of image for matching to agent build later. 
Add code to automatically update agent running on instance on instance creation * Add version and agentupdate commands * Add an extension to allow for an addFixedIp action on instances * further changes * tests working after merge-3 update * 022 migration has already been added, so make ours 023 now * parse options with optparse, options prepended '--' * renamed migration again * Pull-up from multi_nic * merged koelkers tests branch * remove file that keeps popping up * Merging trunk * Fixing the tests * matched the inner exception specifically, instead of catching all RemoteError exceptions * Support multiple glance-api servers * Merged trunk * Fix merge conflict * removing custom exception, instead using NoFloatingIpsDefined * raises exception.NoFloatingIpsDefined instead of UnknownError * Normalize and update database with used vm_mode * added a test for allocate_address & added error handling for api instead of returning 'UnknownError', will give information 'AllocateAddressError: NoMoreAddresses * merged trunk again * updated docstring for nova-manage network create * Now forwards create instance requests to child zones. Refactored nova.compute.api.create() to support deferred db entry creation * MySQL database tables are currently using the MyISAM engine. Created migration script nova/db/sqlalchemy/migrate_repo/versions/021_set_engine_mysql_innodb.py to change all current tables to InnoDB * merged trunk again * Support for header "X-Auth-Project-Id" in osapi * Cleaned up some pylint errors * tweaks * PEP8 fix * removed network_info shims in vmops * Fix for bug#794239 to allow pep8 in run_tests.sh to use the virtual environment * adding Authorizer key for ImportPublicKey * fix exception type catched * Look for vm_mode property on images and use that if it exists to determine if image should be run in PV or HVM mode. 
If it doesn't exist, fall back to existing logic * removed straggler code * trunk merge * merge trunk * pep8 * removed autogen file * added field NOVA_PROJECT_ID to template for future using * added tests for X-Auth-Project-Id header * fix fake driver for using string project * adding Authorizer key for ImportPublicKey * Cleaned up some of the larger pylint errors. Set to ignore some lines that pylint just couldn't understand * DRY up the image_state logic. Fix an issue where glance style images (which aren't required to have an 'image_state' property) couldn't be used to run instances on the EC2 controller * remove the debuging lines * remove the old stuff * tests all pass * Added virtual environment to PEP8 tests * Added test_run_instances_image_status_active to test_cloud * Add the option to specify a default IPv6 gateway * pep8 * Removed use of super * Added illustrations for Distributed Scheduler and fixed up formatting * Disabled pylint complaining about no 'self' parameter in a decorator function * DRY up the image_state logic. Fix an issue where glance style images (which aren't required to have an 'image_state' property) couldn't be used to run instances on the EC2 controller * Fixed incorrect error message Added missing import Fixed Typo (pylint "undefined variable NoneV") * removing local image service * Remove unnecessary docstrings * Add the option to specify a default IPv6 gateway * port the floating over to storing in a list * Make libvirt snapshotting work with images that don't have an 'architecture' property * take out the host * Removed empty init * Use IPNetwork rather than IPRange * Fixed type causing pylint "exception is not callable" Added param to fake_instance_create, fake objects should appear like the real object. 
pylint "No value passed for parameter 'values' in function call" * sanity check * run_instances will check image for 'available' status before attempting to create a new instance * fixed up tests after trunk merge * Use True/False instead of 1/0 when setting updating 'deleted' column attributes. Fixes casting issues when running nova with Postgres * merged from trunk * Remove more stray import IPy * Dropped requirement for IPy * Convert stray import IPy * Use True/False instead of 1/0 when setting updating 'deleted' column attributes.Fixes casting issues when running nova with Postgres * Removed commented code * Added test case for snapshoting base image without architecture * Remove ipy from virt code and replace with netaddr * Remove ipy from network code and replace with netaddr * Remove ipy from nova/api/ec2/cloud.py and use netaddr * Remove ipy from nova-manage and use netaddr * This branch allows marker and limit parameters to be used on image listing (index and detail) requests. It parses the parameters from the request, and passes it along to the glance_client, which can now handle these parameters. Essentially all of the logic for the pagination is handled in glance, we just pass along the correct parameters and do some error checking * merge from trunk, resolved conflicts * Update the OSAPI images controller to use 'serverRef' for image create requests * Changed the error raise to not be AdminRequired when admin is not, in fact, required * merge with trey * Change to a more generic error and update documentation * make some of the tests * Merged trunk * merge trunk * Ignore complaining about dynamic definition * Removed Duplicate method * Use super on an old style class * Removed extraneous code * Small pylint fixes * merge with trunk * Fixed incorrect exception * This branch removes nwfilter rules when instances are terminated to prevent resource leakage and serious eventual performance degradation. 
Without this patch, launching instances and restarting nova-compute eventually become very slow * merge with trunk * resolve conflicts with trunk * Update migrate script version to 22 * Added 'config list' to nova-manage. This function will output all of the flags and their values * renamed migration * trunk merge after 2b hit * Distributed Scheduler developer docs * Updated to use the '/v1/images' URL when uploading images to glance in the Xen glance plugin. Fixes the issue where snapshots fail to upload correctly * merged trunk again * added 'nova-manage config list' which will list out all of the flags and their values. I also alphabetized the list of available categories * Updated to use the '/v1/images' URL when uploading images to glance in the Xen glance plugin. Fixes issue where snapshots failed to get uploaded * Removed "double requirement" from tools/pip-requires file * merged koelker migration changes, renumbered migration filename * fix comment * Fixed pip-requires double requirement * Added a test case for XML serialization * Removed unused and erroneous (yes, it was both) function * paramiko is not installed into the venv, but is required by smoketests/base.py. Added paramiko to tools/pip-requires * Changes all uses of utcnow to use the version in utils. This is a simple wrapper for datetime.datetime.utcnow that allows us to use fake values for tests * Set pylint to ignore correct lines that it could not determine were correct, due to the means by which eventlet.green imported subprocess Minimized the number of these lines to ignore * LDAP optimization and fix for one small bug caused huge performance leak. 
Dashboard's benchmarks showed overall x22 boost in page request completion time * Adds LeastCostScheduler which uses a series of cost functions and associated weights to determine which host to provision to * Make libvirt snapshotting work with images that don't have an 'architecture' property * Add serverRef to image metadata serialization list * Fixed pylint: no metadata member in models.py * Implement OSAPI v1.1 style image create * trunk merge * little tweaks * Flush AuthManager's cache before each test * Fixed FakeLdapDriver, made it call LdapDriver.__init__ * Merged with trunk * This change set adds the ability to create new servers with an href that points to a server image on any glance server (not only the default one configured). This means you can create a server with imageRef = http://glance1:9292/images/3 and then also create one with imageRef = http://glance2:9292/images/1. Using the old way of passing in an image_id still works as well, and will use the default configured glance server (imageRef = 3 for instance) * added nova_adminclient to tools/pip-requires * merged trunk * Added paramiko to tools/pip-requires * Tests that all exceptions can be raised properly, and fix the couple of instances where they couldn't be constructed due to typos * merge trunk... yay.. * switch zones to use utcnow * make all uses of utcnow use our testable utils.utcnow * Fix error with % as replacement string * Fixing conflicts * Tests to assure all exceptions can be raised as well as fixing NotAuthorized * use %% because % is a replacement string character * some comment docstring modifications * Makes novarc work properly on a mac and also for zsh in addition to bash. Other shells are not guaranteed to work * This adds the ability to publish nova errors to an error queue * don't use python if readlink is available * Sudo chown the vbd device to the nova user before streaming data to it. 
This resolves an issue where nova-compute required 'root' privs to successfully create nodes with connection_type=xenapi * Bugfix #780784. KeyError when creating custom image * Remove some of the extra image service calls from the OS API images controller * pep8 fixes * merge with trey * make it pass for the demo * Merged with Will * Minor comment formatting changes * got rid of more test debugging stuff that shouldnt have made it in * Remove comment about imageRef not being implemented * Remove a rogue comment * more tests (empty responses) * get_all with reservation id across zone tests * move index and detail functions to v10 controller * got rid of prints * Refactored after review, fixed merge * image href should be passed through the rebuild pipeline, not the image id * merge from trunk * got rid of print debugs * cleanup based on waldon's comments, also caught a few other issues * missed a couple chars * Little cleanups * pep8 and all that * tests all passing again * list --reservation now works across zones * fix novarc to work on mac and zsh * merged, with trunk, fixed the test failure, and split the test into 3 as per peer review * Fixes nova-manage bug. When a nova-network host has allocated floating ips *AND* some associated, the nova-manage floating list would throw exception because was expecting hash with 'ec2_id' key , however, the obj returned is a sqlalchemy obj and the attr we need is 'hostname' * start the flat network * more testing fun * fixed as per peer review to make more consistent * merged from trunk * Implement the v1.1 style resize action with support for flavorRef * Updates to the 018_rename_server_management_url migration to avoid adding and dropping a column. 
Just simply rename the column * Support SSL AMQP connections * small fixes * Allow SSL AMQP connections * reservation id's properly forwarded to child zones on create * merge from trunk * fix pep8 issue from merge * coose the network_manager based on instance variable * fix the syntax * forgot a comma * This just fixes a bunch of pep8 issues that have been lingering around for a while and bothering me :) * touch ups * Updates to the 018_rename_server_management_url to avoid adding and dropping a column. Just simply rename the column * basic reservation id support to GET /servers * - move osapi-specific wsgi code from nova/wsgi.py to nova/api/openstack/wsgi.py - refactor wsgi modules to use more object-oriented approach to wsgi request handling: - Resource object steps up to original Controller position - Resource coordinates deserialization, dispatch to controller, serialization - serialization and deserialization broken down to be more testable/flexible * merge from trunk * make the stubs * use the host * da stubs * Bumped migration number * Merged from trunk * updates to keep things looking better * merge from trunk * fix pep8 issues * PEP8 fix * Moved memcached driver import to the top of modules * fix pep8 issues * pep8 fixes * Cleanup instances_path in the test_libvirt test_spawn_with_network_info test. 
Fixes issue where the nova/tests/instance-00000001/ is left in the nova source tree when running run_test.sh -N * fix filtering tests * Renamed migration to 020 * osapi: added support for header X-Auth-Project-Id * added /zones/boot reservation id tests diablo-1 -------- * Adds hooks for applying ovs flows when vifs are created and destroyed for XenServer instances * Logs the exception if metadata fails and returns a 500 with an error message to the client * Fixing a bunch of conflicts * add new base * refactor existing fakes, and start stubbing out the network for the new manager tests * pep8 * Incremented version of migration script to reflect changes in trunk * basic zone-boot test in place * Incremented version of migration script to reflect changes in trunk * Incremented version of migration script to reflect changes in trunk * switch to using webob exception * Added new snapshots table to InnoDB migrations * Adds a few more status messages to error states on image register for the ec2 api. This will hopefully provide users of the ec2 api with a little more info if their registration fails * Cleaned up bug introduced after fixing pep8 errors * Fixing Scheduler Tests * Cleaned up bug introduced after fixing pep8 errors * Basic hook-up to HostFilter and fixed up the passing of InstanceType spec to the scheduler * make the old tests still pass * rename da stuffs * rename da stuffs * Resolving conflict and finish test_images * merge * added tests for image detail requests * Merged trunk * Merged trunk and fixed conflicts * Whitespace cleanups * added pause/suspend implementation to nova.virt.libvirt_conn * Change version number of migration * Update the rebuild_instance function in the compute manager so that it accepts the arguments that our current compute API sends * Moved everything from thread-local storage to class attributes * Added the filtering of image queries with image metadata. This is exposing the filtering functionality recently added to Glance. 
Attempting to filter using the local image service will be ignored * This enables us to create a new volume from a snapshot with the EC2 api * Use a new instance_metadata_delete_all DB api call to delete existing metadata when updating a server * added tests for GlanceImageService * Add vnc_keymap flag, enable setting keymap for vnc console and fix bug #782611 * Add refresh_provider_fw_rules to virt/driver.py#ComputeDriver so virtualization drivers other than libvirt will raise NotImplemented * Rebased to trunk rev 1120 * trunk merge * added get_pagination_params function in common with tests, allow fake and local image services to accept filters, markers, and limits (but ignore them for now) * Cleaned up text conflict * pep8 fixed * pep8 fixes * Cleaned up text conflict * removing semicolon * Cleaned up text conflict * skip the vlam test, not sure why it doesn't work * Cleaned up pep8 errors * Fixed the APIError typo * MySQL database tables are currently using the MyISAM engine. Created migration script nova/db/sqlalchemy/migrate_repo/versions/020_set_engine_mysql_innodb.py to change all current tables to InnoDB * MySQL database tables are currently using the MyISAM engine. Created migration script nova/db/sqlalchemy/migrate_repo/versions/020_set_engine_mysql_innodb.py to change all current tables to InnoDB * Handle the case when a v1.0 api tries to list servers that contain image hrefs * Added myself to Authors file * edits based on ed's feedback * More specific error messages for resize requests * pep8 fixes * merge trunk * tests passing again * Actually remove the _action_resize code from the base Servers controller. 
The V11 and V10 controllers implement these now * merge from trunk * This adds a volume snapshot support with the EC2 api * Fixed the typo of APIError with ApiError * nova/auth/novarc.template: Changed NOVA_KEY_DIR to allow symlink support * Updated compute api and manager to support image_refs in rebuild * zone-boot working * regular boot working again * regular boot working again * first pass at reservation id support * Updates so that 'name' can be updated when doing a OS API v1.1 rebuild. Fixed issue where metadata wasn't getting deleted when an empty dict was POST'd on a rebuild * first cut complete * project_id moved to be last * add support for keyword arguments * fixed nova.virt.libvirt_conn.resume() method - removing try-catch * reservation_id's done * basic flow done * lots more * starting * boot-from-volume: some comments and NOTE(user name) * Use metadata variable when calling _metadata_refs * Implement the v1.1 style resize action with support for flavorRef * Fixes to the SQLAlchmeny API such that metadata is saved on an instance_update. Added integration test to test that instance metadata is updated on a rebuild * Update the rebuild_instance function in the compute manager so that it accepts the arguments that our current compute API sends * Cleanup instances_path in test_libvirt test_spawn_with_network_info test * Added missing nova import to image/__init__.py * Another image_id location in hyperv * Fixing nova.tests.api.openstack.fakes.stub_out_image_service. It now stubs out the get_image_service and get_default_image_service functions. Also some pep8 whitespace fixes * Fixing xen and vmware tests by correctly mocking glance client * Fixing integration tests by correctly stubbing image service * More image_id to image_ref stuff. Also fixed tests in test_servers * When encrypting passwords in xenapi's SimpleDH(), we shouldn't send a final newline to openssl, as it'll use that as encryption data. 
However, we do need to make sure there's a newline on the end when we write the base64 string for decoding.. Made these changes and updated the test * Fixes the bug introduced by rpc-multicall that caused some test_service.py tests to fail by pip-requiring a later version of mox * added \n is not needed with -A * now pip-requires mox version 0.5.3 * added -A back in to pass to openssl * merge with dietz * merge with dietz * XenAPI tests pass * fixed so all the new encryption tests pass.. including data with newlines and so forth * Glance client updates for xenapi and vmware API to work with image refs * Merged lp:~rackspace-titan/nova/lp788979 * get the right args * Fixing pep8 problems * Modified instance_type_create to take metadata * Added test for instance type metadata create * merge with trey * Added test for instance type metadata update * Added delete instance metadata unit test * Added a unit test * Adding test code * Changed metadata to meta to avoid sqlalchemy collision * Adding accessor methods for instance type metadata * remove errant print statement * prevent encryption from adding newlines on long messages * trunk merge * nova/auth/novarc.template: Changed NOVA_KEY_DIR to allow symlink support * docstrings again and import ordering * fix encryption handling of newlines again and restructure the code a bit * Libvirt updates for image_ref * Commit the migration script * fixed docstrings and general tidying * remove _take_action_to_instance * fix calls to openssl properly now. Only append \n to stdin when decoding. 
Updated the test slightly, also * fixed read_only check * Fix pep8 errors * Fix pep8 violations * Fix a description of 'snapshot_name_template' * unittest: make unit tests happy * unittest: tests for boot from volume and stop/start instances * compute: implement ec2 stop/start instances * compute, virt: support boot-from-volume without ephemeral device and no device * db: add a table for block device mapping * volume/api: allow volume clone from snapshot without size * api/ec2: parse ec2 block device mapping and pass it down to compute api * teach ec2 parser multi dot-separted argument * api/ec2: make ec2 api accept true/false * Adds the ability to make a call that returns multiple times (a call returning a generator). This is also based on the work in rpc-improvements + a bunch of fixes Vish and I worked through to get all the tests to pass so the code is a bit all over the place * fix a minor bug unrelated to this change * updated the way allocate_for_instance and deallocate_for_instance handle kwargs * Rename instances.image_id to instances.image_ref * changes per review * merge with dietz * stub out passing the network * Virt tests passing while assuming the old style single nics * adding TODOs per dabo's review * Fixes from Ed Leafe's review suggestions * merge trunk * move udev file so it follows the xen-backend.rules * Essentially adds support for wiring up a swap disk when building * add a comment when calling glance:download_vhd so it's clear what is returned * make the fakes be the correct * skip vmware tests, since they need to be updated for multi-nic by someone who knows the backend * put back the hidden assert check i accidentally removed from glance plugin * fix image_path in glance plugin * Merged trunk * skip the network tests for now * Change the return from glance to be a list of dictionaries describing VDIs Fix the rest of the code to account for this Add a test for swap * cleaning up getattr calls with default param * branch 2a merge (including 
trunk) * trunk merge * remerged with 2a * tests pass and pep8'ed * review fixups * Expanded tests * In vmwareapi_net.py removed the code that defines the flag 'vlan_interface' and added code to set default value for the flag 'vlan_interface' to 'vmnic0'. This will now avoid flag re-definition issue * missed a driver reference * exceptions are logged via the raise, so just log an error message * log upload errors * instance obj returned is not a hash, instead is sqlalchemy obj and hostname attr is what the logic is looking for * we don't need the mac or the host anymore * Test tweaks * instances don't need a mac_address to be created anymore * Make a cleaner log message and use [] instead of . to get database fields * use the skip decorator rather than comment out * merging trunk * Adding some pluralization * Double quotes are ugly #3 * merge with dietz * fix typo introduced during merge conflict resolution * Remove spurious newline at end of file * Move migration to fix ordering * remove dead/duplicate code * Double quotes are ugly #2 * Double quotes are ugly * refactoring compute.api.create() * Fix test_cloud tests * Restricted image filtering by name and status only * Switch the run_instances call in the EC2 back to 'image_id'. Incoming requests use 'imageId' so we shouldn't modify this for image HREF's * Switching back to chown. I'm fine w/ setfacl too but nova already has 'chown' via sudoers so this seems reasonable for now * replace double quotation to single quotation at nova.virt.libvirt_conn * remove unnecessary import inspect at nova.virt.libvirt_conn * creating _take_action_to_instance to nova.virt.libvirt_conn.py * Instead of redefining the flag 'vlan_interface', just setting a default value (vmnic0) in vmwareapi_net.py * Renamed image_ref variables to image_href. 
Since the convention is that x_ref vars may imply that they are db objects * Added test skipper class * change the behavior of calling a multicall * move consumerset killing into stop * don't put connection back in pool * replace removed import * cleanups * cleanup the code for merging * make sure that using multicall on a call with a single result still functions * lots of fixes for rpc and extra imports * don't need to use a separate connection * almost everything working with fake_rabbit * bring back commits lost in merge * connection pool tests and make the pool LIFO * Add rpc_conn_pool_size flag for the new connection pool * Always create Service consumers no matter if report_interval is 0 Fix tests to handle how Service loads Consumers now * catch greenlet.GreenletExit when shutting service down * fix consumers to actually be deleted and clean up cloud test * fakerabbit's declare_consumer should support more than 1 consumer. also: make fakerabbit Backend.consume be an iterator like it should be. 
* convert fanout_cast to ConnectionPool * pep8 and comment fixes * Add a connection pool for rpc cast/call Use the same rabbit connection for all topic listening and wait to be notified vs doing a 0.1 second poll for each * add commented out unworking code for yield-based returns * make the test more expicit * add support to rpc for multicall * merge with dietz * Fixing divergence * Merged trunk * Added params to local and base image service * Fixed the mistyped line referred to in bug 787023 * Merged trunk and resolved conflicts * Fixed a typo * make the test work * Merged with trunk * Several changes designed to bring the openstack api 1.1 closer to spec - add ram limits to the nova compute quotas - enable injected file limits and injected file size limits to be overridden in the quota database table - expose quota limits as absolute limits in the openstack api 1.1 limits resource - add support for controlling 'unlimited' quotas to nova-manage * During the API create call, the API would kick off a build and then loop in a greenthread waiting for the scheduler to pick a host for the instance. After API would see a host was picked, it would cast to the compute node's set_admin_password method * starting breakdown of nova.compute.api.create() * fix test. instance is not updated in DB with admin password in the API anymore * Merged upstream * pep8 fixes * Initial tests * fix forever looping on a password reset API call * updating admin_pass moved down to compute where the password is actually reset. 
only update if it succeeds * merged trunk * change install_ref.admin_password to instance_ref.admin_pass to match the DB * Merged trunk * remove my print * we're getting a list of tuples now' * we have a list of tuples, not a list of dicts * pep8 fixes * return the result of the function * Updated tests to use mox pep8 * InstanceTypesMetadata is now registered * make some changes to the manager so dupe keywords don't get passed * Fixing the InstanceTypesMetadata table definition * try out mox for testing image request filters * Adding the migrate code to add the new table * dist-sched-2a merge * Created new libvirt directory, moved libvirt_conn.py to libvirt/connection.py, moved libvirt templates, broke out firewall and network utilities * make the column name correct * The code for getting an opaque reference to an instance assumed that there was a reference to an instance obj available when raising an exception. I changed this from raising an InstanceNotFound exception to a NotFound, as this is more appropriate for the failure, and doesn't require an instance ID * merge against 2a * trunk merge * simplified the limiting differences for different versions of the API * New tests added * Changed the exception type to not require an instance ID * Added model for InstanceTypeMetadata * Added test * Avoid wildcard import * Add unittests for cloning volumes * merged recent trunk * merged recent trunk * Make snapshot_id=None a default value in VolumeManager:create_volume(). It is not a regular case to create a volume from a snapshot * Don't need to import json * Fix wrong call of the volume api create() * pep8 fix in nova/compute/api.py * instead of the API spawning a greenthread to wait for a host to be picked, the instance to boot, etc for setting the admin password... 
let's push the admin password down to the scheduler so that compute can just take care of setting the password as a part of the build process * tests working again * eventlet.spawn_n() expects the function and arguments, but it expects the arguments unpacked since it uses *args * Don't pass a tuple since spawn_n will get the arguments with *args anyway * move devices back * Using the root-password subcommand of the nova client results in the password being changed for the instance specified, but to a different unknown password. The patch changes nova to use the password specified in the API call * Pretty simple. We call openssl to encrypt the admin password, but the recent changes around this code forgot to strip the newline off the read from stdout * DHSimple's decrypt needs to append \n when writing to stdin * need to strip newline from openssl stdout data * merge with trey * work on * merge trunk * moved auto assign floating ip functionality from compute manager to network manager * create a mac address entry and blindly use the first network * create a mac address entry and blindly use the first network * create a mac address entry and blindly use the first network * need to return the ref * Added filtering on image properties * Fixes a bug related to incorrect reparsing of flags and prevents many extra reparses * no use mac * comment out the direct cloud case * make fake_flags set defaults instead of runtime values * add a test from vish and fix the issues * Properly reparse flags when adding dynamic flags * no use mac * instances don't have mac's anymore and address is now plural * let the fake driver accept the network info * Comment out the 2 tests that require the instance to contain mac/ip * initial use of limited_by_marker * more fix up * many tests pass now * its a dict, not a class * we don't get the network in a tuples anymore * specified image_id keyword in exception arg * When adding a keypair with ec2 API that already exists, give a friendly error 
and no traceback in nova-api * added imageid string to exception, per peer review * Fixes some minor doc issues - misspelled flags in zones doc and also adds zones doc to an index for easier findability * removed most of debugging code * Fixing docstring * Synchronise with Diablo development * make _make_fixture respect name passed in * zone1 merge * sending calls * accepting calls * Fixing _get_kernel_ramdisk_from_image to use the correct image service * Fixing year of copyright * merge * select partially going through * merge from trunk * make image_ref and image_id usage more consistant, eliminate redundancy in compute_api.create() call * take out irrelevant TODO * blah * uhhh yea * local tweaks * getting closer to working select call * swap should use device 1 and rescue use device 2 * merged from trunk * fix tests, have glance plugin return json encoded string of vdi uuids * make sure to get a results, not the query * merged from trunk * Removing code duplication between parse_image_ref and get_image service. Made parse_image_ref private * Changed ec2 api dupe key exception log handler info->debug * Added test case for attempting to create a duplicate keypair * Removing debug print line * Renaming service_image_id vars to image_id to reduce confusion. Also some minor cleanup * cleanup and fixes * got rid of print statement * initial fudging in of swap disk * make the test_servers pass by removing the address tests for 1.1, bug filed * port the current create_networks over to the new network scheme * need to have the complete table def since sqlalchemy/sqlite won't reload the model * must have the class defined before referencing it * make the migration run with tests * get rid of all mention of drivers ... 
it's filter only now * merge trunk * Fixes euca-attach-volume for iscsi using Xenserver * fix typo * merge branch lp:~rackspace-titan/nova/ram-limits * Added test * Fixes missing space * Fixed mistyped line * Rebased to trunk rev 1101 * merge from trunk * moved utils functions into nova/image/ * Trunk merge * Fix bug #744150 by starting nova-api on an unused port * Removing utils.is_int() * Added myself to Authors * When adding a keypair that already exists, give a friendly error and no traceback in nova-api * --dhcp-lease-max=150 by default. This prevents >150 instances in one network * Minor cleanup * No reason to modify the way file names are generated for kernel and ramdisk, since the kernel_id and ramdisk_id is still guaranteed to be ints * found a typo in the xenserver glance plugin that doesn't work with glance trunk. Also modified the image url to fetch from /v1/image/X instead of /image/X as that returned a 300 * fixing glance plugin bug and setting the plugin to use /v1 of the glance api * merge trunk * move init start position to 96 to allow openvswitch time to fully start * Include data files for public key tests in the tarball * minor cleanup * Makes sure vlan creation locks so we don't race and fail to create a vlan * merging trunk * Include data files for public key tests in the tarball * Merged with trunk * renaming resource_factory to create_resource * combined the exception catching to eliminate duplication * synchronize vlan creation * print information about nova-manage project problems * merge from trunk * fix comments * make nwfilter mock more 'realistic' by having it remember which filters have been defined * fix pep8 issue * fixed silly issue with variable needing to be named 'id' for the url mapper, also caught new exception type where needed * This is the groundwork for the upcoming distributed scheduler changes. 
Nothing is actually wired up here, so it shouldn't break any existing code (and all tests pass) * Merging trunk * Get rid of old virt/images.py functions that are no longer needed. Checked for any loose calls to these functions and found none. All tests pass for me * Update OSAPI v1.1 extensions so that it supports RequestExtensions. ResponseExtensions were removed since the new RequestExtension covers both use cases. This branch also removes some of the odd serialization code in the RequestExtensionController that converted dictionary objects into webob objects. RequestExtension handlers should now always return proper webob objects * Addressing bug #785763. Usual default for maximum number of DHCP leases in dnsmasq is 150. This prevents instances to obtain IP addresses from DHCP in case we have more than 150 in our network. Adding myself to Authors * foo * syntax errors * temp fixes * added support for reserving certain network for certain project * Fixed some tests * merge with trunk * Added an EC2 API endpoint that'll allow import of public key. Prior, api only allowed generation of new keys * This fix ensures that kpartx -d is called in the event that tune2fs fails during key injection, as it does when trying to inject a key into a windows instance. Bug #760921 is a symptom of this issue, as if kpartx -d is not called then partitions remain mapped that prevent the underlying nbd from being reused * Add new flag 'max_kernel_ramdisk_size' to specify a maximum size of kernel or ramdisk so we don't copy large files to dom0 and fill up /boot/guest * The XenAPI driver uses openssl as part of the nova-agent implementation to set the password for root. It uses a temporary file insecurely and unnecessarily. Change the code to write the password directly to stdin of the openssl process instead * The tools/* directory is now included in pep8 runs. 
Added an opt-out system for excluding files/dirs from pep8 (using GLOBIGNORE) * fill out the absolute limit tests for limits v1.0 controller * add absolute limits support to 1.0 api as well * Merged with trunk * fixed pep8 issue * merge from trunk * Fail early if requested imageRef does not exist when creating a server * Separate out tests for when unfilter is called from iptables vs. nwfilter driver. Re: lp783705 * Moved back templates and fixed pep8 issue. Template move was due to breaking packaging with template moves. That will need to happen in a later merge * further refactoring of wsgi module; adding documentation and tests * don't give instance quota errors with negative values * Merged trunk and resolved horrible horrible conflicts * No reason to hash ramdisk_id and kernel_id. They are ints * temp * waldon's naming feedback * Fixing role names to match code * Merging trunk * updated the hypervisors and ec2 api to support receiving lists from pluralized mac_addresses and fixed_ips * fname should have been root_fname * minor cleanup, plus had to merge because of diverged-branches issue * Minor cleanup * merge from trunk * Fix comments * Add a unitest to test EC2 snapshot APIs * Avoid wildcard import * Simple change to sort the list of controllers/methods before printing to make it easier to read * missed the new wsgi test file * removing controller/serializer code from wsgi.py; updating other code to use new modules * merge lp:nova * fixup absolute limits to latest 1.1 spec * refactoring wsgi to separate controller/serialization/deserialization logic; creating osapi-specific module * default to port 80 if it isnt in the href/uri * return dummy id per vishs suggestion * hackish patch to fix hrefs asking for their metadata in boot (this really shouldnt be in ec2 api?) 
* Sort list of controllers/methods before printing * use a manual 500 with error text instead of traceback for failure * log any exceptions that get thrown trying to retrieve metadata * skeleton of forwarding calls to child zones * fix typo in udev rule * merge trunk * libvirt fixes to use new image_service stuff * On second thought, removing decorator * Adding FlagNotSet exception * Implements a basic mechanism for pushing notifications out to interested parties. The rationale for implementing notifications this way is that the responsibility for them shouldn't fall to Nova. As such, we simply will be pushing messages to a queue where another worker entirely can be written to push messages around to subscribers * Spacing changes * get real absolute limits in openstack api and verify absolute limit responses * Added missing xenhost plugin. This was causing warnings to pop up in the compute logs during periodic_task runs. It must have not been bzr add'd when this code was merged * fixed bug with compute_api not having actual image_ref to use proper image service * Adding xenhost plugin * Merging trunk * Added missing xenhost plugin * Fix call to spawn_n() instead. It expects a callable * fix pep8 issues * oops, took out commented out tests in integrated.test_servers and made tests pass again * fixed api.openstack.test_servers tests...again * fixed QuotaTestCases * fixed ComputeTestCase tests * made ImageControllerWithGlanceServiceTests pass * fixed test_servers small tests as well * get integrated server_tests passing * Removed all utils.import_object(FLAGS.image_service) and replaced with utils.get_default_image_service() * MySQL database tables are using the MyISAM engine. Created migration script to change all current tables to InnoDB, updated version to 019 * MySQL database tables are using the MyISAM engine. 
Created migration script to change all current tables to InnoDB, updated version to 019 * Small cleanups * Moving into scheduler subdir and refactoring out common code * Moving tests into scheduler subdirectory * added is_int function to utils * Pep8 fixes * made get_image_service calls in servers.py * use utils.get_image_service in compute_api * updates to utils methods, initial usage in images.py * added util functions to get image service * Using import_class to import filter_host driver * Adding fill first cost function * add more statuses for ec2 image registration * Add --fixes * Add --fixes * Fixes the naming of the server_management_url in auth and tests * Merging in Sandy's changes adding Noop Cost Fn with tests * merged trunk * move migration 017 to 018 * merge ram-limits * Removed extra serialization metadata * Docstring cleanup and formatting (nova/network dir). Minor style fixes as well * pep8 * Fixes improper attribute naming around instance types that broke Resizes * merge ram-limits * support unlimited quotas in nova-manage and flags * fix test * Changed builder to match specs and added test * add migration for proper name * Update test case to ensure password gets set correctly * make token use typo that is in database. Also fix now -> utcnow and stop using . syntax for dealing with tokens * Added missing metadata join to instance_get calls * Avoid using spawn_n to fix LP784132 * add ram limits to instance quotas * Convert instance_type_ids in the instances table from strings to integers to enable joins with instance_types. 
This in particular fixes a problem when using postgresql * Set password to one requested in API call * don't throw type errors on NoneType int conversions * Added network_info into refresh_security_group_rules That fixs https://bugs.launchpad.net/nova/+bug/773308 * Improved error notification in network create * Instead of using a temp file with openssl, just write directly to stdin * First cut at least cost scheduler * merge lp:nova * Implemented builder for absolute limits and updated tests * provision_resource no longer returns value * provision working correctly now * Re-pull changed notification branch * PEP8 fixes * adding --fixes lp:781429 * Fixed mistyped key, caused huge performance leak * Moved memcached connection in AuthManager to thread-local storage. Added caching of LDAP connection in thread-local storage. Optimized LDAP queries, added similar memcached support to LDAPDriver. Add "per-driver-request" caching of LDAP results. (should be per-api-request) * ugh, fixed again * tests fixed and pep8'ed * Update comment on RequestExtension class * failure conditions are being sent back properly now * Added opt-out system for excluding files/dirs from pep8 (using GLOBIGNORE) * MySQL database tables are using the MyISAM engine. Created migration script to change all current tables to InnoDB * MySQL database tables are using the MyISAM engine. Created migration script to change all current tables to InnoDB * fix for lp783705 - remove nwfilters when instance is terminated * basic call going through * Added missing metadata join to instance_get calls * add logging to migration and fix migration version * Migrate quota schema from hardcoded columns to a key-value approach. The hope is that this change would make it easier to change the quota system without future schema changes. 
It also adds the concept of quotas that are unlimited * Conceded :-D * updated the mac_address delete function to actually delete the rows, and update fixed_ips * Added missing flavorRef and imageRef checks in the os api xml deserialization code along with tests * Fixed minor pylint errors * This branch splits out the IPv6 address generation into pluggable backends. A new flag named ipv6_backend specifies which backend to use * Reduce indentation to avoid PEP8 failures * merge koelker migration changes * using mac_address from fixed_ip instead of instance * PEP8 cleanups * Use new 3-argument API * add a todo * style fixing * Removed obsolete method and test * renamed test cases in nova/tests/api/openstack/test_servers.py to use a consistent naming convention as used in nova/tests/api/openstack/test_images.py. also fixed a couple of pylint #C0103 errors in test_servers.py * make the migration work like we expect it to * Fixed all pep8 errors in tools/install_venv.py. All tests pass * Added the imageRef and flavorRef attributes in the xml deserialization * Add vnc_keymap flag and enable setting keymap for vnc console * Review changes and merge from trunk * Pep8 cleaning * Added response about error in nova-manage project operations * Removed tools/clean_vlans and tools/nova-debug from pep8 tests as they are shell scripts * Added lines to include tools/* (except ajaxterm) in pep8 tests * Add a unit test for snapshot_volume * Define image state during snapshotting. 
Name snapshot to the name provided, not generate * Unit test for snapshotting (creating custom image) * fixed a few C0103 errors in test_servers.py * renamed test cases to use a consistent naming convention as used in nova/tests/api/openstack/test_images.py * fix sys.argv requirement * first cut at weighted-sum tests * merge trunk * add udev rules and modified ovs_configure_vif_flows.py to work with udev rules * Adds proper error handling for images that can't be found and a test for deregister image * added |fixed_ip_get_all_by_mac_address| and |mac_address_get_by_fixed_ip| to db and sqlalchemy APIs * started on integrating HostFilter * Add support for rbd snapshots * Merging in trunk * I'm assuming that openstack doesnt work with python < 2.6 here (which I read somewhere on the wiki). This patch will check to make sure python >= 2.6 is installed, and also allow it to work with python 2.7 (and greater in the future) * merge lp:nova * XenAPI was not implemented to allow for multiple simultaneous XenAPI requests. A single XenAPIConnection (and thus XenAPISession) is used for all queries. XenAPISession's wait_for_task method would set a self.loop = for looping calls to _poll_task until task completion. Subsequent (parallel) calls to wait_for_task for another query would overwrite this. XenAPISession._poll_task was pulled into the XenAPISession.wait_for_task method to avoid having to store self.loop * pep8 fixes * Merged trunk * volume/driver: make unit test, test_volume, pass * Make set_admin_password non-blocking to API * Merged trunk * Review feedback * Lost a flag pulling from another branch. Whoops * Update the compute manager so that it breaks out of a loop if set_admin_password is not implemented by the driver. 
This avoids excessively logging NotImplementedError exceptions * Merging in Sandy's changes * Make host timeout configurable * Make set_admin_password non-blocking to API * volume/driver: implement basic snapshot * merge trunk * Update the compute manager so that it breaks out of a loop if set_admin_password is not implemented by the driver * Add init script and sysconfig file for openvswitch-nova * volume/driver: factor out lvm opration * Authors: add myself to Authers file * trunk merge * Adding zones doc into index of devref plus a bug fix for flag spellings * fixup based on Lorin's feedback * added flag lost in migration * merge trunk * pep8 * Adding basic tests for call_zone_method * fixed_ip disassociate now also unsets mac_address_id * Make sure imports are in alphabetical order * updated previous calls referring to the flags to use the column from the networks table instead * merged from trunk * handle instance_type_ids that are NULL during upgrade to integers * fix for lp760921. Previously, if tune2fs failed, as it does on windows hosts, kpartx -d also failed to be called which leaves mapped partitions that retain holds on the nbd device. These holds cause the observed errors * if a LoopingCall has canceled the loop, break out early instead of sleeping any more than needed * Add a test for parallel builds. verified this test fails before this fix and succeeds after this fix * incorporated ImageNotFound instead of NotFound * merged from trunk * misc related network manager refactor and cleanup * changed NotFound exception to ImageNotFound * Update comment * Variable renaming * Add test suite for IPv6 address generation * Accept and ignore project_id * Make it so that ExtensionRequest objects now return proper webob objects. This avoids the odd serialization code in the RequestExtensionController class which converts JSON dicts to webobs for us * merged from trunk * Remove ResponseExtensions. 
The new RequestExtension covers both use cases * Initial work on request extensions * Added network_info into refresh_security_group_rules * fixed pep8 spacing issue * merge from trunk * rename quota column to 'hard_limit' to make it simpler to avoid collisions with sql keyword 'limit' * Fix remote volume code * 1 Set default paths for nova.conf and api-paste.ini to /etc/nova/ 2 Changed countryName policy because https://bugs.launchpad.net/nova/+bug/724317 still affected * Implement IPv6 address generation that includes account identifier * messing around with the flow of create() and specs * Redundant line * changes per review * docstring cleanup, nova/network dir * make instance.instance_type_id an integer to support joins in postgres * merge from trunk and update .mailmap file * Merged trunk * Updated MANIFEST for template move * NoValidHost exception test * Fixes an issue with conversion of images that was introduced by exception refactoring. This makes the exceptions when trying to locate an ec2 id clearer and also adds some tests for the conversion methods * oops fixed a docstring * Pep8 stuff * Bluprint URL: https://blueprints.launchpad.net/nova/+spec/improve-pylint-scores/ * start of zone_aware_scheduler test * Moved everything into notifier/api * make sure proper exceptions are raised for ec2 id conversion and add tests * better function name * Updated the value of the nova-manager libvirt_type * more filter alignment * Removed commented out 'from nova import log as logging' line, per request from Brian Lamar * merge trunk * align filters on query * better pylint scores on imports * Code cleanup * Merged trunk * Abstract out IPv6 address generation to pluggable backends * Merged trunk * First cut with tests passing * changing Authors file * removed unused wild card imports, replaced sqlalchemy wildcard import with explicit imports * removed unused wild card imports, replaced sqlalchemy wildcard import with explicit imports * Fix for #780276 (run_tests.sh 
fails test_authors_up_to_date when using git repo) * extracted xenserver capability reporting from dabo's dist-scheduler branch and added tests * migrate back updated_at correctly * added in log_notifier for easier debugging * Add priority based queues to notifications. Remove duplicate json encoding in notifier (rpc.cast does encoding... ) make no_op_notifier match rabbit one for signature on notify() * Bugfix #780784. KeyError when creating custom image * removed unused wild card imports, replaced sqlalchemy wildcard import with explicit imports * removed unused wild card imports, replaced sqlalchemy wildcard import with explicit imports * removed unused wild card imports, replaced sqlalchemy wildcard import with explicit imports * Better tests * Add example * give a more informative message if pre-migration assertions fail * Whoops * fix migration bug * Pep8 * Test * remove stubbing of XenAPISession.wait_for_task for xenapi tests as it doesn't need to be faked. Also removed duplicate code that stubbed xenapi_conn._parse_xmlrpc_value * migration bug fixes * Change xenapi's wait_for_task to handle multiple simultaenous queries to fix lp:766404 * Added GitPython to [install_dir]/tools/pip-requires * got rid of unnecessary imports * Enable RightAWS style signature checking using server_string without port number, add test cases for authenticate() and a new helper routine, and fix lp753660 * Better message format description * unified underscore/dash issue * update tests to handle unlimited resources in the db * pep8 * capabilities flattened and tests fixed * Set root password upon XenServer instance creation * trunk merge * clean up unused functions from virt/images.py * Removing a rogue try/catch expecting a non-existant exception.TimeoutException that is never raised * basic test working * db: fix db versioning * fix mismerge by 1059 * volume/driver: implement basic snapshot/clone * volume/driver: factor out lvm opration * Host Filtering for Distributed Scheduler 
(done before weighing) * Rebased to trunk rev 1057 * Adds coverage-related packages to the tools/pip-requires to allows users to generate coverage reporting when running unit tests with virtulenv * merge from trunk * Set publish_errors default to False * convert quota table to key-value * Simple fix for this issue. Tries to raise an exception passing in a variable that doesn't exist, which causes an error * Fixed duplicate function * Review feedback * Review feedback * Fixed method in flavors * Review feedback * Review feedback * Merged trunk * Set root password upon XenServer instance creation * Added Python packages needed for coverage reports to virtualenv packages * Added interface functions * merge from trunk * added test for show_by_name ImageNotFound exception * tests pass again * Sanitize get_console_output results. See bug #758054 * revised file docs * New author in town * Changes to allow a VM to boot from iso image. A blank HD is also attached with a size corresponding to the instance type * Added stub function for a referenced, previously non-existant function * Merged trunk * grabbed from dist-sched branch * Explicitly casted a str to a str to please pylint * Removed incorrect, unreachable code * spacing fix * pep8 fix * Improved error notification in network create * Add two whitespaces to conform PEP8 * Publish errors via nova.notifier * Added myself to Authors file * terminology: no more plug-ins or queries. 
They are host filters and drivers * Added interface function to ViewBilder * Added interfaces to server controller * added self to authors * fixed issue with non-existent variable being passed to ImageNotFound exception * removing rogue TimeoutException * merge prop fixes * Merged trunk * print statements removed * merge with trunk * flipped service_state in ZoneManager and fixed tests * pep8 * not = * not = * and or test * and or test * merge from trunk * Removed extra newline after get_console_output in fake virt driver * Moved all reencoding to compute manager to satisfy both Direct API and internal cloud call * Merged with current trunk * added myself to Authors * Adding a test case to show the xml deserialization failure for imageRef and flavorRef * Fixes for nova-manage vpn list * json parser * Don't fail the test suite in the absence of VCS history * It's ok if there's no commit history. Otherwise the test suite in the tarball will fail * Merged trunk * flavor test * Fix indentation * tests and better driver loading * Add missed hyphen * Adding OSAPI v1.1 limits resource * Adding support for server rebuild to v1.0 and v1.1 of the Openstack API * reduce policy for countyname * looking for default flagfile * adding debug log message * merging trunk * merging trunk * removing class imports * Merged trunk * Merged trunk * Moved reencoding logic to compute manager and cloud EC2 API * ensure create image conforms to OS API 1.1 spec * merge updates from trunk * Added support in the nova openstack api for requests with local hrefs, e.g., "imageRef":"2" Previously, it only supported "imageRef":"http://foo.com/images/2". The 1.1 api spec defines both approaches * Add a flag to allow the user to specify a dnsmasq configuration file for nova-network to use when starting dnsmasq. Currently the command line option is set to "--config-fil=" with nothing specified. 
This branch will leave it as it is if the user does not specify a config file, but will utilize the specific file if they do * merged from trunk * implemented review suggestion EAFP style, and fixed test stub fake_show needs to have image_state = available or other tests will fail * got rid of extra whitespace * Update tools/pip-requires and tools/install_venv.py for python2.7 support (works in ubuntu 11.04) * No need to test length of admin password in local href test * merging trunk; resolving conflicts; fixing issue with ApiError test failing since r1043 * Added support in osapi for requests with local hrefs, e.g., "imageRef":"2" * initial pass * Implement get_host_ip_addr in the libvirt compute driver * merging trunk; resolving conflicts * Modified the instance status returned by the OS api to more accurately represent its power state * Fixed 2 lines to allow pep8 check to pass * Since run_tests.sh utilizes nose to run its tests, the -x, --stop flag works correctly for halting tests on the first failed test. 
The usage information for run_tests.sh now includes the --stop flag * add support for git checking and a default of failing if the history can't be read * ApiError 'code' arg set to None, and will only display a 'code' as part of the str if specified * Fixed: Check for use of IPv6 missing * removed unused method and fixed imports * Change the links in the sidebar on the docs pages * Use my_ip for libvirt version of get_host_ip_addr * fix typo in import * removed unused method and fixed imports * small changes in libvirt tests * place ipv6_rules creation under if ip_v6 section * Added checking ip_v6 flag and test for it * merging trunk * adding view file * Expose AuthManager.list_projects user filter to nova-manage * Final cleanup of nova/exceptions.py in my series of refactoring branches * Uses memcached to cache roles so that ldap is actually usable * added nova version to usage output of bin/nova-manage for easy identification of installed codebase * Changing links in sidebar to previous release * Rebased to trunk rev 1035 * converted 1/0 comparison in db to True/False for Postgres cast compatibility * Changed test_cloud and fake virt driver to show out the fix * converted 1/0 comparison to True/False for Postgres compatibility * pep8 * fixed docstring per jsb * added version list command to nova-manage * Added more unit-test for multi-nic-nova libvirt * Sanitize get_console_output in libvirt_conn * added nova version output to usage printout for nova-manage * Make the import of distutils.extra non-mandatory in setup.py. Just print a warning that i18n commands are not available.. * Correcting exception case * further cleanup of nova/exceptions.py * added eagerloading mac adddresses for instance * merge with trunk and resolve conflicts * Added myself to authors file * pep8 fixes * Refactoring usage of nova.exception.NotFound * Let nova-mange limit project list by user * merging trunk * Make the import of distutils.extra non-mandatory in setup.py. 
Just print a warning that i18n commands are not available.. * Updated run_tests.sh usage info to reflect the --stop flag * Fixed formatting to align with PEP 8 * Modified instance status for shutoff power state in OS api * Refactoring the usage of nova.exception.Duplicate * Rebased to trunk rev 1030 * removed extra newline * merged from trunk * updated tests to reflect serverRef as href (per Ilya Alekseyev) and refactored _build_server from ViewBuilder (per Eldar Nugaev) * Add a test checking spawn() works when network_info is set, which currently doesn't. The following patch would fix parameter mismatch calling _create_image() from spawn() in libvirt_conn.py * removed unused imports and renamed template variables * pep8 * merging trunk * Renamed test_virt.py to test_libvirt.py as per suggestion * fixing bad merge * Merged trunk and fixed simple exception conflict * merging trunk * Refactoring nova.exception.Invalid usage * adding gettext to setup.py * Use runtime XML instead of VM creation time XML for createXML() call in order to ensure volumes are attached after RebootInstances as a workaround, and fix bug #747922 * Created new libvirt directory, moved libvirt_conn.py to libvirt/connection.py, moved libvirt templates, broke out firewall and network utilities * Rebased to trunk rev 1027, and resolved a conflict in nova/virt/libvirt_conn.py * Rebased to trunk rev 1027 * clarifies error when trying to add duplicate instance_type names or flavorids via nova-manage instance_type * merge trunk * Rework completed. Added test cases, changed helper method name, etc * pep8 * merge trunk, resolved conflict * merge trunk * Abstracted libvirt's lookupByName method into _lookup_by_name * Provide option of auto assigning floating ip to each instance. Depend on auto_assign_floating_ip boolean flag value. 
False by default * Fixes per review * Restore volume state on migration failure to fix lp742256 * Fixes cloudpipe to get the proper ip address * merging trunk * Fix bug with content-type and small OpenStack API actions refactor * merge with trunk * merge trunk * merged trunk * -Fixed indent for _get_ip_version -Added LoopingCall to destroy as suggested by earlier bug report -Standardized all LoopingCall uses to include useful logging and better error handling * Create a dictionary of instance_types before executing SQL updates in the instance_type_id migration (014). This should resolve a "cannot commit transaction - SQL statements in progress" error with some versions of sqlite * create network now takes bridge for flat networks * Adapt DescribeInstances to EC2 API spec * Change response of the EC2 API CreateVolume method to match the API docs for EC2 * Merged trunk and fixed api servers conflict * pep8 * Fixes and reworkings based on review * pep8 * Addressing exception.NotFound across the project * fix logging in reboot OpenStack API * eager loaded mac_address attributes for mac address get functions * updated image builder and tests for OS API 1.1 compatibility (serverRef) * forgot import * change action= to actions= * typo * forgot to save * moved get_network_topic to network.api * style cleaning * Fixed network_info creation in libvirt driver. Now creating same dict as in xenapi driver * Modified instance status for shutdown power state in OS api * rebase trunk * altered imports * commit to push for testing * Rebased to trunk rev 1015 * Utility method reworked, etc * Docstring cleanup and formatting (nova/image dir). Minor style fixes as well * Docstring cleanup and formatting (nova/db dir). Minor style fixes as well * Docstring cleanup and formatting (nova dir). Minor style fixes as well * use vpn filter in basic filtering so cloudpipe works with iptables driver * use simpler interfaces * Docstring cleanup and formatting (console). 
Minor style fixes as well * Docstring cleanup and formatting (compute). Minor style fixes as well * merge trunk * Add privateIpAddress and ipAddress to EC2 API DescribeInstances response * style fixing * Fix parameter mismatch calling _create_image() from spawn() in libvirt_conn.py * Add a test checking spawn() works when network_info is set, which currently doesn't. The following patch would fix it * put up and down in the right dir * Makes metadata correctly display kernel-id and ramdisk-id * pep8 cleaning * style fix * revert changes that doesn't affect the bug * in doesn't work properly on instance_ref * Another small round of pylint clean-up * Added an option to run_tests.sh so you can run just pep8. So now you can: ./run_tests.sh --just-pep8 or ./run_tests.sh -p * merge trunk * fix display of vpn instance id and add output rule so it can be tested from network host * Exit early if tests fail, before pep8 is run * more changes per review * fixes per review * docstring cleanup, nova/image dir * Docstring cleanup and formatting. Minor style fixes as well * cleanups per code review * docstring cleanup, nova dir * fixed indentation * docstring cleanup, console * docstring cleanup, nova/db dir * attempts to make the docstring rules clearer * fix typo * docstring cleanup compute manager * bugfix signature * refactor the way flows are deleted/reset * remove ambiguity in test * Pylinted nova-compute * Pylinted nova-manage * replaced regex to webob.Request.content_type * fix after review: style, improving tests, replacing underscore * merge with trunk * fix Request.get_content_type * Reverted bad merge * Rebased to trunk rev 1005 * Removed no longer relevant comment * Removed TODO we don't need * Removed _ and replaced with real variable name * instance type get approach changed. 
tests fixed * Merged trunk * trunk merged * fix: mark floating ip as auto assigned * Add to Authors * Change response format of CreateVolume to match EC2 * revamped spacing per Rick Harris suggestion. Added exact error to nova-manage output * only apply ipv6 if the data exists in xenstore * Create a dictionary of instance_types before executing SQL updates in the instance_type_id migration (014). This should resolve a "cannot commit transaction - SQL statements in progress" error with some versions of sqlite * add support for git checking and a default of failing if the history can't be read * strip output, str() link local * merging lp:~rackspace-titan/nova/exceptions-refactor-invalid * Round 1 of pylint cleanup * Review feedback * Implement quotas for the new v1.1 server metadata controller * fix doc typo * fix logging in reboot OpenStack API * make geninter.sh use the right tmpl file * pep8 fix * refactoring usage of exception.Duplicate errors * rename all versions of image_ec2_id * Abstracted lookupByName calls to _lookup_by_name for centralized error handling * actually use the ec2_id * remove typo * merging lp:~rackspace-titan/nova/exceptions-refactor-invalid * Fixes cloudpipe to get the proper ip address * add include file for doc interfaces * add instructions for setting up interfaces * Merged trunk and fixed small comment * Fixed info messages * Tweak to destroy loop logic * Pretty critical spelling error * Removed extra calls in exception handling and standardized the way LoopingCalls are done * one last i18n string * Merged trunk * multi-line string spacing * removing rogue print * moving dynamic i18n to static * refractoring * Add support for cloning a Sheepdog volume * Add support for cloning a Sheepdog volume * Add support for creating a new volume from a existing snapshot with EC2 API * Add support for creating a new volume from a existing snapshot with EC2 API * Add support for creating a Sheepdog snapshot * Add support for creating a Sheepdog 
snapshot * Add support for creating a snapshot of a nova volume with euca-create-snapshot * Add support for creating a snapshot of a nova volume with euca-create-snapshot * trunk merged * Implement get_host_ip_addr in the libvirt compute driver * Adding projectname username to the nova-manage project commands to fix a doc bug, plus some edits and elimination of a few doc todos * pep8 fixes * Remove zope.interface from the requires file since it is not used anywhere * use 'is not None' instead of '!= None' * Fix loggin in creation server in OpenStack API 1.0 * Support admin password when specified in server create requests * First round of pylint cleanup * merge lp:nova and resolve conflicts * Change '== None' to 'is None' * remove zope.interface requires * use 'is not None' instead of '!= None' * pep8 fixes * Change '== None' to 'is None' * Fixes nova-manage image convert when the source directory is the same one that local image service uses * trunk merged * pep8 fixed * calc link local * not performing floating ip operation with auto allocated ips * it is rename not move * pep8 fix * Rebased to trunk rev 995 * Rebased to trunk rev 995 * merge trunk * add fault as response * Fix logging in openstack api * Fix logging in openstack api * Fix logging in openstack api * trunk merged. conflict resolved * trunk merged. 
conflict resolved * The change to utils.execute's call style missed this call somehow, this should get libvirt snapshots working again * Fix parameter mismatch calling to_xml() from spawn() in libvirt_conn.py * move name into main metadata instead of properties * change libvirt snapshot to new style execute * Add additional logging for WSGI and OpenStack API authentication * Rename the id * Added period to docstring for metadata test * Merged trunk * Empty commit to hopefully regenerate launchpad diff * Explicitly tell a user that they need to authenticate against a version root * Merged trunk * merging trunk * adding documentation & error handling * correcting tests; pep8 * Removed the unused self.interfaces_xml variable * Only poll for instance states that compute should care about * Diablo versioning * Diablo versioning * Rebased to trunk rev 989 * Rebased to trunk rev 989 2011.2 ------ * Final versioning for Cactus * initial roundup of all 'exception.Invalid' cases * merge trunk * set the bridge on each OvsFlow * merge with trunk * bugfix * bugfix * Fix parameter mismatch calling to_xml() from spawn() in libvirt_conn.py * add kvm-pause and kvm-suspend 2011.2rc1 --------- * Rework GlanceImageService._translate_base( * Updated following to RIck's comments * Rebased to trunk rev 987 * Rework GlanceImageService._translate_base() to not call BaseImageService._translate_base() otherwise the wrong class attributes are used in properties construction.. * Try to be nicer to the DB when destroying a libvirt instance * pep8 * merge trunk * fixed error message i18n-ization. added test * Don't hammer on the DB * Debug code clean up * Rebased to trunk rev 986 * An ultimate workaround workd... 
:( * Zero out volumes during deletion to prevent data leaking between users * Minor formatting cleanup * jesse@aire.local to mailmap * Changed pep8 command line option from --just-pep8 to --pep8 * re-add broken code * merge trunk * Final versioning * Updates the documentation on creating and using a cloudpipe image * iSCSI/KVM test completed * Minor fixes * Fix RBDDriver in volume manager. discover_volume was raising exception. Modified local_path as well * Fixes VMware Connection to inherit from ComputeDriver * Fixes s3.py to allow looking up images by name. Smoketests run unmodified again with this change! * move from try_execute to _execute * Make VMWare Connection inherit from ComputeDriver * add up and down .sh * fix show_by_name in s3.py and give a helpful error message if image lookup fails * remove extra newline * dots * Rebased to trunk rev 980 * Rework importing volume_manager * Blushed up a little bit * Merged trunk * Only warn about rouge instances that compute should know about * Added some tests * Dangerous whitespace mistake! :) * Cleanup after prereq merge * Add new flag 'max_kernel_ramdisk_size' to specify a maximum size of kernel or ramdisk so we don't copy large files to dom0 and fill up /boot/guest * Rebased to trunk rev 980 * Merged lp:~rackspace-titan/nova/server_metadata_quotas as a prereq * Merged trunk * Docstring cleanup and formatting. Minor style fixes as well * Updated to use setfacl instead of chown * Commit for merge of metadata_quotas preq * merge trunk * Removed extra call from try/except * Reverted some superfluous changes to make MP more concise * Merged trunk * Reverted some superfluous changes to make MP more concise * Replace instance ref from compute.api.get_all with one from instance_get. This should ensure it gets fully populated with all the relevant attributes * Add a unit test for terminate_instances * pep8 * Fix RBDDriver in volume manager. discover_volume was raising exception. 
Modified local_path as well * pep8 fixes * migaration and pep8 fixes * update documentation on cloudpipe * Makes genvpn path actually refer to genvpn.sh instead of geninter.sh * typo * Merged trunk * Updating the runnova information and fixing bug 753352 * merge trunk * network manager changes, compute changes, various other * Floating ips auto assignment * Sudo chown the vbd device to the nova user before streaming data to it. This resolves an issue where nova-compute required 'root' privs to successfully create nodes with connection_type=xenapi * Minor blush ups * A minor blush up * A minor blush up * Remove unused self.interfaces_xml * Rebased to trunk rev 977 * Rebase to trunk rev 937 * debug tree status checkpoint 2 * docstring cleanup, direct api, part of compute * bzr ignore the top level CA dir that is created when running 'run_tests.sh -N' * fix reference to genvpn to point to the right shell script * Set default stateOrProvice to 'supplied' in openssl.cnf.tmpl * merge trunk * This branch fixes https://bugs.launchpad.net/bugs/751231 * Replace instance ref from compute.api.get_all with one from instance_get. This should ensure it gets fully populated with all the relevant attributes * When using libvirt, remove the persistent domain definition when we call destroy, so that behavior on destroy is as it was when we were using transient instances * Rebased to trunk rev 973 * Currently terminating an instance will hang in a loop, this allows for deletion of instances when using a libvirt backend. Also I couldn't help add a debug log where an exception is caught and ignored * merge trunk * resolved lazy_match conflict between bin/nova-manage instance and instance_type by moving instance subcommand under vm command. documented vm command in man page. 
removed unused instance_id from vm list subcommand * Ooops - redefining the _ variable seems like a _really_ bad idea * Handle the case when the machine is already SHUTOFF * Split logic on shutdown and undefine, so that even if the machine is already shutdown we will be able to proceed * Remove the XML definition when we destroy a machine * Rebased to trunk rev 971 * debug tree status checkpoint * Reabased to trunk rev 971 * Fixed log message gaffe * pylintage * typo - need to get nova-volumes working on this machine :-/ * dd needs a count to succeed, and remove unused/non-working special case for size 0 * There is a race condition when a VDI is mounted and the device node is created. Sometimes (depending on the configuration of the Linux distribution) nova loses the race and will try to open the block device before it has been created in /dev * zero out volumes on delete using dd * Added RST file on using Zones * Fixes euca-attach-volume for iscsi using Xenserver * pep8 * merge trunk * removes log command from nova-manage as it no longer worked in multi-log setup * Added error message to exception logging * Fixes bug which hangs nova-compute when terminating an instance when using libvirt backend * missing 'to' * Short circuit non-existant device during unit tests. It won't ever be created because of the stubs used during the unit tests * Added a patch for python eventlet, when using install_venv.py (see FAQ # 1485) * fixed LOG level and log message phrase * merge prop tweaks 2 * Set default stateOrProvice to 'supplied' in openssl.cnf.tmpl * This branch fixes https://bugs.launchpad.net/nova/+bug/751242 * Ignore errors when deleting the default route in the ensure_bridge function * bzr ignore the CA dir * merge prop tweaks 2011.2gamma1 ------------ * Import translations from Launchpad * added Zones doc * Update the describe_image_attribute and modify_image_attribute functions in the EC2 API so they use the top level 'is_public' attribute of image objects. 
This brings these functions in line with the base image service * Import from lp:~nova-core/nova/translations * corrects incorrect openstack api responses for metadata (numeric/string conversion issue) and image format status (not uppercase) * Implement a mechanism to enforce a configurable quota limit for image metadata (properties) within the OS API image metadata controller * Update the describe_image_attribute and modify_image_attribute functions in the ec2 API so they use the top level 'is_public' attribute of image objects. This brings these functions in line with the base image service * Ignore errors when deleting the default route in the ensure_bridge function * merge trunk * removed log command from nova-manage. no longer applicable with multiple logfiles * merge trunk * reminde admins of --purge option * Fixes issues with describe instances due to improperly set metadata * Keep guest instances when libvirt host restarts * fix tests from moving access check into update and delete * Added support for listing addresses of a server in the openstack api. Now you can GET * /servers/1/ips * /servers/1/ips/public * /servers/1/ips/private Supports v1.0 json and xml. Added corresponding tests * Log libvirt errcode on exception * This fixes how the metadata and addresses collections are serialized in xml responses * Fix to correct libvirt error code when the domain is not found * merged trunk * Removed commented-out old 'delete instance on SHUTOFF' code * Automatically add the metadata address to the network host. This allows guests to ARP for the address properly * merged trunk and resolved conflict * slight typo * clarified nova-manage instance_type create error output on duplicate flavorid * This branch is a patch for fixing below issue. 
> Bug #746821: live_migration failing due to network filter not found Link a bug report * fix pep8 violation * Update instances table to use instance_type_id instead of the old instance_type column which represented the name (ex: m1.small) of an instance type * Drop extra 'None' arg from dict.get call * Some i18n fixes to instance_types * Renamed computeFault back to cloudServersFault in an effort to maintain consistency with the 1.0 API spec. We can look into distinguishing the two in the next release. Held off for now to avoid potential regression * adds a timeout on session.login_with_password() * Drop unneeded Fkey on InstanceTypes.id * Bypass a potential security vulnerability by not setting shell=True in xenstore.py, using johannes.erdfelt's patch * Renamed computeFault to cloudServersFault * fixed the way ip6 address were retrieved/returned in _get_network_info in nova/virt/xenapi/vmops * added -manage vm [list|live-migration] to man page * removed unused instance parameter from vm list ... as it is unused. added parameters to docstring for vm list * moved -manage instance list command to -manage vm list to avoid lazy match conflict with instance_types * Simplify by always adding to loopback * Remove and from AllocateAddress response, and fix bug #751176 * remove unused code * better error message * Blush up a bit * Rebased to trunk rev 949 * pep8 * adds timeout to login_with_password * test provider fw rules at the virt/ipteables layer. lowercase protocol names in admin api to match what the firewall driver expects. add provider fw rule chain in iptables6 as well. fix a couple of small typos and copy-paste errors * fixed based on reviewer's comment - 1. erase unnecessary blank line, 2. adding LOG.debug * Rebased to trunk rev 949 * fixed based on reviewer's comment - 'locals() should be off from _() * Make description of volume_id more generic * add the tests * pep8 cleanup * ApiError code should default to None, and will only display a code if one exists. 
Prior was output an 'ApiError: ApiError: error message' string, which is confusing * ec2 api run_instances checks for image status must be 'available'. Overhauled test_run_instances for working set of test assertions * if we delete the old route when we move it we don't need to check for exists * merged trunk * removed comment on API compliance * Added an option to run_tests.sh so you can run just pep8. So now you can: ./run_tests.sh --just-pep8 or ./run_tests.sh -p * Add automatic metadata ip to network host on start. Also fix race where gw is readded twice * Controllers now inherit from nova.api.openstack.common.OpenstackController * Merged trunk * Support providing an XML namespace on the XML output from the OpenStack API * Merged with trunk, fixed up test that wasn't checking namespace * Added support for listing addresses of a server in the openstack api. Now you can GET * /servers/1/ips * /servers/1/ips/public * /servers/1/ips/private Supports v1.0 json and xml. Added corresponding tests * check visibility on delete and update * YADU (Yet Another Docstring Update) * Make sure ca_folder is created before chdir()ing into it * another syntax error * Use a more descriptive name for the flag to make it easier to understand the purpose * Added logging statements for generic WSGI and specific OpenStack API requests * syntax error * Incorprate johannes.erdfelt's patch * updated check_vm_record in test_xenapi to check the gateway6 correctly * updated get_network_info in libvirt_conn to correctly insert ip6s and gateway6 into the network info, also small style fixes * add docstrings * updated _prepare_injectables() to use info[gateway6] instead of looking inside the ip6 address dict for the gateway6 information * Enable RightAWS style signing on server_string without port number portion * modified behavior of inject_network_info and reset_network related to a vm_ref not being passed in * Create ca_folder if it does not already exist * Wait for device node to be created 
after mounting image VDI * Improved unit tests Fixed docstring formatting * Only create ca_path directory if it does not already exist * Added bug reference * Only create ca_path directory if it does not already exist * Make "setup.py install" much more thorough. It now installs tools/ into /usr/share/nova and makes sure api-paste.conf lands in /etc/nova rather than /etc * fixed based on reviwer's comment * return image create response as image dict * Add a patch for python eventlet, when using install_venv.py (see FAQ # 1485) * Undo use of $ in chain name where not needed * Testing for iptables manager changes * Don't double-apply provider fw rules in NWFilter and Iptables. Don't create provider fw rules for each instance, use a chain and jump to it. Fix docstrings * typo * remove -None for user roles * pep8 * fallback to status if image_state is not set * update and fix tests * unite the filtering done by glance client and s3 * Removing naughty semicolon * merged trunk * remove extraneous empty lines * move error handling down into get_password function * refactor to handle invalid adminPass * fixed comment * merged trunk * add support for specifying adminPass for JSON only in openstack api 1.1 * add tests for adminPass on server create * Fix a giant batch of copypasta * Remove file leftover from conflict * adding support for OSAPI v1.1 limits resource * Moved 'name' from to , corrected and fixes bug # 750482 * This branch contains the fix for lp:749973. VNC is assumed that is default for all in libvirt which LXC does not support yet * Remove comments * Separate CA/ dir into code and state * removed blank lines for pep8 fix * pep8 fixed * Fixed the addresses and metadata collections in xml responses. Added corresponding tests * Dont configure vnc if we are using lxc * Help paste_config_file find the api config now that we moved it * Add bug reference * Move api-paste.ini into a nova/ subdir of etc/ * Add a find_data_files method to setup.py. 
Use it to get tools/ installed under /usr/(local/)/share/nova * Nits * Add missing underscore * fix bug lp751242 * fix bug lp751231 * Automatically create CA state dir, and make sure the CA scripts look for the templates in the right places * fix bug 746821 * Remove and from AllocateAddress response, and fix bug #751176 * Allow CA code and state to be separated, and make sure CA code gets installed by setup.py install * Rebased to trunk 942 * fix bug lp:682888 - DescribeImages has no unit tests * Correct variable name * correct test for numeric/string metadata value conversion * openstack api metadata responses must be strings * openstack api requires uppercase image format status responses * merge trunk * Refactor so that instances.instance_type is now instances.instance_type_id * splitting test_get_nic_for_xml into two functions * Network injection check fixed in libvirt driver * merging trunk * fixing log message * working with network_ref like with mapping * add test for NWFilterFirewall * Removed adminclient.py and added reference to the new nova-adminclient project in tools/pip-requires * Don't prefix adminPass with the first 4 chars of the instance name * Declares the flag for vncproxy_topic in compute.api * Fixes bug 741246. Ed Leafe's inject_file method for the agent plugin was mistakenly never committed after having to fix commits under wrong email address. 
vmops makes calls to this (previously) missing method * Attempt to circumvent errors in the API from improper/malformed responses from image service * fixes incorrect case of OpenStack API status response * Fixed network_info creating * Moved 'name' property from to , corrected and fixes bug # 750482 * corrected capitalization of openstack api status and added tests * libvirt_con log fix * Ensure no errors for improper responses from image service * merge trunk * Fixes error which occurs when no name is specified for an image * improving tests * network injection check fixed * Only define 'VIMMessagePlugin' class if suds can be loaded * Make euca-get-ajax-console work with Euca2ools 1.3 * Add bug reference * Use keyword arguments * add multi_nic_test * added preparing_xml test * split up to_xml to creation xml_info and filling the template * use novalib for vif_rules.py, fix OvsFlow class * extract execute methods to a library for reuse * Poller needs to check for BUILDING not NOSTATE now, since we're being more explict about what is going on * Add checking if the floating_ip is allocated or not before appending to result array in DescribeAddresses * Added synchronize_session parameter to a query in fixed_ip_disassociate_all_by_timeout() and fix #735974 * Made the fix simpler * Add checking if the floating_ip is allocated or not before appending to result array * Added updated_at field to update statement according to Jay's comment * change bridge * Add euca2ools import * Rebased to trunk 930 * Rebased to trunk 726 * lots of updates to ovs scripts * Make euca-get-ajax-console work with Euca2ools 1.3 * merge trunk * Hopefully absolved us of the suds issue? 
* Removes excessive logging message in the event of a rabbitmq failure * Add a change password action to /servers in openstack api v1.1, and associated tests * Removal of instance_set_state from driver code, it shouldnt be there, but instead should be in the compute manager * Merged trunk * Don't include first 4 chars of instance name in adminPass * Friendlier error message if there are no compute nodes are available * merge lp:nova * Merged waldon * Adding explanation keyword to HTTPConflict * Merged waldon * makes sure s3 filtering works even without metadata set properly * Merged waldon * Didn't run my code. Syntax error :( * Now using the new power state instead of string * adding servers view mapping for BUILDING power state * removes excessive logging on rabbitmq failure * Review feedback * Friendlier error message if there are no compute nodes are available * Merged with Waldon * Better error handling for spawn and destroy in libvirt * pep8 * adding 'building' power state; testing for 409 from OSAPI when rebuild requested on server being rebuild * More friendly error message * need to support python2.4, so can't use uuid module * If the floating ip address is not allocated or is allocated to another project, then the user trying to associate the floating ip address to an instance should get a proper error message * Update state between delete and spawn * adding metadata support for v1.1 * Rebuild improvements * Limit image metadata to the configured metadata quota for a project * Add volume.API.remove_from_compute instead of compute.API.remove_volume * Rebased to trunk rev 925 * Removed adminclient and referred to pypi nova_adminclient module * fixed review comment for i18n string multiple replacement strings need to use dictionary format * fixed review comment for i18n string multiple replacement strings need to use dictionary format * Add obviously-missing method that prevents an Hyper-V compute node from even starting up * Avoid any hard dependencies in 
nova.virt.vmwareapi.vim * review cleanup * Handles situation where Connection._instances doesn't exist (ie. production) * localize NotImplementedError() * Change '"%s" % e' to 'e' * Fix for LP Bug #745152 * Merged waldon * adding initial v1.1 rebuild action support * Add ed leafe's code for the inject_file agent plugin method that somehow got lost (fixes bug 741246). Update TimeoutError string for i18n * submitting a unit test for terminate_instance * Update docstrings and spacing * fixed ordering and spacing * removed trailing whitespace * updated per code review, replaced NotFound with exception.NotFound * Merged Waldon's API code * remove all references to image_type and change nova-manage upload to set container format more intelligently * Rough implementation of rebuild_instance in compute manager * adding v1.0 support for rebuild; adding compute api rebuild support * Key type values in ec2_api off of container format * Whoops * Handle in vim.py * Refixed unit test to check XML ns * Merged with trunk (after faults change to return correct content-type) * OpenStack API faults have been changed to now return the appropriated Content-Type header * More tests that were checking for no-namespace * Some tests actually tested for the lack of a namespace :-) * pep8 fixes * Avoid hard dependencies * Implement quotas for the new v1.1 server metadata controller. Modified the compute API so that metadata is a dict (not an array) to ensure we are using unique key values for metadata. 
This is isn't explicit in the SPECs but it is implied by the new v1.1 spec since PUT requests modify individual items * Add XML namespaces to the OpenStack API * Merged with trunk * Fixed mis-merge: OS API version still has to be v1.1 * Store socket_info as a dictionary rather than an array * Merged with trunk * Added synchronize_session parameter to a query in fixed_ip_disassociate_all_by_timeout() and fix #735974 * Key was converted through str() even if None, resulting in "None" being added to authorized_keys when no key was specified * queues properly reconnect if rabbitmq is restarted * Moving server update adminPass support to be v1.0-specific OS API servers update tests actually assert and pass now Enforcing server name being a string of length > 0 * Adding Content-Type code to openstack.api.versions.Versions wsgi.Application * Fixes metadata for ec2_api to specify owner_id so that it filters properly * Makes the image decryption code use the per-project private key to decrpyt uploaded images if use_project_ca is set. This allows the decryption code to work properly when we are using a different ca per project * exception -> Fault * Merged trunk * Do not push 'None' to authorized_keys when no key is specified * Add missing method that prevent HyperV compute nodes from starting up * TopicAdapterConsumer uses a different callback model than TopicConsumer. 
This patch updates the console proxy to use this pattern * merge trunk * Uses the proc filesystem to check the volume size in volume smoketests so that it works with a very limited busybox image * merged trunk * The VNC Proxy is an OpenStack component that allows users of Nova to access their instances through a websocket enabled browser (like Google Chrome) * make sure that flag is there in compute api * fix localization for multiple replacement strings * fix doc to refer to nova-vncproxy * Support for volumes in the OpenStack API * Deepcopy the images, because the string formatting transforms them in-place * name, created_at, updated_at are required * Merged with trunk * "Incubator" is no more. Long live "contrib" * Rename MockImageService -> FakeImageService * Removed unused super_verbose argument left over from previous code * Renamed incubator => contrib * Wipe out the bad docstring on get_console_pool_info * use project key for decrypting images * Fix a docstring * Found a better (?) docstring from get_console_pool_info * Change volume so that it returns attachments in the same format as is used for the attachment object * Removed commented-out EC2 code from volumes.py * adding unit tests for describe_images * Fix unit test to reflect fact that instance is no longer deleted, just marked SHUTOFF * Narrowly focused bugfix - don't lose libvirt instances on host reboot or if they crash * fix for lp742650 * Added missing blank line at end of multiline docstring * pep8 fixes * Reverted extension loading tweaks * conversion of properties should set owner as owner_id not owner * add nova-vncproxy to setup.py * clarify test * add line * incorporate feedback from termie * Make dnsmasq_interface configurable * Stop nova-manage from reporting an error every time. 
Apparently except: catches sys.exit(0) * add comment * switch cast to a call * move functions around * move flags per termie's feedback * initial unit test for describe images * don't print the error message on sys.exit(0) * added blank lines in between functions & removed the test_describe_images (was meant for a diff bug lp682888) * Make Dnsmasq_interface configurable * fix flag names * Now checking that exists at least one network marked injected (libvirt and xenapi) * This branch adds support for linux containers (LXC) to nova. It uses the libvirt LXC driver to start and stop the instance * use manager pattern for auth token proxy * Style fixes * style fix * Glance used to return None when a date field wasn't set, now it returns ''. Glance used to return dates in format "%Y-%m-%dT%H:%M:%S", now it returns "%Y-%m-%dT%H:%M:%S.%f" * Fix up docstring * Added content_type to OSAPI faults * accidentally dropped a sentence * Added checks that exists at least one network marked inhected in libvirt and xenapi * Adds support for versioned requests on /images through the OpenStack API * Import order * Switch string concat style * adding xml test case * adding code to explicitly set the content-type in versions controller; updating test * Merged trunk * Added VLAN networking support for XenAPI * pep8 * adding server name validation to create method; adding tests * merge lp:nova * use informative error messages * adding more tests; making name checks more robust * merge trunk * Fix pep8 error * Tweaking docstrings just in case * Catch the error that mount might through a bit better * sorted pep8 errors that were introduced during previous fixes * merge trunk * make all openstack status uppercase * Add remove_volume to compute API * Pass along the nbd flags although we dont support it just yet * cleaned up var name * made changes per code review: 1) removed import of image from objectstore 2) changed to comments instaed of triple quotes * Displays an error message to the 
user if an exception is raised. This is vital because if logfile is set, the exception shows up in the log and the user has no idea something went wrong * Yet more docstring fixes * More style changes * Merged with trunk * Multi-line comments should end in a blankline * add note per review * More fixes to keep the stylebot happy * Cleaned up images/fake.py, including move to Duplicate exception * Code cleanup to keep the termie-bot happy * displays an error message if a command fails, so that the user knows something went wrong * Fixes volume smoketests to work with ami-tty * address some of termie's recommendations * add period, test github * pep8 * osapi servers update tests actually assert now; enforcing server name being a string of length > 0; moving server update adminPass support to be v1.0-specific * Moving shared_ip_groups controller to APIRouterV10 Replacing all shared_ip_groups contoller code with HTTPNotImplemented Adding shared_ip_groups testing * fix docstrings * Merged trunk * Updated docstrings to satisfy * Updated docstrings to satisfy * merge trunk * merge trunk * minor fix and comment * style fixes * merging trunk * Made param descriptions sphinx compatible * Toss an __init__ in the test extensions dir. 
This gets it included in the tarball * pep8 * Fix up libvirt.xml.template * This fixes EC2 API so that it returns image displayName and description properly * merged from trunk * Moving backup_schedule route out of base router to OS API v1.0 All controller methods return HTTPNotImplemented to prevent further confusion Correcting tests that referred to incorrect url * Fixed superfluous parentheses around locals() * Added image name and description mapping to ec2 api * use self.flags in virt test * Fixed DescribeUser in the ec2 admin client to return None instead of an empty UserInfo object * Remove now useless try/except block * Dont make the test fail * backup_schedule tests corrected; controller moved to APIRouterV10; making controller fully HTTPNotImplemented * when image_id provided cannot be found, returns more informative error message * Adds support for snapshotting (to a new image) in the libvirt code * merge lp:nova * More pep8 corrections * adding shared_ip_groups testing; replacing all shared_ip_groups contoller code with HTTPNotImplemented; moving shared_ip_groups controller to APIRouterV10 * Merged trunk * pep8 whitespace * Add more unit tests for lxc * Decided to not break old format so this should work with the way Glance used to work and the way glace works now..The best of both worlds? 
* update glance params per review * add snapshot support for libvirt * HACKING update for docstrings * merge trunk * Fix libvirt merge mistake * lock down requirements for change password * merge trunk * Changed TopicConsumer to TopicAdapterConsumer in bin/nova-ajax-console-proxy to allow it to start up once again * style changes * Removed iso8601 dep from pip-requires * Merged trunk * Removed extra dependency as per suggestion, although it fixes the issue much better IMO, we should be safe sticking with using the format from python's isoformat() * Assume that if we don't find a VM for an instance in the DB, and the DB state is NOSTATE, that the db instance is in the process of being spawned, and don't mark it SHUTOFF * merge with trunk * Added MUCH more flexible iso8601 parser dep for added stability * Fix formatting of TODO and NOTE - should be a space after the # * merge lp:nova * Mixins for tests confuse pylint no end, and aren't necessary... you can stop the base-class from being run as a test by prefixing the class name with an underscore * Merged the two periodic_tasks functions, that snuck in due to parallel merges in compute.manager * Start up nova-api service on an unused port if 0 is specified. Fixes bug 744150 * Removed 'is not None' to do more general truth-checking. 
Added rather verbose testing * Merged with trunk * merge trunk * merge trunk, fixed conflicts * TopicConsumer -> TopicAdapterConsumer * Fix typo in libvirt xml template * Spell "warn" correctly * Updated Authors file * Removed extraneous white space * Add friendlier message if an extension fails to include a correctly named class or factory * addressed reviewers' concerns * addressed termies review (third round) * addressed termie's review (second round) * Do not load extensions that start with a "_" * addressed termies review (first round) * Clarified note about scope of the _poll_instance_states function * Fixed some format strings * pep8 fixes * Assume that if we don't find a VM for an instance in the DB, and the DB state is NOSTATE, that the db instance is in the process of being spawned * pep8 fixes * Added poll_rescued_instances to virt driver base class * There were two periodic_tasks functions, due to parallel merges in compute.manager * pep8 fixes * Bunch of style fixes * Fix utils checking * use_ipv6 now passing to interfaces.template as first level variable in libvirt_conn * Replaced import of an object with module import as per suggestion * Updates to the newest version of nova.sh, which includes: * Installing new python dependencies * Allows for use of interfaces other than eth0 * Adds a run_detached mode for automated testing * Now that it's an extension, it has to be v1.1. 
Also fixed up all the things that changed in v1.1 * merge trunk addressing Trey's comments * Initial extensification of volumes * Merged with trunk, resolved conflicts & code-flicts * Removed print * added a simple test for describe_images with mock for detail funciton * merged trunk * merge trunk * merge lp:nova * Adding links container to openstack api v1.1 servers entities * Merged trunk * Add license and copyright to nova/tests/api/openstack/extensions/__init__.py * Fixed a typo on line 677 where there was no space between % and FLAGS * fix typos * updated nova.sh * Added a flag to allow a user to specify a dnsmasq_config_file is they would like to fine tune the dnsmasq settings * disk_format is now an ImageService property. Adds tests to prevent regression * Merged trunk * Merged trunk * merging trunk * merge trunk * Merged trunk and fixed broken/conflicted tests * - add a "links" container to versions entities for Openstack API v1.1 - add testing for the openstack api versions resource and create a view builder * merging trunk * This is basic network injection for XenServer, and includes: * merging trunk * Implement image metadata controller for the v1.1 OS API * merging trunk * Changed use_ipv6 passing to interfaces.template * merging trunk, resolving conflicts * Add a "links" container to flavors entities for Openstack API v1.1 * Toss an __init__ in the test extensions dir. This gets it included in the tarball * Use metadata = image.get('properties', {}) * merge trunk * Revert dom check * merge trunk * Fix unit tests w/ latest trunk merge * merging trunk and resolving conflicts * Fix up destroy container * Fix up templating * Implement metadata resource for Openstack API v1.1. 
Includes: -GET /servers/id/meta -POST /servers/id/meta -GET /servers/id/meta/key -PUT /servers/id/meta/key -DELETE /servers/id/meta/key * Dont always assume qemu * Removed partition from setup_container * pep8 fix * disk_format is now an ImageService property * Restore volume state on migration failure * merge trunk, add unit test * merge trunk * merge trunk addressing reviewer's comments * clarify comment * add documentation * Empty commit? * minor pep8 fix in db/fakes.py * Support for markers for pagination as defined in the 1.1 spec * add hook for osapi * merge trunk * Ports the Tornado version of an S3 server to eventlet and wsgi, first step in deprecating the twistd-based objectstore * Merged with trunk Updated net injection for xenapi reflecting recent changes for libvirt * Fix lp741415 by splitting arguments of _execute in the iSCSI driver * make everything work with trunk again * Support for markers for pagination as defined in the 1.1 spec * add descriptive docstring * don't require integrated tests to recycle connections * remove twisted objectstore * port the objectstore tests to the new tests * update test base class to monkey patch wsgi * rename objectstore tests * port s3server to eventlet/wsgi * add s3server, pre-modifications * merge trunk * Added detail keywork and i18n as per suggestions * incorporate feedback from termie * Implementation of blueprint hypervisor-vmware-vsphere-support. (Link to blueprint: https://blueprints.launchpad.net/nova/+spec/hypervisor-vmware-vsphere-support) * fix typo * Addressing Trey's comments. Removed disk_get_injectables, using _get_network_info's return value * Adds serverId to OpenStack API image detail per related_image blueprint * Fix for bug #740947 Executing parted with sudo in _write_partition (vm_utils.py) * Implement API extensions for the Openstack API. 
Based on the Openstack 1.1 API the following types of extensions are supported: * Merging trunk * Adds unit test coverage for XenAPI Rescue & Unrescue * libvirt driver multi_nic support. In this phase libvirt can work with and without multi_nic support, as in multi_nic support for xenapi: https://code.launchpad.net/~tr3buchet/nova/xs_multi_nic/+merge/53458 * Merging trunk * Review feedback * Merged trunk * Additions to the Direct API: * Merged trunk * Added test_get_servers_with_bad_limit, test_get_servers_with_bad_offset and test_get_servers_with_bad_marker * pep8 cleanups * Added test_get_servers_with_limit_and_marker to test pagination with marker and limit request params * style and spacing fixed * better error handling and serialization * add some more docs and make it more obvious which parts are examples * add an example of a versioned api * add some more docs to direct.py * add Limited, an API limiting/versioning wrapper * improve the formatting of the stack tool * support volume and network in the direct api * Merged with trunk, fix problem with behaviour of (fake) virt driver when instance doesn't reach scheduling * In this branch we are forwarding incoming requests to child zones when the requested resource is not found in the current zone * trunk merge * Fixes a bug that was causing tests to fail on OS X by ensuring that greenthread sleep is called during retry loops * Merged trunk * Fix some errors that pylint found in nova/api/openstack/servers.py * Fix api logging to show proper path and controller:action * Merged trunk * Pylint 'Undefined variable' E0602 error fixes * Made service_get_all()'s disabled parameter default to None. Pass False for enabled services; True for disabled services. 
Calls to this method have been updated to remain consistent * Merged with trunk * Reconcile tests with latest trunk merges * Merged trunk and resolved conflict in nova/db/sqlalchemy/api.py * Don't try to parse the empty string as a datetime * change names for consistency with existing db api * Merged with trunk * Forgot one set of flags * Paginated results should not include the item starting at marker. Improved implementation of common.limited_by_marker as suggested by Matt Dietz. Added flag osapi_max_limit * Detect if user is running the default Lucid version of libvirt, and give a nicer error message * Updated to use new APIRouterV11 class in tests * Fix lp741514 by declaring libvirt_type in nova-manage * Docstring fixes * get image metadata tests working after the datetime interface change in image services * adding versioned controllers * Addressed issues raised by Rick Harris' review * Stubbing out utils.execute for migrate tests * Aggregates capabilities from Compute, Network, Volume to the ZoneManager in Scheduler * merged trunk r864 * removing old Versions application and correcting fakes to use new controller * Renamed __image and __compute to better describe their purposes. Use os.path.join to create href as per suggestion. Added base get_builder as per pychecker suggestion * merging trunk r864 * trunk merged. conflicts resolved * Merged trunk * merge trunk * merge trunk * Small refactor * Merged trunk and fixed tests * Couple of pep8 fixes * pep8 clearing * making servers.generate_href more robust * merging trunk r863 * Fixes lp740322: cannot run test_localization in isolation * couple of bugs fixed * Merged trunk * Dont use popen in dettaching the lxc loop * Fix up formatting of libvirt.xml.template * trunk merge * fix based on sirp's comments * Grrr... 
because we're not recycling the API yet, we have to configure flags the first time it's called * merge trunk * Fake out network service as well, otherwise we can't terminate the instance in test_servers now that we've started a compute service * merge trunk * Sorted out a problem that occurred with unit tests for VM migration * pep8 fixes * Test for attach / detach (and associated fixes) * Pass a fake timing source to live_migration_pre in every test that expects it to fail, shaving off a whole minute of test run time * merge trunk * Poll instance states periodically, so that we can detect when something changes 'behind the scenes' * Merged with conflict and resolved conflict (with my own patch, no less) * Added simple nova volume tests * Created simple test case for server creation, so that we can have something to attach to.. * Merged with trunk * Added volume_attachments * Declare libvirt_type to avoid AttributeError in live_migration * minor tweak from termie feedback * Added a mechanism for versioned controllers for openstack api versions 1.0/1.1. Create servers in the 1.1 api now supports imageRef/flavorRef instead of imageId/flavorId * Fixed the docstring for common.get_id_from_href * better logging of exceptions * Merged trunk * Merged trunk * Fix issues with certificate updating & whitespace removal * Offers the ability to run a periodic_task that sweeps through rescued instances older than 24 hours and forcibly unrescues them * Merged trunk * Added hyperv stub * Don't try to parse a datetime if it is the empty string (or None) * Remove a blank line * pep8 fix * Split arguments of _execute in the iSCSI driver * merge trunk * Added revert_resize to base class * Addressing Rick Clark's comments * Merged with lp:nova, fixed conflicts * boto_v6 module is imported if the flag "use_ipv6" is set to True * pep8 fixes, backported some important fixes that didn't make it over from my testing system :-( * Move all types of locking into utils.synchronize decorator * Doh! 
Missed two places which were importing the old driver location * Review feedback * make missing noVNC error condition a bit more fool-proof * clean some pep8 issues * general cleanup, use whitelist for webserver security * Better method name * small fix * Added docstring * Updates the previously merged xs_migration functionality to allow upsizing of the RAM and disk quotas for a XenServer instance * Fix lp735636 by standardizing the format of image timestamp properties as datetime objects * migration gateway_v6 to network_info * merge prop fixes * Should not call super __init__ twice in APIRouter * fix utils.execute retries for osx * Keep the fallback code - we may want to do better version checking in future * Give the user a nicer error message if they're using the Lucid libvirt * Only run periodic task when rescue_timeout is greater than 0 * Fixed some typos * Forgot extraneous module import again * Merged trunk * Forgot extraneous module import * Automatically unrescue instances after a given timeout * trunk merge * indenting cleanup * fixing some dictionary get calls * Unit test cleanup * one more minor fix * Moving the migration yet again * xml template fixed * merge prop changes * pep8 fixed * trunk merged * added myself to authors file * Using super to call parent _setup_routes in APIRouter subclasses * Merged trunk * pep8 fix * Implement v1.1 image metadata * This branch contains the fix for bug #740929 It makes sure cidr_v6 is not null before building the 'ip6s' key in the network info dictionary. This way utils.to_global_ipv6 does not fail because of cidr==None * review comments fixed * add changePassword action to os api v1.1 * Testing of XML and JSON for show(), and conformance to API spec for JSON * Fixed tests * Merged trunk * Removed some un-needed code, and started adding tests for show(), which I forgot\! 
* id -> instance_id * Checking whether cidr_v6 is not null before populating ipv6 key in network info map (VMOps._get_network_info) * Executing parted with sudo in _write_partition * We update update_ra method to synchronize, in order to prevent crash when we request multiple instance at once * merged with trunk Updated xenapi network injection for IPv6 Updated unit tests * merge trunk * merge trunk * removed excess debug line * more progress * use the nova Server object * separating out components of vnc console * Earlier versions of the python libvirt binding had getVersion in the libvirt namespace, not on the connection object. Check both * Report the exception (happens when can't import libvirt) * Use subset_dict * Removing dead code * Touching up comment * Merging trunk * Pep8 fixes * Adding tests for owned and non-existent images * More small cleanups * Fix for #740742 - format describe_instance_output correctly to prevent errors in dashboard * Cleaning up make_image_fixutres * Merged with lp:nova * Small cleanup of openstack/images.py * Fixed up the new location of driver.py * Fix for lp740742 - format describe_instance_output correctly to prevent errors in dashboard * Merged with lp:nova * Filtering images by user_id now * Clarified my "Yuk" comment * Cleaned up comment about virsh domain.info() return format * Added space in between # and TODO in #TODO * Added note about the advantages of using a type vs using a set of global constants * Filled out the base-driver contract, so it's not a false-promise * Enable flat manager support for ipv6 * Adding a talk bubble to the nova.openstack.org site that points readers to the 2011.1 site and the docs.openstack.org site - similar to the swift.openstack.org site. I believe it helps people see more sites are available, plus they can get to the Bexar site if they want to. 
Going forward it'll be nice to use this talk bubble to point people to the trunk site from released sites * Correctly imports greenthread in libvirt_conn.py. It is used by live_migrate() * Forgot this in the rename of check_instance -> check_isinstance * Test the login behavior of the OpenStack API. Uncovered bug732866 * trunk merge * Renamed check_instance -> check_isinstance to make intent clearer * Fix some crypto strangeness (\n in file_name field of certificates, wrong IMPL method for certificate_update) * Added note agreeing with Brian Lamar that the namespace doesn't belong in wsgi * Fix to avoid db migration failure in virtualenv * Fixed up unit tests and direct api that was also calling _serialize (naughty!) * Fix the describe_vpns admin api call * pep8 and fixed up zone-list * Support setting the xmlns intelligently * get_all cleanup * Refactored out _safe_translate code * Set XML namespace when returning XML * Fix for LP Bug #704300 * Fix a typo in the ec2 admin api * typo fix * Pep8 fix * Merging trunk * make executable * Adding BASE_IMAGE_ATTRS to ImageService * intermediate progress on vnc-nova integration. checking in to show vish * add in eventlet version of vnc proxy * Updating doc strings in accordance with PEP 257. Fixing order of imports in common.py * one more copyright fix * pep8 stupidness * Tweak * fixing copyright * tweak * tweak * Whoops * Changed default for disabled on service_get_all to None. Changed calls to service_get_all so that the results should still be as they previously were * Now using urlparse to parse a url to grab id out of it * Resolved conflicts * Fix * Remove unused global semaphore * Addressed reviewer's comments * pep8 fix * Apparantly a more common problem than first thought * Adding more docstrings. 
image_id and instance_type fields of an instance will always exist, so no reason to check if keys exist * Pass a fake timing source to test_ensure_filtering_rules_for_instance_timeout, shaving off 30 seconds of test run time * pep8 * Merged trunk * Add a test for leaked semaphores * Remove checks in _cache_image tests that were too implementation specific * adding view builder tests * Add correct bug fixing metadata * When updating or creating set 'delete = 0'. (thus reactivating a deleted row) Filter by 'deleted' on delete * merging trunk r843 * making Controller._get_flavors is_detail a keyword argument * merging trunk r843 * Fix locking problem in security group refresh code * merging trunk r843 * Add unit test and code updates to ensure that a PUT requests to create/update server metadata only contain a single key * Add call to unset all stubs * IptablesManager.semaphore is no more * Get rid of IptablesManager's explicit semaphore * Add --fixes lp: metadata * Convert _cache_image to use utils.synchronized decorator. Disable its test case, since I think it is no longer needed with the tests for synchronized * Make synchronized decorator not leak semaphores, at the expense of not being truly thread safe (but safe enough for Eventlet style green threads) * merge trunk * Wrap update_ra in utils.synchronized * Make synchronized support both external (file based) locks as well as internal (semaphore based) locks. Attempt to make it native thread safe at the expense of never cleaning up semaphores * merge with trunk * vpn changes * added zone routing flag test * routing test coverage * routing test coverage * xenapi support for multi_nic. This is a phase of multi_nic which allows xenapi to work as is and with multi_nic. The other virt driver(s) need to be updated with the same support * better comments. First redirect test * better comments. 
First redirect test * Remove _get_vm_opaque_ref() calls in rescue/unrescue * Remove dupe'd code * Wrap update_dhcp in utils.synchronized * if fingerprint data not provided, added logic to calculate it using the pub key * get rid of another datetime alias * import greenthread in libvirt * merge lp:nova * make bcwaldon happy * fix licenses * added licenses * wrap and log errors getting image ids from local image store * merge lp:nova * merging trunk * Fix for LP Bug #739641 * pep8; various fixes * Provide more useful exception messages when unable to load the virtual driver * Added Gabe to Authors file. He helped code this up too * Added XenAPI rescue unit tests * added an enumerate to track device in vmops.create_vifs() * pep8 * Openstack api 1.0 flavors resource now implemented to match the spec * more robust extraction of arguments * Updated comment per the extension naming convention we actually use * Added copyright header * Fix pep8 issues in nova/api/openstack/extensions.py * Fix limit unit tests (reconciles w/ trunk changes) * Changed fixed_range (CIDR) to be required in the nova-manage command; changed default num_networks to 1 * merging trunk r837 * zones3 and trunk merge * Added space * trunk merge * remove scheduler.api.API. 
naming changes * Changed error to TypeError so that we get the arguments list * Added my name to Authors Added I18n for network create string * merge with trunk * merge trunk * merge trunk * merge trunk * Add bug metadata * Wrap update_dhcp in utils.synchronized * fixes nova-manage instance_type compatibility with postgres db * Tell PyLint not to complain about the "_" function * Make smoketests' exit code reveal whether they were succesful * pep8 * Added run_instances method to the connection.py of the contrib/boto_v6/ec2 which would return ReservationV6 object instead of Reservation in order to access attribute dns_name_v6 of an instance * cleanup another inconsistent use of 1 for True in nova-manage * Changed Copyright to NTT for newly added files for flatmanager ipv6 * merge trunk * * committing ovs scripts * fix nova-manage instance_type list for postgres compatibility * fixed migration instance_types migration to support postgres correctly * comment more descriptive * Seriously? * Fixed netadmin smoketests for ipv6 * Merged trunk * Better errors when virt driver isn't loaded * merge lp:nova * fix date formatting in images controller show * huh * fix ups * merge trunk * uses True/False instead of 1/0 for Postgres compatibility * cleaned up tests stubs that were accidentally checked in * works again. woo hoo * created api endpoint to allow uploading of public key * api decorator * Cleanup of FakeAuthManager * Replaced all pylint "disable-msg=" with "disable=" and "enable-msg=" with "enable=" * Change cloud.id_to_ec2_id to ec2utils.id_to_ec2_id. Fixes EC2 API error handling when invalid instances and volume names are specified * A few more single-letter variable names bite the dust * Re-implementation (or just implementation in many cases) of Limits in the OpenStack API. Limits is now available through /limits and the concept of a limit has been extended to include arbitrary regex / http verb combinations along with correct XML/JSON serialization. 
Tests included * Avoid single-letter variable names * auth_data is a list now (thanks Rick!) * merge with trunk * Mark instance metadata as deleted when we delete the instance * results * fixed up novaclient usage to include managers * Added test case * Minor fixes to replace occurances of "VI" by "VIM" in 2 comments * whoopsy2 * whoopsy * Fixed 'Undefined variable' errors generated by pylint (E0602) * Merged trunk * Change cloud.id_to_ec2_id to ec2utils.id_to_ec2_id. Fixes EC2 API error handling when invalid instances and volume names are specified * enable-msg -> enable * disable-msg -> disable * enable_zone_routing flag * PEP-8 * Make flag parsing work again * Using eventlets greenthreads for optimized image processing. Fixed minor issues and style related nits * Fixed issue arisen from recent feature update (utils.execute) * Make proxy.sh work with both openbsd and traditional variants of netcat * Query the size of the block device, not the size of the filesystem * merge trunk * Ensuring kernel/ramdisk files are always removed in case of failures * merge trunk * merge trunk * Implement metadata resource for Openstack API v1.1. Includes: -GET /servers/id/meta -POST /servers/id/meta -GET /servers/id/meta/key -PUT /servers/id/meta/key -DELETE /servers/id/meta/key * Make "ApiError" the default error code for ApiError instances, rather than "Unknown." * When changing the project manager, if the new manager is not yet a project member, be sure to make them be a project member * Make the rpc cast/call debug calls show what topic they are sending to. 
This aids in debugging * Final touches and bug/pep8 fixes * Support for markers for pagination as defined in the 1.1 spec * Merged trunk * Become compatible with ironcamel and bcwaldon's implementations for standardness * pep8 * Merged dependent branch lp:~rackspace-titan/nova/openstack-api-versioned-controllers * Updated naming, removed some prints, and removed some invalid tests * adding servers container to openstack api v1.1 servers entities * decorator more generic now * Images now v1.1 supported...mostly * fixed up bzr mess * Fix for LP Bug #737240 * refactored out middleware, now it's a decorator on service.api * Fix for LP Bug #737240 * Add topic name to cast/call logs * Changing project manager should make sure that user is a project member * Invert some of the original logic and fix a typo * Make the smoketests pep8 compliant (they weren't when I started working on them..) * Update the Openstack API to handle case where personality is set but null in the request to create a server * Fix a couple of things that assume that libvirt == kvm/qemu * Made fixed_range a required parameter for nova-manage network create. Changed default num_networks to 1; 1000 seems large * Fix a number of places in the volume driver where the argv hadn't been fully split * fix for lp712982, and likely a variety of other dashboard error handling issues. This fix simply causes the default error code for ApiError to be 'ApiError' rather than 'Unknown', which makes dashboard handle the error gracefully, and makes euca error output slightly prettier * Fix mis-merge * pep8 is hard * syntax error * create vifs before inject network info to remove rxtx_cap from network info (don't need to inject it) * Make utils.execute not overwrite std{in,out,err} args to Popen on retries.
Make utils.execute reject unknown kwargs * merged trunk, merged qos, slight refactor regarding merges * - general approach for openstack api versioning - openstack api version now preserved in request context - added view builder classes to handle os api responses - added imageRef and flavorRef to os api v1.1 servers - modified addresses container structure in os api v1.1 servers * Pep8 * Test changes * pep8 * Adjust test cases * pep8 * merge * Mark instance metadata as deleted when we delete the instance * Backfix of bugfix of issue blocking creating servers with metadata * Better comment for fault. Improved readability of two small sections * Add support for network QoS (ratelimiting) for XenServer. Rate is pulled from the flavor (instance_type) when constructing a vm * pep8 * I suck at merging * Now returns a 400 for a create server request with invalid hrefs for imageRef/flavorRef values. Also added tests * moving Versions app out of __init__.py into its own module; adding openstack versions tests; adding links to version entities * fixed code formatting nit * handle create and update requests, and update the base image service documentation to reflect the (defacto) behavior * Move the check for None personalities into the create method * Get the migration out * get api openstack test_images working * merge trunk * Improved exception handling * better implementation of try..except..else * merging parent branch lp:~bcwaldon/nova/osapi-flavors-1_1 * merging parent branch lp:~rackspace-titan/nova/openstack-api-version-split * iptables filter firewall changes merged * merged trunk * pep8 * adding serialization_metadata to encode links on flavors * merge with libvirt_multinic_nova * pep8 * teach glance image server get to handle timestamps * merge trunk * merge trunk * fixes for NWFilterFirewall and net injection * moving code out of try/except that would never trigger NotFound * handle timestamps in glance service detail * fixed IpTablesFirewal * Fixes lp736343 - 
Incorrect mapping of instance type id to flavor id in Openstack API * Comparisons to None should not use == or != * Pep8 error, oddly specific to pep8 v0.5 < x > v0.6 * Remove unconditional raise, probably left over from debugging * Mapping the resize status * Mapping the resize status * Fixed pep8 violation * adding comments; removing returns from build_extra; removing unnecessary backslash * refactor to simpler implementation * Foo * glance image service show testcases * oh come on * refactoring * Add tests and code to handle multiple ResponseExtension objects * Just use 'if foo' instead of 'if len(foo)'. It will fail as spectacularly if its not acting on a sequence anyways * bugfix * Remove unconditional raise, probably left over from debugging * No need to modify this test case function as well * refactored: network_info creation extracted to method * Call _create_personality_request_dict within the personalities_null test * Foo * more pep8 fixes * Switch back to 'is not None' for personality_files check. (makes mark happy) * pep8 fixes * 1) Update few comments where whitespace is missing after '#' 2) Update document so that copy right notice doesn't appear in generated document 3) Now using self.flag(...) instead of setting the flags like FLAGS.vmwareapi_username by direct assignment. 4) Added the missing double quote at the end a string in vim_util.py * more pep8 fixes * Fix up tests * Replaced capability flags with List * Fix more pep8 errors * Remove me from mailmap * Fix up setup container * Merged trunk * Update the Openstack API to handle case where personality is set but null in the request to create a server * Make smoketests' exit code reveal whether they were succesful * merge with trunk. moved scheduler_manager into manager. 
fixed tests * Set nbd to false when mounting the image * Fixed typo when I was trying to add test cases for lxc * Remove target_partition for setup_container but still hardcode because its needed when you inject the keys into the image * Remove nbd=FLAGS.use_cow_images for destroy container * Update mailmap * Fix a number of place in the volume driver where the argv hadn't been fully split * Fix pep8 errors * Update authors again * Improved exception handling: - catching appropriate errors (OSError, IOError, XenAPI.Failure) - reduced size of try blocks - moved exception handling code in separate method - verifing for appropriate exeception type in unit tests * get_console_output is not supported by lxc and libvirt * Update Authors and testsuite * Comparisons to None should not use == or != * Make error message match the check * Setting the api verion in the request in the auth middle is no longer needed. Also, common.get_api_version is no longer needed. As Eric Day noted, having versioned controllers will make that unnecessary * moving code out of try/except that would never trigger NotFound * Added mechanism for versioned controllers for openstack api versions 1.0/1.1. Create servers in the 1.1 api now supports imageRef/flavorRef instead of imageId/flavorId * fix up copyright * removed dead method * pep8 * pep8 * Remerge trunk * cleanup * added in network qos support for xenserver. 
Pull qos settings from flavor, use when creating instance * moved scheduler API check into db.api decorator * Add basic tests for lxc containers * Revert testsuite changes * MErge trunk * Fix a few of the more obvious non-errors while we're in here * hacks in place * Fix the errors that pylint was reporting on this file * foo * foo * commit before monster * Fix __init__ method on unit tests (they take a method_name kwarg) * Don't warn about C0111 (No docstrings) * In order to disable the messages, we have to use disable, not disable-msg * Avoid mixins on image tests, keeping pylint much happier * Use _ trick to hide base test class, thereby avoiding mixins and helping PyLint * hurr * hurr * get started testing * foo * Don't complain about the _ function being used * Again * pep8 * converted new lines from CRLF to LF * adding bookmarks links to 1.1 flavor entities * Reverting * Log the use of utils.synchronized * expanding osapi flavors tests; rewriting flavors resource with view builders; adding 1.1 specific links to flavors resources * Dumb * Unit test update * Fix lp727225 by adding support for personality files to the openstack api * Changes * fixes bug 735298: start of nova-compute not possible because of wrong xml paths to the //host/cpu section in "virsh capabilities", used in nova/virt/libvirt_conn.py * update image service documentation * merge lp:nova and resolve conflicts * User ids are strings, and are not necessarily == name. Also fix so that non-existent user gives a 404, not a 500 * Fudge * Keypairs are not required in the OpenStack API; don't require them! 
* Merging trunk * Add missing fallback chain for ipv6 * Typo fix * fixed pep8 issue * chchchchchanges * libvirt template and libvirt_conn.spawn modified in way that was proposed for xenapi multinic support * Re-commit r805 * Re-commit r804 * Refactored ZoneRedirect into ZoneChildHelper so ZoneManager can use this too * Don't generate insecure passwords where it's easy to use urandom instead * merging openstack-api-version-split * chchchchchanges * chchchchchanges * Fixes euca-get-ajax-console returning Unknown Error, by using the correct exception in get_open_port() logic. Patch from Tushar Patil * chchchchchanges * Revert commit that modified CA/openssl.cnf.tmpl * Comment update * Derped again * Move mapper code into the _action_ext_controllers and _response_ext_controllers methods * The geebees * forgot to return network info - teehee * refactored, bugfixes * merge trunk * moving code out of try/except that would never trigger NotFound * merge trunk * Logging statements * added new class Instances for managing instances added new method list in class Instances: * tweak * Stuff * Removing io_util.py. We now use eventlets library instead * Some typos * * Updated document vmware_readme.rst to mention VLAN networking * Corrected docstrings as per pep0257 recommendations. * Stream-lined the comments. * Updated code with locals() where ever applicable. * VIM : It stands for VMware Virtual Infrastructure Methodology. We have used the terminology from VMware. we have added a question in FAQ inside vmware_readme.rst in doc/source * New fake db: vmwareapi fake module uses a different set of fields and hence the structures required are different. Ex: bridge : 'xenbr0' does not hold good for VMware environment and bridge : 'vmnic0' is used instead. Also return values vary, hence went for implementing separate fake db. * Now using eventlet library instead and removed io_utils.py from branch.
* Now using glance.client.Client instead of homegrown code to talk to Glance server to handle images. * Corrected all mis-spelled function names and corresponding calls. Yeah, an auto-complete side-effect! * Implement top level extensions * Added i18n to error message * Checks locally before routing * Really fix testcase * More execvp fallout * Fix up testsuite for lxc * Error codes handled properly now * merge trunk * Adding unit test * Fix instance creation fail under use_ipv6=false and FlatManager * pep8 clean * Fix a couple of things that assume that libvirt == kvm/qemu * Updating gateway_v6 in _on_set_network_host() is not required for FlatManager * added correct path to cpu information (tested on a system with 1 installed cpu package) * Fix unknown exception error in euca-get-ajax-console * fixed pep8 errors (with version 0.5.0) * Use integer ids for (fake) users * req envirom param 'nova.api.openstack.version' should be 'api.version' * pep8 fixes * Fixed DescribeUser in ec2 admin client * openstack api 1.0 flavors resource now implemented; adding flavors request value testing * response working * Added tests back for RateLimitingMiddleware which now throw correctly serialized errors with correct error codes * Add ResponseExtensions * revised per code review * first pass openstack redirect working * Adding newlines for pep8 * Removed VIM specific stuff and changed copyright from 2010 to 2011 * Limits controller and testing with XML and JSON serialization * adding imageRef and flavorRef attributes to servers serialization metadata * Merged with trunk (and brian's previous fixes to fake auth) * Plugin * As suggested by Eric Day: * changed request.environ version key to more descriptive 'api.version' * removed python3 string formatting * added licenses to headers on new files * Tweak * A few fixes * pep8 * merge lp:nova * ignore differently-named nodes in personality and metadata parsing * wrap errors getting image ids from local image store * Moving the 
migration again * Updating paste config * pep8 * internationalization * Per Eric Day's suggest, the verson is not store in the request environ instead of the nova.context * s/onset_files/injected_files/g * pep8 fixes * Add logging to lock check * Now that the fix for 732866, stop working around the bug * Major cosmetic changes to limits, but little-to-no functional changes. MUCH better testability now, no more relying on system time to tick by for limit testing * Merged with trunk to get fix for bug 732866 * Merged trunk * modifying paste config to support v1.1; adding v1.1 entry in versions resource ( GET /) * Fixed lp732866 by catching relevant `exception.NotFound` exception. Tests did not uncover this vulnerability due to "incorrect" FakeAuthManager. I say "incorrect" because potentially different implementations (LDAP or Database driven) of AuthManager might return different errors from `get_user_from_access_key` * refactor onset_files quota checking * Code clean up. Removing _decorate_response methods. Replaced them with more explicit methods, _build_image, and _build_flavor * Use random.SystemRandom for easy secure randoms, configurable symbol set by default including mixed-case * merge lp:nova * Support testing the OpenStack API without key_pairs * merge trunk * Fixed bugs in bug fix (plugin call) * adding missing view modules; modifying a couple of servers tests to use enumerate * just fixing a small typo in nova-manage vm live-migration * exception fixup * Make Authors check account for tests being run with different os.getcwd() depending on how they're run. Add missing people to Authors * Removed duplicated tests * PEP8 0.5.0 cleanup * Really delete the loop * Add comments about the destroy container function * Mount the right device * Merged trunk * Always put the ipv6 fallback in place. 
FLAGS.use_ipv6 does not exist yet when the firewall driver is instantiated and the iptables manager takes care not to fiddle with ipv6 if not enabled * merged with trunk and removed conflicts * Merging trunk * Reapplied rename to another file * serverId returned as int per spec * Reapplied rename of Openstack -> OpenStack. Easier to do it by hand than to ask Bazaar to do it * Merged with trunk. Had to hold bazaar's hand as it got lost again * Derive unit test from standard nova.test.TestCase * pep8 fixes * adding flavors and images barebones view code; adding flavorRef and imageRef to v1.1 servers * Fixed problem with metadata creation (backported fix) * Clarify the logic in using 32 symbols * moving addresses views to new module; removing 'Data' from 'DataViewBuilder' * Don't generate insecure passwords where it's easy to use urandom instead * Added a views package and a views.servers module. For representing the response object before it is serialized * Make key_pair optional with OpenStack API * Moved extended resource code into the extensions.py module * Moving fixtures to a factory * Refactor setup contianer/destroy container * Fixing API per spec, to get unit-tests to pass * Implements basic OpenStack API client, ready to support API tests * Fix capitalization of ApiError (it was mistakenly called APIError) * added migration to repo * Clarified message when a VM is not running but still in DB * Implemented Hyper-V list_instances_detail function. Needs a cleanup by someone that knows the Hyper-V code * So the first of those tests doesn't pass. 
Removing as it looks like it was meant to be deleted * Added test and fixed up code so that it works * Fix for LP Bug #704300 * fixed keyword arg error * pep8 * added structure to virt.xenapi.vmops to support network info being passed in * Removed duplicated test, renamed same-named (but non-identical) tests * merge trunk * PEP8 cleanup * Fixes other half of LP#733609 * Initial implementation of refresh instance states * Add missing fallback chain for ipv6 * The exception is called "ApiError", not "APIError" * Implement action extensions * Include cpuinfo.xml.template in tarball * Adding instance_id as Glance image_property * Add fixes metadata * Include cpuinfo.xml.template in tarball * Merged test_network.py properly. Before I had deleted this file and added again, but this file status should be modified when you see the merged difference * removed conflicts and merged with trunk * Create v1_0 and v1_1 packages for the openstack api. Added a servers module to each. Added tests to validate the structure of ip addresses for a 1.1 request * committing to share * small typo in nova-manage vm live-migration * NTT's live-migration branch, merged with trunk, conflicts resolved, and migrate file renamed * Reverted unmodified files * Reverted unmodified files * Only include kernel and ramdisk ID in meta-data output if they are actually set * Test fixes and some typos * Test changes * Migration moved again * Compute test * merge trunk * merge trunk * Make nova-dhcpbridge output lease information in dnsmasq's leasesfile format * Merged my doc changes with trunk * Fixed pep8 errors * Fixed failing tests in test_xenapi * Fixes link to 2011.1 instad of just to trunk docs * fixes: 733137 * Add a unit test * Make utils.execute not overwrite std{in,out,err} args to Popen on retries. 
Make utils.execute reject unknown kwargs * Removed excess LOG.debug line * merge trunk * The extension name is constructed from the camel cased module_name + 'Extension' * Merged with trunk * Fix instructions for setting up the initial database * Fix instructions for setting up the initial database * merged with latest trunk and removed unwanted files * Removed _translate_keys() functions since it is no longer used. Moved private top level functions to bottom of module * Use a consistent naming scheme for XenAPI variables * oops * Review feedback * Review feedback * Review feedback * Some unit tests * Change capitalization of Openstack to OpenStack * fixed conflicts after merging with trunk with 787 * Adding a sidebar element to the nova.openstack.org site to point people to additional versions of the site * oops * Review feedback * Replace raw SQL calls through session.execute() with SQLAlchemy code * Review feedback * Remove vish comment * Remove race condition when refreshing security groups and destroying instances at the same time * Removed EOL whitespace in accordance with PEP-8 * Beginning of cleanup of FakeAuthManager * Make the fallback value None instead of False * Indentation adjustment (cosmetical) * Fixed lp732866 by catching relevant `exception.NotFound` exception. Tests did not uncover this vulnerability due to "incorrect" FakeAuthManager. I say "incorrect" because potentially different implementations (LDAP or Database driven) of AuthManager might return different errors from `get_user_from_access_key` * Merged trunk * This change adds the ability to boot Windows and Linux instances in XenServer using different sets of vm-params * merge trunk * New migration * Passes net variable as value of keyword argument process_input. 
Prior to the execvp patch, this was passed positionally * Changes the output of status in describe_volumes from showing the user as the owner of the volume to showing the project as the owner * Added support for ips resource: /servers/1/ips Refactored implmentation of how the servers response model is generated * merge trunk * Adds in multi-tenant support to openstack api. Allows for multiple accounts (projects) with admin api for creating accounts & users * merge trunk * remerge trunk (again). fix issues caused by changes to deserialization calls on controllers * Add config for osapi_extensions_path. Update the ExtensionManager so that it loads extensions in the osapi_extensions_path * process_input for tee. fixes: 733439 * Minor stylistic updates affecting indentation * Make linux_net ensure_bridge commands that add and remove ip addr's from devices/bridges work with with the latest utils.execute method (execvp) * Added volume api from previous megapatch * Made changes to xs-ipv6 code impacted because of addition of flatmanger ipv6 support * Need to set version to '1.0' in the nova.context in test code for tests to be happy * merge from trunk.. * Discovered literal_column(), which does exactly what I need * Merged trunk * Further vmops cleanup * cast execute commands to str * Remove broken test. At least this way, it'll actually fix the problem and be mergable * * Updated the readme file with description about VLAN Manager support & guest console support. Also added the configuration instructions for the features. * Added assumptions section to the readme file * * Modified raise statements to raise nova defined Exceptions. 
* Fixed Console errors and in network utils using HostSystem instead of Datacenter to fetch network list * Added support for vmwareapi module in nova/virt/connection.py so that vmware hypervisor is supported by nova * Removing self.loop to achieve synchronization * merge trunk * Moved vlan_interface flag in network.manager removed needless carriage return in vm_ops * Use self.instances.pop in unfilter_instance to make the check/removal atomic * Make Authors check account for tests being run with different os.getcwd() depending on how they're run. Add missing people to Authors * Make linux_net ensure_bridge commands that add and remove ip addr's from devices/bridges work with the latest utils.execute method (execvp) * _translate_keys now needs one more argument, the request object * Added version attribute to RequestContext class. Set the version in the nova.context object at the middleware level. Prototyped how we can serialize ip addresses based on the version * execvp: fix params * merge lp:nova * switch to a more consistent usage of onset_files variable names * re-added a test change I removed thinking it was related to removed code. It wasn't :> * merge trunk * Document known bug numbers by the code which is degraded until the bugs are fixed * fix minor typo * Fix a few nits jaypipes found in review * Pep8 / Style * Re-removed the code that was deleted upstream but somehow didn't get merged in. Bizarre! * More resize * Merged with upstream * pep8 fun * Test login. Uncovered bug732866 * Merged with upstream * Better logging, be more careful about when we throw login errors re bug732866 * Don't wrap keys and volumes till they're in the API * Add a new IptablesManager that takes care of all uses of iptables * Last un-magiced session.execute() replaced with SQLAlchemy code.. * PEP8 * Add basic test case * Implements basic OpenStack API client, ready to support API tests * Initial support for extension resources.
Tests * Partial revert of one conversion due to phantom magic exception from SQLAlchemy in unrelated code; convert all deletes * merge lp:nova * add docstring * fixed formatting and redundant imports * Cleaned up vmops * merge trunk * initializing instance power state on launch to 0 (fixes EC2 API bug) * Correct a misspelling * merge lp:nova * merge trunk * Use a FLAGS.default_os_type if available * Another little bit of fallout from the execvp branch * Updated the code to detect the exception by fault type. SOAP faults are embedded in the SOAP response as a property. Certain faults are sent as a part of the SOAP body as property of missingSet. E.g. NotAuthenticated fault. So we examine the response object for missingSet and try to check the property for fault type * Another little detail. * Fix a few things that were either missed in the execvp conversion or stuff that was merged after it, but wasn't updated accordingly * Introduces the ZoneManager to the Scheduler which polls the child zones and caches their availability and capabilities * One more thing. * merge trunk * Only include ramdisk and kernel id if they are actually set * Add bugfix metadata * More execvp fallout * Make nova.image.s3 catch up with the new execute syntax * Pass argv of dnsmasq and radvd to execute as individual args, not as a list * Split dnsmasq and radvd commands into their respective argv's * s/s.getuid()/os.getuid()/ * merge lp:nova and add stub image service to quota tests as needed * merged to trunk rev781 * fix pep8 check * merge lp:nova * Modifies S3ImageService to wrap LocalImageService or GlanceImageService. It now pulls the parts out of s3, decrypts them locally, and sends them to the underlying service. It includes various fixes for image/glance.py, image/local.py and the tests * add tests to verify the serialization of adminPass in server creation response * Fixes nova.sh to run properly the first time. 
We have to get the zip file after nova-api is running * minor fixes from review * merged trunk * fixed based on reviewer's comment * merge lp:nova * Moved umount container to disk.py and try to remove loopback when destroying the container * Merged trunk * Replace session.execute() calls performing raw UPDATE statements with SQLAlchemy code, with the exception of fixed_ip_disassociate_all_by_timeout() * Fixes a race condition where multiple greenthreads were attempting to resize a file at the same time. Adds tests to verify that the image caching call will run concurrently for different files, but will block other greenthreads trying to cache the same file * maybe a int instead ? * merge lp:nova * merge, resolve conflicts, and update to reflect new standard deserialization function signature * Fixes doc build after execvp patch * execvp: fix docs * initializing instance power state on launch to 0 (fixes EC2 API bug) * - Content-Type and Accept headers handled properly - Content-Type added to responses - Query extensions no long cause computeFaults - adding wsgi.Request object - removing request-specific code from wsgi.Serializer * Fixes bug 726359. Passes unit tests * merge lp:nova, fix conflicts, fix tests * fix the copyright notice in migration * execvp: cleanup * remove the semaphore when there is no one waiting on it * merge lp:nova and resolve conflicts * Hi guys * Update the create server call in the Openstack API so that it generates an 'adminPass' and calls set_admin_password in the compute API. This gets us closer to parity with the Cloud Servers v1.0 spec * Added naming scheme comment * Merged trunk * execvp passes pep8 * merge trunk * Add a decorator that lets you synchronise actions across multiple binaries. Like, say, ensuring that only one worker manipulates iptables at a time * renaming wsgi.Request.best_match to best_match_content_type; correcting calls to that function in code from trunk * merge lp:nova * Fixes bug #729400. 
Invalid values for offset and limit params in http requests now return a 400 response with a useful message in the body. Also added and updated tests * Add password parameter to the set_admin_password call in the compute api. Updated servers password to use this parameter * stuff * rearrange functions and add docstrings * Fixes uses of process_input * update authors file * merged trunk r771 * merge lp:nova * remove unneeded stubs * move my tests into their own testcase * replaced ConnectionFailed with Exception in tools/euca-get-ajax-console was not working for me with euca2tools 1.2 (version 2007-10-10, release 31337) * Fixed pep8 issues * remerge trunk * removed uneeded **kw args leftover from removed account-in-url changes * fixed lp715427 * fixed lp715427 * Fix spacing * merge lp:nova and resolve conflicts * remove superfluous trailing blank line * add override to handle xml deserialization for server instance creation * Added 'adminPass' to the serialization_metadata * merge trunk * Merged with trunk Updated exception handling according to spawn refactoring * Fixed pep8 violation in glance plugin * Added unit tests for ensuring VDI are cleaned up upon spawn failures * Stop assuming anything about the order in which the two processes are scheduled * make static method for testing without initializing libvirt * tests and semaphore fix for image caching * execvp: unit tests pass * merged to trunk rev 769 * execvp: almost passes tests * Refactoring nova-api to be a service, so that we can reuse it in unit tests * Added documentation about needed flags * a few fixes for the tests * Renamed FLAG.paste_config -> FLAG.api_paste_config * Sorted imports correctly * merge trunk * Fixes lp730960 - mangled instance creation in virt drivers due to improper merge conflict resolution * Use disk_format and container_format in place of image type * using get_uuid in place of get_record in _get_vm_opaqueref changed SessionBase._getter in fake xenapi in order to return 
HANDLE_INVALID failure when reference is not in DB (was NotImplementedException) * Merging trunk * Fixing tests * Pep8 fixes * Accidentally left some bad data around * Fix the bug where fakerabbit is doing a sort of prefix matching on the AMQP routing key * merge trunk * Use disk_format and container_format instead of image type * merged trunk * update manpage * update code to work with new container and disk formats from glance * modify nova manage doc * Nits * abstracted network code in the base class for flat and vlan * Remerged trunk. fixed conflict * Removes VDIs from XenServer backend if spawn process fails before vm rec is created * Added ability to remove networks on nova-manage command * Remove addition of account to service url * refactored up nova/virt/xenapi/vmops _get_vm_opaque_ref() no longer inspects the param to check to see if it is an opaque ref works better for unittests * This fix is an updated version of Todd's lp720157. Adds SignatureVersion checking for Amazon EC2 API requests, and resolves bug #720157 * * pep8 cleanups in migrations * a few bugfixes * Removed stale references to XenAPI * Moved guest_tool.py from etc/esx directory to tools/esx directory * Removed excess comment lines * Fix todo comment * execvp * Merged trunk * virt.xenapi.vmops._get_vm_opaque_ref changed vm to vm_ref and ref to obj * virt.xenapi.vmops._get_vm_opaque_ref assumes VM.get_record raises * add a delay before grabbing zipfile * Some more refactoring and a tighter unit test * Moved FLAGS.paste_config to its re-usable location * Merged with trunk and fixed conflict. Sigh * Converted tabs to spaces in bin/nova-api * A few more changes * Inhibit inclusion of stack traces in the logs UNLESS --verbose has been specified. 
This should help keep the logs compact, helping admins find the messages they're interested in (e.g., "Can't connect to MySQL server on '127.0.0.1' (111)") without having to sort through the stack traces, while still allowing developers to see those traces at will * Addresses bugs 704985 and 705453 by: * And unit tests * A few formatting niceties * First part of the bug fix * virt.xenapi.vmops._get_vm_opaque_ref checks for basestring instance instead of str * virt.xenapi.vmops._get_vm_opaque_ref exception caught properly * cleaned up virt.xenapi.vmops._get_vm_opaque_ref. more reliable approach to checking if param is an opaque ref. code is cleaner * deleted network_is_associated from nova.db api * move the images_dir out of the way when converting * pep8 * rework register commands based on review * added network_get_by_cidr method to nova.db api * Use IptablesManager.semaphore from securitygroups driver to ensure we don't apply half a rule set * Log failed command execution if there are more retry attempts left * Make iptables rules class __ne__ just be inverted __eq__ * Invalid values for offset and limit params in http requests now return a 400 response with a useful message in the body. Also added and updated tests * Create --paste_config flag defaulting to api-paste.ini and mv etc/nova-api.conf to match * Implementation for XenServer migrations. There are several places for optimization but I based the current implementation on the chance scheduler just to be safe. Beyond that, a few features are missing, such as ensuring the IP address is transferred along with the migrated instance. This will be added in a subsequent patch. 
Finally, everything is implemented through the Openstack API resize hooks, but actual resizing of the instance RAM and hard drive space is not yet implemented * Generate 'adminPass' and call set_password when creating servers * Merged with current trunk * merge trunk * Resolving excess conflicts due to criss-cross in branch history * Make "dhcpbridge init" output correctly formatted leases information * Rebased to nova revision 761 * Fixed some more pep8 errors * * Updated readme file with installation of suds-0.4 through easy_install. * Removed pass functions * Fixed pep8 errors * Few bug fixes and other commits * zipfile needs to be extracted after nova is running * make compute get the new images properly, fix a bunch of tests, and provide conversion commands * avoid possible string/int comparison problems * merge lp:nova * select cleanups * Merged to trunk rev 760, and fixed comment line indent according to Jay's comment * Fix renaming of instance fields using update_instance api method * apirequest -> apireq * * os_type is no longer `not null` * respond well if personality attribute is incomplete * Added initial support to delete networks nova-manage * move the id wrapping into cloud layer instead of image_service * added flatmanager unit testcases and renamed test_network.py to test_vlan_network.py * remove xml testing infrastructure since it is not feasible to use at present * refactor server tests to support xml and json separately * More unit tests and rabbit hooks * Fix renaming of instance fields using update_instance method * Fix api logging to show proper path and controller:action * merged trunk * * Tests to verify correct vm-params for Windows and Linux instances * More fixes * delete unnecessary DECLARE * Fixed based on reviewer's comment. Main changes are below. 1. get_vcpu_total()/get_memory_mb()/get_memory_mb_used() is changed for users who used non-linux environment. 2. 
test code added to test_virt * merge lp:nova * merge trunk * fixed wrong local variable name in vmops * Use %s for instance-delete logging in case instance_id comes through as a string * remove ensure_b64_encoding * add the ec2utils file i forgot * spawn a greenthread for image registration because it is slow * fix a couple issues with local, update the glance fake to actually return the same types as the real client, fix the image tests * make local image service work * use LocalImageServiceByDefault * Replace objectstore images with S3 image service backending to glance or local * Merged to trunk rev 759 * Merged trunk rev 758 * remove ra_server from model and fix migration issue while running unit tests * Removed properties added to fixed_ips by xs-ipv6 BP * altered ra_server name to gateway_v6 * merge lp:nova * rename onset_files to personality_files all the way down to compute manager * Changing output of status from showing the user as the owner, to showing the project * enforce personality quotas * localize a few error messages * Refactor wsgi.Serializer away from handling Requests directly; now require Content-Type in all requests; fix tests according to new code * pep8 * Renaming my migration yet again * Merged with Trunk * Use %s in case instance_id came through as a string * Basic notifications drivers and tests * adding wsgi.Controller and wsgi.Request testing; fixing format keyword argument exception * This fix changes a tag contained in the DescribeKeyPairs response from to so that Amazon EC2 access libraries which does more strict syntax checking can work with Nova * some comments are modified * Merged to trunk rev 757. Main changes are below. 1. Rename db table ComputeService -> ComputeNode 2. 
nova-manage option instance_type is reserved and we cannot use option instance, so change instance -> vm * adding wsgi.Request class to add custom best_match; adding new class to wsgify decorators; replacing all references to webob.Request in non-test code to wsgi.Request * Remerged trunk, fixed a few conflicts * Add in multi-tenant support in openstack api * Merged to trunk rev 758 * Fix regression in the way libvirt_conn gets its instance_types * Updated DescribeKeyPairs response tag checked in nova/tests/test_cloud.py * merged to trunk rev757 * Fixed based on reviewer's comments. Main changes are below. 1. Rename nova.compute.manager.ComputeManager.mktmpfile for better naming. 2. Several tests code in tests/test_virt.py are removed. Because it only works in libvirt environment. Only db-related testcode remains * Fix regression in the way libvirt_conn gets its instance_types * more rigorous testing and error handling for os api personality * Updated Authors and .mailmap * Merged to rev 757 * merges dynamic instance types blueprint (http://wiki.openstack.org/ConfigureInstanceTypesDynamically) and bundles blueprint (https://blueprints.launchpad.net/nova/+spec/flavors) * moved migration to 008 (sigh) * merged trunk * catching bare except: * added logging to instance_types for DB errors per code review * Very simple change checking for < 0 values in "limit" and "offset" GET parameters. If either are negative, raise a HTTPBadRequest exception. 
Relevant tests included * requested style change * Fixes Bug #715424: nova-manage : create network crashes when subnet range provided is not enough , if the network range cannot fit the parameters passed, a ValueError is raised * adding new source docs * corrected error message * changed _context * pep8 * added in req.environ for context * pep8 * fixed _context typo * coding style change per devcamcar review * fixed coding style per devcamcar review notes * removed create and delete method (and corresponding tests) from flavors.py * Provide the ability to rescue and unrescue a XenServer instance * Enable IPv6 injection for XenServer instances. Added addressV6, netmaskV6 and gatewayV6 columns to the fixed_ips table via migration #007 as per NTT FlatManager IPv6 spec * Updated docstrings * add support for quotas on file injection * Added IPv6 migrations * merge fixes * Inject IPv6 data into XenStore for instance * Change DescribeKeyPairs response tag from keypairsSet to keySet, and fix lp720133 * Port Todd's lp720157 fix to the current trunk, rev 752 * Changed _get_vm_opaqueref removing test-specific code paths * Removed excess TODO comments and debug line * initial commit of vnc support * merged trunk * Changed ra_server to gateway_v6 and removed addressv6 column from fixed_ips db table * * Added first cut of migration for os_type on instances table * Track os_type when taking snapshots * merging trunk * * Added ability to launch XenServer instances with per-os vm-params * test osapi server create with multiple personalities * ensure personality contents are b64 encoded * Merged trunk * Fixed pep8 issues, applied jaypipes suggestion * Rebased to nova revision 752 * Use functools.wraps to make sure wrapped method's metadata (docstring and name) doesn't get mangled * merge from trunk * Fake database module for vmware vi api. Includes false injection layer at the level of API calls. This module is base for unit tests for vmwareapi module. 
The unit tests run regardless of presence of ESX/ESXi server as compute provider in OpenStack * Review feedback * Updated the code to include support for guest consoles, VLAN networking for guest machines on ESX/ESXi servers as compute providers in OpenStack. Removed dependency on ZSI and now using suds-0.4 to generate the required stubs for VMware Virtual Infrastructure API on the fly for calls by vmwareapi module * Added support for guest console access for VMs running on ESX/ESXi servers as compute providers in OpenStack * Support for guest consoles for VMs running on VMware ESX/ESXi servers. Uses vmrc to provide the console access to guests * Minor modification to document. Removed excess flags * Moved the guest tools script that does IP injection inside VM on ESX server to etc/esx directory from etc/ directory * support adding a single personality in the osapi * corrected copyrights for new files * Updated with flags for nova-compute, nova-network and nova-console. Added the flags, --vlan_interface= --network_driver=nova.network.vmwareapi_net [Optional, only for VLAN Networking] --flat_network_bridge= [Optional, only for Flat Networking] --console_manager=nova.console.vmrc_manager.ConsoleVMRCManager --console_driver=nova.console.vmrc.VMRCSessionConsole [Optional for OTP (One time Passwords) as against host credentials] --vmwareapi_wsdl_loc=/vimService.wsdl> * Fixed trunk merge issues * Merged trunk * At previous commit, I forgot to erase conflict - fixed it * merged to trunk rev 752 * Rebased at lp:nova 759 * test_compute is changed b/c lack of import instance_types * rename db migration script * 1. merged trunk rev749 2. rpc.call returns '/' as '\/', so nova.compute.manager.mktmpfile, nova.compute.manager.confirm.tmpfile, nova.scheduler.driver.Scheduler.mounted_on_same_shared_storage are modified followed by this changes. 3. 
nova.tests.test_virt.py is modified so that other teams modification is easily detected since other team is using nova.db.sqlalchemy.models.ComputeService * updated docs * updated docs * Fixed xenapi tests Gave up on clever things with map stored as string in xenstore. Used ast.literal_eval instead * This branch implements the openstack-api-hostid blueprint: "Openstack API support for hostId" * refactored adminclient * No reason to initialize metadata twice * Unit tests fixed partially. Still need to address checking data injected into xenstore need to convert string into dict or similar. Also todo PEP8 fixes * replaced ugly INSTANCE_TYPE constant with (slightly less ugly) stubs * add test for instance creation without personalities * fixed pep8 * Add a lock_path flag for lock files * refactored nova-manage list (-all, ) and fixed docs * moved nova-manage flavors docs * Edited `nova.api.openstack.common:limited` method to raise an HTTPBadRequest exception if a negative limit or offset is given. I'm not confident that this is the correct approach, because I guess this method could be called out of an API/WSGI context, but the method *is* located in the OpenStack API module and is currently only used in WSGI-capable methods, so we should be safe * merge trunk * moving nova-manage integration tests to smoke tests * Wrapped the instance_types comparison with an int and added a test case for it. Removed the inadvertently added newline * Rename migration to coincide with latest trunk changes * Adds VHD build support for XenServer driver * Suppress stack traces unless --verbose is specified * Removed extraneous newline * Merging trunk to my branch. Fixed a conflict in servers.py * Fixed obvious errors with flags. 
Note: tests still fail * Merging trunk * Fixed default value for xenapi_agent_path flag * 1) merge trunk 2) removed preconfigure_xenstore 3) added jkey for broadcast address in inject_network_info 4) added 2 flags: 4.1) xenapi_inject_image (default True) This flag allows for turning off data injection by mounting the image in the VDI (agreed with Trey Morris) 4.2) xenapi_agent_path (default /usr/bin/xe-update-networking) This flag specifies the path where the agent should be located. It makes sense only if the above flag is True. If the agent is found, data injection is not performed * Wrap IptablesManager.apply() calls in utils.synchronized to avoid having different workers step on each other's toes * merge trunk * Add utils.synchronized decorator to allow for synchronising method entrance across multiple workers on the same host * execvp * execvp * execvp * execute: shell=True removed * Add lxc to the libvirt tests * Clean up the mount points when it shutsdown * Add ability to mount containers * Add lxc libvirt driver * Rebased to Nova revision 749 * added listing of instances running on a specific host * fixed FIXME * beautification.. * introduced new flag "max_nbd_devices" to set the number of possible NBD devices * renamed flag from maximum_... to max_.. * replaced ConnectionFailed with Exception in tools/euca-get-ajax-console was not working for me with euca2tools 1.2 (version 2007-10-10, release 31337) * Did a pull from trunk to be sure I had the latest, then deleted the test directory. I guess it appeared when I started using venv. Doh * Deleting test dir from a pull from trunk * introduced new flag "maximum_nbd_devices" to set the number of possible NBD devices * reverted my changes from https://code.launchpad.net/~berendt/nova/lp722554/+merge/50579 and reused the existing db api methods to add the disabled services. 
Looks much better now :) * add timeout and retry for ssh * Makes nova-api correctly load the default flagfile * force memcache key to be str * only create auth connection if cache misses * No reason to dump a stack trace just because the AMQP server is unreachable; an error notification should be sufficient * Add error message to the error report so we know why the AMQP server is unreachable * No reason to dump a stack trace just because we can't reach the AMQP server; it ends up being just noise * DescribeInstances modified to return ipv6 fixed ip address in case of flatmanager * Bootlock original instance during rescue * merge with zones2 fixes and trunk * check if QUERY_STRING is empty or not before building the request URL in bin/nova-ajax-console-proxy * trunk merge * API changed to new style class * trunk merge, pip-requires and novatools to novaclient changes * Fixes FlatDHCP by making it inherit from NetworkManager and moving some methods around * fixed: bin/nova-ajax-console-proxy:66:19: W601 .has_key() is deprecated, use 'in' * merged trunk * add a caching layer to the has_role call to increase performance * Removed unnecessary compute import * Set rescue instance VIF device * use default flagfile in nova-api * Add tests for 718999, fix a little brittle code introduced by the committed fix * Rename test to describe what it actually does * Copy over to current trunk my tests, the 401/500 fix, and a couple of fixes to the committed fix which was actually brittle around the edges.. 
* I'm working on consolidating install instructions specifically (they're the most asked-about right now) and pointing to the docs.openstack.org site for admin docs * check if QUERY_STRING is empty or not before building the request URL * Teardown rescue instance * Merged trunk * Create rescue instance * Merging trunk, conflicts fixed * Verify status of image is active * Rebased at lp:nova 740 * merged with trunk * Cleanup db method names for dealing with auth_tokens to follow standard naming pattern * The proposed bug fix stubs out the _is_vdi_pv routine for testing purposes * revert a few unnecessary changes to base.py * removed unused references to unittest * add customizable tempdir and remove extra code * Pass id of token to be deleted to the db api, not the actual object * Removing unnecessary headers * Rename auth_token db methods to follow standard * Removing unnecessary nokernel stuff * Adding _make_subprocess function * No longer uses image/ directory in tarball * Merging trunk, small fixes * make smoketests run with nose * IPV6 FlatManager changes * Make tests start with a clean database for every test * merge trunk * merge clean db * merged trunk * sorry, pep8 * adds live network injection/reconfiguration. Some refactoring * forgot to get vm_opaque_ref * new tests * service capabilities test * moved network injection and vif creation to above vm start in vmops spawn * Merged trunk * nothing * Removes processName from debug output since we aren't using multiprocessing and it doesn't exist in python 2.6.1 * Add some methods to the ec2 admin api to work with VPNs. 
Also implements and properly documents the get_hosts method * Fix copypasta pep8 violation * moved migrate script to 007 (again..sigh) * Don't require metadata (hotfix for bug 724143) * merge trunk * Merged trunk * Updated email in Authors * Easy and effective fix for getting the DNS value from flag file, when working in FlatNetworking mode * Some first steps towards resolving some of the issues brought up on the mailing list related to documenting flags * Support HP/LeftHand SANs. We control the SAN by SSHing and issuing CLIQ commands. Also improved the way iSCSI volumes are mounted: try to store the iSCSI connection info in the volume entity, in preference to doing discovery. Also CHAP authentication support * This fix checks whether the boot/guest directory exists on the hypervisor. If that is not the case, it creates it * Globally exclude *.pyc files from generated tarballs * stubbing out _is_vdi_pv for test purposes * merge trunk * Globally exclude .pyc files from tarball contents * Get DNS value from Flag, when working in FlatNetworking mode. Passing the flag was ineffective previously. This is an easy fix. I think we would need nova-manage to accept dns also from command line * xenapi plugin function now checks whether /boot/guest already exists. If not, it creates the directory * capability aggregation working * fix check for existing port 22 rule * move relevant code to baseclass and make flatdhcp not inherit from flat * Hotfix to not require metadata * Documentation fixes so that output looks better * more smoketest fixes * Removed Milind from Authors file, as individual Contributer's License Agreement & Ubuntu code of conduct are not yet signed * Fixed problems found in localized string formatting. Verified the fixes by running ./run_tests.sh -V * Change missed reference to run_tests.err.log * PEP 257 fixes * Merged with trunk * fix missed err.log * Tests all working again * remove extra flag in admin tests * Revert commit 709. 
This fixes issues with the Openstack API causing 'No user for access key admin' errors * Adds colors to output of tests and cleans up run_tests.py * Reverted bad-fix to sqlalchemy code * Merged with trunk * added comments about where code came from * merge and fix conflicts * Prevent logging.setup() from generating a syslog handler if we didn't request one (breaks on mac) * fix pep8 * merged upstream * Changed create from a @staticmethod to a @classmethod * revert logfile redirection and make colors work by temporarily switching stdout * merged trunk * add help back to the scripts that don't use service.py * Alphabetize imports * remove processName from debug output since we aren't using multiprocessing and it doesn't exist in python 2.6.1 * updates to nova.flags to get help working better * Helper function that supports XPath style selectors to traverse an object tree e.g * tests working again * Put back the comments I accidentally removed * Make sure there are two blank links after the import * Rename minixpath_select to get_from_path * Fixes the describe_availability_zones to use an elevated context when getting services and the db calls to pass parameters correctly so is_admin check works * Fix pep8 violation (trailing whitespace) * fix describe_availability_zones * Cope when we pass a non-list to xpath_select - wrap it in a list * Fixes existing smoketests and splits out sysadmin tests from netadmin tests * Created mini XPath implementation, to simplify mapping logic * move the deletion of the db into fixtures * merged upstream * Revert commit 709. This fixes issues with the Openstack API causing 'No user for access key admin' errors * put the redirection back in to run_tests.sh and fix terminal colors by using original stdout * Deleted trailing whitespace * Fixes and optimizes filtering for describe_security_groups. 
Also adds a unit test * merged trunk * fix for failing describe_instances test * merged trunk * use flags for sqlite db names and fix flags in dhcpbridge * merged trunk * Fixes lp715424, code now checks network range can fit num_networks * network_size * The proposed branch prevents FlatManager from executing network initialisation tasks contained in linux_net.init_host(), which are unnecessary when flat networking is used * Adds some features to run_tests.sh: - if it crashes right away with a short error log, print that directly - allow specifying tests without the nova.tests part * The kernel_id and the ramdisk_id are optional, yet the OpenStack API was requiring them. In addition, with the ObjectStore these properties are not under 'properties' (as they are with Glance) * merged trunk * merge trunk * Initial support for per-instance metadata, through the OpenStack API. Key/value pairs can be specified at instance creation time and are returned in the details view. Support limits based on quota system * Merged trunk * Removed pass * Changed unit test to refer to compute API, per Todd's suggestion. Avoids needing to extend our implementation of the EC2 API * Fixes lots of errors in the unit tests * dump error output directly on short import errors * allow users to omit 'nova.tests' with run_tests * Merged trunk * * Took care of localization of strings * Addressed all one liner docstrings * Added Sateesh, Milind to Authors file * Fixed pep8 errors * FlatManager.init_host now inhibits call to method in superclass. 
Floating IP methods have been redefined in FlatManager to raise NotImplementedError * speed up network tests * merged trunk * move db creation into fixtures and clean db for each test * fix failures * remove unnecessary stubout * Lots of test fixing * Update the admin client to deal with VPNs and have a function host list * Removed unused import & formatting cleanups * Exit with exit code 1 if conf cannot be read * Return null if no kernel_id / ramdisk_id * Reverted change to focus on the core bug - kernel_id and ramdisk_id are optional * Make static create method behave more like other services * merged fix-describe-groups * add netadmin smoketests * separate out smoketests and add updated nova.sh * fix and optimize security group filtering * Support service-like wait behaviour for API service * Added create static method to ApiService * fix test * Refactoring nova-api to be a service, so that we can reuse it in tests * test that shows error on filtering groups * don't make a syslog handler if we didn't ask for one * Don't blindly concatenate queue name if second portion is None * Missing import for nova.exceptions (!) * At the moment --pidfile is still used in some scripts in contrib/puppet/. I don't use puppet, please check if there are possible side effects * We're not using prefix matching on AMQP, so fakerabbit shouldn't be doing it! * merge fixes from anso branch * merged trunk * Removed block of code that resurrected itself in the last merge * Added Andy Southgate to the Authors file * Merged with trunk, including manual conflict resolution in nova/virt/disk.py and nova/virt/xenapi/vmops.py * Put the whitespace back *sigh* * Remove duplicate import gained across a merge * Rename "SNATTING" chain to "snat" * Fix DescribeRegion answer by introducing '{ec2,osapi}_listen' flags instead of overloading {ec2,osapi}_host. Get rid of paste_config_to_flags, bin/nova-combined. 
Adds debug FLAGS dump at start of nova-api * Also remove nova-combined from setup.py * Fixed some docstring * Get rid of nova-combined, see rationale on ML * Merged trunk * no, really fix lp721297 this time * Updated import statements according to HACKING guidelines. Added docstrings to each document. Verified pep8 over all files. Replaced some constants by enums accordingly. Still little bit more left in vm_util.py and vim_util.py files * Add flags for listen_port to nova-api. This allows us to listen on one port, but return another port (for a proxy or load balancer) in calls like describe_regions, etc * Fix tiny mistakes! (remove unnecessary comment, etc) * Fixed based on reviewer's comment. 1. Change docstrings format 2. Fix comment grammar mistake, etc * PEP8 again * Account for the fact that iptables-save outputs rules with a space at the end. Reverse the rule deduplication so that the last one takes precedence * floating-ip-snat was too long. Use floating-snat instead * PEP8 adjustments * Remove leftover from debugging * Add a bunch of tests for everything * Fixes various issues regarding verbose logging and logging errors on import * merged trunk * Add a new chain, floating-ip-snat, at the top of SNATTING, so that SNATting for floating ips gets applied before the default SNAT rule * Address some review comments * Some quick test cleanups, first step towards standardizing the way we start services in tests * use a different flag for listen port for apis * added disabled services to the list of displayed services in bin/nova-manage * merged to trunk rev709. NEEDS to be fixed based on 3rd reviewer's comment * just add 005_add_live_migration.py * Fixed based on reviewer's comment. 1. DB schema change vcpu/memory/hdd info were stored into Service table. 
but reviewer pointed out to me creating new table is better since Service table has too much columns * update based on prereq branch * update based on prereq branch * fixed newline and moved import fake_flags into run_tests where it makes more sense * merged fix * remove changes to test db * Fixed my confusion in documenting the syntax of iSCSI discovery * pretty colors for logs and a few optimizations * Renamed db_update to model_update, and lots more documentation * modify tests to use specific hosts rather than default * Merged with head * remove keyword argument, per review * move test_cloud to use start_service, too * add a start_service method to our test baseclass * add a test for rpc consumer isolation * Merged with trunk * The OpenStack API was using the 'secret' as the 'access key'. There is an 'access key' and there is a 'secret key'. Access key ~= username. Secret key ~= password. This fix is necessary for the OpenStack Python API bindings to log in * Add a bunch of docs for the new iptables hotness * fix pep8 and remove extra reference to reset * switch to explicit call to logging.setup() * merged trunk * Adds translation catalogs and distutils.extra glue code that automates the process of compiling message catalogs into .mo files * Merged with trunk * make sure that ec2 response times are xs:dateTime parsable * Removing pesky DS_Store files too. Begone * Updated to remove built docs * Removing duplicate installation docs and adding flag file information, plus pointing to docs.openstack.org for Admin-audience docs * introducing a new flag timeout_nbd for manually setting the time in seconds for waiting for an upcoming NBD device * use tests.sqlite so it doesn't conflict with running db * cleanup from review * Duh, continue skips iteration, not pass. 
#iamanidiot * reset to notset if level isn't in flags * Enable rescue testing * PEP8 errors and remove check in authors file for nova-core, since nova-core owns the translation export branch * Merged trunk * Stub out VM create * * Removed VimService_services.py & VimService_services_types.py to reduce the diffs to normal. These 2 files are auto-generated files containing stubs for VI SDK API end points. The stub files are generated using ZSI SOAP stub generator module ZSI.commands.wsdl2py over Vimservice.wsdl distributed as part of VMware Virtual Infrastructure SDK package. To not include them in the repository we have few options to choose from, 1) Generate the stub files in build time and make them available as packages for distribution. 2) Generate the stub files in installation/configuration time if ESX/ESXi server is detected as compute provider. Further to this, we can try to reduce the size of stub files by attempting to create stubs only for the API end points required by the module vmwareapi * introducing a new flag timeout_nbd for manually setting the time in seconds for waiting for an upcoming NBD device * * Removed nova/virt/guest-tools/guest_tool.bat & nova/virt/guest-tools/guest_tool.sh as guest_tool.py can be invoked directly during guest startup * More PEP-8 * Wrap ipv6 rules, too * PEP-8 fixes * Allow non-existing rules to be removed * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NOVA-CORE DEVELOPERS SHOULD NOT REVIEW THIS MERGE PROPOSAL ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * merged with nova trunk revision #706 * Fix typo * Unfilter instance correctly on termination * move exception hook into appropriate location and remove extra stuff from module namespace * Also remove rules that jump to deleted chains * simplify logic for parsing log level flags * reset all loggers on flag change, not just root * add docstring to reset method * removed extra comments and initialized from flags * fix nova-api as well * Fix 
refresh sec groups * get rid of initialized flag * clean up location of method * remove extra references to logging.basicConfig * move the fake initialized into fake flags * fixes for various logging errors and issues * fanout works * fanout kinda working * service ping working * scheduler manager * tests passing * start of fanout * merge trunk * previous trunk merge * puppet scripts only there as an example, should be moved to some other place if they are still necessary * Various optimizations of lookups relating to users * If there are no keypairs registered on a create call, output a useful error message rather than an out-of-range exception * Fixes vpn images to use kernel and ramdisk specified by the image * added elif branch to handle the conversion of datetime instances to isoformat instead of plain string conversion * Calculate time correctly for ec2 request logs * fix ec2 launchtime response not in iso format bug * pep8 leftover * move from datetime.datetime.utcnow -> utils.utcnow * pass start time as a param instead of making it an attribute * store time when RequestLogging starts instead of using context's time * Fix FakeAuthManager so that unit tests pass; I believe it was matching the wrong field * more optimizations context.user.id to context.user_id * remove extra * replace context.user.is_admin() with context.is_admin because it is much faster * remove the weird is_vpn logic in compute/api.py * Don't crash if there's no 'fixed_ip' attribute (was returning None, which was unsubscriptable) * ObjectStore doesn't use properties collection; kernel_id and ramdisk_id aren't required anyway * added purge option and tightened up testing * Wrap iptables calls in a semaphore * pep8 * added instance types purge test * Security group fallback is named sg-fallback * Rename a few things for more clarity * Port libvirt_conn.IptablesDriver over to use linux_net.IptablesManager * merged trunk * Typo fix * added admin api call for injecting network info, added api 
test for inject network info * If there are no keypairs, output a useful error message * Fix typo (?) in authentication logic * Changing type -> image_type * Pep8 cleanup * moved creating vifs to its own function, moved inject network to its own function * sandy y u no read hacking guide and import classes? * Typo fix * XenAPI tests * Introduce IptablesManager in linux_net. Port every use of iptables in linux_net to it * Use WatchedFileHandler instead of RotatingFileHandler * Resize compute tests * Support for HP SAN * Merging trunk to my branch. Fixed conflicts in Authors file and .mailmap * Rename migration 004 => 005 * Added Author and tests * Merging trunk * fixups backed on merge comments * Fixed testing mode leftover * PEP8 fix * Remove paste_config_to_flags since it's now unused * Port changes to nova-combined, rename flags to API_listen and API_listen_port * Set up logging once FLAGS properly read, no need to redo logging config anymore (was inoperant anyway) * Switch to API_listen and API_listen_port, drop wsgi.paste_config_to_flags * added new class Instances to manage instances and added a new listing method into the class * added functionality to list only fixed ip addresses of one node and added exception handling to list method * Use WatchedFileHandler instead of RotatingFileHandler * Incorporating minor cleanups suggested by Rick Harris: * Use assertNotEqual instead of assertTrue * Use enumerate function instead of maintaining a counter * Resize compute tests * fixed based on reviewer's comment. 1. erase wrapper function(remove/exists/mktempfile) from nova.utils. 2. nova-manage service describeresource(->describe_resource) 3. nova-manage service updateresource(->update_resource) 4. 
erase "my mistake print" statement * Tests * pep8 * merged trunk * Makes FlatDHCPManager clean up old fixed_ips like VlanManager * Correctly pass the associate paramater for project_get_network through the IMPL layer in the db api * changed migration to 006 for trunk compatibility * completed doc and added --purge option to instance type delete * moved inject network info to a function which accepts only instance, and call it from reset network * Test changes * Merged with trunk * Always compare incoming flavor_id as an int * Initial support for per-instance metadata, though the OpenStack API. Key/value pairs can be specified at instance creation time and are returned in the details view. Support limits based on quota system * a few changes and a bunch of unit tests * remove leftover periodic tasks * Added support for feature parity with the current Rackspace Cloud Servers practice of "injecting" files into newly-created instances for configuration, etc. However, this is in no way restricted to only writing files to the guest when it is first created * missing docstring and fixed copyrights * move periodic tasks to base class based on class variable as per review * Correctly pass the associate paramater to project_get_network * Add **kwargs to VlanManager's create_networks so that optional args from other managers don't break * Uncommitted changes using the wrong author, and re-committing under the correct author * merge with zone phase 1 again * Added http://mynova/v1.0/zones/ api options for add/remove/update/delete zones. child_zones table added to database and migration. Changed novarc vars from CLOUD_SERVERS_* to NOVA_* to work with novatools. 
See python-novatools on github for help testing this * pip requires novatools * copyright notice * moved 003_cactus.py migration file to 004_add_instance_types.py to avoid naming collision with new trunk migration * Add **kwargs to VlanManager's create_networks so that optional args from other managers don't break * merge with zone phase 1 * changed from 003-004 migration * merged lp:~jk0/nova/dynamicinstancetypes * Merged trunk * merge from dev * fixed strings * multi positional string fix * Use a semaphore to ensure we don't run more than one iptables-restore at a time * Fixed unit test * merge with trunk * fixed zone list tests * Make eth0 the default for the public_interface flag * Finished flavor OS API stubs * Re-alphabetise Authors, move extra addresses into .mailmap * Re-alphabetise Authors, move extra addressses into .mailmap * Move the ramdisk logging stuff * Hi guys * fixup * zone list now comes from scheduler zonemanager * Stop blowing away the ramdisk * Rebased at lp:nova 688 * Update the Openstack API so that it returns 'addresses' * I have a bug fix, additional tests for the `limiter` method, and additional commenting for a couple classes in the OpenStack API. Basically I've just tried to jump in somewhere to get my feet wet. Constructive criticism welcome * added labels to networks for use in multi-nic added writing network data to xenstore param-list added call to agent to reset network added reset_network call to openstack api * Add a command to nova-manage to list fixed ip's * Foo * comments + Englilish, changed copyright in migration, removed network_get_all from db.api (vestigial) * Adding myself to Authors and .mailmap files * example: * Switched mailmap entries * Supporting networks with multiple PIFs. 
pep8 fixes unit tests passed * Merged kpepple * Merged trunk * More testing * Block diagram for vmwareapi module * added entry in the category list * Added vmwareapi module to add support of hypervisor vmware-vsphere to OpenStack * added new functionality to list all defined fixed ips * added more I18N * Merged trunk and fixed conflict with other Brian in Authors * removing superfluous pass statements; replacing list comprehension with for loop; alphabetizing imports * Rebased at lp:nova 687 * added i18n of 'No networks defined' * Make eth0 the default for FLAGS.public_interface * Typo fixes * Merging trunk * Adding tests * first crack at instance types docs * merge trunk * style cleanup * polling tests * Use glance image type to determine disk type * Minor change. Adding a helper function stub_instance() inside the test test_get_all_server_details_with_host for readability * Fixes ldapdriver so that it works properly with admin client. It now sanitizes all unicode data to strings before passing it into ldap driver. This may need to be rethought to work properly for internationalization * Moved definition of return_servers_with_host stub to inside the test_get_all_server_details_with_host test * fixed * Pep8 fixes * Merging trunk * Adding basic test * Better exceptions * Update to our HACKING doc to add examples of our docstring style * add periodic disassociate from VlanManager to FlatDHCPManager * Flipped mailmap entries * -from migrate.versioning import exceptions as versioning_exceptions + +try: + from migrate.versioning import exceptions as versioning_exceptions +except ImportError: + try: + # python-migration changed location of exceptions after 1.6.3 + # See LP Bug #717467 + from migrate import exceptions as versioning_exceptions + except ImportError: + sys.exit(_("python-migrate is not installed. 
Exiting.")) * Accidently removed myself from Authors * Added alternate email to mailmap * zone manager tests * Merged to trunk * added test for reset_network to openstack api tests, tabstop 5 to 4, renamed migration * Use RotatingFileHandler instead of FileHandler * pep8 fixes * sanitize all args to strings before sending them to ldap * Use a threadpool for handling requests coming in through RPC * Typos * Derp * Spell flags correctly (i.e. not in upper case) * Fixed merge error * novatools call to child zones done * novatools call to child zones done * Putting glance plugin under pep8 control * fixed authors, import sys in migration.py * Merged trunk * First commit of working code * Stubbed out flavor create/delete API calls * This implements the blueprint 'Openstack API support for hostId': https://blueprints.launchpad.net/nova/+spec/openstack-api-hostid Now instances will have a unique hostId which for now is just a hash of the host. If the instance does not have a host yet, the hostId will be '' * Fix for bug #716847 * merge trunk * First commit for xenapi-vlan-networking. Totally untested * added functionality to nova-manage to list created networks * Add back --logdir=DIR option. If set, a logfile named after the binary (e.g. nova-api.log) will be kept in DIR * Fix PEP-8 stuff * assertIsNone is a 2.7-ism * This branch should resolve nova bug #718675 (https://bugs.launchpad.net/nova/+bug/718675) * Added myself to the authors file * I fail at sessions * I fail at sessions * Foo * hurr durr * Merging trunk part 1 * stubbed out reset networkin xenapi VM tests to solve domid problem * foo * foo * Adding vhd hidden sanity check * Fixes 718994 * Make rpc thread pool size configurable * merge with trunk * fail * Fixing test by adding stub for get_image_meta * this bug bit me hard today. 
pv can be None, which does not translate to %d and this error gets clobbered by causing errors in the business in charge of capturing output and reporting errors * More pep8 fixes * Pep8 fixes * Set name-label on VDI * Merge * Don't hide RotatingFileHandler behind FileHandler's name * Refactor code that decides which logfile to use, if any * Fixing typo * polling working * Using Nova style nokernel * changed d to s * merge with trunk * More plugin lol * moved reset network to after boot durrrrr.. * Don't hid RotatingFileHandler behind FileHandler's name * removed flag --pidfile from nova/services.py * Added teammate Naveed to authors file for his help * plugin lol * Plugin changes * merging trunk back in; updating Authors conflict * Adding documentation * Regrouping methods so they make sense * zone/info works * Refactoring put_vdis * Adding safe_find_sr * Merged lp:nova * Fixes tarball contents by adding missing scripts and files to setup.py / MANIFEST.in * Moving SR path code outside of glance plugin * When re-throwing an exception, use "raise", not "raise e". This way we don't lose the stack trace * Adding more documentation, code-cleanup * Replace placeholders in nova.pot with some actual values * The proposed fix puts a VM which fails to spawn in a (new) 'FAILED' power state. It does not perform a clean-up. This because the user needs to know what has happened to the VM he/she was trying to run. Normally, API users do not have access to log files. In this case, the only way for the user to know what happened to the instance is to query its state (e.g.: doing euca-describe-instances). 
If we perform a complete clean-up, no information about the instance which failed to spawn will be left * Some trivial cleanups in context.py, mostly just a test of using the updated git-bzr-ng * Use eventlet.green.subprocess instead of standard subprocess * derp * Better host acquisition * zones merge * fixed / renamed migration scripts * Merged trunk * Update .pot file with source file and line numbers after running python setup.py build * Adds Distutils.Extra support, removes Babel support, which is half-baked at best * Pull in .po message catalogs from lp:~nova-core/nova/translations * Fix sporadically failing unittests * Missing nova/tests/db/nova.austin.sqlite file * Translations will be shipped in po/, not locale/ * Adding missing scripts and files to setup.py / MANIFEST.in * Fixes issues when running euca-run-instances and euca-describe-image-attribute against the latest nova/trunk EC2 API * initial * Naïve attempt at threading rpc requests * Beautify it a little bit, thanks to dabo * OS-55: Moved conn_common code into disk.py * Break out of the "for group in rv" loop in security group unit tests so that we are use we are dealing with the correct group * Tons o loggin * merged trunk * Refactored * Launchpad automatic translations update * trunk merge * better filtering * Adding DISK_VHD to ImageTypes * Updates to that S3ImageService kernel_id and ramdisk_id mappings work with EC2 API * fixed nova-combined debug hack and renamed ChildZone to Zone * plugin * Removing testing statements * Adds missing flag that makes use_nova_chains work properly * bad plugin * bad plugin * bad plugin * fixed merge conflict * First cut on XenServer unified-images * removed debugging * fixed template and added migration * better filtering * Use RotatingFileHandler instead of FileHandler * Typo fixes * Resurrect logdir option * hurr * Some refactoring * hurr * Snapshot correctly * Added try clause to handle changed location of exceptions after 1.6.3 in python-migrate LP Bug 
#717467 * Use eventlet.green.subprocess instead of standard subprocess * Made kernel and ram disk be deleted in xen api upon instance termination * Snapshot correctly * merged recent version. no conflict, no big/important change to this branch * wharrgarbl * merge jk0 branch (with trunk merge) which added additional columns for instance_types (which are openstack api specific) * corrected model for table lookup * More fixes * Derp * fix for bug #716847 - if a volume has not been assigned to a host, then delete from db and skip rpc * added call to reset_network from openstack api down to vmops * merging with trunk * Got rid of BadParameter, just using standard python ValueError * Merged trunk * support for multiple IPs per network * Fix DescribeRegion answer by using specific 'listen' configuration parameter instead of overloading ec2_host * Fixed tables creation order and added clearing db after errors * Modified S3ImageService to return the format defined in BaseService to allow EC2 API's DescribeImages to work against Glance * re-add input_chain because it got deleted at some point * Launchpad automatic translations update * Fixes a typo in the auth checking for DescribeAvailabilityZones * Fixes describe_security_groups by forcing it to return a list instead of a generator * return a list instead of a generator from describe_groups * Hi guys * Added missing doc string and made a few style tweaks * fix typo in auth checking for describe_availability_zones * now sorting by project, then by group * Launchpad automatic translations update * Made a few tweaks to format of S3 service implementation * Merged trunk * First attempt to make all image services use similar schemas * fix :returns: and add pep-0257 * Preliminary fix for issue, need more thorough testing before pushing to lp * Launchpad automatic translations update * More typos * More typos * More typos * More typos * More typos * fixed exceptions import from python migrate * Cast to host * This fixes a 
lazy-load issue in describe-instances, which causes a crash. The solution is to specifically load the network table when retrieving an instance * added instance_type_purge() to actually remove records from db * updated tests and added more error checking * Merged trunk * more error checking on inputs and better errors returned * Added more columns to instance_types tables * Added LOG line to describe groups function to find out what's going * joinedload network so describe_instances continues to work * zone api tests passing * Create a new AMQP connection by default * First, not all * Merged to trunk and fixed merge conflict in Authors * rough cut at zone api tests * Following Rick and Jay's suggestions: - Fixed LOG.debug for translation - improved vm_utils.VM_Helper.ensure_free_mem * Create a new AMQP connection by default * after hours of tracking his prey, ken slowly crept behind the elusive wilderbeast test import hiding in the libvirt_conn.py bushes and gutted it with his steely blade * fixed destroy calls * Forgot the metadata includes * added get IPs by instance * added resetnetwork to the XenAPIPlugin.dispatch dict * Forgot the metadata includes * Forgot the metadata includes * Typo fixes and some stupidity about the models * passing instance to reset_network instead of vm_ref, also not converting to an opaque ref before making plugin call * Define sql_idle_timeout flag to be an integer * forgot to add network_get_all_by_instance to db.api * template adjusted to NOVA_TOOLS, zone db & os api layers added * Spawn from disk * Some more cleanup * sql_idle_timeout should be an integer * merged model change: flavorid needs to unique in model * testing refactor * flavorid needs to unique in model * Add forwarding rules for floating IPs to the OUTPUT chain on the network node in addition to the PREROUTING chain * typo * refactored api call to use instance_types * Use a NullPool for sqlite connections * Get a fresh connection in rpc.cast rather than using a recycled 
one * Make rpc.cast create a fresh amqp connection. Each API request has its own thread, and they don't multiplex well * Only use NullPool when using sqlite * Also add floating ip forwarding to OUTPUT chain * trunk merge * removed ZoneCommands from nova-manage * Try using NullPool instead of SingletonPool * Try setting isolation_level=immediate * This branch fixes bug #708347: RunInstances: Invalid instance type gives improper error message * Wrap line to under 79 characters * Launchpad automatic translations update * adding myself to Authors file * 1. Merged to rev654(?) 2. Fixed bug continuous request. if user continuouslly send live-migration request to same host, concurrent request to iptables occurs, and iptables complains. This version add retry for this issue * forgot to register new instance_types table * Plugin tidying and more migration implementation * fixed overlooked mandatory changes in Xen * Renamed migration plugin * A lot of stuff * - population of public and private addresses containers in openstack api - replacement of sqlalchemy model in instance stub with dict * Fixes the ordering of init_host commands so that iptables chains are created before they are used * Pass timestamps to the db layer in fixed_ip_disassociate_all_by_timeout rather than converting to strings ahead of time, otherwise comparison between timestamps would often fail * Added support for 'SAN' style volumes. A SAN's big difference is that the iSCSI target won't normally run on the same host as the volume service * added support to pull list of ALL instance types even those that are marked deleted * Indent args to ssh_connect correctly * Fix PEP8 violations * Added myself to Authors * 1) Moved tests for limiter to test_common.py (from __init__.py) and expanded test suite to include bad inputs and tests for custom limits (#2) * Added my mail alias (Part of an experiment in using github, which got messy fast...) 
* Fixed pep8 error in vm_utils.py * Add my name to AUTHORS, remove parentheses from the substitution made in the previous commit * Don't convert datetime objects to a string using .isoformat(). Leave it to sqlalchmeny (or pysqlite or whatever it is that does the magic) to work it out * Added test case for 'not enough memory' Successfully ran unit tests Fixed pep8 errors * Give a better error message if the instance type specified is invalid * Launchpad automatic translations update * added testing for instance_types.py and refactored nova-manage to use instance_types.py instead of going directly to db * added create and delete methods to instance_types in preparation to call them from nova-manage * added testing for nova-manage instance_type * additional error checking for nova-manage instance_type * Typos and primary keys * Automates the setup for FlatDHCP regardless of whether the interface has an ip address * add docstring and revert set_ip changes as they are unnecessary * Commas help * Changes and bug fixes * avoiding HOST_UNAVAILABLE exception: if there is not enough free memory does not spawn the VM at all. instance state is set to "SHUTDOWN" * merge lp:nova at revision #654 * merge with lp:nova * Fixed pep8 errors Unit tests passed * merge source and remove ifconfig * fixes #713766 and probably #710959, please test the patch before committing it * use route -n instead of route to avoid chopped names * Updates to the multinode install doc based on Wayne's findings. Merged with trunk so should easily merge in * Checks whether the instance id is a list or not before assignment. This is to fix a bug relating to nova/boto. The AWK-SDK libraries pass in a string, not a list. 
The euca tools pass in a list * Launchpad automatic translations update * Catching all socket errors in _get_my_ip, since any socket error is likely enough to cause a failure in detection * Catching all socket errors in _get_my_ip, since any socket error is likely enough to cause a failure in detection * blargh * Some stuff * added INSTANCE_TYPES to test for compatibility with current tests * Checking whether the instance id is a list or not before assignment. This is to fix a bug relating to nova/boto. The AWK-SDK libraries pass in a string, not a list. the euca tools pass in a list * Added data_transfer xapi plugin * Another quick fix to multinode install doc * Made updates to multinode install doc * fixed instance_types methods to use database backend * require user context for most flavor/instance_type read calls * added network_get_all_by_instance(), call to reset_network in vmops * added new parameter --dhcp_domain to set the used domain by dnsmasq in /etc/nova/nova.conf * minor * Fix for bug #714709 * A few changes * fixed format according to PEP8 * replaced all calls to ifconfig with calls to ip * added myself to the Authors file * applied http://launchpadlibrarian.net/63698868/713434.patch * Launchpad automatic translations update * aliased flavor to instance_types in nova-manage. will probably need to make flavor a full fledged class as users will want to list flavors by flavor name * simplified instance_types db calls to return entire row - we may need these extra columns for some features and there seems to be little downside in including them. 
still need to fix testing calls * refactor to remove ugly code in flavors * updated api.create to use instance_type table * added preliminary testing for bin/nova-manage while i am somewhat conflicted about the path these tests have taken, i think it is better than no tests at all * rewrote nova-manage instance_type to use correct db.api returned objects and have more robust error handling * instance_types should return in predicatable order (by name currently) * flavorid and name need to be unique in the database for the ec2 and openstack apis, repectively * corrected db.instance_types to return expect dict instead of lists. updated openstack flavors to expect dicts instead of lists. added deleted column to returned dict * converted openstack flavors over to use instance_types table. a few pep changes * added FIXME(kpepple) comments for all constant usage of INSTANCE_TYPES. updated api/ec2/admin.py to use the new instance_types db table * Launchpad automatic translations update * allow for bridge to be the public interface * Removed (newly) unused exception variables * Didn't mean to actually make changes to the glance plugin * Added a bunch of stubbed out functionality * Moved ssh_execute to utils; moved comments to docstring * Fixes for Vish & Devin's feedback * Fixes https://bugs.launchpad.net/nova/+bug/681417 * Don't swallow exception stack traces by doing 'raise e'; just use 'raise' * Implementation of 'SAN' volumes A SAN volume is 'special' because the volume service probably won't run on the iSCSI target. Initial support is for Solaris with COMSTAR (Solaris 11) * merging * Fixed PEP8 test problems, complaining about too many blank lines at line 51 * Adds logging.basicConfig() to run_tests.py so that attempting to log debug messages from tests will work * Launchpad automatic translations update * flagged all INSTANCE_TYPES usage with FIXME comment. Added basic usage to nova-manage (needs formatting). 
created api methods * added seed data to migration * Don't need a route for guests. Turns out the issue with routing from the guests was due to duplicate macs * Changes the behavior of run_test.sh so that pep8 is only run in the default case (when running all tests). It will no longer run when individual test cases are being given as in: * open cactus * some updates to HACKING to describe the docstrings * Casting to the scheduler * moves driver.init_host into the base class so it happens before floating forwards and sets up proper iptables chains 2011.1rc1 --------- * Set FINAL = True in version.py * Open Cactus development * Set FINAL = True in version.py * pass the set_ip from ensure_vlan_bridge * don't fail on ip add exists and recreate default route on ip move if needed * initial support for dynamic instance_types: db migration and model, stub tests and stub methods * better setup for flatdhcp * added to inject networking data into the xenstore * forgot context param for network_get_all * Fixes bug #709057 * Add and document the provider_fw method in virt/FakeConnection * Fix for LP Bug #709510 * merge trunk * fix pep8 error :/ * Changed default handler for uncaughted exceptions. It uses logging instead print to stderr * Launchpad automatic translations update * rpartition sticks the rhs in [2] * Fix for LP Bug #709510 * change ensure_bridge so it doesn't overwrite existing ips * Fix for LP Bug #709510 * Enabled modification of projects using the EC2 admin API * Reorder insance rules for provider rules immediately after base, before secgroups * Merged trunk * Match the initial db version to the actual Austin release db schema * 1. Discard nova-manage host list Reason: nova-manage service list can be replacement. 
Changes: nova-manage * Only run pep8 after tests if running all the tests * add logging.basicConfig() to tests * fix austin->bexar db migration * woops * trivial cleanup for context.py * Made adminclient get_user return None instead of throwing EC2Exception if requested user not available * pep8 * Added modify project to ec2 admin api * incorporate feedback from devin - use sql consistently in instance_destroy also, set deleted_at * Fixed whitespace * Made adminclient get_user return None instead of throwing EC2Exception if requested user not available * OS-55: Fix typo for libvirt_conn operation * merge trunk * remove extraneous line * Fixed pep8 errors * Changed default handler for uncaughted exceptions. Logging with level critical instead of print to stderr * Disassociate all floating ips on terminate instance * Fixes simple scheduler to able to be run_instance by admins + availability zones * Makes having sphinx to build docs a conditional thing - if you have it, you can get docs. If you don't, you can't * Fixed a pep8 spacing issue * fixes for bug #709057 * Working on api / manager / db support for zones * Launchpad automatic translations update * Adds security group output to describe_instances * Use firewall_driver flag as expected with NWFilterFirewall. This way, either you use NWFilterFirewall directly, or you use IptablesFirewall, which creates its own instance of NWFilterFirewall for the setup_basic_filtering command. This removes the requirement that LibvirtConnection would always need to know about NWFirewallFilter, and cleans up the area where the flag is used for loading the firewall class * simplify get and remove extra reference to import logging * Added a test that checks for localized strings in the source code that contain position-based string formatting placeholders. If found, an exception message is generated that summarizes the problem, as well as the location of the problematic code. 
This will prevent future trunk commits from adding localized strings that cannot be properly translated * Made changes based on code review * makes sure that : is in the availability zone before it attempts to use it to send instances to a particular host * Makes sure all instance and volume commands that raise not found are changed to show the ec2_id instead of the internal id * remove all floating addresses on terminate instance * Merged in trunk changes * Fixed formatting issues in current codebase * Added the test for localized string formatting * Fixes NotFound messages in api to show the ec2_id * Changed cpu limit to a static value of 100000 (100%) instead of using the vcpu value of 1. There is no weight/limit variable now so I see no other solution than the static max limit * Make nova.virt.images fetch images from a Glance URL when Glance is used as the image service (rather than unconditionally fetch them from an S3/objectstore URL) * Fixed spacing... AGAIN * Make unit tests clean up their mess in /tmp after themselves * Make xml namespace match the API version requested * Missing import in xen plugin * Shortened comment for 80char limt * Added missing import * Naive, low-regression-risk fix enabling Glance to work with libvirt/hyperv * Add unit test for xmlns version matching request version * Properly pulling the name attribute from security_group * adding testcode * Fix Bug #703037. ra_server is None * Fix regression in s3 image service. 
This should be a feature freeze exception * I have a feeling if we try to migrate from imageId to id we'll be tracking it down a while * more instanceId => id fixes * Fix regression in imageId => id field rename in s3 image service * Apply lp:707675 to this branch to be able to test * merge trunk * A couple of bugfixes * Fixes a stupid mistake I made when I moved this method from a module into a class * Add dan.prince to Authors * Make xml namespace match the API version requested * Fix issue in s3.py causing where '_fix_image_id' is not defined * added mapping parameter to write_network_config_to_xenstore * OS-55: Added a test case for XenAPI file-based network injection OS-55: Stubbed out utils.execute for all XenAPI VM tests, including command simulation where necessary * Simple little changes related to openstack api to work better with glance * Merged trunk * Cleaned up _start() and _shutdown() * Added missing int to string conversion * Simple little changes related to openstack api to work better with glance * use 'ip addr change' * Fix merge miss * Changed method signature of create_network * merged r621 * Merged with http://bazaar.launchpad.net/~vishvananda/nova/lp703037 * Merged with vish branch * Prefixed ending multi-line docstring with a newline * Fixing documentation strings. Second attempt at pep8 * Removal of image tempdir in test tearDown. Also, reformatted a couple method comments to match the file's style * Add DescribeInstanceTypes to admin api. This lets the dashboard know what sizes can be launched (using the -t flag in euca-run-instances, for example) and what resources they provide * Rename Mock, since it wasn't a Mock * Add DescribeInstanceTypes to admin api (dashboard uses it) * Fix for LP Bug #699654 * Change how libvirt firewall drivers work to have meaningful flags * Fixed pep8 errors * This branch updates docs to reflect the db sync addition. 
It additionally adds some useful errors to nova-manage to help people that are using old guides. It wraps sqlalchemy errors in generic DBError. Finally, it updates nova.sh to use current settings * Added myself to the authors list * fix pep8 issue (and my commit hook that didn't catch it) * Add a host argument to virt drivers's init_host method. It will be set to the name of host it's running on * merged trunk * Wraps the NotFound exception at the api layer to print the proper instance id. Does the same for volume. Note that euca-describe-volumes doesn't pass in volume ids properly, so you will get no error messages on euca-describe-volumes with improper ids. We may also need to wrap a few other calls as well * Fixes issue with SNATTING chain not getting created or added to POSTROUTING when nova-network starts * Fix for bug #702237 * Moving init_host before metadata_forward, as metadata_forward modifies prerouting rules * another trunk merge * Limit all lines to a maximum of 79 characters * Perform same filtering for OUTPUT as FORWARD in iptables * Fixed up a little image_id return * Trunk merged * This patch: * Trunk merged * In instance chains and rules for ipv4 and ipv6, ACCEPT target was missing * moved imageId change to s3 client * Migration for provider firewall rules * Updates for provider_fw_rules in admin api * Adds driver.init_host() call to flatdhcp driver * Fixed pep8 errors * Fixed pep8 errors * No longer hard coding to "/tmp/nova/images/". Using tempdir so tests run by different people on the same development machine pass * Perform same filtering for OUTPUT as FORWARD in iptables. This removes a way around the filtering * Fix pep-8 problem from prereq branch * Add a host argument to virt driver's init_host method. 
It will be set to the name of host it's running on * updated authors since build is failing * Adds conditional around sphinx inclusion * merge with trunk * Fixes project and role checking when a user's naming attribute is not uid * I am new to nova, and wanted to fix a fairly trivial bug in order to understand the process * Fix for LP Bug #707554 * Added iptables rule to IptablesFirewallDriver like in Hisaharu Ishii patch with some workaround * Set the default number of IP's to to reserve for VPN to 0 * Merged with r606 * Properly fixed spacing issue for pep8 * Fixed spacing issue for pep8 * Fixed merge conflict * Added myself to ./Authors file * Switches from project_get_network to network_get_by_instance, which actually works with all networking modes. Also removes a couple duplicate lines from a bad merge * Set the default number of IP's to to reserver for VPN to 0 * Localized strings that employ formatting should not use positional arguments, as they prevent the translator from re-ordering the translated text; instead, they should use mappings (i.e., dicts). This change replaces all localized formatted strings that use more than one formatting placeholder with a mapping version * add ip and network to nwfilter test * merged ntt branch * use network_get_by_instance * Added myself (John Dewey) to Authors * corrected nesting of the data dictionary * Updated a couple data structures to pass pep8 * Added static cpu limit of 100000 (100%) to hyperv.py instead of using the vcpu value of 1 * PEP8 fixes * Changes __dn_to_uid to return the uid attribute from the user's object * OS-55: PEP8 fixes * merged branch to name net_manager.create_networks args * the net_managers expect different args to create_networks, so nova-manage's call to net_manager.create_networks was changed to use named args to prevent argument mismatching * OS-55: Post-merge fixes * Fix describe_regions by changing renamed flags. 
Also added a test to catch future errors * changed nova-manage to use named arguments to net_manager.create_networks * Merged trunk * Removed tabs form source. Merged trunk changes * allow docs to build in virtualenv prevent setup.py from failing with sphinx in virtualenv * fixes doc build and setup.py fail in virtualenv * fix reversed assignment * fixes and refactoring of smoketests * remove extra print * add test and fix describe regions * merged trunk * This patch skips VM shutdown if already in the halted state * Use Glance to relate machine image with kernel and ramdisk * Skip shutdown if already halted * Refactoring _destroy into steps * i18n! * merged trunk fixed whitespace in rst * wrap sqlalchemy exceptions in a generic error * Wrap instance at api layer to print the proper error. Use same logic for volumes * This patch adds two flags: * Using new style logging * Adding ability to remap VBD device * Resolved trunk merge conflicts * Adds gettext to pluginlib_nova.py. Fixes #706029 * Adding getttext to pluginlib_nova * Add provider_fw_rules awareness to iptables firewall driver * No longer chmod 0777 instance directories, since nova works just fine without them * Updated docs for db sync requirements; merged with Vish's similar doc updates * Change default log formats so that: * they include a timestamp (necessary to correlate logs) * no longer display version on every line (shorter lines) * use [-] instead of [N/A] (shorter lines, less scary-looking) * show level before logger name (better human-readability) * OS55: pylint fixes * OS-55: Added unit test for network injection via xenstore * fixed typo * OS-55: Fix current unit tests * Fixed for pep8 * Merged with rev597 * No longer chmod 0777 instance directories * Reverted log type from error to audit * undid moving argument * Fix for LP Bug #699654 * moved argument for label * fixed the migration * really added migration for networks label * added default label to nova-manage and create_networks * syntax * 
syntax error * added plugin call for resetnetworking * Fix metadata using versions other than /later. Patch via ~ttx * should be writing some kindof network info to the xenstore now, hopefully * Use ttx's patch to be explict about paths, as urlmap doesn't work as I expected * Doc changes for db sync * Fixes issue with instance creation throwing errors when non-default groups are used * Saving a database call by getting the security groups from the instance object * Fixes issue with describe_instances requiring an admin context * OS-55: pylint fixes * Fixing another instance of getting a list of ids instead of a list of objects * Adds security group output to describe_instances * Finds and fixes remaining strings for i18n. Fixes bug #705186 * Pass a PluginManager to nose.config.Config(). This lets us use plugins like coverage, xcoverage, etc * i18n's strings that were missed or have been added since initial i18n strings branch * OS-55: Only modify Linux image with no or injection-incapable guest agent OS-55: Support network configuration via xenstore for Windows images * A couple of copypasta errors * Keep exception tracing as it was * Pass a PluginManager to nose.config.Config(). This lets us use plugins like coverage, xcoverage, etc * Also print version at nova-api startup, for consistency * Add timestamp to default log format, invert name and level for better readability, log version once at startup * When radvd is already running, not to hup, but to restart * fix ipv6 conditional * more smoketest fixes * Passing in an elevated context instead of making the call non-elevated * Added changes to make errors and recovery for volumes more graceful: * Fetches the security group from ID, allowing the object to be used properly, later * Changing service_get_all_by_host to not require admin context as it is used for describing instances, which any user in a project can do * Exclude vcsversion.py from pep8 check. 
It's not compliant, but out of our control * Exclude vcsversion.py from pep8 check. It's not compliant, but out of our control * Include paste config in tarball * Add etc/ directory to tarball * Fixes for bugs: * Return non-zero if either unit tests or pep8 fails * Eagerly load fixed_ip.network in instance_get_by_id * Add Rob Kost to Authors * Return non-zero if either unit tests or pep8 fails * Merged trunk * merge trunk * Add paste and paste.deploy installation to nova.sh, needed for api server * Updated trunk changes to work with localization * Implement provider-level firewall rules in nwfilter * Whitespace (pep8) cleanups * Exception string lacking 'G' for gigabytes unit * Fixes **params unpacking to ensure all kwargs are strings for compatibility with python 2.6.1 * make sure params have no unicode keys * Removed unneeded line * Merged trunk * Refactor run_tests.sh to allow us to run an extra command after the tests * update the docs to reflect db sync as well * add helpful error messages to nova-manage and update nova.sh * Fixed unit tests * Merged trunk * fixed pep8 error * Eagerly load instance's fixed_ip.network attribute * merged trunk changes * minor code cleanup * minor code cleanup * remove blank from Authors * .mailmap rewrite * .mailmap updated * Refactor run_tests.sh to allow us to run an extra command after the tests * Add an apply_instance_filter method to NWFilter driver * PEP-8 fixes * Revert Firewalldriver * Replace an old use of ec2_id with id in describe_addresses * various fixes to smoketests, including allowing admin tests to run as a user, better timing, and allowing volume tests to run on non-udev linux * merged trunk * replace old ec2_id with proper id in describe_addresses * merge vish's changes (which merged trunk and fixed a pep8 problem) * merged trunkand fixed conflicts and pep error * get_my_linklocal raises exception * Completed first pass at converting all localized strings with multiple format substitutions * Allows moving from 
the Austin-style db to the Bexar-style * move db sync into nosetests package-level fixtures so that the existing nosetests attempt in hudson will pass * previous commit breaks volume.driver. fix it. * per vish's feedback, allow admin to specify volume id in any of the acceptable manners (vol-, volume-, and int) * Merged trunk * Fixed unit tests * Fix merge conflict * add two more columns, set string lengths) * Enable the use_ipv6 flag in unit tests by default * Fixed unit tests * merge from upstream and fix small issues * merged to trunk rev572 * fixed based on reviewer's comment * Basic stubbing throughout the stack * Enable the use_ipv6 flag in unit tests by default * Add an apply_instance_filter method to NWFilter driver * update status to 'error_deleting' on volumes where deletion fails * Merged trunk * This disables ipv6 by default. Most use cases will not need it on and it makes dependencies more complex * The live_migration branch ( https://code.launchpad.net/~nttdata/nova/live-migration/+merge/44940 ) was not ready to be merged * merge from upstream to fix conflict * Trunk merge * s/cleanup/volume. volume commands will need their own ns in the long run * disable ipv6 by default * Merged trunk * Plug VBD to existing instance and minor cleanup * fixes related to #701749. 
Also, added nova-manage commands to recover from certain states: * Implement support for streaming images from Glance when using the XenAPI virtualization backend, as per the bexar-xenapi-support-for-glance blueprint * Works around the app-armor problem of requiring disks with backing files to be named appropriately by changing the name of our extra disks * fix test to respect xml changes * merged trunk * Add refresh_security_group_* methods to nova/virt/fake.py, as FakeConnection is the reference for documentation and method signatures that should be implemented by virt connection drivers * added paste pastedeploy to nova.sh * authors needed for test * revert live_migration branch * This removes the need for the custom udev rule for iscsi devices. It instead attaches the device based on /dev/disk/by-path/ which should make the setup of nova-volume a little easier * Merged trunk * Risk of Regression: This patch don’t modify existing functionlities, but I have added some. 1. nova.db.service.sqlalchemy.model.Serivce (adding a column to database) 2. nova.service ( nova-compute needes to insert information defined by 1 above) * Docstrings aren't guaranteed to exist, so split() can't automatically be called on a method without first checking for the method docstring's existence. Fixes Bug #704447 * Removes circular import issues from bin/stack and replaces utils.loads with json.loads. Fixes Bug#704424 * ComputeAPI -> compute.API in bin/nova-direct-api. Fixes LP#704422 * Fixed apply_instance_filter is not implemented in NWFilterFirewall * pep8 * I might have gone overboard with documenting _members * Add rules to database, cast refresh message and trickle down to firewall driver * Fixed error message in get_my_linklocal * openstack api fixes for glance * Stubbed-out code for working with provider-firewalls * Merged trunk * Merged with trunk revno 572 * Better shutdown handling * Change where paste.deploy factories live and how they are called. 
They are now in the nova.wsgi.Application/Middleware classes, and call the __init__ method of their class with kwargs of the local configuration of the paste file * Further decouple api routing decisions and move into paste.deploy configuration. This makes paste back the nova-api binary * Clean up openstack api test fake * Merged trunk * Add Start/Shutdown support to XenAPI * The Openstack API requires image metadata to be returned immediately after an image-create call * merge trunk * Fixing whitespace * Returning image_metadata from snapshot() * Merging trunk * Merged trunk * merged trunk rev569 * merged to rev 561 and fixed based on reviewer's comment * Adds a developer interface with direct access to the internal inter-service APIs and a command-line tool based on reflection to interact with them * merge from upstream * pep8 fixes... largely to things from trunk? * merge from upstream * pep8 * remove print statement * This branch fixes two outstanding bugs in compute. It also fixes a bad method signature in network and removes an unused method in cloud * Re-removes TrialTestCase. It was accidentally added in by some merges and causing issues with running tests individually * removed rpc in cloud * merged trial fix again * fix bad function signature in create_networks * undo accidental removal of fake_flags * Merged trunk * merged lp:~vishvananda/nova/lp703012 * remove TrialTestCase again and fix merge issues * import re, remove extra call in cloud.py. 
Move get_console_output to compute_api * Create and use a generic handler for RPC calls to compute * Create and use a generic handler for RPC calls to compute * Create and use a generic handler for RPC calls * Merged trunk * OS-55: Inject network settings in linux images * Merged with trunk revno 565 * use .local and .rescue for disk images so they don't make app-armor puke * Implements the blueprint for enabling the setting of the root/admin password on an instance * OpenStack Compute (Nova) IPv4/IPv6 dual stack support http://wiki.openstack.org/BexarIpv6supportReadme * Merged to rev.563 * This change introduces support for Sheepdog (distributed block storage system) which is proposed in https://blueprints.launchpad.net/nova/+spec/sheepdog-support * Sort Authors * Update Authors * merge from upstream: * pep8 fixes * update migration script to add new tables since merge * sort Authors * Merged with r562 * This modifies libvirt to use CoW images instead of raw images. This is much more efficient and allows us to use the snapshotting capabilities available for qcow2 images. It also changes local storage to be a separate drive instead of a separate partition * pep8. Someday I'll remember 2 blank lines between module methods * remove ">>>MERGE" iin nova/db/sqlalchemy/api.py * checking based on pep8 * merged trunk * Modified per sorens review * Fix for Pep-8 * Merged with r561 * Moved commands which needs sudo to nova.sh * Added netaddr for pip-requires * Marking snapshots as private for now * Merging Trunk * Fixing Image ID workaround and typo * Fixed based on the comments from code review. Merged to trunk rev 561 * Add a new method to firewall drivers to tell them to stop filtering a particular instance. Call it when an instance has been destroyed * merged to trunk rev 561 * Merged trunk * merge trunk rev560 * Fixes related to how EC2 ids are displayed and dealt with * Get reviewed and fixed based on comments. 
Merged latest version * Make libvirt and XenAPI play nice together * Spelling is hard. Typing even moreso * Revert changes to version.py * Minor code cleanups * Minor code cleanups * Minor code cleanups * Make driver calls compatible * Merged trunk * Stubbed out XenServer rescue/unrescue * Added unit tests for the Diffie-Hellman class. Merged recent trunk changes * Bring NWFilter driver up to speed on unfilter_instance * Replaced home-grown Diffie-Hellman implementation with the M2Crypto version supplied by Soren * Instead of a set() to keep track of instances and security groups, use a dict(). __eq__ for stuff coming out of sqlalchemy does not do what I expected (probably due to our use of sessions) * Fixes broken call to __generate_rc in auth manager * Fixes bug #701055. Moves code for instance termination inline so that the manager doesn't prematurely mark an instance as deleted. Prematurely doing so causes find calls to fail, prevents instance data from being deleted, and also causes some other issues * Revert r510 and r512 because Josh had already done the same work * merged trunk * Fixed Authors * Merged with 557 * Fixed missing _(). Fixed to follow logging to LOG changes. Fixed merge miss (get_fixed_ip was moved away). Update some missing comments * merge from upstream and fix leaks in console tests * make sure get_all returns * Fixes a typo in the name of a variable * Fixes #701055. 
Move instance termination code inline to prevent manager from prematurely marking it as destroyed * fix invalid variable reference in cloud api * fix indentation * add support for database migration * fix changed call to generate_rc * merged with r555 * fixed method signature of modify_rules fixed unit_test for ipv6 * standardize volume ids * standardize volume ids * standardize on hex for ids, allow configurable instance names * correct volume ids for ec2 * correct formatting for volume ids * Fix test failures on Python 2.7 by eagerly loading the fixed_ip attribute on instances. No clue why it doesn't affect python 2.6, though * Adding TODO to clarify status * Merging trunk * Do joinedload_all('fixed_ip.floating_ips') instead of joinedload('fixed_ip') * Initialize logging in nova-manage so we don't see errors about missing handlers * _wait_with_callback was changed out from under suspend/resume. fixed * Make rescue/unrescue available to API * Stop error messages for logs when running nova-manage * Fixing stub so tests pass * Merging trunk * Merging trunk, small fixes * This branch adds a backend for using RBD (RADOS Block Device) volumes in nova via libvirt/qemu. This is described in the blueprint here: https://blueprints.launchpad.net/nova/+spec/ceph-block-driver * Fix url matching for years 2010-forward * Update config for launching logger with cleaner factory * Update paste config for ec2 request logging * merged changes from trunk * cleaned up prior merge mess * Merged trunk * My previous modifications to novarc had CLOUDSERVER_AUTH_URL pointing to the ec2 api port. Now it's correctly pointing to os api port * Check for whole pool name in check_for_setup_error * change novarc template from cc_port to osapi_port. Removed osapi_port from bin scripts * Start to add rescue/unrescue support * fixed pause and resume * Fixed another issue in _stream_disk, as it did never execute _write_partition. Fixed fake method accordingly. 
Fixed pep8 errors * pep8 fixes * Fixing the stub for _stream_disk as well * Fix for _stream_disk * Merged with r551 * Support IPv6 firewall with IptablesFirewallDriver * Fixed syntax errors * Check whether 'device_path' has ':' before splitting it * PEP8 fixes, and switch to using the new LOG in vm_utils, matching what's just come in from trunk * Merged with trunk * Merged with Orlando's recent changes * Added support of availability zones for compute. models.Service got additional field availability_zone and was created ZoneScheduler that make decisions based on this field. Also replaced fake 'nova' zone in EC2 cloud api * Eagerly load fixed_ip property of instances * Had to abandon the other branch (~annegentle/nova/newscript) because the diffs weren't working right for me. This is a fresh branch that should be merged correctly with trunk. Thanks for your patience. :) * Added unit tests for the xenapi-glance integration. This adds a glance simulator that can stub in place of glance.client.Client, and enhances the xapi simulator to add the additional calls that the Glance-specific path requires * Merged with 549 * Change command to get link local address Remove superfluous code * This branch adds web based serial console access. Here is an overview of how it works (for libvirt): * Merged with r548 * Fixed bug * Add DescribeInstanceV6 for backward compatibility * Fixed test environments. Fixed bugs in _fetch_image_objecstore and _lookup_image_objcestore (objectstore was broken!) Added tests for glance * Fixed for pep8 Remove temporary debugging * changed exception class * Changing DN creation to do searches for entries * Fixes bug #701575: run_tests.sh fails with a meaningless error if virtualenv is not installed. Proposed fix tries to use easy_install to install virtualenv if not present * merge trunk, fix conflict * more useful prefix and fix typo in string * use by-path instead of custom udev script * Quick bugfix. 
Also make the error message more specific and unique in the equivalent code in the revoke method * remove extra whitspaces * Raise meaningful exception when there aren't enough params for a sec group rule * bah - pep8 errors * resolve pylint warnings * Removing script file * Read Full Spec for implementation details and notes on how to boot an instance using OS API. http://etherpad.openstack.org/B2RK0q1CYj * Added my name to Authors list * Changes per Edays comments * Fixed a number of issues with the iptables firewall backend: * Port specifications for firewalls come back from the data store as integers, but were compared as strings. * --icmp-type was misspelled as --icmp_type (underscore vs dash) * There weren't any unit tests for these issues * merged trunk changes * Removed unneeded SimpleDH code from agent plugin. Improved handling of plugin call failures * Now tries to install virtualenv via easy_install if not present * Merging trunk * fixed issue in pluginlib_nova.py * Trunk merge and conflcts resolved * Implementation of xs-console blueprint (adds support for console proxies like xvp) * Fixed a number of issues with the iptables firewall backend: * Port specifications for firewalls come back from the data store as integers, but were compared as strings. * --icmp-type was misspelled as --icmp_type (underscore vs dash) * There weren't any unit tests for these issues * Add support for EBS volumes to the live migration feature. Currently, only AoE is supported * Changed shared_ip_group detail routing * Changed shared_ip_group detail routing * A few more changes to the smoeketests. Allows smoketests to find the nova package from the checkout. Adds smoketests for security groups. 
Also fixes a couple of typos * Fixes the metadata forwarding to work by default * Adds support to nova-manage to modify projects * Add glance to pip-requires, as we're now using the Glance client code from Nova * Now removing kernel/ramdisk VDI after copy Code tested with PV and HVM guests Fixed pep8 errors * merged trunk changes * consolidate boto_extensions.py and euca-get-ajax-console, fix bugs from previous trunk merge * Fixed issues raised by reviews * xenapi_conn was not terminating utils/LoopingCall when an exception was occurring. This was causing the eventlet Event to have send_exception() called more than once (a no-no) * merge trunk * whups, fix accidental change to nova-combined * remove uneeded superclass * Bugfix * Adds the requisite infrastructure for automating translation templates import/export to Launchpad * Added babel/gettext build support * Can now correctly launch images with external kernels through glance * re-merged in trunk to correct conflict * Fix describe_availablity_zones versobse * Typo fix * merged changes from trunk * Adding modify option for projects * Fixes describe_instances to filter by a list of instance_ids * Late import module for register_models() so it doesn't create the db before flags are loaded * Checks for existence of volume group using vgs instead of checking to see if /dev/nova-volumes exists. The dev is created by udev and isn't always there even if the volume group does exist * Add a new firewall backend for libvirt, based on iptables * Create LibvirtConnection directly, rather than going through libvirt_conn.get_connection. 
This should remove the dependency on libvirt for tests * Fixed xenapi_conn wait_for_task to properly terminate LoopingCall on exception * Fixed xenapi_conn wait_for_task to properly terminate LoopingCall on exception * Fixed xenapi_conn wait_for_task to properly terminate LoopingCall on exception * optimize to call get if instance_id is specified since most of the time people will just be requesting one id * fix describe instances + test * Moved get_my_ip into flags because that is the only thing it is being used for and use it to set a new flag called my_ip * fixes Document make configuration by updating nova version mechanism to conform to rev530 update * alphbetized Authors * added myself to authors and fixed typo to follow standard * typo correction * fixed small glitch in _fetch_image_glance virtual_size = imeta['size'] * fixed doc make process for new nova version (rev530) machanism * late import module for register_models() so it doesn't create the db before flags are loaded * use safer vgs call * Return proper region info in describe_regions * change API classname to match the way other API's are done * small cleanups * First cut at implementing partition-adding in combination with the Glance streaming. Untested * some small cleanups * merged from upstream and made applicable changes * Adds a mechanism to programmatically determine the version of Nova. The designated version is defined in nova/version.py. 
When running python setup.py from a bzr checkout, information about the bzr branch is put into nova/vcsversion.py which is conditionally imported in nova/version.py * Return region info in the proper format * Now that we aren't using twisted we can vgs to check for the existence of the volume group * s/canonical_version/canonical_version_string/g * Fix indentation * s/string_with_vcs/version_string_with_vcs/g * Some fixes to _lookup_image_glance: fix the return value from lookup_image, attach the disk read-only before running pygrub, and add some debug logging * Reverted formatting change no longer necessary * removed a merge conflict line I missed before * merged trunk changes * set the hostname factory in the service init * incorporated changes suggested by eday * Add copyright and license info to version.py * Fixes issue in trunk with downloading s3 images for instance creation * Fix pep8 errors * Many fixes to the Glance integration * Wrap logs so we can: * use a "context" kwarg to track requests all the way through the system * use a custom formatter so we get the data we want (configurable with flags) * allow additional formatting for debug statements for easer debugging * add an AUDIT level, useful for noticing changes to system components * use named logs instead of the general logger where it makes sesnse * pep8 fixes * Bug #699910: Nova RPC layer silently swallows exceptions * Bug #699912: When failing to connect to a data store, Nova doesn't log which data store it tried to connect to * Bug #699910: Nova RPC layer silently swallows exceptions * pv/hvm detection with pygrub updated for glance * Bug #699912: When failing to connect to a data store, Nova doesn't log which data store it tried to connect to * Resolved merge differences * Additional cleanup prior to pushing * Merged with trunk * Fixing unescaped quote in nova-CC-install.sh script plus formatting fixes to multinode install * getting ready to push for merge prop * Fixing headers line by wrapping 
the headers in single quotes * Less code generation * grabbed the get_info fix from my other branch * merged changes from trunk * Remove redundant import of nova.context. Use db instance attribute rather than module directly * Merging trunk * Removing some FIXMEs * Reserving image before uploading * merge * Half-finished implementation of the streaming from Glance to a VDI through nova-compute * Fix Nova not to immediately blow up when talking to Glance: we were using the wrong URL to get the image metadata, and ended up getting the whole image instead (and trying to parse it as json) * another merge with trunk to remedy instance_id issues * merge * Include date in API action query * Review feedback * This branch implements lock functionality. The lock is stored in the compute worker database. Decorators have been added to the openstack API actions which alter instances in any way * Review feedback * Review feedback * Review feedback * typo * refers to instance_id instead of instance_ref[instance_id] * passing the correct parameters to decorated function * accidentally left unlocked in there, it should have been locked * various cleanup and fixes * merged trunk * pep8 * altered argument handling * Got the basic 'set admin password' stuff working * Include date in action query * Let documentation get version from nova/version.py as well * Add default version file for developers * merge pep8 fixes from newlog2 * Track version info, and make available for logging * pep8 * Merged trunk * merge pep8 and tests from wsgirouter branch * Remove test for removed class * Pep8 * pep8 fix * merged trunk changes * commit before merging trunk * Fixes format_instances error by passing reservation_id as a kwarg instead of an arg. Also removes extraneous yields in test_cloud that were causing tests to pass with broken code * Remove module-level factory methods in favor of having a factory class-method on wsgi components themselves. 
Local options from config are passed to the __init__ method of the component as kwargs * fix the broken tests that allowed the breakage in format to happen * Fix format_run_instances to pass in reservation id as a kwarg * Add factories into the wsgi classes * Add blank __init__ file for fixing importability. The stale .pyc masked this error locally * merged trunk changes * Introduces basic support for spawning, rebooting and destroying vms when using Microsoft Hyper-V as the hypervisor. Images need to be in VHD format. Note that although Hyper-V doesn't accept kernel and ramdisk separate from the image, the nova objectstore api still expects an image to have an associated aki and ari. You can use dummy aki and ari images -- the hyper-v driver won't use them or try to download them. Requires Python's WMI module * merged trunk changes * Renamed 'set_root_password' to 'set_admin_password' globally * merge with trunk * renamed sharedipgroups to shared_ip_groups and fixed tests for display_name * Fix openstack api tests and add a FaultWrapper to turn exceptions to faults * Fixed display_name on create_instance * fix some glitches due to someone removing instanc.internal_id (not that I mind) remove accidental change to nova-combined script * Fixed trunk merge conflicts as spotted by dubs * OS API parity: map image ID to numeric ID. Ensure all other OS operations are at least stubbed out and callable * add in separate public hostname for console hosts. 
flesh out console api data * allow smoketests to find nova package and add security rules * Fix a bunch of pep8 stuff * This addition to the docs clarifies that it is a requirement for contributors to be listed in the Authors file before their commits can be merged to trunk * merge trunk * another merge from trunk to the latest rev * pulled changes from trunk added console api to openstack api * Removed dependencies on nova server components for the admin client * Remove stale doc files so the autogeneration extension for sphinx will work properly * Add to Authors and mailmap * Make test case work again * This branch contains the internal API cleanup branches I had previously proposed, but combined together and with all the UUID key replacement ripped out. This allows multiple REST interfaces (or other tools) to use the internal API directly, rather than having the logic tied up in the ec2 cloud.py file * socat will need to be added to our nova sudoers * merged trunk changes * intermediate work * Created a XenAPI plugin that will allow nova code to read/write/delete from xenstore records for a given instance. 
Added the basic methods for working with xenstore data to the vmops script, as well as plugin support to xenapi_conn.py * Merged trunk * Recover from a lost data store connection * Updated register_models() docstring * simplify decorator into a wrapper fn * add in xs-console worker and tests * pep8 cleanup * more fixes, docstrings * fix injection and xml * Fixing formatting problems with multinode install document * Split internal API get calls to get and get_all, where the former takes an ID and returns one resource, and the latter can optionally take a filter and return a list of resources * missing _() * Fixed for pep8 * Fixed:Create instance fails when use_ipv6=False * Removed debug message which is not needed * Fixed misspelled variable * Fixed bug in nova_project_filter_v6 * The _update method in base Instance class overides dns_name_v6,so fixed it * self.XENAPI.. * Changed Paused power state from Error to Paused * fixed json syntax error * stop using partitions and first pass at cow images * Remove stale doc files * pep8 * tests fixed up * Better method for eventlet.wsgi.server logging * Silence eventlet.wsgi.server so it doesn't go to stdout and pollute our logs * Declare a flag for test to run in isolation * Build app manually for test_api since nova.ec2.API is gone * Recover from a lost data store connection * Added xenstore plugin changed * merged changes from trunk * some more cleanup * need one more newline * Redis dependency no longer needed * Make test_access use ec2.request instead of .controller and .action * Revert some unneeded formatting since twistd is no longer used * pep8 fixes * Remove flags and unused API class from openstack api, since such things are specified in paste config now * i18n logging and exception strings * remove unused nova/api/__init__.py * Make paste the default api pattern * Rework how routing is done in ec2 endpoint * Change all 2010 Copyright statements to 2010-2011 in doc source directory only * rename easy to direct in 
the scripts * fix typo in stack tool * rename Easy API to Direct API * Moved __init__ api code to api.py and changed allowed_instances quota method argument to accept all type data, not just vcpu count * Made the plugin output fully json-ified, so I could remove the exception handlers in vmops.py. Cleaned up some pep8 issues that weren't caught in earlier runs * merged from trunk * Renamed argument to represent possible types in volume_utils * Removed leftover UUID reference * Removed UUID keys for instance and volume * Merged trunk * Final edits to multi-node doc and install script * Merged trunk changes * Some Bug Fix * Fixed bug in libvirt * Fixed bug * Fixed for pep8 * Fixed conflict with r515 * Merged and fiexed conflicts with r515 * some fixes per vish's feedback * Don't know where that LOG went.. * Final few log tweaks, i18n, levels, including contexts, etc * Apply logging changes as a giant patch to work around the cloudpipe delete + add issue in the original patch * dabo fix to update for password reset v2 * krm_mapping.json sample file added * dabo fix to update for password reset * added cloudserver vars to novarc template * Update Authors * Add support for rbd volumes * Fixes LP688545 * First pass at feature parity. Includes Image ID hash * Fixing merge conflicts with new branch * merged in trunk changes * Fixing merge conflicts * Fixes LP688545 * Make sure we point to the right PPA's everywhere * Editing note about the database schema available on the wiki * Modifying based on reviewer comments * Uses paste.deploy to make application running configurable. This includes the ability to swap out middlewares, define new endpoints, and generally move away from having code to build wsgi routers and middleware chains into a configurable, extensible method for running wsgi servers * Modifications to the nova-CC-installer.sh based on review * Adds the pool_recycle option to the sql engine startup call. 
This enables connection auto-timeout so that connection pooling will work properly. The recommended setting (per sqlalchemy FAQ page) has been provided as a default for a new configuration flag. What this means is that if a db connection sits idle for the configured # of seconds, the engine will automatically close the connection and return it to the available thread pool. See Bug #690314 for info * Add burnin support. Services are now by default disabled, but can have instances and volumes run on them using availability_zone = nova:HOSTNAME. This lets the hardware be put through its paces without being put in the generally available pool of hardware. There is a 'service' subcommand for nova-manage where you can enable, disable, and list statuses of services * pep8 fixes * Merged compute-api-cleanup branch * Removed compute dependency in quota.py * add timeout constant, set to 5 minutes * removed extra whitespace chars at the end of the changed lines * Several documentation corrections and formatting fixes * Minor edits prior to merging changes to the script file * add stubs for xen driver * merge in trunk * merged latest trunk * merge trunk * merge trunk * temp * Stop returning generators in the refresh_security_group_{rules,members} methods * Don't lie about which is the default firewall implementation * Move a closing bracket * Stub out init_host in libvirt driver * Adjust test suite to the split between base firewall rules provided by nwfilter and the security group filtering * Fix a merge artifact * Remove references to nova-core/ppa and openstack/ppa PPA's * Updated the password generation code * Add support for Sheepdog volumes * Add support for various block device types (block, network, file) * Added agent.py plugin. 
Merged xenstore plugin changes * fixed pep8 issues * Added OpenStack's copyright to the xenstore plugin * fixed pep8 issues * merged in trunk and xenstore-plugin changes * Ignore CA/crl.pem * Before merge with xenstore-plugin code * Corrected the sloppy import in the xenstore plugin that was copied from other plugins * Ignore CA/crl.pem * Merged trunk * Merged trunk * deleting README.livemigration.txt and nova/livemigration_test/* * Merged trunk * Merged trunk * 最新ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã«ãƒžãƒ¼ã‚¸ã€‚変更点ã¯ä»¥ä¸‹ã®é€šã‚Šã€‚ Authorsã«è‡ªåˆ†ã®æ‰€å±žã‚’追加 utils.pyã®generate_uidãŒãŠã‹ã—ã„ã®ã§ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹IDãŒã‚ªãƒ¼ãƒãƒ¼ãƒ•ローã—ã¦ã„ãŸãŒã€ ãã®å‡¦ç†ã‚’一時撤廃。後ã§è©¦é¨“ã—ãªãŠã—ã¨ã™ã‚‹ã“ã¨ã«ã—ãŸã€‚ * Merged trunk * Auth Tokens assumed the user_id was an int, not a string * Removed dependencies on flags.py from adminclient * Make InstanceActions and live diagnostics available through the Admin API * Cleanup * Improved test * removed some debugging code left in previous push * Converted the pool_recycle setting to be a flag with a default of 3600 seconds * completed the basic xenstore read/write/delete functionality * Removed problematic test * PEP8 fix * * Fix bad query in __project_to_dn * use __find_dns instead of __find_objects in __uid_to_dn and __project_to_dn * Moved network operation code in ec2 api into a generic network API class. Removed a circular dependency with compute/quota * Oopsies * merge trunk * merge trunk * Make compute.api methods verbs * Fail * Review feedback * Cleans up the output of run_tests.sh to look closer to Trial * change exit code * Changing DN creation to do searches for entries * Merged trunk * Implemented review feedback * This patch is beginning of XenServer snapshots in nova. It adds: * Merged trunk * Calling compute api directly from OpenStack image create * Several documentation corrections * merge recent revision(version of 2010/12/28) Change: 1. Use greenthread instead of defer at nova.virt.libvirt_conn.live_migration. 2. 
Move nova.scheduler.manager.live_migration to nova.scheduler.driver 3. Move nova.scheduler.manager.has_enough_resource to nova.scheduler.driver 4. Any check routine in nova-manage.instance.live_migration is moved to nova.scheduler.driver.schedule_live_migration * Merging trunk * Note that contributors are required to be listed in Authors file before work can be merged into trunk * Mention Authors and .mailmap files in Developer Guide * pep 8 * remove cloudpipe from paste config * Clean up how we determine IP to bind to * Converted a few more ec2 calls to use compute api * Cleaned up the compute API, mostly consistency with other parts of the system and renaming redundant module names * fixed the compute lock test * altered the compute lock test * removed tests.api.openstack.test_servers test_lock, to hell with it. i'm not even sure if testing lock needs to be at this level * fixed up the compute lock test, was failing because the context was always admin * syntax error * moved check lock decorator from the compute api to the compute manager... when it rains it pours * removed db.set_lock, using update_instance instead * added some logging * typo, trying to hurry.. look where that got me * altered error exception/logging * altered error exception/logging * fixed variables being out of scope in lock decorator * moved check lock decorator to compute api level. altered openstack.test_servers accordingly and wrote test for lock in tests.test_compute * Moved ec2 volume operations into a volume API interface for other components to use. 
Added attach/detach as compute.api methods, since they operate in the context of instances (and to avoid a dependency loop) * pep8 fix, and add in flags that don't refernece my laptop * apt-get install socat, which is used to connect to the console * removed lock check from show and changed returning 404 to 405 * fix lp:695182, scheduler tests needed to DECLARE flag to run standalone * removed () from if (can't believe i did that) and renamed checks_lock decorator * Add the pool_recycle setting to enable connection pooling features for the sql engine. The setting is hard-coded to 3600 seconds (one hour) per the recommendation provided on sqlalchemy's site * i18n * Pep-8 cleanup * Fix scheduler testcase so it knows all flags and can run in isolation * removed some code i didn't end up using * fixed merge conflict with trunk * pep8 * fixed up test for lock * added tests for EC2 describe_instances * PEP8 cleanup * This branch fixes an issue where VM creation fails because of a missing flag definition for 'injected_network_template'. See Bug #695467 for more info * Added tests * added test for lock to os api * refactor * Re-added flag definition for injected_network_template. Tested & verified fix in the same env as the original bug * forgot import * syntax error * Merged trunk * Added implementation availability_zones to EC2 API * Updating Authors * merge * Changes and error fixes to help ensure basic parity with the Rackspace API. 
Some features are still missing, such as shared ip groups, and will be added in a later patch set * initial lock functionality commit * Merged with trunk * Additional edits in nova.concepts.rst while waiting for script changes * Bug #694880: nova-compute now depends upon Cheetah even when not using libvirt * add ajax console proxy to nova.sh * merge trunk * Fix pep8 violations * add in unit tests * removed superfluous line * Address bug #695157 by using a blank request class and setting an empty request path * Default services to enabled * Address bug #695157 by using a blank request class and setting an empty request path * Add flag --enable_new_services to toggle default state of service when created * merge from trunk * This commit introduces scripts to apply XenServer host networking protections * Whoops * merge from upstream and fix conflicts * Update .mailmap with both email addresses for Ant and myself * Make action log available through Admin API * Merging trunk * Add some basic snapshot tests * Added get_diagnostics placeholders to libvirt and fake * Merged trunk * Added InstanceAction DB functions * merge trunk * Bug #694890: run_tests.sh sometimes doesn't pass arguments to nosetest * Output of run_tests.sh to be closer to trial * I've added suspend along with a few changes to power state as well. I can't imagine suspend will be controversial but I've added a new power state for "suspended" to nova.compute.power_states which libvirt doesn't use and updated the xenapi power mapping to use it for suspended state. I also updated the mappings in nova.api.openstack.servers to map PAUSED to "error" and SUSPENDED to "suspended". Thoughts there are that we don't currently (openstack API v1.0) use pause, so if somehow an instance were to be paused an error occurred somewhere, or someone did something in error. Either way asking the xenserver host for the status would show "paused". 
Support for more power states needs to be added to the next version of the openstack API * fixed a line length * Bug #694880: nova-compute now depends upon Cheetah even when not using libvirt * Bug #694890: run_tests.sh sometimes doesn't pass arguments to nosetest * fix bug #lp694311 * Typo fix * Renamed based on feedback from another branch * Added stack command-line tool * missed a couple of gettext _() * Cleans up nova.api.openstack.images and fix it to work with cloudservers api. Previously "cloudservers image-list" wouldn't work, now it will. There are mappings in place to handle s3 or glance/local image service. In the future when the local image service is working, we can probably drop the s3 mappings * Fixing snapshots, pep8 fixes * translate status was returning the wrong item * Fixing bad merge * Converted Volume model and operation to use UUIDs * inst -> item * syntax error * renaming things to be a bit more descriptive * Merging trunk * Converted instance references to GUID type * Added custom guid type so we can choose the most efficient backend DB type easily * backup schedule changes * Merged trunk * Merging trunk, fixing failed tests * A few fixes * removed \ * Moving README to doc/networking.rst per recommendation from Jay Pipes * Merged trunk * couple of pep8s * merge trunk * Fixed after Jay's review. 
Integrated code from Soren (we now use the same 'magic number' for images without kernel & ramdisk * Fixed pep8 errors * launch_at ã‚’å‰å›žã‚³ãƒŸãƒƒãƒˆæ™‚ã«è¿½åŠ ã—ãŸãŒã€lauched_atã¨ã„ã†ã‚«ãƒ©ãƒ ãŒæ—¢ã«å­˜åœ¨ã—〠紛らã‚ã—ã„ã®ã§lauched_onã«ã—ãŸã€‚ * logs inner exception in nova/utils.py->import_class * Fix Bug #693963 * remove requirement of sudo on tests * merge trunk * Merge * adding zones to api * Support IPv6 * test commit * テスト項目表をå†ã³è¿½åŠ ã—ãŸçŠ¶æ…‹ã§ã‚³ãƒŸãƒƒãƒˆ * テスト項目表をローカルã‹ã‚‰ä¸€åº¦å‰Šé™¤ã—ãŸçŠ¶æ…‹ã§ã‚³ãƒŸãƒƒãƒˆ * テスト項目表ãŒãªãœã‹æ¶ˆãˆãŸã®ã§è¿½åŠ  * nova.compute.managerãŒã“れã¾ã§ã®ä¿®æ­£ã§ãƒ‡ã‚°ãƒ¬ã—ã¦ã„ãŸã®ã§ä¿®æ­£ CPUID, ãã®ä»–ã®ãƒã‚§ãƒƒã‚¯ãƒ«ãƒ¼ãƒãƒ³ã‚’nova.scheduler.manager.live_migrationã«è¿½åŠ  * nova.compute.managerãŒã“れã¾ã§ã®ä¿®æ­£ã§ãƒ‡ã‚°ãƒ¬ã—ã¦ã„ãŸã®ã§ä¿®æ­£ CPUID, ãã®ä»–ã®ãƒã‚§ãƒƒã‚¯ãƒ«ãƒ¼ãƒãƒ³ã‚’nova.scheduler.manager.live_migrationã«è¿½åŠ  * Make nova work even when user has LANG or LC_ALL configured * merged trunk, resolved trivial conflict * merged trunk, resolved conflict * Faked out handling for shared ip groups so they return something * another typo * applied power state conversion to test * trying again * typo * fixed the os api image test for glance * updated the xenstore methods to reflect that they write to the param record of xenstore, not the actual xenstore itself * fixed typo * Merged with trunk All tests passed Could not fix some pep8 errors in nova/virt/libvirt_conn.py * fixed merge conflict * updated since dietz moved the limited function * fixed error occuring when tests used glance attributes, fixed docstrings * Merged again from trunk * fixed a few docstrings, added _() for gettext * added _() for gettext and a couple of pep8s * adds a reflection api * unit test - should be reworked * Moves implementation specific Openstack API code from the middleware to the drivers. 
Also cleans up a few areas and ensures all the API tests are passing again * PEP8 fix * One more time * Pep8 cleanup * Resolved merge conflict * Merged trunk * Trying to remove twisted dependencies, this gets everything working under nosetests * Merged Monty's branch * Merged trunk and resolved conflicts * Working diagnostics API; removed diagnostics DB model - not needed * merged trunk * merged trunk * Superfluous images include and added basic routes for shared ip groups * Simplifies and improves ldap schema * xenapi iscsi support + unittests * Fixed trunk and PEP8 cleanup * Merged trunk * Added reference in setup.py so that python setup.py test works now * merge lp:nova * better bin name, and pep8 * pep8 fixes * some pep8 fixes * removing xen/uml specific switches. If they need special treatment, we can add it * add license * delete xtra dir * move euca-get-ajax-console up one directory * merge trunk * move port range for ajaxterm to flag * more tweaks * add in license * some cleanup * rewrite proxy to not use twisted * added power state logging to nova.virt.xenapi.vm_utils * added suspend as a power state * last merge trunk before push * merge trunk, fixed unittests, added i18n strings, cleanups etc etc * And the common module * minor notes, commit before rewriting proxy with eventlet * There were a few unclaimed addresses in mailmap * first merge after i18n * remove some notes * Add Ryan Lane as well * added tests to ensure the easy api works as a backend for Compute API * fix commits from Anthony and Vish that were committed with the wrong email * remove some yields that snuck in * merge from trunk * Basic Easy API functionality * Fixes reboot (and rescue) to work even if libvirt doesn't know about the instance and the network doesn't exist * merged trunk * Fixes reboot (and rescue) to work even if libvirt doesn't know about the instance and the network doesn't exist * Adds a flag to use the X-Forwarded-For header to find the ip of the remote server. 
This is needed when you have multiple api servers with a load balancing proxy in front. It is a flag that defaults to False because if you don't have a sanitizing proxy in front, users could masquerade as other ips by passing in the header manually * Got basic xenstore operations working * Merged trunk * Modified InstanceDiagnostics and truncate action * removed extra files * merged trunk * Moves the ip allocation requests to the from the api host into calls to the network host made from the compute host * pep8 fix * merged trunk and fixed conflicts * Accidentally yanked the datetime line in auth * remove extra files that slipped in * merged trunk * add missing flag * Optimize creation of nwfilter rules so they aren't constantly being recreated * use libvirt python bindings instead of system call * fixed more conflicts * merged trunk again * add in support of openstack api * merge trunk and upgrade to cheetah templating * Optimize nwfilter creation and project filter * Merging trunk * fixed conflicts * Adding more comments regarding XS snapshots * working connection security * WSGI middleware for lockout after failed authentications of ec2 access key * Modifies nova-network to recreate important data on start * Puts the creation of nova iptables chains into the source code and cleans up rule creation. 
This makes nova play more nicely with other iptables rules that may be created on the host * Forgot the copyright info * i18n support for xs-snaps * Finished moving the middleware layers and fixed the API tests again * Zone scheduler added * Moved some things for testing * Merging trunk * Abstracted auth and ratelimiting more * Getting Snapshots to work with cloudservers command-line tool * merge trunk * Minor bug fix * Populate user_data field from run-instances call parameter, default to empty string to avoid metadata base64 decoding failure, LP: #691598 * Adding myself and Antony Messerli to the Authors file * Fixes per-project vpns (cloudpipe) and adds manage commands and support for certificate revocation * merge trunk * merge antonymesserli's changes, fixed some formatting, and added copyright notice * merged i8n and fixed conflicts * Added networking protections readme * Moved xenapi into xenserver specific directory * after trunk merge * Fixes documentation builds for gettext.. * committing so that I can merge trunk changes * Log all XenAPI actions to InstanceActions * Merged trunk * merging trunk * merging trunk * Fix doc building endpoint for gettext * All merged with trunk and let's see if a new merge prop (with no pre-req) works. * Problem was with a missplaced parentheses. ugh * Adding me in the Authors file * Populate user_data field from run-instances call parameter, default to empty string to avoid metadata base64 decoding failure, LP: #691598 * connecting ajax proxy to rabbit to allow token based security * remove a debugging line * a few more fixes after merge with trunk * merging in trunk * move prototype code from api into compute worker * Burnin support by specifying a specific host via availability_zone for running instances and volumes on * Merged trunk * This stops the nova-network dhcp ip from being added to all of the compute hosts * prototype works with kvm. 
now moving call from api to compute * Style correction * fix reboot command to work even if a host is rebooted * Filter templates and dom0 from list_instances() * removed unused import and fix docstring * merge fakerabbit fix and turn fake back on for cloud unit tests * Reworked fakerabbit backend so each connection has it's own. Moved queues and exchanges to be globals * PEP8 cleanup * Refactored duplicate rpc.cast() calls in nova/compute/api.py. Cleaned up some formatting issues * Log all XenAPI actions * correct xenapi resume call * activate fake rabbit for debugging * change virtualization to not get network through project * update db/api.py as well * don't allocate networks when getting vpn info * Added InstanceDiagnostics and InstanceActions DB models * PEP8 cleanup * Merged trunk * merge trunk * 1) Merged from trunk 2) 'type' parameter in VMHelper.fetch_image converted in enum 3) Fixed pep8 errors 4) Passed unit tests * Remove ec2 config chain and move openstack versions to top-level application * Use paste.deploy for running the api server * pep8 and removed extra imports * add missing greenthread import * add a few extra joined objects to get instance * remove extra print statements * Tests pass after cleaning up allocation process * Merging trunk * Typo fix, stubbing out to use admin project for now * Close devnull filehandle * added suspend and resume * Rewrite of vif_rules.py to meet coding standards and be more pythonic in general. 
Use absolute paths for iptables/ebtables/arptables in host-rules * Add raw disk image support * Add my @linux2go.dk address to .mailmap * fixed some pep8 business * directly copy ip allocation into compute * Minor spellchecking fixes * Adds support for Pause and Unpause of xenserver instances * Make column names more generic * don't add the ip to bridge on compute hosts * PEP8 fixups * Added InstanceActions DB model * initial commit of xenserver host protections * Merged trunk * Fixed pep8 errors * Integrated changes from Soren (raw-disk-images). Updated authors file. All tests passed * pep8 (again again) * pep8 (again) * small clean up * テストコードをレãƒã‚¸ãƒˆãƒªã«è¿½åŠ  nova.compute.manager.pre_live_migration()ã«ã¤ã„ã¦ã€ç•°å¸¸çµ‚了ã—ã¦ã„ã‚‹ã®ã«æ­£å¸¸çµ‚äº†ã®æˆ»ã‚Šå€¤ã‚’è¿”ã™ã“ã¨ãŒã‚ã£ãŸãŸã‚変更 - æ­£å¸¸çµ‚äº†ã®æˆ»ã‚Šå€¤ã‚’Trueã«å¤‰æ›´ - fixed_ipãŒè¦‹ã¤ã‹ã‚‰ãªã„ã¨ãã«ã¯RemoteErrorã‚’raiseã™ã‚‹ - ãれã«åˆã‚ã›ã¦nova.compute.manager.live_migrationも変更 * テストコードをレãƒã‚¸ãƒˆãƒªã«è¿½åŠ  nova.compute.manager.pre_live_migration()ã«ã¤ã„ã¦ã€ç•°å¸¸çµ‚了ã—ã¦ã„ã‚‹ã®ã«æ­£å¸¸çµ‚äº†ã®æˆ»ã‚Šå€¤ã‚’è¿”ã™ã“ã¨ãŒã‚ã£ãŸãŸã‚変更 - æ­£å¸¸çµ‚äº†ã®æˆ»ã‚Šå€¤ã‚’Trueã«å¤‰æ›´ - fixed_ipãŒè¦‹ã¤ã‹ã‚‰ãªã„ã¨ãã«ã¯RemoteErrorã‚’raiseã™ã‚‹ - ãれã«åˆã‚ã›ã¦nova.compute.manager.live_migrationも変更 * Support proxying api by using X-Forwarded-For * eventlet merge updates * Cleaned up TODOs, using flags now * merge trunk and minor fix(for whatever reason validator_unittest did not get removed from run_test.py) * fixed unittests and further clean-up post-eventlet merge * All API tests finally pass * Removing unneeded Trial specific code * A few more tweaks to get the OS API tests passing * Adding new install script plus changes to multinode install doc * Removing unneeded Trial specific code * Replaced the use of redis in fakeldap with a customized dict class. 
Auth unittests should now run fine without a redis server running, or without python-redis installed * Adding Ed Leafe to Authors file * Some tweaks * Adding in Ed Leafe so we can land his remove-redis test branch * Add wait_for_vhd_coalesce * Some typo fixes * pep8 cleanup * Fixed some old code that was merged incorrectly * Replaced redis with a modified dict class * bug fixes * first revision after eventlet merge. Currently xenapi-unittests are broken, but everything else seems to be running okay * Integrated eventlet_merge patch * Code reviewed * XenAPI Snapshots first cut * Fixed network test (thanks Vish!) and fixed run_tests.sh * First pass at converting run_tests.py to nosetests. The network and objectstore tests don't yet work. Also, we need to manually remove the sqlite file between runs * remerged for pep8 * pep8 * merged in project-vpns to get flag changes * clean up use of iptables chains * move some flags around * add conditional bind to linux net * make sure all network data is recreated when nova-network is rebooted * merged trunk * merged trunk, fixed conflicts and tests * Added Instance Diagnostics DB model * Put flags back in nova.virt.xenapi/vm_utils * Removed unnecessary blank lines * Put flags back in vm_utils * This branch removes most of the dependencies on twisted and moves towards the plan described by https://blueprints.launchpad.net/nova/+spec/unified-service-architecture * pep8 fixes for bin * PEP8 cleanups * use getent, update docstring * pep8 fixes * reviewed the FIXMEs, and spotted an uncaught exception in volume_utils...yay! * fixed a couple of more syntax errors * Moved implementation specific stuff from the middleware into their respective modules * typo * fixed up openstack api images index and detail * fake session clean-up * Removed FakeInstance and introduced stubout for DB. Code clean-up * removed extra stuff used for debugging * Restore code which was changed for testing reasons to the original state. 
Kudos to Armando for spotting this * Make nova work even when user has LANG or LC_ALL configured * Merged changes from trunk into the branch * Hostテーブルã®ã‚«ãƒ©ãƒ åを修正 FlatManager, FlatDHCPManagerã«å¯¾å¿œ * merged with trunk. fixed compute.pause test * fixup after merge with trunk * memcached requires strings not unicode * Fix 688220 Added dependency on Twisted>=10.1.0 to pip-requires * Make sure we properly close the bzr WorkingTree in our Authors up-to-datedness unit test * fixes for xenapi (thanks sandywalsh) * clean up tests and add overriden time method to utils * merged from upstream * add missing import * Adding back in openssh-lpk schema, as keys will likely be stored in LDAP again * basic conversion of xs-pause to eventlet done * brougth clean-up from unittests branch and tests * I made pep8 happy * * code cleanup * revised unittest approach * added stubout and a number of tests * clean up code to use timeout instead of two keys * final cleanup * Restore alphabetical order in Authors file * removed temporary comment lines * Lots of PEP-8 work * refresh_security_group renamed to refresh_security_group_rules * added volume tests and extended fake to support them * Make sure the new, consolidated template gets included * Make sure we unlock the bzr tree again in the authors unit test * The ppa was moved. This updates nova.sh to reflect that * merged upstream * remove some logging * Merged from trunk and fixed merge issues. Also fixed pep8 issues * Lockout middleware for ec2 api * updates per review * Initial work on i18n. This adds the installation of the nova domain in gettext to all the "endpoints", which are all the bin/* files and run_tests.py * For some reason, I forgot to commit the other endpoints.. * Remove default_{kernel,ramdisk} flags. They are not used anymore * Don't attempt to fiddle with partitions for whole-disk-images * pep8 * Includes architecture on register. 
Additionally removes a couple lines of cruft * nothing * nothing * nothing * support for pv guests (in progress) * merge trunk * Now that we have a templating engine, let's use it. Consolidate all the libvirt templates into one, extending the unit tests to make sure I didn't mess up * first cut of unittest framework for xenapi * Added my contacts to Authors file * final cleanup, after moving unittest work into another branch * fixup after merge with trunk * added callback param to fake_conn * added not implemented stubs for libvirt * merge with trey tests * Fixed power state update with Twisted callback * simplified version using original logic * moving xenapi unittests changes into another branch * Adds support to the ec2 api for filtering describe volumes by volume_ids * Added LiveCD info as well as some changes to reflect consolidation of .conf files * Fix exception throwing with wrong instance type * Add myself * removing imports that should have not been there * second round for unit testing framework * Added Twisted version dependency into pip-requires * only needs work for distinguishing pv from hvm * Move security group refresh logic into ComputeAPI * Refactored smoketests to use novarc environment and to separate user and admin specific tests * Changed OpenStack API auth layer to inject a RequestContext rather than building one everywhere we need it * Elaborate a bit on ipsets comment * Final round of marking translation strings * First round of i18n-ifying strings in Nova * Initial i18n commit for endpoints. All endpoints must install gettext, which injects the _ function into the builtins * Fixed spelling errors in index.rst * fix pep8 * Includes kernel and ramdisk on register. Additinally removes a couple lines of cruft * port new patches * merge-a-tat-tat upstream to this branch * Format fixes and modification of Vish's email address * There is always the odd change that one forgets! 
* * pylint fixes * code clean-up * first cut for xenapi unit tests * added pause and unpause to fake connection * merged changes from sandy's branch * added unittest for pause * add back utils.default_flagflie * removed a few more references to twisted * formatting and naming cleanup * remove service and rename service_eventlet to service * get service unittests runnning again * whitespace fix * make nova binaries use eventlet * Converted the instance table to use a uuid instead of a auto_increment ID and a random internal_id. I had to use a String(32) column with hex and not a String(16) with bytes because SQLAlchemy doesn't like non-unicode strings going in for String types. We could try another type, but I didn't want a primary_key on blob types * remove debug messages * merge with trey * pause and unpause code/tests in place. To the point it stuffs request in the queue * import module and not classe directely as per Soren recommendation * Make XenServer VM diagnostics available through nova.virt.xenapi * Merged trunk * Added exception handling to get_rrd() * Changed OpenStack API auth layer to inject a RequestContext rather than building one everywhere we need it * changed resume to unpause * Import module instead of function * filter describe volumes by supplied ids. 
Includes unittest * merging sandy's branch * Make get_diagnostics async * raw instances can now be launched in xenapi (only as hvm at the moment) * pause from compute.manager <-> xenapi * Merged Armando's XenAPI fix * merge with trunk to pull in admin-api branch * Flag to define which operations are exposed in the OpenStack API, disabling all others * Fixed Authors conflict and re-merged with trunk * fixes exception throwing with wrong instance type * Ignore security group rules that reference foreign security groups * fixed how the XenAPI library is loaded * remove some unused files * port volume manager to eventlet also * intermediate commit to checkpoint progress * some pylint caught changes to compute * added to Authors * adds bzr to the list of dependencies in pip-require so that upon checkout using run_tests.sh succeeds * merge conflict * merged upstream changes * add bzr to the dev dependencies * Fixed docstrings * Merged trunk * Got get_diagnostics in working order * merged updates to trunk * merge trunk * typo fix * removing extraneous config ilnes * Finished cleaning up the openstack servers API, it no longer touches the database directly. Also cleaned up similar things in ec2 API and refactored a couple methods in nova.compute.api to accommodate this work * Pushed terminate instance and network manager/topic methods into network.compute.api * Merged trunk * Moved the reboot/rescue methods into nova.compute.api * PEP8 fixes * Setting the default schema version to the new schema * Adding support for choosing a schema version, so that users can more easily migrate from an old schema to the new schema * merged with trunk. All clear! * Removing novaProject from the schema. This change may look odd at first; here's how it works: * test commit * コメントを除去 README.live_migration.txtã®ãƒ¬ãƒ“ãƒ¥ãƒ¼çµæžœã‚’修正 * This change adds better support for LDAP integration with pre-existing LDAP infrastructures. 
A new configuration option has been added to specify the LDAP driver should only modify/add/delete attributes for user entries * More pep8 fixes to remove deprecated functions * pep8 fix * Clarifying previously commited exception message * Raising an exception if the user doesn't exist before trying to modify its attributes * Removing redundant check * Added livecd instructions plus fixed references to .conf files * pylint fixes * Initial diagnostics import -- needs testing and cleanup * Added a script to use OpenDJ as an LDAP server instead of OpenLDAP. Also modified nova.sh to add an USE_OPENDJ option, that will be checked when USE_LDAP is set * Reverting last change * a few more things ironed out * Make sure Authors check also works for pending merges (otherwise stuff can get merged that will make the next merge fail this check) * It looks like Soren fixed the author file, can I hit the commit button? * merge trunk * Make sure Authors check also works for pending merges (otherwise stuff can get merged that will make the next merge fail this check) * Add a helpful error message to nova-manage in case of NoMoreNetworks * Add Ryan Lucio to Authors * Adding myself to the authors list * Add Ryan Lucio to Authors * Addresses bug 677475 by changing the DB column for internal_id in the instances table to be unsigned * importing XenAPI module loaded late * Added docstring for get_instances * small fixes on Exception handling * first test commit * and yet another pylint fix * fixed pylint violations that slipped out from a previous check * * merged with lp:~armando-migliaccio/nova/xenapi-refactoring * fixed pylint score * complied with HACKING guidelines * addressed review comments, complied with HACKING guidelines * adding README.livemigration.txt * rev439ベースã«ãƒ©ã‚¤ãƒ–ãƒžã‚¤ã‚°ãƒ¬ãƒ¼ã‚·ãƒ§ãƒ³ã®æ©Ÿèƒ½ã‚’マージ ã“ã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã¯EBSãªã—ã€CPUフラグã®ãƒã‚§ãƒƒã‚¯ãªã— * modified a few files * Fixed conflicts with gundlach's fixes * Remove dead test code * Add iptables based 
security groups implementation * Merged gundlach's fixes * Don't wrap HTTPAccepted in a fault. Correctly pass kwargs to update_instance * fixed import module in __init__.py * minor changes to docstrings * added interim solution for target discovery. Now info can either be passed via flags or discovered via iscsiadm. Long term solution is to add a few more fields to the db in the iscsi_target table with the necessary info and modify the iscsi driver to set them * merge with lp:~armando-migliaccio/nova/xenapi-refactoring * merge trunk * moved XenAPI namespace definition into xenapi/__init__.py * pylint and pep8 fixes * Decreased the maximum value for instance-id generation from uint32 to int32 to avoid truncation when being entered into the instance table. Reverted fix to make internal_id column a uint * Finished cleaning up the openstack servers API, it no longer touches the database directly. Also cleaned up similar things in ec2 API and refactored a couple methods in nova.compute.api to accomodate this work * Merged reboot-rescue into network-manager * Merged trunk * Fixes a missing step (nova-manage network create IP/nn n nn) in the single-node install guide * Tired of seeing various test files in bzr stat * Updated sqlalchemy model to make the internal_id column of the instances table as unsigned integer * * Removes unused schema * Removes MUST uid from novaUser * Changes isAdmin to isNovaAdmin * Adds two new configuration options: ** ldap_user_id_attribute, with a default of uid ** ldap_user_name_attribute, with a default of cn * ldapdriver.py has been modified to use these changes * Pushed terminate instance and network manager/topic methods into network.compute.api * Fix bugs that prevented OpenStack API from supporting server rename * pep8 * Use newfangled compute_api * Update tests to use proper id * Fixing single node install doc * Oops, update 'display_name', not 'name'. 
And un-extract-method * Correctly translate instance ids to internal_ids in some spots we neglected * Added test files to be ignored * Consolidated the start instance logic in the two API classes into a single method. This also cleans up a number of small discrepencies between the two * Moved reboot/rescue methods into nova.compute.api * Merged trunk and resolved conflicts. Again * Instances are assigned a display_name if one is not passed in -- and now, they're assigned a display_name even if None is explicitly passed in (as the EC2 API does.) * Merged trunk and resolved conflicts * Default Instance.display_name to a value even when None is explicitly passed in * Refactor nwfilter code somewhat. For iptables based firewalls, I still want to leave it to nwfilter to protect against arp, mac, and ip spoofing, so it needed a bit of a split * Add a helpful error message to nova-manage in case of NoMoreNetworks * minor refactoring after merge * merge lp:~armando-migliaccio/nova/refactoring * merge trunk * typo fix * moved flags into xenapi/novadeps.py * Add a simple abstraction for firewalls * fix nova.sh to reflect new location of ppa * Changed null_kernel flag from aki-00000000 to nokernel * Guarantee that the OpenStack API's Server-related responses will always contain a "name" value. And get rid of a redundant field in models.py * Going for a record commits per line changes ratio * Oops, internal_id isn't available until after a save. This code saves twice; if I moved it into the DB layer we could do it in one save. However, we're moving to one sqlite db per compute worker, so I'd rather have two saves in order to keep the logic in the right layer * Todd points out that the API doesn't require a display_name, so let's make a default. 
That way the OpenStack API can rest assured that its server responses will always have a name key * Adds in more documentation contributions from Citrix * Remove duplicate field and make OpenStack API return server.name for EC2-API-created instances * Move cc_host and cc_port flags into nova/network/linux_net.py. They weren't used anywhere else * Add include_package_data=True to setup.py * With utils.default_flagfile() in its old location, the flagfile isn't being read -- twistd.serve() loads flags earlier than that point. Move the utils.default_flagfile() call earlier so the flagfile is included * Removed a blank line * Broke parts of compute manager out into compute.api to separate what gets run on the API side vs the worker side * Move default_flagfile() call to where it will be parsed in time to load the flagfile * minor refactoring * Move cc_host and cc_port flags into nova/network/linux_net.py. They weren't used anywhere else * Added a script to use OpenDJ as an LDAP server instead of OpenLDAP. Also modified nova.sh to add an USE_OPENDJ option, that will be checked when USE_LDAP is set * Fixed termie's tiny bits from the prior merge request * Delete unused flag in nova.sh * Moving the openldap schema out of nova.sh into it's own files, and adding sun (opends/opendj/sun directory server/fedora ds) schema files * OpenStack API returns the wrong x-server-management-url. Fix that * Cleaned up pep8 errors * brought latest changes from trunk * iscsi volumes attach/detach complete. There is only one minor issue on how to discover targets from device_path * Fix unit tests * Fix DescribeImages EC2 API call * merged Justin Santa Barbara's raw-disk-image back into the latest trunk * If only I weren't so lazy * Rename imageSet variable to images * remove FAKE_subdomain reference * Return the correct server_management_url * Default flagfile moved in trunk recently. 
This updates nova.sh to run properly with the new flagfile location * Correctly handle imageId list passed to DescribeImages API call * update of nova.sh because default flagfile moved * merged trunk * Add a templating mechanism in the flag parsing * Adjust state_path default setting so that api unit tests find things where they used to find them * Import string instead of importing Template from string. This is how we do things * brought the xenapi refactoring in plus trunk changes * changes * pep8 fixes and further round of refactoring * Rename cloudServersFault to computeFault -- I missed this Rackspace branding when we renamed nova.api.rackspace to nova.api.openstack * Make sure templated flags work across calls to ParseNewFlags * Add include_package_data=True to setup.py * fixed deps * first cut of the refactoring of the XenAPIConnection class. Currently the class merged both the code for managing the XenAPI connection and the business logic for implementing Nova operations. If left like this, it would eventually become difficult to read, maintain and extend. The file was getting kind of big and cluttered, so a quick refactoring now will save a lot of headaches later * other round of refactoring * further refactoring * typos and pep8 fixes * first cut of the refactoring of the XenAPIConnection class. Currently the class merged both the code for managing the XenAPI connection and the business logic for implementing Nova operations. If left like this, it would eventually become difficult to read, maintain and extend. The file was getting kind of big and cluttered, so a quick refactoring now will save a lot of headaches later * PEP fixes * Adding support for modification only of user accounts * This modification should have occured in a different branch. 
Reverting * added attach_volume implementation * work on attach_volume, with a few things to iron out * A few more changes: * Fixed up some flags * Put in an updated nova.sh * Broke out metadata forwarding so it will work in flatdhcp mode * Added descriptive docstrings explaining the networking modes in more detail * small conflict resolution * first cut of changes for the attach_volume call * The image server should throw not found errors, don't need to check in compute manager * Consolidated the start instance logic in the two API classes into a single method. This also cleans up a number of small discrepencies between the two * Setting "name" back to "cn", since id and name should be separate * Adding support for modification only of user accounts * don't error on edge case where vpn has been launched but fails to get a network * Make sure all workers look for their flagfile in the same spot * Fix typo "nova.util" -> "nova.utils" * Fix typo "nova.util" -> "nova.utils" * Added a .mailmap that maps addresses in bzr to people's real, preferred e-mail addresses. (I made a few guesses along the way, feel free to adjust according to what is actually the preferred e-mail) * Add a placeholder in doc/build. Although bzr handles empty directories just fine, setuptools does not, so to actually ship this directory in the tarball, we need a file in it * Add a placeholder in doc/build. Although bzr handles empty directories just fine, setuptools does not, so to actually ship this directory in the tarball, we need a file in it * Merged trunk * pep8 * merged trunk, added recent nova.sh * fix typos in docstring * docstrings, more flags, breakout of metadata forwarding * doc/build was recently accidentally removed from VCS. 
This adds it back, which makes the docs build again * Add doc/build dir back to bzr * Make aws_access_key_id and aws_secret_access_key configurable * add vpn ping and optimize vpn list * Add an alias for Armando * the serial returned by x509 is already formatted in hex * Adding developer documentation - setting up dev environment and how to add to the OpenStack API * Add a --logdir flag that will be prepended to the logfile setting. This makes it easier to share a flagfile between multiple workers while still having separate log files * Address pep8 complaints * Address PEP8 complaints * Remove FAKE_subdomain from docs * Adding more polish * Adding developer howtos * Remove FAKE_subdomain from docs * Make aws_access_key_id and aws_secret_access_key configurable * updated nova.sh * added flat_interface for flat_dhcp binding * changed bridge_dev to vlan_interface * Add a --logdir flag that will be prepended to the logfile setting. This makes it easier to share a flagfile between multiple workers while still having separate log files * added svg files (state.svg is missing because its source is a screen snapshot) * Unify the location of the default flagfile. Not all workers called utils.default_flagfile, and nova-manage explicitly said to use the one in /etc/nova/nova-manage.conf * Set and use AMQP retry interval and max retry FLAGS * Incorporating security groups info * Rename cloudServersFault (rackspace branding) to computeFault. 
Fixes bug lp680285 * Use FLAGS instead of constants * Incorporating more networking info * Make time.sleep() non-blocking * Removed unnecessary continue * Update Authors and add a couple of names to .mailmap (from people who failed to set bzr whoami properly) * Refactor AMQP retry loop * Allows user to specify hosts to listen on for nova-api and -objectstore * Make sure all the libvirt templates are included in the tarball (by replacing the explicitly listed set with a glob pattern) * fixed pep8 violations * Set and use AMQP retry interval and max retry constants * pep8 violations fix * added placeholders * added test for invalid handles * Make sure all templates are included (at least rescue tempaltes didn't used to be included) * Check for running AMQP instances * Use logging.exception instead * Reverted some changes * Added some comments * Adds images (only links one in), start for a nova-manage man file, and also documents all nova-manage commands. Can we merge it in even though the man page build isn't working? * Added some comments * Check for running AMQP instances * first cut of fixes for bug #676128 * Removed .DS_Store files everywhere, begone! * Moves the EC2 API S3 image service into nova.service. There is still work to be done to make the APIs align, but this is the first step * PEP8 fixes, 2 lines were too long * First step to getting the image APIs consolidated. The EC2 API was using a one-off S3 image service wrapper, but this should be moved into the nova.image space and use the same interface as the others. There are still some mismatches between the various image service implementations, but this patch was getting large and wanted to keep it within a resonable size * Improved Pylint Score * Fixes improper display of api error messages that happen to be unicode * Make sure that the response body is a string and not unicode * Soren updated setup.py so that the man page builds. 
Will continue working on man pages for nova-compute and nova-network * Overwrite build_sphinx, making it run once for each of the html and man builders * fixes flatdhcp, updates nova.sh, allows for empty bridge device * Update version to 2011.1 as that is the version we expect to release next * really adding images * adding images * Documenting all nova-manage commands * Documenting all nova-manage commands * Fixes eventlet race condition in cloud tests * fix greenthread race conditions in trunk and floating ip leakage * Testing man page build through conf.py * Improved Pylint Score * adjusting images size and bulleted list * merged with trunk * small edit * Further editing and added images * Update version to 2011.1 as that is the version we expect to release next * ec2_api commands for describe_addresses and associate_address are broken in trunk. This happened during the switch to ec2_id and internal_id. We clearly didn't have any unit tests for this, so I've added a couple in addition to the three line change to actually fix the bugs * delete floating ips after tests * remove extra line and ref. to LOG that doesn't exist * fix leaking floating ip from network unittests and use of fakeldap driver * Adds nova-debug to tools directory, for debugging of instances that lose networking * fixes errors in describe address and associate address. Adds test cases * Ryan_Lane's code to handle /etc/network not existing when we try to inject /etc/network/interfaces into an image * pep8 * First dump of content related to Nova RPC and RabbitMQ * Add docstrings to any methods I touch * pep8 * PEP8 fixes * added myself to Authors file. Enjoy spiders * Changed from fine-grained operation control to binary admin on/off setting * Changed from fine-grained operation control to binary admin on/off setting * Lots of documentation and docstring updates * The docs are just going to be wrong for now. 
I'll file a bug upstream * Change how wsgified doc wrapping happens to fix test * merge to trunk * pep8 * Adding contributors and names * merge with trunk * base commit * saw a duplicate import ... statement in the code while reading through unit tests - this removes the dupe * removed redundant unit test import * add in bzr link * adding a bit more networking documentation * remove tab * fix title * tweak * Fix heading * merge in anne's changes * tweak * Just a few more edits, misspellings and the like * fix spacing to enable block * merge to remote * unify env syntax * Add sample puppet scripts * fix install guide * getting started * create SPHINX_DEBUG env var. Setting this will disable aggressive autodoc generation. Also provide some sample for P syntax * fix conf file from earlier merge * notes, and add code to enable sorted "..todo:: P[1-5] xyz" syntax * merge in more networking docs - still a work in progress * anne's changes to the networking documentation * Updated Networking doc * anne gentle's changes to community page * merge in heckj's corrections to multi-node install * Added a .mailmap that maps addresses in bzr to people's real, preferred e-mail addresses. 
(I made a few guesses along the way, feel free to adjust according to what is actually the preferred e-mail) * Updated community.rst to fix a link to the IRC logs * merging in changes from ~anso/nova/trunkdoc * fixed another spacing typo causing poor rendering * fixed spacing typo causing poor rendering * merge in anne's work * add docs for ubuntu 4, 10, others * Updated Cloud101 and admonition color * merge heckj's multi install notes * working on single node install * updating install notes to reference Vish' nova.sh and installing in MYSQL * Add Flat mode doc * Add Flat mode doc * Add Flat mode doc * Add VLAN Mode doc * Add VLAN Mode doc * merge in anne's changes * home page tweaks * Updated CSS and community.rst file * modifications and additions based on doc sprint * incorporate some feedback from todd and anne * merge in trunk * working on novadoc structure * add some info on authentication and keys * Since we're autodocumenting from a sphinx ext, we can scrap it in Makefile * Use the autodoc tools in the setup.py build_sphinx toolchain * Fix include paths so setup.py build_sphinx works again * Cleanups to doc process * quieter doc building (less warnings) * File moves from "merge" of termie's branch * back out stacked merge * Doc updates: * quieter build (fewer warnings) * move api reference out of root directory * auto glob api reference into a TOC * remove old dev entries for new-fangled auto-generated docs * Normalization of Dev reference docs * Switch to module-per-file for the module index * Allow case-by-case overriding of autodocs * add exec flags, apparently bzr shelve/unshelve does not keep track of them * Build autodocs for all our libraries * add dmz to flags and change a couple defaults * Per-project vpns, certificates, and revocation * remove finished todo * Fix docstrings for wsigfied methods * fix default twitter username * shrink tweet text a bit * Document nova.sh environment * add twitter feed to the home page * Community contact info * 
small tweaks before context switch * use include to grab todd's quickstart * add in custom todo, and custom css * Format TODO items for sphinx todo extension * additions to home page * Change order of secions so puppeting is last, add more initial setup tasks * update types of services that may run on machines * Change directory structure for great justice! * Refactored smoketests to use novarc environment and to separate user and admin specific tests * start adding info to multi-node admin guide * document purpose of documentation * Getting Started Guide * Nova quickstart: move vish's novascript into contrib, and convert reademe.md to a quickstart.rst * merge trunk * Add a templating mechanism in the flag parsing. Add a state_path flag that will be used as the top-level dir for all other state (such as images, instances, buckets, networks, etc). This way you only need to change one flag to put all your state in e.g. /var/lib/nova * add missing file * Cleanup nova-manage section * have "contents" look the same as other headings * Enables the exclusive flag for DirectConsumer queues * Ensures that keys for context from the queue are passed to the context constructor as strings. This prevents hangs on older versions of python that can't handle unicode kwargs * Fix for bug #640400, enables the exclusive flag on the temporary queues * pep8 whitespace and line length fixes * make sure context keys are not unicode so they can be passed as kwargs * merged trunk * merged source * prettier theme * Added an extra argument to the objectstore listen to separate out the listening host from the connecting host * Change socket type in nova.utils.get_my_ip() to SOCK_DGRAM. This way, we don't actually have to set up a connection. 
Also, change the destination host to an IP (chose one of Google's DNS's at random) rather than a hostname, so we avoid doing a DNS lookup * Fix for bug#613264, allowing hosts to be specified for nova-api and objectstore listeners * Fixes issue with security groups not being associated with instances * Doc cleanups * Fix flags help display * Change socket type in nova.utils.get_my_ip() to SOCK_DGRAM. This way, we don't actually have to set up a connection. Also, change the destination host to an IP (chose one of Google's DNS's at random) rather than a hostname, so we avoid doing a DNS lookup * ISCSI Volume support * merged * more descriptive title for cloudpipe * update of the architecture and fix some links * Fixes after trunk merge * removed some old instructions and updated concepts * merge * Documentation on Services, Managers, and Drivers * Document final undocumented python modules * merged trunk * cloudpipe docs * Fixed --help display for non-twisted bin/* commands * Adds support for multiple API ports, one for each API type (OS, EC2) * Fixed tests to work with new default API argument * Added support for OpenStack and EC2 APIs to run on different ports * More docs * Language change for conformity * Add ec2 api docs * Exceptions docs * API endpoint documentation * basics to get proxied ajaxterm working with virsh * :noindex: on the fakes page for virt.fakes which is included in compute.rst * Virt documentation * Change retrieval of security groups from kwargs so they are associated properly and add test to verify * don't check for vgroup in fake mode * merged trunk, just in case * Update compute/disk.py docs * Change volume TODO list * Volume documentation * Remove fakes duplication * Update database docs * Add support for google analytics to only the hudson-produced docs * Changes to conf.py * Updated location of layout.html and change conf.py to use a build variable * Update database page a bit * Fakes cleanup (stop duplicate autodoc of FakeAOEDriver) * 
Document Fakes * Remove "nova Packages and Dependencies" * Finished TODO item * Pep-257 * Pep-257 cleanups * Clean up todos and the like for docs * A shell script for showing modules that aren't documented in .rst files * merge trunkdoc * link binaries section to concepts * :func: links to python functions in the documentation * Todo cleanups in docs * cleanup todos * fix title levels * wip architecture, a few auth formatting fixes, binaries, and overview * volume cleanups * Remove objectstore, not referenced anywhere * Clean up volumes / storage info * Moves db writes into compute manager class. Cleans up sqlalchemy model/api to remove redundant calls for updating what is really a dict * Another heading was too distracting, use instead * Fix underlining -> heading in rst file * Whitespace and docstring cleanups * Remove outdated endpoint documentation * Clean up indentation error by preformatting * Add missing rst file * clean up the compute documentation a bit * Remove unused updated_data variable * Fix wiki link * added nova-manage docs * merged and fixed conflicts * updates to auth, concepts, and network, fix of docstring * cleanup rrd doc generation * Doc skeleton from collaborative etherpad hack session * OK, let's try this one more time * Doc updates * updates from review, fix models.get and note about exception raising * Style cleanups and review from Eric * New structure for documentation * Fixes PEP8 violations from the last few merges * More PEP8 fixes that were introduced in the last couple commits * Adding Google Analytics code to nova.openstack.org * Fixes service unit tests after tornado excision * Added Google Analytics code * renamed target_id to iscsi_target * merged gundlach's excision * Oops, didn't mean to check this one in. 
Ninja-patch * Delete BaseTestCase and with it the last reference to tornado * fix completely broken ServiceTestCase * Removes some cruft from sqlalchemy/models.py like unused imports and the unused str_id method * Adds rescue and unrescue commands * actually remove the conditional * fix tests by removing missed reference to prefix and unnecessary conditional in generate_uid * Making net injection create /etc/network if non-existant * Documentation was missing; added * Moving the openldap schema out of nova.sh into it's own files, and adding sun (opends/opendj/sun directory server/fedora ds) schema files * validates device parameter for attach-volume * add nova-debug to setup.py * nova-debug, relaunch an instance with a serial console * Remove the last vestigial bits of tornado code still in use * pep8 cleanup * print the exception on fail, because it doesn't seem to reraise it * use libvirt connection for attaching disks and avoid the symlink * update error message * Exceptions in the OpenStack API will be converted to Faults as they should be, rather than barfing a stack trace to the user * pep8 * pep8 * Duplicate the two trivial escaping functions remaining from tornado's code and remove the dependency * more bugfixes, flag for local volumes * fix bugs, describe volumes, detach on terminate * ISCSI Volume support * Removed unused imports and left over references to str_id * logging.warn not raise logging.Warn * whitespace * move create_console to cloud.py from admin.py * merge lp:nova * add NotFound to fake.py and document it * add in the xen rescue template * pep 8 cleanup and typo in resize * add methods to cloud for rescue and unrescue * update tests * merged trunk and fixed conflicts/changes * part way through porting the codebase off of twisted * Another pep8 cleanup branch for nova/tests, should be merged after lp:~eday/nova/pep8-fixes-other. After this, the pep8 violation count is 0! 
* Changes block size for dd to a reasonable number * Another pep8 cleanup branch for nova/api, should be merged after lp:~eday/nova/pep8-fixes * Created Authors file * Actually adding Authors file * Created Authors file and added to manifest for Austin Release * speed up disk generation by increasing block size * PEP8 cleanup in nova/tests, except for tests. There should be no functional changes here, just style changes to get violations down * PEP8 cleanup in nova/*, except for tests. There should be no functional changes here, just style changes to get violations down * PEP8 cleanup in nova/db. There should be no functional changes here, just style changes to get violations down * PEP8 cleanup in nova/api. There should be no functional changes here, just style changes to get violations down * PEP8 and pylint cleanup. There should be no functional changes here, just style changes to get violations down * Moves db writes into compute manager class. Cleans up sqlalchemy model/api to remove redundant calls for updating what is really a dict * validate device in AttachDisk * Cleanup of doc for dependencies (redis optional, remove tornado, etc). 
Please check for accuracy * Delays the creation of the looping calls that that check the queue until startService is called * Made updates based on review comments * Authorize image access instead of just blindly giving it away * Checks the pid of dnsmasq to make sure it is actually referring to the right process * change boto version from 1.9b1 to 1.9b in pip-requires * Check the pid to make sure it refers to the correct dnsmasq process * make sure looping calls are created after service starts and add some tests to verify service delegation works * fix typo in boto line of pip-requires * Updated documentation * Update version set in setup.py to 2010.1 in preparation for Austin release * Also update version in docs * Update version to 2010.1 in preparation for Austin release * * Fills out the Parallax/Glance API calls for update/create/delete and adds unit tests for them. * Modifies the ImageController and GlanceImageService/LocalImageService calls to use index and detail routes to comply perfectly with the RS/OpenStack API * Makes disk.partition resize root drive to 10G, unless it is m1.tiny which just leaves it as is. Larger images are just used as is * reverted python-boto version in pip-requires to 1.9b1 * Construct exception instead of raising a class * Authorize Image before download * Add unit test for XML requests converting errors to Faults * Fixes https://bugs.launchpad.net/nova/+bug/663551 by catching exceptions at the top level of the API and turning them into Faults * Adds reasonable default local storage gb to instance sizes * reverted python-boto version in pip-requires to 1.9b1.\ * Fix typo in test case * Remember to call limited() on detail() in image controller * Makes nova-dhcpbridge notify nova-network on old network lease updates * add reasonable gb to instance types * it is flags.DEFINE_integer, not FLAGS.define_int * Makes disk.partition resize root drive to 10G, unless it is m1.tiny which just leaves it as is. 
Larger images are just used as is * update leases on old leases as well * Adds a simple nova-manage command called scrub to deallocate the network and remove security groups for a project * Refresh MANIFEST.in to make the tarball include all the stuff that belongs in the tarball * Added test case to reproduce bug #660668 and provided a fix by using the user_id from the auth layer instead of the username header * Add the last few things to MANIFEST.in * Also add Xen template to manifest * Fix two problems with get_console_log: * libvirt has this annoying "feature" where it chown()s your console to the uid running libvirt. That gets in the way of reading it. Add a call to "sudo chown ...." right before we read it to make sure it works out well. * We were looking in the wrong directory for console.log. *blush* * This branch converts incoming data to the api into the proper type * Fixes deprecated use of context in nova-manage network create * Add a bunch of stuff to MANIFEST.in that has been added to the tree over the last couple of months * Fix the --help flag for printing help on twistd-based services * Fix two problems with get_console_log: libvirt has this annoying "feature" where it chown()s your console to the uid running libvirt. That gets in the way of reading it. We were looking in the wrong directory for console.log. *blush* * Fix for bug 660818 by adding the resource ID argument * Reorg the image services code to push glance stuff into its own directory * Fix some unit tests: * One is a race due to the polling nature of rpc in eventlet based unit tests. * The other is a more real problem. It was caused by datastore.py being removed. It wasn't caught earlier because the .pyc file was still around on the tarmac box * Add a greenthread.sleep(0.3) in get_console_output unit test. This is needed because, for eventlet based unit tests, rpc polls, and there's a bit of a race.
We need to fix this properly later on * Perform a redisectomy on bin/nova-dhcpbridge * Removed 'and True' oddity * use context for create_networks * Make Redis completely optional: * make --help work for twistd-based services * trivial style change * prevent leakage of FLAGS changes across tests * run_tests.sh presents a prompt: * Also accept 'y' * A few more fixes for deprecations * make run_tests.sh's default perform as expected * Added test case to reproduce bug #660668 and provided a fix by using the user_id from the auth layer instead of the username header * get flags for nova-manage and fix a couple more deprecations * Fix for bug#660818, allows tests to pass since delete expects a resource ID * This branch fixes all of the deprecation warnings about empty context. It does this by adding the following fixes/features * promotes api/context.py to context.py because it is used by the whole system * adds more information to the context object * passes the context through rpc * adds a helper method for promoting to admin context (elevate()) * modifies most checks to use context.project_id instead of context.project.id to avoid trips to the database * timestamps are passed as unicode * Removed stray spaces that were causing an unnecessary diff line * merged trunk * Minimized diff, fixed formatting * remove nonexistent exception * Merged with trunk, fixed broken stuff * revert to generic exceptions * fix indent * Fixes LP Bug#660095 * Move Redis code into fakeldap, since it's the only thing that still uses it. Adjust auth unittests to skip fakeldap tests if Redis isn't around. Adjust auth unittests to actually run the fakeldap tests if Redis /is/ around * fix nosetests * Fixes a few concurrency issues with creating volumes and instances.
Most importantly it adds retries to a number of the volume shell commands and it adds a unique constraint on export_devices and a safe create so that there aren't multiple copies of export devices in the database * unit tests and fix * call stuff project_id instead of project * review fixes * fix context in bin files * add scrub command to clean up networks and sec groups * merged trunk * merged concurrency * review comments * Added a unit test but not integrated it * merged trunk * fix remaining tests * cleaned up most of the issues * remove accidental paste * use context.project_id because it is more efficient * elevate in proper places, fix a couple of typos * merged trunk * Fixes bug 660115 * Address cerberus's comment * Fix several problems keeping AuthMiddleware from functioning in the OpenStack API * Implement the REST calls for create/update/delete in Glance * Adds unit test for WSGI image controller for OpenStack API using Glance Service * Fixes LP Bug#660095 * Xen support * Adds flat networking + dhcpserver mode * This patch removes the ugly network_index that is used by VlanManager and turns network itself into a pool. It adds support for creating the networks through an api command: nova-manage network create # creates all of the networks defined by flags or nova-manage network create 5 # create the first five networks * Newlines again, reorder imports * Remove extraneous newlines * Fix typo, fix import * merged upstream * cleanup leftover addresses * super teardown * fix tests * merged trunk * merged trunk * merged trunk * merged trunk * Revert the conversion to 64-bit ints stored in a PickleType column, because PickleType is incompatible with having a unique constraint * Revert 64 bit storage and use 32 bit again. I didn't notice that we verify that randomly created uids don't already exist in the DB, so the chance of collision isn't really an issue until we get to tens of thousands of machines. 
Even then we should only expect a few retries before finding a free ID * Add design doc, docstrings, document hyper-v wmi, python wmi usage. Adhere to pep-8 more closely * This patch adds support for EC2 security groups using libvirt's nwfilter mechanism, which in turn uses iptables and ebtables on the individual compute nodes. This has a number of benefits: * Inter-VM network traffic can take the fastest route through the network without our having to worry about getting it through a central firewall. * Not relying on a central firewall also removes a potential SPOF. * The filtering load is distributed, offering great scalability * Change internal_id from a 32 bit int to a 64 bit int * 32 bit internal_ids become 64 bit. Since there is no 64 bit native type in SqlAlchemy, we use PickleType which uses the Binary SqlAlchemy type under the hood * Make Instance.name a string again instead of an integer * Now that the ec2 id is not the same as the name of the instance, don't compare internal_id [nee ec2_id] to instance names provided by the virtualization driver. Compare names directly instead * Fix bug 659330 * Catch exception.NotFound when getting project VPN data * Improve the virt unit tests * Remove spurious project_id addition to KeyPair model * APIRequestContext.admin is no more. * Rename ec2_id_list back to instance_id to conform to EC2 argument spec * Fix bug 657001 (rename all Rackspace references to OpenStack references) * Extracts the kernel and ramdisk id from manifests and puts in into images' metadata * Fix EC2 GetConsoleOutput method and add unit tests for it * Rename rsapi to osapi, and make the default subdomain for OpenStack API calls be 'api' instead of 'rs' * Fix bug 658444 * Adds --force option to run_tests.sh to clear virtualenv. Useful when dependencies change * If machine manifest includes a kernel and/or ramdisk id, include it in the image's metadata * Rename ec2 get_console_output's instance ID argument to 'instance_id'. 
It's passed as a kwarg, based on key in the http query, so it must be named this way * if using local copy (use_s3=false) we need to know where to find the image * curl not available on Windows for s3 download. also os-agnostic local copy * Register the Hyper-V module into the list of virt modules * hyper-v driver created * Twisted pidfile and other flag parameters simply do not function on Windows * Renames every instance of "rackspace" in the API and test code base. Also includes a minor patch for the API Servers controller to use images correctly in the absence of Glance * That's what I get for not using a good vimrc * Mass renaming * Start stripping out the translators * Remove redis dependency from RS Images API * Remove redis dependency from Images controller * Since FLAGS.images_path was not set for nova-compute, I could not launch instances due to an exception at _fetch_local_image() trying to access to it. I think that this is the reason of Bug655217 * Imported images_path from nova.objectstore for nova-compute. Without its setting, it fails to launch instances by exception at _fetch_local_image * Defined images_path for nova-compute. Without its setting, it fails to launch instances by exception at _fetch_local_image * Cleans up a broken servers unit test * Huge sweeping changes * Adds stubs and tests for GlanceImageService and LocalImageService. Adds basic plumbing for ParallaxClient and TellerClient and hooks that into the GlanceImageService * Typo * Missed an ec2_id conversion to internal_id * Cleanup around the rackspace API for the ec2 to internal_id transition * merge prop fixes * A little more clean up * Replace model.Instance.ec2_id with an integer internal_id so that both APIs can represent the ID to external users * Fix clause comparing id to internal_id * Adds unit test for calling show() on a non-existing image. 
Changes return from real Parallax service per sirp's recommendation for actual returned dict() values * Remove debugging code, and move import to the top * Make (some) cloud unit tests run without a full-blown set up * Stub out ec2.images.list() for unit tests * Make rpc calls work in unit tests by adding extra declare_consumer and consume methods on the FakeRabbit backend * Add a connect_to_eventlet method * Un-twistedify get_console_output * Create and destroy user appropriately. Remove security group related tests (since they haven't been merged yet) * Run the virt tests by default * Keep handles to loggers open after daemonizing * merged trunk and fixed tests * Cleans up the unit tests that are meant to be run with nosetests * Update Parallax default port number to match Glance * One last bad line * merge from gundlach ec2 conversion * Adds ParallaxClient and TellerClient plumbing for GlanceImageService. Adds stubs FakeParallaxClient and unit tests for LocalImageService and GlanceImageService * Fix broken unit tests * Matches changes in the database / model layer with corresponding fixes to nova.virt.xenapi * Replace the embarrassingly crude string based tests for to_xml with some more sensible ElementTree based stuff * A shiny, new Auth driver backed by SQLAlchemy. Read it and weep. I did * Move manager_class instantiation and db.service_* calls out of nova.service.Service.__init__ into a new nova.service.Service.startService method which gets called by twisted.
This delays opening db connections (and thus sqlite file creation) until after privileges have been shed by twisted * Add pylint thingamajig for startService (name defined by Twisted) * Revert r312 * Add a context of None to the call to db.instance_get_all * Honour the --verbose flag by setting the logging level to DEBUG * Accidentally renamed volume related stuff * More clean up and conflict resolution * Move manager_class instantiation and db.service_* calls out of nova.service.Service.__init__ into a new nova.service.Service.startService method which gets called by twisted. This delays opening db connections (and thus sqlite file creation) until after privileges have been shed by twisted * Bug #653560: AttributeError in VlanManager.periodic_tasks * Bug #653534: NameError on session_get in sqlalchemy.api.service_update * Fixes to address the following issues: * s/APIRequestContext/get_admin_context/ <-- sudo for request contexts * Bug #654034: nova-manage doesn't honour --verbose flag * Bug #654025: nova-manage project zip and nova-manage vpn list broken by change in DB semantics when networks are missing * Bug #654023: nova-manage vpn commands broken, resulting in erroneous "Wrong number of arguments supplied" message * fix typo in setup_compute_network * pack and unpack context * add missing to_dict * Bug #653651: XenAPI support completely broken by orm-refactor merge * Bug #653560: AttributeError in VlanManager.periodic_tasks * Bug #653534: NameError on session_get in sqlalchemy.api.service_update * Adjust db api usage according to recent refactoring * Make _dhcp_file ensure the existence of the directory containing the files it returns * Keep handles to loggers open after daemonizing * Adds BaseImageService and flag to control image service loading. 
Adds unit test for local image service * Cleans up the unit tests that are meant to be run with nosetests * Refactor sqlalchemy api to perform contextual authorization * automatically convert strings passed into the api into their respective original values * Fix the deprecation warnings for passing no context * Address a few comments from Todd * Merged trunk * Locked down fixed ips and improved network tests * merged remove-network-index * Fixed flat network manager with network index gone * merged trunk * show project ids for groups instead of user ids * create a new manager for flat networking including dhcp * First attempt at a uuid generator -- but we've lost a 'topic' input so i don't know what that did * Find other places in the code that used ec2_id or get_instance_by_ec2_id and use internal_id as appropriate * Convert EC2 cloud.py from assuming that EC2 IDs are stored directly in the database, to assuming that EC2 IDs should be converted to internal IDs * Method cleanup and fixing the servers tests * merged trunk, removed extra quotas * Adds support for periodic_tasks on manager that are regularly called by the service and recovers fixed_ips that didn't get disassociated properly * Replace database instance 'ec2_id' with 'internal_id' throughout the nova.db package. internal_id is now an integer -- we need to figure out how to make this a bigint or something * merged trunk * refactoring * refactoring * Includes changes for creating instances via the Rackspace API. 
Utilizes much of the existing EC2 functionality to power the Rackspace side of things, at least for now * Get rid of mention of mongo, since we are using openstack/swift * Mongo bad, swift good * Add a DB backend for auth manager * Bug #652103: NameError in exception handler in sqlalchemy API layer * Bug #652103: NameError in exception handler in sqlalchemy API layer * Bug #651887: xenapi list_instances completely broken * Grabbed the wrong copyright info * Cleaned up db/api.py * Refactored APIRequestContext * Bug #651887: xenapi list_instances completely broken * Simplified authorization with decorators" " * Removed deprecated bits from NovaBase * Wired up context auth for keypairs * Completed quota context auth * Finished context auth for network * Finished instance context auth * Finished instance context auth * Made network tests pass again * Whoops, forgot the exception handling bit * Missed a few attributes while mirroring the ec2 instance spin up * pylint and pep8 cleanup * Forgot the context module * Some minor cleanup * Servers stuff * merge rsapi_reboot from gundlach * Wired up context auth for services * Server creation up to, but not including, network configuration * Progress on volumes Fixed foreign keys to respect deleted flag * Support reboot in api.rackspace by extracting reboot function from api.ec2 into api.cloud * Make Fault raiseable, and add a test to verify that * Make Fault raiseable by inheriting from webob.exc.HTTPException * Related: https://code.launchpad.net/~anso/nova/authupdate/+merge/36925 * Remove debuggish print statement * Make update work correctly * Server update name and password * Support the pagination interface in RS API -- the &offset and &limit parameters are now recognized * Update from trunk to handle one-line merge conflict * Support fault notation in error messages in the RS API * Limit entity lists by &offset and &limit * After update from trunk, a few more exceptions that need to be converted to Faults * fix ordering 
of rules to actually allow out and drop in * fix the primary and secondary join * autocreate the models and use security_groups * Began wiring up context authorization * Apply patch from Vish to fix a hardcoded id in the unit tests * removed a few extra items * merged with soren's branch * fix loading to ignore deleted items * Add user-editable name & notes/description to volumes, instances, and images * merged trunk * patch for test * fix join and misnamed method * fix eagerload to be joins that filter by deleted == False * * Create an AuthManager#update_user method to change keys and admin status. * Refactor the auth_unittest to not care about test order * Expose the update_user method via nova-manage * Updates the fix-iptables branch with a number of bugfixes * Fixes reversed arguments in nova-manage project environment * Makes sure that multiple copies of nova-network don't create multiple copies of the same NetworkIndex * Fix a few errors in api calls related to mistyped database methods for floating_ips: specifically describe addresses and associate address * Merged Termie's branch that starts tornado removal and fixed rpc test cases for twisted.
Nothing is testing the Eventlet version of rpc.call though yet * Adds bpython support to nova-manage shell, because it is super sexy * Adds a disabled flag to service model and check for it when scheduling instances and volumes * Adds bpython support to nova-manage shell, because it is super sexy * Added random ec2 style id's for volumes and instances * fix security group revoke * Fixed tests * Removed str_id from FixedIp references * missed a comma * improved commenting * Fault support * fix flag defaults * typo s/boo/bool * merged and removed duplicated methods * fixed merge conflicts * removed extra code that slipped in from a test branch * Fixed name property on instance model * Implementation of the Rackspace servers API controller * Added checks for uniqueness for ec2 id * fix test for editable image * Add authorization info for cloud endpoints * Remove TODO, since apparently newer boto doesn't die on extra fields * add disabled column to services and check for it in scheduler * Hook the AuthManager#modify_user method into nova-manage commands * Refactored adminclient to support multiple regions * merged network-lease-fix * merged floating-ips * move default group creation to api * Implemented random instance and volume strings for ec2 api * Adds --force option to run_tests.sh to clear virtualenv. Useful when dependencies change * merge from trunk * Instance & Image renaming fixes * merge from gundlach * Testing testing testing * get rid of network indexes and make networks into a pool * Add Serializer.deserialize(xml_or_json_string) * merged trunk * return a value if possible from export_device_create_safe * merged floating-ip-by-project * merged network-lease-fix * merged trunk * Stop trying to install nova-api-new (it's gone). Install nova-scheduler * Call out to 'sudo kill' instead of using os.kill.
dnsmasq runs as root or nobody, nova may or may not be running as root, so os.kill won't work * Make sure we also start dnsmasq on startup if we're managing networks * Improve unit tests for network filtering. It now tracks recursive filter dependencies, so even if we change the filter layering, it still correctly checks for the presence of the arp, mac, and ip spoofing filters * Make sure arguments to string format are in the correct order * Make the incoming blocking rules take precedence over the output accept rules * db api call to get instances by user and user checking in each of the server actions * More cleanup, backup_schedules controller, server details and the beginnings of the servers action route * This is getting ridiculous * Power state mapping * Set priority of security group rules to 300 to make sure they override the defaults * Recreate ensure_security_group_filter. Needed for refresh * Clean up nwfilter code. Move our filters into the ipv4 chain * If neither a security group nor a cidr has been passed, assume cidr=0.0.0.0/0 * More re-work around the ORM changes and testing * Support content type detection in serializer * If an instance never got scheduled for whatever reason, its host will turn up as None. Filter those out to make sure refresh works * Only call _on_set_network_host on nova-network hosts * Allow DHCP requests through, pass the IP of the gateway as the dhcp server * Add a flag the specifies where to find nova-dhcpbridge * Ensure dnsmasq can read updates to dnsmasq conffile * Set up network at manager instantiation time to ensure we're ready to handle the networks we're already supposed to handle * Add db api methods for retrieving the networks for which a host is the designated network host * Apply IP configuration to bridge regardless of whether it existed before. 
This fixes a race condition on hosts running both compute and network where, if compute got there first, it would set up the bridge, but not do IP configuration (because that's meant to happen on the network host), and when network came around, it would see the interface already there and not configure it further * Removed extra logging from debugging * reorganize iptables clear and make sure use_nova_chains is a boolean * allow in and out for network and compute hosts * Modification of test stubbing to match new domain requirements for the router, and removal of the unnecessary rackspace base controller * Minor changes to be committed so trunk can be merged in * disable output drop for the moment because it is too restrictive * add forwarding ACCEPT for outgoing packets on compute host * fix a few missed calls to _confirm_rule and 80 char issues * allow mgmt ip access to api * flush the nova chains * Test the AuthManager interface explicitly, in case the user/project wrappers fail or change at some point. Those interfaces should be tested on their own * Update auth manager to have an update_user method and better tests * add a reset command * Merged Termie's branch and fixed rpc test cases for twisted. Nothing is testing the Eventlet version of rpc.call though yet * improved the shell script for iptables * Finished making admin client work for multi-region * Install nova-scheduler * nova-api-new is no more.
Don't attempt to install it * Add multi region support for adminclient * Merging in changes from rs_auth, since I needed something modern to develop on while waiting for Hudson to right itself * whatever * Put EC2 API -> eventlet back into trunk, fixing the bits that I missed when I put it into trunk on 9/21 * Apply vish's patch * Applied vish's fixes * Implementation of Rackspace token based authentication for the Openstack API * fixed a few missing params from iptables rules * removed extra line in manage * made use of nova_ chains a flag and fixed a few typos * put setup_iptables in the right dir * Fixed rpc consumer to use unique return connection to prevent overlap. This could be reworked to share a connection, but it should be a wait operation and not a fast poll like it was before. We could also keep a cache of opened connections to be used between requests * fixed a couple of typos * Re-added the ramdisk line I accidentally removed * Added a primary_key to AuthToken, fixed some unbound variables, and now all unit tests pass * Missed the model include, and fixed a broken test after the merge * Some more refactoring and another unit test * Refactored the auth branch based on review feedback * Replaced the existing Rackspace Auth Mechanism with one that mirrors the implementation in the design document * Merged gundlach's branch * renamed ipchains to iptables * merged trunk * Fixed cloudpipe lib init * merged fix-iptables * When calculating timedeltas make sure both timestamps are in UTC. For people ahead of UTC, it makes the scheduler unit tests pass. For people behind UTC, it makes their services time out after 60 seconds without a heart beat rather than X hours and 60 seconds without a heart beat (where X is the number of hours they're behind UTC) * Spot-fix endpoint reference * Wrap WSGI container in server.serve to make it properly handle command line arguments as well as daemonise properly. 
Moved api and wsgi imports in the main() function to delay their inclusion until after python-daemon has closed all the file descriptors. Without this, eventlet's epoll fd gets opened before daemonize is called and thus its fd gets closed leading to very, very, very confusing errors * Apply vish's patch * Added FLAGS.FAKE_subdomain letting you manually set the subdomain for testing on localhost * Address Vishy's comments * All timestamps should be in UTC. Without this patch, the scheduler unit tests fail for anyone sufficiently East of Greenwich * Compare project_id to '' using == (equality) rather than 'is' (identity). This is needed because '' isn't the same as u'' * Various loose ends for endpoint and tornado removal cleanup, including cloudpipe API addition, rpc.call() cleanup by removing tornado ioloop, and fixing bin/* programs. Tornado still exists as part of some test cases and those should be reworked to not require it * Re-add root and metadata request handlers to EC2 API * Re-added the ramdisk line I accidentally removed * Soren's patch to fix part of ec2 * Add user display fields to instances & volumes * Responding to eday's feedback -- make a clearer inner wsgi app * Added a primary_key to AuthToken, fixed some unbound variables, and now all unit tests pass * merge from trunk * typo in instance_get * typo in instance_get * User updatable name & description for images * merged trunk and fixed errors * cleaned up exception handling for fixed_ip_get * Added server index and detail differentiation * merged trunk * typo s/an/a * Reenable access_unittest now that it works with new rbac * Rewrite rbac tests to use Authorizer middleware * Missed the model include, and fixed a broken test after the merge * Delete nova.endpoint module, which used Tornado to serve up the Amazon EC2 API. Replace it with nova.api.ec2 module, which serves up the same API via a WSGI app in Eventlet.
Convert relevant unit tests from Twisted to eventlet * Remove eventlet test, now that eventlet 0.9.10 has indeed been replaced by 0.9.12 per mtaylor * In desperation, I'm raising eventlet.__version__ so I can see why the trunk tests are failing * merged trunk * bpython is amazing * Fix quota unittest and don't run rbac unit tests for the moment * merged trunk * Some more refactoring and another unit test * Implements quotas with overrides for instances, volumes, and floating ips * Renamed cc_ip flag to cc_host * Moves keypairs out of ldap and into the common datastore * Fixes server error on get metadata when instances are started without keypairs * allows api servers to have a list of regions, allowing multi-cluster support if you have a shared image store and user database * Don't use something the shell will escape as a separator. | is now = * Added modify project command to auth manager to allow changing of project manager and description * merged trunk * merged trunk * Refactored the auth branch based on review feedback * Whitespace fixes * Support querying version list, per the RS API spec. Fixes bug 613117 * Undo run_tests.py modification in the hopes of making this merge * Add a RateLimitingMiddleware to the Rackspace API, implementing the rate limits as defined by the current Cloud Servers spec. The Middleware can do rate counting in memory, or (for deployments that have more than one API Server) can offload to a rate limiting service * Use assertRaises * A small fix to the install_venv program to allow us to run it on the tarmac box as part of the tarmac build * Removes second copy of ProcessExecutionError that creeped in during a bad merge * Adds an omitted yield in compute manager detach_volume * Move the code that extracts the console output into the virt drivers. Move the code that formats it up into the API layer. 
Add support for Xen console * Add Xen template and use it by default if libvirt_type=xen * added rescue mode support and made reboot work from any state * Adds timing fields to instances and volumes to track launch times and schedule times * Fixes two errors in cloud.py in the nova_orm branch: a) self.network is actually called network_manager b) the logic for describe-instances check on is_admin was reversed * Adds timing fields to instances and volumes to track launch times and schedule times * updated docstring * add in a few comments * s/\t/ /g, and add some comments * add in support for ajaxterm console access * add security and session timeout to ajaxterm * initial commit of ajaxterm * Replaced the existing Rackspace Auth Mechanism with one that mirrors the implementation in the design document * Whitespace fixes * Added missing masquerade rules * Fix things not quite merged perfectly -- all tests now pass * Better error message on the failure of a spawned process, and it's a ProcessExecutionException irrespective of how the process is run (twisted or not) * Added iptables host initial configuration * Added iptables host initial configuration * Proposing merge to get feedback on orm refactoring. I am very interested in feedback to all of these changes * Support querying version list * Add support for middleware proxying to a ratelimiting.WSGIApp, for deployments that use more than one API Server and thus can't store ratelimiting counters in memory * Test the WSGIApp * RateLimitingMiddleware tests * Address a couple of the TODO's: We now have half-decent input validation for AuthorizeSecurityGroupIngress and RevokeDitto * Clean up use of ORM to remove the need for scoped_session * Roll back my slightly over-zealous clean up work * More ORM object cleanup * Clean up use of objects coming out of the ORM * RateLimitingMiddleware * Add ratelimiting package into Nova. 
After Austin it'll be pulled out into PyPI * When destroying a VM using the XenAPI backend, if the VM is still running (the usual case) the destroy fails. It needs to be powered-off first * Leave out the network setting from the interfaces template. It does not get passed anymore * Network model has network_str attribute * Cast process input to a str. It must not be unicode, but stuff that comes out of the database might very well be unicode, so using such a value in a template makes the whole thing unicode * Make refresh_security_groups play well with inlineCallbacks * Fix up rule generation. It turns out nwfilter gets very, very wonky indeed if you mix rules and rules. Setting a TCP rule adds an early rule to ebtables that ends up overriding the rules which are last in that table * Add a bunch of TODO's to the API implementation * Multiple security group support * Remove power state constants that have ended up duplicated following a bad merge. They were moved from nova.compute.node.Instance into nova.compute.power_state at the same time that Instance was moved into nova.compute.service. We've ended up with these constants in both places * now we can run files - thanks vish * Move vol.destroy() call out of the _check method in test_multiple_volume_race_condition test and into a callback of the DeferredList. This should fix the intermittent failure of that test. 
I /think/ test_too_many_volumes's failure was caused by test_multiple_volume_race_condition failure, since I have not been able to reproduce its failure after fixing this one * Adds 'shell run' to nova manage, which spawns a shell with flags properly imported * Finish pulling S3ImageService out of this mergeprop * Pull S3ImageService out of this mergeprop * Correctly pass ip_address to templates * Fix call to listNWFilters * Make changes to security group rules propagate to the relevant compute nodes * Filters all get defined when running an instance * added missing yield in detach_volume * multiple network controllers will not create duplicate indexes * renamed _get_quota to get_quota and moved int(size) into quota.py * add a shell to nova-manage, which respects flags (taken from django) * Move vol.destroy() call out of the _check method in test_multiple_volume_race_condition test and into a callback of the DeferredList. This should fix the intermittent failure of that test. I /think/ test_too_many_volumes's failure was caused by test_multiple_volume_race_condition failure, since I have not been able to reproduce its failure after fixing this one * removed second copy of ProcessExecutionError * move the warnings about leasing ips * simplified query * missed a space * set leased = 0 as well on disassociate update * speed up the query and make sure allocated is false * workaround for mysql select in update * Periodic callback for services and managers. 
Added code to automatically disassociate stale ip addresses * fixed typo * flag for retries on volume commands * auto all and start all exceptions should be ignored * generalized retry into try_execute * more error handling in volume driver code * handle exceptions thrown by vblade stop and vblade destroy * merged trunk * deleting is set by cloud * re added missing volume update * Integrity error is in a different exc file * allow multiple volumes to run ensure_blades without creating duplicates * fixed name for unique constraint * export devices unique * merged instance time and added better concurrency * make fixed_ip_get_by_address return the instance as well so we don't run into concurrency issues where it is disassociated in between * disassociate floating is supposed to take floating_address * speed up generation of dhcp_hosts and don't run into None errors if instance is deleted * don't allocate the same floating ip multiple times * don't allow deletion or attachment of volume unless it is available * fixed reference to misnamed method * manage command for project quotas * merged trunk * implement floating_ip_get_all_by_project and renamed db methods that get more then one to get_all_by instead of get_by * fixed reversed args in nova-manage project environment * merged scheduler * fix instance time * move volume to the scheduler * tests for volumes work * update query and test * merged quotas * use gigabytes and cores * use a string version of key name when constructing mpi dict because None doesn't work well in lookup * db not self.db * Security Group API layer cleanup * merged trunk * added terminated_at to volume and moved setting of terminated_at into cloud * remerged scheduler * merged trunk * merged trunk * merged trunk * merged trunk * fixed reversed admin logic on describe instances * fixed typo network => network_manager in cloud.py * fixed old key reference and made keypair name constistent -> key_pair * typo fixes, add flag to nova-dhcpbridge * 
fixed tests, added a flag for updating dhcp on disassociate * simplified network instance association * fix network association issue * merged trunk * improved network error case handling for fixed ips * it is called regionEndpoint, and use pipe as a separator * move keypair generation out of auth and fix tests * Fixed manager_user reference in create_project * Finished security group / project refactor * delete keypairs when a user is deleted * remove keypair from driver * moved keypairs to db using the same interface * multi-region flag for describe regions * make api error messages more readable * Refactored to security group api to support projects * set dnsName on describe * merged orm and put instance in scheduling state * just warn if an ip was already deallocated * fix mpi 500 on fixed ip * hostname should be string id * dhcpbridge needed host instead of node name * add a simple iterator to NovaBase to support converting into dictionary * Adjust a few things to make the unit tests happy again * First pass of nwfilter based security group implementation. It is not where it is supposed to be and it does not actually do anything yet * couple more errors in metadata * typo in metadata call * fixed messed up call in metadata * added modify project command to allow project manager and description to be updated * Change "exn" to "exc" to fit with the common style * Create and delete security groups works. Adding and revoking rules works. DescribeSecurityGroups returns the groups and rules. So, the API seems to be done. 
Yay * merged describe_speed * merged scheduler * set host when item is scheduled * remove print statements * removed extra quotes around instance_type * don't pass topic into schedule_run_instance * added scheduled_at to instances and volumes * quotas working and tests passing * address test almost works * quota tests * merged orm * fix unittest * merged orm * fix rare condition where describe is called before instance has an ip * merged orm * make the db creates return refs instead of ids * add missing files for quota * kwargs don't work if you prepend an underscore * merged orm, added database methods for getting volume and ip data for projects * database support for quotas * Correct style issues brought up in termie's review * mocking out quotas * don't need to pass instance_id to network on associate * floating_address is the name for the cast * merged support code from orm branch * faster describe_addresses * added floating ip commands and launched_at terminated_at, deleted_at for objects * merged orm * solution that works with this version * fix describe addresses * remove extraneous get_host calls that were requiring an extra db trip * pass volume['id'] instead of string id to delete volume * fix volume delete issue and volume hostname display * fix logging for scheduler to properly display method name * fixed logic in set_state code to stop endless loops * Authorize and Revoke access now works * list command for floating ips * merged describe speed * merged orm * floating ip commands * removed extraneous rollback * speed up describe by loading fixed and floating ips * AuthorizeSecurityGroupIngress now works * switch to using utcnow * Alright, first hole poked all the way through. 
We can now create security groups and read them back * don't fail in db if context isn't a dict, since we're still using a class based context in the api * logging for backend is now info instead of error * merged orm * merged orm * set state everywhere * put soren's fancy path code in scheduler bin as well * missing deleted ref * merged orm * merged orm * consistent naming for instance_set_state * Tests turn things into inlineCallbacks * Missed an instance of attach_to_tornado * Remove tornado-related code from almost everything * It's annoying and confusing to have to set PYTHONPATH to point to your development tree before you run any of the scripts * deleted typo * merged orm * merged orm * fixed missing paren * merge orm * make timestamps for instances and volumes, includes additions to get deleted objects from db using deleted flag * merged orm * remove end of line slashes from models.py * Make the scripts in bin/ detect if they're being run from a bzr checkout or an extracted release tarball or whatever and adjust PYTHONPATH accordingly * merged orm * merged orm branch * set state moved to db layer * updated to the new orm code * changed a few unused context to _context * a few formatting fixes and moved exception * fixed a few bugs in volume handling * merged trunk * Last of cleanup, including removing fake_storage flage * more fixes from code review * review db code cleanup * review cleanup for compute manager * first pass at cleanup rackspace/servers.py * dhcpbridge fixes from review * more fixes to session handling * few typos in updates * don't log all sql statements * one more whitespace fix * whitespace fixes * fix for getting reference on service update * clean up of session handling * New version of eventlet handles Twisted & eventlet running at the same time * fix docstrings and formatting * Oops, APIRequestContext's signature has changed * merged orm * fix floating_ip to follow standard create pattern * Add stubbed out handler for 
AuthorizeSecurityGroupIngress EC2 API call * merged orm_deux * Merged trunk * Add a clean-traffic filterref to the libvirt templates to prevent spoofing and snooping attacks from the guests * Lots of fixes to make the nova commands work properly and make datamodel work with mysql properly * Bug #630640: Duplicated power state constants * Bug #630636: XenAPI VM destroy fails when the VM is still running * removed extra equals * Just a couple of UML-only fixes: * Due to an issue with libvirt, we need to chown the disk image to root. * Just point UML's console directly at a file, and don't bother with the pty. It was only used for debugging * removed extra file and updated sql note * merged fixed format instances from orm * fixed up format_instances * merged server.py change from orm branch * reverting accidental search/replace change to server.py * merged orm * removed model from nova-manage * merged orm branch * removed references to compute.model * send ultimate topic in to scheduler * more scheduler tests * test for too many instances work * merged trunk * fix service unit tests * removed dangling files * merged orm branch * merged trunk and cleaned up test * renamed daemon to service and update db on create and destroy * pass all extra args from service to manager * fix test to specify host * inject host into manager * Servers API remodeling and serialization handling * Move nova.endpoint.images to api.ec2 and delete nova.endpoint * Cloud tests pass * OMG got api_unittests to pass * send requests to the main API instead of to the EC2 subset -- so that it can parse out the '/services/' prefix. 
Also, oops, match on path_info instead of path like we're supposed to * Remove unused APIRequestContext.handler * Use port that boto expects * merged orm branch * scheduler + unittests * removed underscores from used context * updated models a bit and removed service classes * Small typos, plus rework api_unittest to use WSGI instead of Tornado * Replace an if/else with a dict lookup to a factory method * Nurrr * Abstractified generalization mechanism * Revert the changes to the qemu libvirt template and make the appropriate changes in the UML template where they belong * Create console.log ahead of time. This ensures that the user running nova-compute maintains read privileges * This improves the changelog generated as part of "setup.py sdist". If you look at it now, it says that Tarmac has done everything and every little commit is listed. With this patch, it only logs the "top-most" commit and credits the author rather than the committer * Fix simple errors to the point where we can run the tests [but not pass] * notes -- conversion 'complete' except now the unit tests won't work and surely i have bugs :) * Moved API tests into a sub-folder of the tests/ and added a stubbed-out test declarations to mirror existing API tickets * Delete rbac.py, moving @rbac decorator knowledge into api.ec2.Authorizer WSGI middleware * Break Router() into Router() and Executor(), and put Authorizer() (currently a stub) in between them * Return error Responses properly, and don't muck with req.params -- make a copy instead * merged orm branch * pylint clean of manager and service * pylint cleanup of db classes * rename node_name to host * merged trunk * Call getInfo() instead of getVersion() on the libvirt connection object. virConnectGetVersion was not exposed properly in the python bindings until quite recently, so this makes us rather more backwards compatible * Better log formatter for Nova. 
It's just like gnuchangelog, but logs the author rather than the committer * Remove all Twisted defer references from cloud.py * Remove inlineCallbacks and yield from cloud.py, as eventlet doesn't need it * Move cloudcontroller and admincontroller into new api * Adjust setup.py to match nova-rsapi -> nova-api-new rename * small import cleanup * Get rid of some convoluted exception handling that we don't need in eventlet * First steps in reworking EC2 APIRequestHandler into separate Authenticate() and Router() WSGI apps * Call getInfo() instead of getVersion() on the libvirt connection object. virConnectGetVersion was not exposed properly in the python bindings until quite recently, so this makes us rather more backwards compatible * Fix up setup.py to match nova-rsapi -> nova-api-new rename * a little more cleanup in compute * pylint cleanup of tests * add missing manager classes * volume cleanup * more cleanup and pylint fixes * more pep8 * more pep8 * pep8 cleanup * add sqlalchemy to pip requires * merged trunk, fixed a couple errors * Delete __init__.py in prep for turning apirequesthandler into __init__ * Move APIRequestContext into its own file * Move APIRequest into its own file * run and terminate work * Move class into its own file * fix daemon get * Notes for converting Tornado to Eventlet * undo change to get_my_ip * all tests pass again * rollback on exit * merged session from devin * Added session.py * Removed get_backup_schedules from the image test * merged devin's sqlalchemy changes * Making tests pass * Reconnect to libvirt on broken connection * pylint fixes for /nova/virt/connection.py * pylint fixes for nova/objectstore/handler.py * ip addresses work now * Add Flavors controller supporting * Resolve conflicts and merge trunk * Detect if libvirt connection has been broken and reestablish it * instance runs * Dead code removal * remove creation of volume groups on boot * tests pass * Making tests pass * Making tests pass * Refactored orm to support 
atomic actions * moved network code into business layer * move None context up into cloud * split volume into service/manager/driver * moved models.py * removed the last few references to models.py * chown disk images to root for uml. Due to libvirt dropping CAP_DAC_OVERRIDE for uml, root needs to have explicit access to the disk images for stuff to work * Create console.log ahead of time. This ensures that the user running nova-compute maintains read privileges * fixed service mox test cases * Renamed test.py and moved a test as per merge proposal feedback * fixed volume unit tests * work endpoint/images.py into an S3ImageService. The translation isn't perfect, but it's a start * get to look like trunk * Set UML guests to use a file as their console. This halfway fixes get-console-output for them * network tests pass again * Fixes issue with the same ip being assigned to multiple instances * merged trunk and fixed tests * Support GET //detail * Moved API tests into a sub-folder of the tests/ and added a stubbed-out test declarations to mirror existing API tickets * Turn imageid translator into general translator for rackspace api ids * move network_type flag so it is accesible in data layer * Use compute.instance_types for flavor data instead of a FlavorService * more data layer breakouts, lots of fixes to cloud.py * merged jesse * Initial support for Rackspace API /image requests. 
They will eventually be backed by Glance * Fix a pep8 violation * improve the volume export - sleep & check export * missing context and move volume_update to before the export * update volume create code * A few small changes to install_venv to let venv builds work on the tarmac box * small tweaks * move create volume to work like instances * work towards volumes using db layer * merge vish * fix setup compute network * merge vish * merge vish * use vlan for network type since it works * merge vish * more work on getting running instances to work * merge vish * more cleanup * Flavors work * pep8 * Delete unused directory * Move imageservice to its own directory * getting run/terminate/describe to work * OK, break out ternary operator (good to know that it slowed you down to read it) * Style fixes * fix some errors with networking rules * typo in release_ip * run instances works * Ensure that --gid and --uid options work for both twisted and non-twisted daemons * Fixes an error in setup_compute_network that was causing network setup to fail * add back in the needed calls for dhcpbridge * removed old imports and moved flags * merge and fixes to creates to all return id * bunch more fixes * moving network code and fixing run_instances * jesse's run_instances changes * fix daemons and move network code * Rework virt.xenapi's concurrency model. There were many places where we were inadvertently blocking the reactor thread. 
The reworking puts all calls to XenAPI on background threads, so that they won't block the reactor thread * merged trunk and fixed merge errors * Refactored network model access into data abstraction layer * Get the output formatting correct * Typo * Don't serialize in Controller subclass now that wsgi.Controller handles it for us * Move serialize() to wsgi.Controller so __call__ can serialize() action return values if they are dicts * Serialize properly * Support opaque id to rs int id as well * License * Moves auth.manager to the data layer * Add db abstraction and unittets for service.py * Clarified what the 'Mapped device not found' exception really means. Fixed TODO. Some formatting to be closer to 80 chars * Added missing "self." * Alphabetize the methods in the db layer * fix concurrency issue with multiple instances getting the same ip * small fixes to network * Fixed typo * Better error message on subprocess spawn fail, and it's a ProcessExecutionException irrespective of how the process is run * Check exit codes when spawning processes by default Also pass --fail to curl so that it sets exit code when download fails * PEP8/pylint cleanup in bin and nova/auth * move volume code into datalayer and cleanup * Complete the Image API against a LocalImageService until Glance's API exists (at which point we'll make a GlanceImageService and make the choice of ImageService plugin configurable.) * Added unit tests for WSGI helpers and base WSGI API * merged termies abstractions * Move deferredToThread into utils, as suggested by termie * Remove whitespace to match style guide * Data abstraction for compute service * this file isn't being used * Cleaned up pep8/pylint style issues in nova/auth. 
There are still a few pylint warnings in manager.py, but the patch is already fairly large * More pylintrc updates * fix report state * Removed old cloud_topic queue setup, it is no longer used * last few test fixes * More bin/ pep8/pylint cleanup * fixing more network issues * Added '-' as possible charater in module rgx * Merged with trunk * Updated the tests to use webob, removed the 'called' thing and just use return values instead * Fix unit test bug this uncovered: don't release_ip that we haven't got from issue_ip * Fix to better reflect (my believed intent) as to the meaning of error_ok (ignore stderr vs accept failure) * Merged with trunk * use with_lockmode for concurrency issues * First in a series of patches to port the API from Tornado to WSGI. Also includes a few small style fixes in the new API code * Pull in ~eday/nova/api-port * Merged trunk * Merged api-port into api-port-1 * Since pylint=0.19 is our version, force everyone to use the disable-msg syntax * Missed one * Removed the 'controllers' directory under 'rackspace' due to full class name redundancy * pep8 typo * Changed our minds: keep pylint equal to Ubuntu Lucid version, and use disable-msg throughout * Fixed typo * Image API work * Newest pylint supports 'disable=', not 'disable-msg=' * Fix pep8 violation * tests pass * network tests pass * Added unittests for wsgi and api * almost there * progress on tests passing * remove references to deleted files so tests run * fix vpn access for auth * merged trunk * removed extra files * network datamodel code * In an effort to keep new and old API code separate, I've created a nova.api to put all new API code under. This means nova.endpoint only contains the old Tornado implementation. 
I also cleaned up a few pep8 and other style nits in the new API code * No longer installs a virtualenv automatically and adds new options to bypass the interactive prompt * Stylistic improvements * Add documentation to spawn, reboot, and destroy stating that those functions should return Deferreds. Update the fake implementations to do so (the libvirt ones already do, and making the xenapi ones do so is the subject of a current merge request) * start with model code * clean up linux_net * merged refresh from sleepsonthefloor * See description of change... what's the difference between that message and this message again? * Move eventlet-using class out of endpoint/__init__.py into its own submodule, so that twisted-related code using endpoint.[other stuff] wouldn't run eventlet and make unit tests throw crazy errors about eventlet 0.9.10 not playing nicely with twisted * Remove duplicate definition of flag * The file that I create automates this step in http://wiki.openstack.org/InstallationNova20100729 : * Simpler installation, and, can run install_venv from anywhere instead of just from checkout root * Use the argument handler specified by twistd, if any * Fixes quite a few style issues across the entire nova codebase bringing it much closer to the guide described in HACKING * merge from trunk * merged trunk * merged trunk and fixed conflicts * Fixes issues with allocation and deallocation of fixed and elastic addresses * Added documentation for the nova.virt connection interface, a note about the need to chmod the objectstore script, and a reference for the XenAPI module * Make individual disables for R0201 instead of file-level * All controller actions receive a 'req' parameter containing the webob Request * improve compatibility with ec2 clients * PEP8 and name corrections * rather comprehensive style fixes * fix launching and describing instances to work with sqlalchemy * Add new libvirt_type option "uml" for user-mode-linux.. 
This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml * typos * don't try to create and destroy lvs in fake mode * refactoring volume and some cleanup in model and compute * Add documentation to spawn, reboot, and destroy stating that those functions should return Deferreds. Update the fake implementations to do so (the libvirt ones already do, and making the xenapi ones do so is the subject of a current merge request) * Rework virt.xenapi's concurrency model. There were many places where we were inadvertently blocking the reactor thread. The reworking puts all calls to XenAPI on background threads, so that they won't block the reactor thread * add refresh on model * merge in latedt from vish * Catches and logs exceptions for rpc calls and raises a RemoteError exception on the caller side * Removes requirement of internet connectivity to run api server * Fixed path to keys directory * Update cloud_unittest to match renamed internal function * Removes the workaround for syslog-ng of removing newlines * Fixes bug lp:616312 by reversing the order of args in nova-manage when it calls AuthManager.get_credentials * merged trunk * Sets a hostname for instances that properly resolves and cleans up network classes * merged fix-hostname and fixed conflict * Implemented admin client / admin api for fetching user roles * Improves pep8 compliance and pylint score in network code * Bug #617776: DescribeImagesResponse contains type element, when it should be called imageType * Bug 617913: RunInstances response doesn't meet EC2 specification * remove more direct session interactions * refactor to have base helper class with shared session and engine * ComputeConnectionTestCase is almost working again * more work on trying to get compute tests passing * re-add redis clearing * make the fake-ldap system work again * got run_tests.py to run (with many failed tests) * Bug #617776: DescribeImagesResponse contains type element, when it should 
be called imageType * initial commit for orm based models * Add a few unit tests for libvirt_conn * Move interfaces template into virt/, too * Refactor LibvirtConnection a little bit for easier testing * Remove extra "uml" from os.type * Fixes out of order arguments in get_credentials * pep8 and pylint cleanup * Support JSON and XML in Serializer * Added note regarding dependency upon XenAPI.py * Added documentation to the nova.virt interface * make rpc.call propogate exception info. Includes tests * Undo the changes to cloud.py that somehow diverged from trunk * Mergeprop cleanup * Mergeprop cleanup * Make WSGI routing support routing to WSGI apps or to controller+action * Make --libvirt_type=uml do the right thing: Sets the correct libvirt URI and use a special template for the XML * renamed missed reference to Address * die classmethod * merged fix-dhcpbridge * remove class method * typo allocated should be relased * rename address stuff to avoid name collision and make the .all() iterator work again * keep track of leasing state so we can delete ips that didn't ever get leased * remove syslog-ng workaround * Merged with trunk * Implement the same fix as lp:~vishvananda/nova/fix-curl-project, but for virt.xenapi * Fix exception in get_info * Move libvirt.xml template into nova/virt * Parameterise libvirt URI * Merged with trunk * fix dhcpbridge issues * Adapts the run_tests.sh script to allow interactive or automated creation of virtualenv, or to run tests outside of a virtualenv * Prototype implementation of Servers controller * Working router that can target WSGI middleware or a standard controller+action * Added a xapi plugin that can pull images from nova-objectstore, and use that to get a disk, kernel, and ramdisk for the VM * Serializing in middleware after all... by tying to the router. maybe a good idea? 
* Merged with trunk * Actually pass in hostname and create a proper model for data in network code * Improved roles functionality (listing & improved test coverage) * support a hostname that can be looked up * updated virtualenv to add eventlet, which is now a requirement * Changes the run_tests.sh and /tools/install_venv.py scripts to be more user-friendly and not depend on PIP while not in the virtual environment * Fixed admin api for user roles * Merged list_roles * fix spacing issue in ldapdriver * Fixes bug lp:615857 by changing the name of the zip export method in nova-manage * Wired up admin api for user roles * change get_roles to have a flag for project_roles or not. Don't show 'projectmanager' in list of roles * Throw exceptions for illegal roles on role add * Adds get_roles commands to manager and driver classes * more pylint fixes * Implement VIF creation in the xenapi module * lots more pylint fixes * work on a router that works with wsgi and non-wsgi routing * Pylint clean of vpn.py * Further pylint cleanup * Oops, we need eventlet as well * pylint cleanup * pep8 cleanup * merged trunk * pylint fixes for nova/objectstore/handler.py * rename create_zip to zipfile so lazy match works * Quick fix on location of printouts when trying to install virtualenv * Changes the run_tests.sh and /tools/install_venv.py scripts to be more user-friendly and not depend on PIP while not in the virtual environment. Running run_tests.sh should not just work out of the box on all systems supporting easy_install.. * 2 changes in doing PEP8 & Pylint cleaning: * adding pep8 and pylint to the PIP requirements files for Tools * light cleaning work (mostly formatting) on nova/endpoints/cloud.py * More changes to volume to fix concurrency issues. 
Also testing updates * Merge * Merged nova-tests-apitest into pylint * Merged nova-virt-connection into nova-tests-apitest * Pylint fixes for /nova/tests/api_unittest.py * pylint fixes for nova/virt/connection.py * merged trunk, fixed an error with releasing ip * fix releasing to work properly * Add some useful features to our flags * pylint fixes for /nova/test.py * Fixes pylint issues in /nova/server.py * importing merges from hudson branch * fixing - removing unused imports per Eric & Jay review * initial cleanup of tests for network * Implement the same fix as lp:~vishvananda/nova/fix-curl-project, but for virt.xenapi * Run correctly even if called while in tools/ directory, as 'python install_venv.py' * This branch builds off of Todd and Michael's API branches to rework the Rackspace API endpoint and WSGI layers * separated scheduler types into own modules * Fix up variable names instead of disabling pylint naming rule. Makes variables able to be a single letter in pylintrc * Disables warning about TODO in code comments in pylintrc * More pylint/pep8 cleanup, this time in bin/* files * pylint fixes for nova/server.py * remove duplicated report_state that exists in the base class more pylint fixes * Fixed docstring format per Jay's review * pylint fixes for /nova/test.py * Move the xenapi top level directory under plugins, as suggested by Jay Pipes * Pull trunk merge through lp:~ewanmellor/nova/add-contains * Pull trunk merge through lp:~ewanmellor/nova/xapi-plugin * Merged with trunk again * light cleanup - convention stuff mostly * convention and variable naming cleanup for pylint/pep8 * Used new (clearer) flag names when calling processes * Merged with trunk * Greater compliance with pep8/pylint style checks * removing what appears to be an unused try/except statement - nova.auth.manager.UserError doesn't exist in this codebase. Leftover? Something intended to be there but never added? 
* variable name cleanup * attempting some cleanup work * adding pep8 and pylint for regular cleanup tasks * Cleaned up pep8/pylint for bin/* files. I did not fix rsapi since this is already cleaned up in another branch * Merged trunk * Reworked WSGI helper module and converted rackspace API endpoint to use it * Changed the network imports to use new network layout * merged with trunk * Change nova/virt/images.py's _fetch_local_image to accept 4 args, since fetch() tries to call it with that many * Merged Todd and Michael's changes * pep8 and pylint cleanups * Some pylink and pep8 cleanups. Added a pylintrc file * fix copyrights for new files, etc * a few more commands were putting output on stderr. In general, exceptions on stderr output seems like a bad idea * Moved Scheduler classes into scheduler.py. Created a way to specify scheduler class that the SchedulerService uses.. * Make network its own worker! This separates the network logic from the api server, allowing us to have multiple network controllers. There a lot of stuff in networking that is ugly and should be modified with the datamodel changes. I've attempted not to mess with those things too much to keep the changeset small(ha!) * Fixed instance model associations to host (node) and added association to ip * Fixed write authorization for public images * Fixes a bug where if a user was removed from a group after he had a role, he could not be re-added * fix search/replace error * merged trunk * Start breaking out scheduler classes.. * WsgiStack class, eventletserver.serve. Trying to work toward a simple API that anyone can use to start an eventlet-based server composed of several WSGI apps * Use webob to simplify wsgi middleware * Made group membership check only search group instead of subtree. Roles in a group are removed when a user is removed from that group. 
Added test * Fixes bug#614090 -- nova.virt.images._fetch_local_image being called with 4 args but only has 3 * Fixed image modification authorization, API cleanup * fixed doc string * compute topic for a node is compute.node not compute:node! * almost there on random scheduler. not pushing to correct compute node topic, yet, apparently.. * First pass at making a file pass pep8 and pylint tests as an example * merged trunk * rename networkdata to vpn * remove extra line accidentally added * compute nodes should store total memory and disk space available for VMs * merged from trunk * added bin/nova-listinstances, which is mostly just a duplication of euca-describe-instances but doesn't go through the API * Fixes various concurrency issues in volume worker * Changed volumes to use a pool instead of globbing filesystem for concurrency reasons. Fixed broken tests * clean up nova-manage. If vpn data isn't set for user it skips it * method is called set_network_host * fixed circular reference and tests * renamed Vpn to NetworkData, moved the creation of data to inside network * fix rpc command line call, remove useless deferreds * fix error on terminate instance relating to elastic ip * Move the xenapi top level directory under plugins, as suggested by Jay Pipes * fixed tests, moved compute network config call, added notes, made inject option into a boolean * fix extra reference, method passing to network, various errors in elastic_ips * use iteritems * reference to self.project instead of context.project + self.network_model instead of network_model * fixes in get public address and extra references to self.network * method should return network topic instead of network host * use deferreds in network * don't __ module methods * inline commands use returnValue * it helps to save files BEFORE committing * Added note to README * Fixes the curl to pass in the project properly * Adds flag for libvirt type (hvm, qemu, etc) * Fix deprecation warning in AuthManager. 
__new__ isn't allowed to take args * created assocaition between project and host, modified commands to get host async, simplified calls to network * use get to retrieve node_name from initial_state * change network_service flag to network_type and don't take full class name * vblade commands randomly toss stuff into stderr, ignore it * delete instance doesn't fail if instances dir doesn't exist * Huge network refactor, Round I * Fixes boto imports to support both beta and older versions of boto * Get IP doesn't fail of you not connected to the intetnet * updated doc string and wrapper * add copyright headers * Fix exception in get_info * Implement VIF creation * Define __contains__ on BasicModel, so that we can use "x in datamodel" * Fixed instance model associations to host (node) and added association to ip * Added a xapi plugin that can pull images from nova-objectstore, and use that to get a disk, kernel, and ramdisk for the VM. The VM actually boots! * Added project as parameter to admin client x509 zip file download * Turn the private _image_url(path) into a public image_url(image). This will be used by virt.xenapi to instruct xapi as to which images to download * Merged in configurable libvirt_uri, and fixes to raw disk images from the virtualbox branch * Fixed up some of the raw disk stuff that broke in the abstraction out of libvirt * Merged with raw disk image * Recognize 'magic' kernel value that means "don't use a kernel" - currently aki-00000000 * Fix Tests * Fixes nova volumes. The async commands yield properly. Simplified the call to create volume in cloud. Added some notes * another try on fix boto * use user.access instead of user.id * Fixes access key passing in curl statement * Accept a configurable libvirt_uri * Added Cheetah to pip-requires * Removed duplicate toXml method * Merged with trunk * Merged with trunk, added note about suspicious behaviour * Added exit code checking to process.py (twisted process utils). 
A bit of class refactoring to make it work & cleaner. Also added some more instructive messages to install_venv.py, because otherwise people that don't know what they're doing will install the wrong pip... i.e. I did :-) * Make nodaemon twistd processes log to stdout * Make nodaemon twistd processes log to stdout * use the right tag * flag for libvirt type * boto.s3 no longer imports connection, so we need to explicitly import it * Added project param to admin client zip download * boto.utils import doesn't work with new boto, import boto instead * fix imports in endpoint/images.py boto.s3 no longer imports connection, so we need to explicitly import it * Added --fail argument to curl invocations, so that HTTP request fails get surfaced as non-zero exit codes * Merged with trunk * Merged with trunk * strip out some useless imports * Add some useful features to our flags * Fixed pep8 in run_test.py * Blank commit to get tarmac merge to pick up the tags * Fixed assertion "Someone released me too many times: too many tokens!" * Replace the second singleton unit test, lost during a merge * Merged with trunk to resolve merge conflicts * oops retry and add extra exception check * Fix deprecation warning in AuthManager. __new__ isn't allowed to take args * Added ChangeLog generation * Implemented admin api for rbac * Move the reading of API parameters above the call to _get_image, so that they have a chance to take effect * Move the reading of API parameters above the call to _get_image, so that they have a chance to take effect * Adds initial support for XenAPI (not yet finished) * More merges from trunk. 
Not everything came over the first time * Allow driver specification in AuthManager creation * pep8 * Fixed pep8 issues in setup.py - thanks redbo * Use default kernel and ramdisk properly by default * Adds optional user param to the get projects command * Ensures default redis keys are lowercase like they were in prior versions of the code * Pass in environment to dnsmasq properly * Releaed 0.9.0, now on 0.9.1 * Merged trunk * Added ChangeLog generation * Wired up get/add/remove project members * Merged lp:~vishvananda/nova/lp609749 * Removes logging when associating a model to something that isn't a model class * allow driver to be passed in to auth manager instead of depending solely on flag * make redis name default to lower case * Merged get-projects-by-user * Merged trunk * Fixed project api * Specify a filter by user for get projects * Create a model for storing session tokens * Fixed a typo from the the refactor of auth code * Makes ldap flags work again * bzr merge lp:nova/trunk * Tagged 0.9.0 and bumped the version to 0.9.1 * Silence logs when associated models aren't found. Also document methods used ofr associating things. And get rid of some duplicated code * Fix dnsmasq commands to pass in environment properly 0.9.0 ----- * Got the tree set for debian packaging * use default kernel and ramdisk and check for legal access * import ldapdriver for flags * Removed extra include * Added the gitignore files back in for the folks who are still on the git * Added a few more missing files to MANIFEST.in and added some placeholder files so that setup.py would carry the empty dir * Updated setup.py file to install stuff on a python setup.py install command * Removed gitignore files * Made run_tests.sh executable * Put in a single MANIFEST.in file that takes care of things * Changed Makefile to shell script. 
The Makefile approach completely broke debhelper's ability to figure out that this was a python package * fixed typo from auth refactor * Add sdist make target to build the MANIFEST.in file * Removes debian dir from main tree. We'll add it back in in a different branch * Merged trunk * Wired up user:project auth calls * Bump version to 0.9.0 * Makes the compute and volume daemon workers use a common base class called Service. Adds a NetworkService in preparation for splitting out networking code. General cleanup and standardizarion of naming * fixed path to keys directory * Fixes Bug lp:610611: deleted project vlans are deleted from the datastore before they are reused * Add a 'sdist' make target. It first generates a MANIFEST.in based on what's in bzr, then calls python setup.py sdist * properly delete old vlans assigned to deleted projects * Remove debian/ from main branch * Bump version to 0.9.0. Change author to "OpenStack". Change author_email to nova@lists.launchpad.net. Change url to http://www.openstack.org/. Change description to "cloud computing fabric controller" * Make "make test" detect whether to use virtualenv or not, thus making virtualenv optional * merged trunk * Makes the objectstore require authorization, checks it properly, and makes nova-compute provide it when fetching images * Automatically choose the correct type of test (virtualenv or system) * Ensure that boto's config has a "Boto" section before attempting to set a value in it * fixes buildpackage failing with dh_install: missing files * removed old reference from nova-common.install and fixed spacing * Flag for SessionToken ttl setting * resolving conflict w/ merge, cleaning up virtenv setups * resolving conflict w/ merge, cleaning up virtenv setups * Fixes bug#610140. Thanks to Vish and Muharem for the patch * A few minor fixes to the virtualenv installer that were breaking on ubuntu * Give SessionToken an is_expired method * Refactor of auth code * Fixes bug#610140. 
Thanks to Vish and Muharem for the patch * Share my updates to the Rackspace API * Fixes to the virtualenv installer * Ensure consistent use of filename for dhcp bridge flag file * renamed xxxservice to service * Began wiring up rbac admin api * fix auth_driver flag to default to usable driver * Adds support scripts for installing deps into a virtualenv * In fact, it should delete them * Lookup should only not return expired tokens * Adds support scripts for installing deps into a virtualenv * default flag file full path * moved misnamed nova-dchp file * Make _fetch_s3_image pass proper AWS Authorization headers so that image downloads work again * Make image downloads work again in S3 handler. Listing worked, but fetching the images failed because I wasn't clever enough to use twisted.web.static.File correctly * Move virtualenv installation out of the makefile * Expiry awareness for SessionToken * class based singleton for SharedPool * Basic standup of SessionToken model for shortlived auth tokens * merged trunk * merged trunk * Updated doc layout to the Sphinx two-dir layout * Replace hardcoded "nova" with FLAGS.control_exchange * Add a simple set of tests for S3 API (using boto) * Fix references to image_object. This caused an internal error when using euca-deregister * Set durable=False on TopicPublisher * Added missing import * Replace hardcoded example URL, username, and password with flags called xenapi_connection_url, xenapi_connection_username, xenapi_connection_password * Fix instance cleanup * Fix references to image_object. 
This caused an internal error when using euca-deregister * removed unused assignment * More Cleanup of code * Fix references to get_argument, fixing internal error when calling euca-deregister * Changes nova-volume to use twisted * Fixes up Bucket to throw proper NotFound and NotEmpty exceptions in constructor and delete() method, and fixes up objectstore_unittest to properly use assertRaises() to check for proper exceptions and remove the assert_ calls * Adds missing yield statement that was causing partitioning to intermittently fail * Merged lp:~ewanmellor/nova/lp609792 * Merged lp:~ewanmellor/nova/lp609791 * Replace hardcoded "nova" with FLAGS.control_exchange * Set durable=False on TopicPublisher, so that it matches the flag on TopicConsumer. This ensures that either redeclaration of the control_exchange will use the same flag, and avoid AMQPChannelException * Add an import so that nova-compute sees the images_path flag, so that it can be used on the command line * Return a 404 when attempting to access a bucket that does not exist * Removed creation of process pools. We don't use these any more now that we're using process.simple_execute * Fix assertion "Someone released me too many times: too many tokens!" when more than one process was running at the same time. This was caused by the override of SharedPool.__new__ not stopping ProcessPool.__init__ from being run whenever process.simple_execute is called * Always make sure to set a Date headers, since it's needed to calculate the S3 Auth header * Updated the README file * Updated sphinx layout to a two-dir layout like swift. 
Updated a doc string to get rid of a Sphinx warning * Updated URLs in the README file to point to current locations * Add missing import following merge from trunk (cset 150) * Merged with trunk, since a lot of useful things have gone in there recently * fixed bug where partition code was sometimes failing due to initial dd not being yielded properly * Fixed bug 608505 - was freeing the wrong address (should have freed 'secondaddress', was freeing 'address') * renamed xxxnode to xxservice * Add (completely untested) code to include an Authorization header for the S3 request to fetch an image * Check signature for S3 requests * Fixes problem with describe-addresses returning all public ips instead of the ones for just the user's project * Fix for extra spaces in export statements in scripts relating to x509 certs * Adds a Makefile to fill dependencies for testing * Fix syslogging of exceptions by stripping newlines from the exception info * Merged fix for bug 608505 so unit tests pass * Check exit codes when spawning processes by default * Nobody wants to take on this twisted cleanup. It works for now, but could be much nicer if twisted has a nice hook-point for exception mapping * syslog changes * typo fixes and extra print statements removed * added todo for ABC * Fixed bug 608505 - was freeing the wrong address (should have freed 'secondaddress', was freeing 'address') * Merged trunk, fixed extra references to fake_users * refactoring of imports for fakeldapdriver * make nova-network executable * refactor daemons to use common base class in preparation for network refactor * reorder import statement and remove commented-out test case that is the same as api_unittest in objectstore_unittest * Fixes up Bucket to throw proper NotFound and NotEmpty exceptions in constructor and delete() method, and fixes up objectstore_unittest to properly use assertRaises() to check for proper exceptions and remove the assert_ calls * Fix bug 607501. 
Raise 403, not exception if Authorization header not passed. Also added missing call to request.finish() & Python exception-handling style tweak * merge with twisted-volume * remove all of the unused saved return values from attach_to_twisted * fix for describe addresses showing everyone's public ips * update the logic for calculating network sizes * Locally administered mac addresses have the second least significant bit of the most significant byte set. If this byte is set then udev on ubuntu doesn't set persistent net rules * use a locally administered mac address so it isn't saved by udev * Convert processpool to a singleton, and switch node.py calls to use it. (Replaces passing a processpool object around all the time.) * Fixed the broken reference to * remove spaces from export statements in scripts relating to certs * Cleanups * Able to set up DNS, and remove udev network rules * Move self.ldap to global ldap to make changes easier if we ever implement settings * Cleanup per suggestions * network unittest clean up * Test cleanup, make driver return dictionaries and construct objects in manager * Able to boot without kernel or ramdisk. libvirt.xml.template is now a Cheetah template * Merged https://code.launchpad.net/~justin-fathomdb/nova/copy-error-handling * Merged bug fixes * Map exceptions to 404 / 403 codes, as was done before the move to twisted. However, I don't think this is the right way to do this in Twisted. For example, exceptions thrown after the render method returns will not be mapped * Merged lp:~justin-fathomdb/nova/bug607501 * Merged trunk. Fixed new references to UserManager * I put the call to request.finish() in the wrong place. :-( * More docstrings, don't autocreate projects * Raise 401, not exception if Authorization header not passed. 
Also minor fixes & Python exception-handling style tweak * LdapDriver cleanup: docstrings and parameter ordering * Ask curl to set exit code if resource was not found * Fixes to dhcp lease code to use a flagfile * merged trunk * Massive refactor of users.py * Hmm, serves me right for not understanding the request, eh? :) Now too_many_addresses test case is idempotent in regards to running in isolation and uses self.flags.network_size instead of the magic number 32 * Redirect STDERR to output to an errlog file when running run_tests.py * Send message ack in rpc.call and make queues durable * Fixed name change caused by remove-vendor merge * Replace tornado objectstore with twisted web * merged in trunk and fixed import merge errors * First commit of XenAPI-specific code (i.e. connections to the open-source community project Xen Cloud Platform, or the open-source commercial product Citrix XenServer) * Remove the tight coupling between nova.compute.monitor and libvirt. The libvirt-specific code was placed in nova.virt.libvirt_conn by the last changeset. This greatly simplifies the monitor code, and puts the libvirt-specific XML record parsing in a libvirt-specific place * In preparation for XenAPI support, refactor the interface between nova.compute and the hypervisor (i.e. libvirt) * Fixed references to nova.utils that were broken by a change of import statement in the remove-vendor merge * Remove s3_internal_port setting. Objectstore should be able to handle the beatings now. As such, nginx is no longer needed, so it's removed from the dependencies and the configuration files are removed * Replace nova-objectstore with a twistd style wrapper. 
Add a get_application method to objectstore handler * Minor post-merge fixes * Fixed _redis_name and _redis_key * Add build_sphinx support * fix conf file to no longer have daemonize=1 because twistd daemonizes by default * make nova-volume start with twisteds daemonize stuff * Makin the queues non-durable by default * Ack messages during call so rabbit leaks less * simplify call to simple_execute * merge extra singleton-pool changes * Added a config file to let setup.py drive building the sphinx docs * make simple method wrapper for process pool simple_execute * change volume code to use twisted * remove calls to runthis from node * merge with singleton pool * Removed unused Pool from process.py, added a singleton pool called SharedPool, changed calls in node to use singleton pool * Fixes things that were not quite right after big merge party * Make S3 API handler more idiomatic Twisted Web-y * _redis_name wasn't picking up override_type correctly, and _redis_key wasn't using it * Quick fix to variable names for consistency in documentation.. * Adds a fix to the idempotency of the test_too_many_addresses test case by adding a simple property to the BaseNetwork class and calculating the number of available IPs by asking the network class to tell the test how many static and preallocated IP addresses are in use before entering the loop to "blow up" the address allocation.. * Adds a flag to redirect STDERR when running run_tests.py. Defaults to a truncate-on-write logfile named run_tests.err.log. 
Adds ignore rule for generated errlog file * no more print in storage unittest * reorder imports spacing * Fixes to dhcp lease code to use a flagfile * merged trunk * This branch fixes some unfortunate interaction between Nova and boto * Make sure we pass str objects instead of unicode objects to boto as our credentials * remove import of vendor since we have PPA now * Updates the test suite to work * Disabled a tmpdir cleanup * remove vendor * update copyrights * Volume_ID identifier needed a return in the property. Also looking for race conditions in the destructor * bin to import images from canonical image store * add logging import to datastore * fix merge errors * change default vpn ports and remove complex vpn ip iteration * fix reference to BasicModel and imports * Cleanups related to BasicModel (whitespace, names, etc) * Updating buildbot address * Fixed buildbot * work on importing images * When destroying an Instance, disassociate with Node * Smiteme * Smiteme * Smiteme * Smiteme * Move BasicModel into datastore * Smiteme * Smiteme * Whitespace change * unhardcode the binary name * Fooish * Finish singletonizing UserManager usage * Debian package additions for simple network template * Foo * Whitespace fix * Remove debug statement * Foo * fix a typo * Added build-deps to debian/control that are needed to run test suite. 
Fixed an error in a test case * optimization to not load all instances when describe instances is called * More buildbot testing * More buildbot testing * More buildbot testing * More buildbot testing * More buildbot testing * More buildbot testing * Addin buildbot * Fix merge changelog and merge errors in utils.py * Fixes from code review * release 0.2.2-10 * fix for extra space in vblade-persist * Avoid using s-expr, pkcs1-conv, and lsh-export-key * release 0.2.2-9 * fixed bug in auth group_exists * Move nova related configuration files into /etc/nova/ * move check for none before get mpi data * Refactored smoketests flags * Fixes to smoketest flags * Minor smoketest refactoring * fixes from code review * typo in exception in crypto * datetime import typo * added missing isotime method from utils * release 0.2.2-8 * missed a comma * release 0.2.2-7 * use a flag for cert subject * whitespace fixes and header changes * Fixed the os.environ patch (bogus) * Fixes as per Vish review (whitespace, import statements) * Off by one error in the allocation test (can someone check my subnet math?) * Adding more tests, refactoring for dhcp logic * Got dhcpleasor working, with test ENV for testing, and rpc.cast for real world * Capture signals from dnsmasq and use them to update network state * Relax the Twisted dependency to python-twisted-core (rather than the full stack) * releasing version 0.3.0+really0.2.2-0ubuntu0ppa3 * If set, pass KernelId and RamdiskId from RunInstances call to the target compute node * Add a default flag file for nova-manage to help it find the CA * Ship the CA directory in nova-common * Add a dependency on nginx from nova-objectsstore and install a suitable configuration file * releasing version 0.3.0+really0.2.2-0ubuntu0ppa2 * Don't pass --daemonize=1 to nova-compute. 
It's already daemonising by default * Add debian/nova-common.dirs to create var/lib/nova/{buckets,CA,images,instances,keys,networks} * keeper_path is really caled datastore_path * Fixed package version * Move templates from python directories to /usr/share/nova * Added --network_path setting to nova-compute's flagfile * releasing version 0.3.0+really0.2.2-0ubuntu0ppa1 * Use rmdir instead of rm -rf to remove a tempdir * Set better defaults in flagfiles * Fixes and add interface template * Simple network injection * Simple Network avoids vlans * clean a few merge errors from network * Add curl as a dependency of nova-compute * getting started update * getting started update * Remove _s errors from merge * fix typos in node from merge * remove spaces from default cert * Make sure get_assigned_vlans and BaseNetwork.hosts always return a dict, even if the key is currently empty in the KVS * Add _s instance attribute to Instance class. It's referenced in a bunch of places, but is never set. This is unlikely to be the right fix (why have two attributes pointing to the same object?), but it seems to make ends meet * Replace spaces in x509 cert subject with underscores. It ends up getting split(' ')'ed and passed to subprocess.Popen, so it needs to not have spaces in it, otherwise openssl gets very upset * Expand somewhat on the short and long descriptions in debian/control * Use separate configuration files for the different daemons * Removed trailing whitespace from header * Updated licenses * Added flags to smoketests. 
General cleanup * removed all references to keeper * reformatting * Vpn ips and ports use redis * review reformat * code review reformat * We need to be able to look up Instance by Node (live migration) * Get rid of RedisModel * formatting fixes and refactoring from code review * reformatting to fit within 80 characters * simplified handling of tempdir for Fakes * fix for multiple shelves for each volume node * add object class violation exception to fakeldap * remove spaces from default cert * remove silly default from generate cert * fix of fakeldap imports and exceptions * More Comments, cleanup, and reformatting * users.py cleanup for exception handling and typo * Make fakeldap use redis * Refactor network.Vlan to be a BasicModel, since it touched Redis * bugfix: rename _s to datamodel in Node in some places it was overlooked * fix key injection script * Fixes based on code review 27001 * added TODO * Admin API + Worker Tracking * fixed typo * style cleanup * add more info to vpn list * Use flag for vpn key suffix instead of hardcoded string * don't fail to create vpn key if dir exists * Create Volume should only take an integer between 0 and 1000 * Placeholders for missing describe commands * Set forward delay to zero (partial fix to bug #518) * more comment reformatting * fit comment within 80 lines * removed extraneous reference to rpc in objectstore unit test * Fix queue connection bugs * Fix deletion of user when he is the last member of the group * Fix error message for checking for projectmanager role * Installer now creates global developer role * Removed trailing whitespace from header * added nova-instancemonitor debian config * Updated licenses * Added flags to smoketests. 
General cleanup * A few missing files from the twisted patch * Tweaks to get instancemonitor running * Initial commit of nodemonitor * Create DescribeImageAttribute api method * release 0.2.2-6 * disk.py needed input for key injection to work * release 2.2-5 * message checking callbacks only need to run 10 times a second * release 2.2-4 * trackback formatting isn't logging correctly * documentation updates * fix missing tab in nova-manage * Release 2.2-3 * use logger to print trace of unhandled exceptions * add exit status to nova-manage * fix fakeldap so it can use redis keeper * fix is_running failing because state was stored as a string * more commands in nova-manage for projects and roles * More volume test fixes * typo in reboot instances * Fix mount of drive for test image * don't need sudo anymore * Cleaning up smoketests * boto uses instance_type not size * Fix to volume smoketests * fix display of project name for admin in describe instances * make sure to deexpress before we remove the host since deexpress uses the host * fix error in disassociate address * fixed reversed filtering logic * filter keypairs for vpn keys * allow multiple vpn connections with the same credentials * Added admin command to restart networks * hide vpn instances unless you are an admin and allow run_instances to launch vpn image even if it is private * typo in my ping call * try to ping vpn instances * sensible defaults for instance types * add missing import to pipelib * Give vpns the proper ip address * Fix format addresses * Release 0.2.2-2 * fix more casing errors and make attachment set print * removed extraneous .volume_id * don't allow volumes to be attached to the same mountpoint * fix case for volume attributes * fix sectors off by one * Don't use keeper for instances * fix default state to be 0 instead of pending * Release 0.2.2 * Fix for mpi cpu reporting * fix detach volume * fix status code printing in cloud * add project ids to volumes * add back accidentally 
removed bridge name. str is reserved, so don't use it as a variable name * whitespace fixes and format instances set of object fixes * Use instdir to iterate through instances * fix bridge name * Adding basic validation of volume size on creation, plus tests for it * finished gutting keeper from volume * First pass at validation unit tests. Haven't figured out class methods yet * Removing keeper sludge * Set volume status properly, first pass at validation decorators * Adding missing default values and fixing bare Redis fetch for volume list * one more handler typo * fix objectstore handler typo * fix modify image attribute typo * NetworkNode doesn't exist anymore * Added back in missing gateway property on networks * Refactored Instance to get rid of _s bits, and fixed some bugs in state management * Delete instance files on shutdown * Flush redis db in setup and teardown of tests * Cleaning up my accidental merge of the docs branch * change pipelib to work with projects * Volumes support intermediate state. 
Don't have to cast to storage nodes for attach/detach anymore, just let node update redis with state * Adding nojekyll for directories * Fix for #437 (deleting attached volumes), plus some >9 blade_id fixes * fix instance iteration to use self.instdir.all instead of older iterators * nasa ldap defaults * sensible rbac defaults * Tests for rbac code * Patch to allow rbac * Adding mpi data * Adding cloudpipe and vpn data back in to network.py * how we build our debs * Revert "fix a bug with AOE number generation" * re-added cloudpipe * devin's smoketests * tools to clean vlans and run our old install script * fix a bug with AOE number generation * Initial commit of nodemonitor * Create DescribeImageAttribute api method * Create DescribeImageAttribute api method * More rackspace API * git checkpoint commit post-wsgi * update spacing * implement image serving in objectstore so nginx isn't required in development * update twitter username * make a "Running" topic instead of having it flow under "Configuration" * Make nginx config be in a code block * More doc updates: nginx & pycurl * Add a README, because GitHub loves them. Update the getting started docs * update spacing * Commit what I have almost working before diverging * first go at moving from tornado to twisted * implement image serving in objectstore so nginx isn't required in development * update twitter username * Update documentation * fix for reactor.spawnProcess sending deprecation warning * patch from issue 4001 * Fix for LoopingCall failing Added in exception logging around amqp calls Creating deferred in receive before ack() message was causing IOError (interrupted system calls), probably because the same message was getting processed twice in some situations, causing the system calls to be doubled. Moving the ack() earlier fixed the problem. The code works now with an interval of 0 but that causes heavy processor usage. 
An interval of 0.01 keeps the cpu usage within reasonable limits * get rid of anyjson in rpc and fix bad reference to rpc.Connection * gateway undefined * fix cloud instances method * Various cloud fixes * make get_my_ip return 127.0.0.1 for testing * Adds a Twisted implementation of a process pool * make a "Running" topic instead of having it flow under "Configuration" * Make nginx config be in a code block * More doc updates: nginx & pycurl * Add a README, because GitHub loves them. Update the getting started docs * whitespace fixes for nova/utils.py * Add project methods to nova-manage * Fix novarc to use project when creating access key * removed reference to nonexistent flag * Josh's networking refactor, modified to work with projects * Merged Vish's work on adding projects to nova * missed the gitignore * initial commit nova-13.0.0/PKG-INFO0000664000567000056710000000632112701410205014766 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: nova Version: 13.0.0 Summary: Cloud computing fabric controller Home-page: http://docs.openstack.org/developer/nova/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: OpenStack Nova README ===================== OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of virtualization technologies, including KVM, Xen, LXC, VMware, and more. In addition to its native API, it includes compatibility with the commonly encountered Amazon EC2 and S3 APIs. OpenStack Nova is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. Nova primarily consists of a set of Python daemons, though it requires and integrates with a number of native system components for databases, messaging and virtualization capabilities. To keep updated with new developments in the OpenStack project follow `@openstack `_ on Twitter. 
To learn how to deploy OpenStack Nova, consult the documentation available online at: http://docs.openstack.org For information about the different compute (hypervisor) drivers supported by Nova, read this page on the wiki: https://wiki.openstack.org/wiki/HypervisorSupportMatrix In the unfortunate event that bugs are discovered, they should be reported to the appropriate bug tracker. If you obtained the software from a 3rd party operating system vendor, it is often wise to use their own bug tracker for reporting problems. In all other cases use the master OpenStack bug tracker, available at: http://bugs.launchpad.net/nova Developers wishing to work on the OpenStack Nova project should always base their work on the latest Nova code, available from the master GIT repository at: https://git.openstack.org/cgit/openstack/nova Developers should also join the discussion on the mailing list, at: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev Any new code must follow the development guidelines detailed in the HACKING.rst file, and pass all unit tests. Further developer focused documentation is available at: http://docs.openstack.org/developer/nova/ For information on how to contribute to Nova, please see the contents of the CONTRIBUTING.rst file. -- End of broadcast Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 nova-13.0.0/tox.ini0000664000567000056710000001234412701410011015201 0ustar jenkinsjenkins00000000000000[tox] minversion = 2.0 envlist = py34,py27,functional,pep8,pip-missing-reqs skipsdist = True [testenv] usedevelop = True # tox is silly... 
these need to be separated by a newline.... whitelist_externals = bash find rm install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} setenv = VIRTUAL_ENV={envdir} OS_TEST_PATH=./nova/tests/unit LANGUAGE=en_US LC_ALL=en_US.utf-8 deps = -r{toxinidir}/test-requirements.txt commands = find . -type f -name "*.pyc" -delete bash tools/pretty_tox.sh '{posargs}' passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY # there is also secret magic in pretty_tox.sh which lets you run in a fail only # mode. To do this define the TRACE_FAILONLY environmental variable. [testenv:pep8] basepython = python2.7 deps = hacking commands = bash tools/flake8wrap.sh {posargs} # Check that .po and .pot files are valid. bash -c "find nova -type f -regex '.*\.pot?' -print0| \ xargs -0 -n 1 msgfmt --check-format -o /dev/null" [testenv:py34] # NOTE(mriedem): If py34 fails with "db type could not be determined", delete # .testrepository and try again. Running py34 before py27 is OK, but not the # other way around. See: https://bugs.launchpad.net/testrepository/+bug/1212909 setenv = {[testenv]setenv} commands = find . -type f -name "*.pyc" -delete ostestr --blacklist_file tests-py3.txt [testenv:functional] usedevelop = True setenv = VIRTUAL_ENV={envdir} OS_TEST_PATH=./nova/tests/functional LANGUAGE=en_US commands = find . -type f -name "*.pyc" -delete bash tools/pretty_tox.sh '{posargs}' [testenv:api-samples] usedevelop = True setenv = VIRTUAL_ENV={envdir} GENERATE_SAMPLES=True PYTHONHASHSEED=0 OS_TEST_PATH=./nova/tests/functional/api_sample_tests LANGUAGE=en_US commands = find . -type f -name "*.pyc" -delete bash tools/pretty_tox.sh '{posargs}' [testenv:genconfig] commands = oslo-config-generator --config-file=etc/nova/nova-config-generator.conf [testenv:cover] # Also do not run test_coverage_ext tests while gathering coverage as those # tests conflict with coverage. 
# NOTE(sdague): this target does not use constraints because # upstream infra does not yet support it. Once that's fixed, we can # drop the install_command. install_command = pip install -U --force-reinstall {opts} {packages} commands = coverage erase python setup.py testr --coverage \ --testr-args='{posargs}' coverage combine coverage html --include='nova/*' --omit='nova/openstack/common/*' -d covhtml -i [testenv:venv] # NOTE(jaegerandi): This target does not use constraints because # upstream infra does not yet support it. Once that's fixed, we can # drop the install_command. install_command = pip install -U --force-reinstall {opts} {packages} commands = {posargs} [testenv:docs] commands = rm -rf doc/source/api doc/build api-guide/build python setup.py build_sphinx bash -c '! find doc/ -type f -name *.json | xargs -t -n1 python -m json.tool 2>&1 > /dev/null | grep -B1 -v ^python' oslo-config-generator --config-file=etc/nova/nova-config-generator.conf sphinx-build -b html api-guide/source api-guide/build/html [testenv:api-guide] # This environment is called from CI scripts to test and publish # the API Guide to developer.openstack.org. # NOTE(sdague): this target does not use constraints because # upstream infra does not yet support it. Once that's fixed, we can # drop the install_command. install_command = pip install -U --force-reinstall {opts} {packages} commands = sphinx-build -b html -d api-guide/build/doctrees api-guide/source api-guide/build/html [testenv:bandit] commands = bandit -c bandit.yaml -r nova -n 5 -ll [testenv:releasenotes] # NOTE(sdague): this target does not use constraints because # upstream infra does not yet support it. Once that's fixed, we can # drop the install_command. install_command = pip install -U --force-reinstall {opts} {packages} commands = rm -rf releasenotes/build sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [flake8] # E125 is deliberately excluded. 
See https://github.com/jcrocholl/pep8/issues/126 # The rest of the ignores are TODOs # New from hacking 0.9: E129, E131, H407, H405 # E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301 ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405 exclude = .venv,.git,.tox,dist,doc,*openstack/common/*,*lib/python*,*egg,build,tools/xenserver*,releasenotes # To get a list of functions that are more complex than 25, set max-complexity # to 25 and run 'tox -epep8'. # 34 is currently the most complex thing we have # TODO(jogo): get this number down to 25 or so max-complexity=35 [hacking] local-check-factory = nova.hacking.checks.factory import_exceptions = nova.i18n [testenv:pip-missing-reqs] # do not install test-requirements as that will pollute the virtualenv for # determining missing packages # this also means that pip-missing-reqs must be installed separately, outside # of the requirements.txt files deps = pip_missing_reqs commands=pip-missing-reqs -d --ignore-file=nova/tests/* --ignore-file=nova/test.py nova nova-13.0.0/contrib/0000775000567000056710000000000012701410205015327 5ustar jenkinsjenkins00000000000000nova-13.0.0/contrib/xen/0000775000567000056710000000000012701410205016121 5ustar jenkinsjenkins00000000000000nova-13.0.0/contrib/xen/vif-openstack0000775000567000056710000000153012701407773020637 0ustar jenkinsjenkins00000000000000#!/bin/bash ## copyright: B1 Systems GmbH , 2012. ## author: Christian Berendt , 2012. ## license: Apache License, Version 2.0 ## ## purpose: ## Creates a new vif device without attaching it to a ## bridge. Neutron Linux Bridge Agent will attach the ## created device to the belonging bridge. ## ## usage: ## place the script in ${XEN_SCRIPT_DIR}/vif-openstack and ## set (vif-script vif-openstack) in /etc/xen/xend-config.sxp. dir=$(dirname "$0") . 
"$dir/vif-common.sh" case "$command" in online) setup_virtual_bridge_port "$dev" ip link set $dev up ;; offline) ip link set $dev down ;; add) setup_virtual_bridge_port "$dev" ip link set $dev up ;; esac if [ "$type_if" = vif -a "$command" = "online" ] then success fi nova-13.0.0/contrib/profile_caching_scheduler.sh0000775000567000056710000000231512701407773023061 0ustar jenkinsjenkins00000000000000#!/bin/bash # Copyright (c) 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# # This runs a unit test that uses pycallgraph # to profile the select_destinations call # in the CachingScheduler # # For this script to work please run: # python setup.py develop # pip install -r requirements.txt # pip install -r test-requirements.txt # pip install pycallgraph # export EVENTLET_NO_GREENDNS='yes' # BASEDIR=$(dirname $0) TEST=$BASEDIR/../nova/tests/scheduler/test_caching_scheduler.py echo echo "Running this unit test file as a python script:" echo $TEST python $TEST RESULTDIR=$(pwd) echo echo "For profiler result see: " echo $RESULTDIR/scheduler.png echo nova-13.0.0/bandit.yaml0000664000567000056710000001161512701407773016040 0ustar jenkinsjenkins00000000000000# optional: after how many files to update progress #show_progress_every: 100 # optional: plugins directory name #plugins_dir: 'plugins' # optional: plugins discovery name pattern plugin_name_pattern: '*.py' # optional: terminal escape sequences to display colors #output_colors: # DEFAULT: '\033[0m' # HEADER: '\033[95m' # LOW: '\033[94m' # MEDIUM: '\033[93m' # HIGH: '\033[91m' # optional: log format string #log_format: "[%(module)s]\t%(levelname)s\t%(message)s" # globs of files which should be analyzed include: - '*.py' # a list of strings, which if found in the path will cause files to be excluded # for example /tests/ - to remove all all files in tests directory exclude_dirs: - '/tests/' profiles: XSS: include: - jinja2_autoescape_false - use_of_mako_templates ShellInjection: include: - subprocess_popen_with_shell_equals_true - subprocess_without_shell_equals_true - any_other_function_with_shell_equals_true - start_process_with_a_shell - start_process_with_no_shell exclude: SqlInjection: include: - hardcoded_sql_expressions blacklist_calls: bad_name_sets: - pickle: qualnames: [pickle.loads, pickle.load, pickle.Unpickler, cPickle.loads, cPickle.load, cPickle.Unpickler] message: "Pickle library appears to be in use, possible security issue." 
- marshal: qualnames: [marshal.load, marshal.loads] message: "Deserialization with the marshal module is possibly dangerous." - md5: qualnames: [hashlib.md5] message: "Use of insecure MD5 hash function." - mktemp_q: qualnames: [tempfile.mktemp] message: "Use of insecure and deprecated function (mktemp)." - eval: qualnames: [eval] message: "Use of possibly insecure function - consider using safer ast.literal_eval." - mark_safe: names: [mark_safe] message: "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed." - httpsconnection: qualnames: [httplib.HTTPSConnection] message: "Use of HTTPSConnection does not provide security, see https://wiki.openstack.org/wiki/OSSN/OSSN-0033" - yaml_load: qualnames: [yaml.load] message: "Use of unsafe yaml load. Allows instantiation of arbitrary objects. Consider yaml.safe_load()." - urllib_urlopen: qualnames: [urllib.urlopen, urllib.urlretrieve, urllib.URLopener, urllib.FancyURLopener, urllib2.urlopen, urllib2.Request] message: "Audit url open for permitted schemes. Allowing use of file:/ or custom schemes is often unexpected." - paramiko_injection: qualnames: [paramiko.exec_command, paramiko.invoke_shell] message: "Paramiko exec_command() and invoke_shell() usage may expose command injection vulnerabilities and should be reviewed." shell_injection: # Start a process using the subprocess module, or one of its wrappers. subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, subprocess.check_output, utils.execute, utils.execute_with_timeout] # Start a process with a function vulnerable to shell injection. shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, popen2.popen2, popen2.popen3, popen2.popen4, popen2.Popen3, popen2.Popen4, commands.getoutput, commands.getstatusoutput] # Start a process with a function that is not vulnerable to shell injection. 
no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv,os.execve, os.execvp, os.execvpe, os.spawnl, os.spawnle, os.spawnlp, os.spawnlpe, os.spawnv, os.spawnve, os.spawnvp, os.spawnvpe, os.startfile] blacklist_imports: bad_import_sets: - telnet: imports: [telnetlib] level: HIGH message: "Telnet is considered insecure. Use SSH or some other encrypted protocol." - info_libs: imports: [pickle, cPickle, subprocess, Crypto] level: LOW message: "Consider possible security implications associated with {module} module." hardcoded_password: word_list: "wordlist/default-passwords" ssl_with_bad_version: bad_protocol_versions: - 'PROTOCOL_SSLv2' - 'SSLv2_METHOD' - 'SSLv23_METHOD' - 'PROTOCOL_SSLv3' # strict option - 'PROTOCOL_TLSv1' # strict option - 'SSLv3_METHOD' # strict option - 'TLSv1_METHOD' # strict option password_config_option_not_marked_secret: function_names: - oslo.config.cfg.StrOpt - oslo_config.cfg.StrOpt execute_with_run_as_root_equals_true: function_names: - nova.utils.execute - nova.utils.trycmd nova-13.0.0/.testr.conf0000664000567000056710000000053512701407773016000 0ustar jenkinsjenkins00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \ ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./nova/tests} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list nova-13.0.0/AUTHORS0000664000567000056710000013454512701410204014752 0ustar jenkinsjenkins00000000000000Aaron Lee Aaron Rosen Aaron Rosen Aarti Kriplani Abhijeet Malawade Abhishek Anand Abhishek Chanda Abhishek Talwar Adalberto Medeiros Adam Gandelman Adam Gandelman Adam Gandelman Adam Johnson Adam Kacmarsky Adam Spiers Adelina Tuvenie Aditi Rajagopal Aditi Raveesh Aditi Raveesh Adrian Smith Adrian Vladu Adrien Cunin Adrien Cunin Ahmad Hassan Akash Gangil Akihiro MOTOKI Akira Yoshiyama Akira Yoshiyama Ala Rezmerita Alberto Planas Alessandro Pilotti 
Alessandro Pilotti Alessandro Tagliapietra Alessio Ababilov Alessio Ababilov Alex Gaynor Alex Glikson Alex Handle Alex Hmelevsky Alex Holden Alex Meade Alex Xu AlexFrolov Alexander Bochkarev Alexander Burluka Alexander Gordeev Alexander Gorodnev Alexander Sakhnov Alexander Schmidt Alexandre Levine Alexei Kornienko Alexey I. Froloff Alexey Roytman Alexis Lee Alexis Lee Alin Gabriel Serdean Allen Gao Alvaro Lopez Garcia Amandeep Amir Sadoughi Ana Krivokapic Anand Shanmugam Andras Gyacsok Andre Andre Aranha Andrea Rosa Andreas Jaeger Andreas Jaeger Andreas Scheuring Andrei Bacos Andrew Bogott Andrew Boik Andrew Clay Shafer Andrew Glen-Young Andrew James Andrew Laski Andrew Laski Andrew Lazarev Andrew Melton Andrew Woodward Andrey Brindeyev Andrey Kurilin Andrey Kurilin Andrey Pavlov Andy Hill Andy McCrae Andy Smith Andy Southgate Aneesh Puliyedath Udumbath Angus Lees Anish Bhatt Anita Kuno Ankit Agrawal Ann Kamyshnikova Anne Gentle Ante Karamatic Ante Karamatić Ante Karamatić Anthony Lee Anthony PERARD Anthony Woods Anthony Young Anton Gorenkov Anton V. Yanchenko Antoni Segura Puimedon Antony Messerli Anuj Mathur Arata Notsu Arathi Armando Migliaccio Armando Migliaccio Arnaud Legendre Arnaud Legendre Artur Malinowski Arvind Somya Arx Cruz Asbjørn Sannes Aswad Rangnekar Atsushi SAKAI Attila Fazekas Augustina Ragwitz Avinash Prasad Avishay Traeger Avishay Traeger Ayush Garg Balazs Gibizer Bartosz Fic Belmiro Moreira Ben McGraw Ben Nemec Ben Nemec Ben Nemec Ben Roble Ben Swartzlander Bernhard M. Wiedemann Bharath Thiruveedula Bhuvan Arumugam Bilal Akhtar Bill Owen Bo Quan Bo Wang Bob Ball Boden R Boris Filippov Boris Pavlovic Brad Hall Brad McConnell Brad Pokorny Brant Knudson Brendan Maguire Brent Eagles Brian D. 
Elliott Brian Elliott Brian Elliott Brian Haley Brian Lamar Brian Rosmaita Brian Schott Brian Waldon Brianna Poulos Brooks Kaminski Burt Holzman Cale Rath Cao ShuFeng Carlos Goncalves Cedric Brandily Chandan Kumar Chang Bo Guo ChangBo Guo(gcb) Changbin Liu ChenZheng Chet Burgess Chiradeep Vittal Chmouel Boudjnah Chris Behrens Chris Buccella Chris Dent Chris Friesen Chris J Arges Chris Jones Chris Krelle Chris Krelle Chris St. Pierre Chris Yeoh Christian Berendt Christine Wang Christoph Thiel Christopher Lefelhocz Christopher Lefelhocz Christopher MacGown Christopher Yeoh Chuck Carmack Chuck Short Chung Chih, Hung Cian O'Driscoll Clark Boylan Claudiu Belu Claxton Clay Gerrard Clemens Perz Clif Houck Clint Byrum Cole Robinson Cor Cornelisse Corey Bryant Corey Wright Cory Stone Cory Wright Craig Tracey Craig Vyvial Cyril Roelandt Dan Emmons Dan Florea Dan Genin Dan Genin Dan Prince Dan Smith Dan Smith Dan Smith Dan Wendlandt Dane Fichter Daniel Berrange (berrange@redhat.com) Daniel Genin Daniel Kuffner Daniel L Jones Daniel P. 
Berrange Daniel Stelter-Gliese Danny Al-Gaaf Darragh O'Reilly Darren Birkett Darren Sanders Darren Worrall Davanum Srinivas Davanum Srinivas Dave Lapsley Dave McCowan Dave McNally Dave Walker (Daviey) Dave Walker (Daviey) David Besen David Bingham David Edery David Hill David Kang David McNally David Medberry David Peraza David Pravec David Ripton David Scannell David Shrewsbury David Subiros David Xie Dazhao Dean Troyer Debo Dutta Debo~ Dutta Deepak C Shetty Deepak Garg Deliang Fan Demontiê Junior Dennis Kliban DennyZhang Derek Higgins Devananda van der Veen Devdeep Singh Devendra Modium Devin Carlen Dheeraj Gupta Diana Clarke Dima Shulyak Dimitri Mazmanov Dina Belova Dinesh Bhor Dirk Mueller Divya Dmitry Borodaenko Dmitry Guryanov Dmitry Spikhalskiy Dmitry Tantsur Dolph Mathews Dominik Heidler Don Dugger Donal Lafferty Dongcan Ye Dongdong Zhou Donovan Finch Dorin Paslaru Doug Hellmann Doug Hellmann Doug Royal Drew Thorstensen DuYaHong Duncan McGreggor Earle F. Philhower, III Ed Bak Ed Leafe EdLeafe Edgar Magana Eduardo Costa Edward Hope-Morley Edwin Zhai Eiich Aikawa Eiichi Aikawa Einst Crazy Eldar Nugaev Elena Ezhova Eli Qiao Eli Qiao Ellen Hui Emma Foley En Eoghan Glynn Eohyung Lee Eric Blake Eric Brown Eric Day Eric Guo Eric Harney Eric Harney Eric Windisch Eric Windisch Erik Zaadi Erwan Gallen Esra Celik Ethan Chu Euan Harris Eugene Kirpichov Eugene Nikanorov Eugeniya Kudryashova Evan Callicoat Evgeny Fedoruk Ewan Mellor Facundo Farias Facundo Maldonado Fang Jinxing Fei Long Wang Fei Long Wang Felix Li Feng Xi Yan Fengqian Gao Feodor Tersin Feodor Tersin Flaper Fesp Flavia Missi Flavio Percoco Flavio Percoco Florent Flament Florian Haas Forest Romain François Charlier Frederic Lepied Gabe Westmaas Gabriel Hurley Gabriel Samfira Gary Kotton Gary Kotton Gaurav Gupta Gauvain Pocentek Gauvain Pocentek George Shuklin Gergo Debreczeni Ghanshyam Ghe Rivero Giampaolo Lauria Giridhar Jayavelu Giulio Fidente Gleb Stepanov Gonéri Le Bouder Gordon Chung Gorka Eguileor 
Grant Murphy Greg Althaus Greg Ball Gregory Haynes Grzegorz Grasza Guan Qiang Guangya Liu Guangyu Suo Guohui Liu Gábor Antal Haiwei Xu Hans Lindgren Haomai Wang Harshada Mangesh Kakad Haruka Tanizawa He Jie Xu He Jie Xu He Jie Xu He Yongli Hendrik Volkmer Hengqing Hu Hirofumi Ichihara Hironori Shiina Hiroyuki Eguchi Hisaharu Ishii Hisaki Ohara Huan Xie Huang Rui Hyunsun Moon IWAMOTO Toshihiro Ian Cordasco Ian Wells Ian Wienand Ihar Hrachyshka Ildiko Vancsa Ilya Alekseyev Ilya Pekelny Inbar IonuÈ› ArțăriÈ™i Irena Berezovsky Isaku Yamahata Itzik Brown Ivan A. Melnikov Ivan Kolodyazhny J. Daniel Schmidt JC Martin Jacob Cherkas Jake Dahn Jake Liu Jakub Ruzicka James Carey James Chapman James E. Blair James Page Jamie Lennox Jan Grant Janis Gengeris Jared Culp Jason Cannavale Jason Dillaman Jason Koelker Jason.Zhao Javeme Jay Lau Jay Lau Jay Lee Jay Pipes Jay S. Bryant Jean-Baptiste RANSY Jean-Marc Saffroy Jeegn Chen Jeegn Chen Jeffrey Zhang Jenkins Jennifer Mulsow Jens Jorritsma Jens Rosenboom Jeremy Stanley Jesse Andrews Jesse J. Cook Jesse Keating Jesse Keating Jiajun Liu Jian Wen Jianghua Wang Jie Li Jim Fehlig Jim Rollenhagen Jimmy Bergman Jin Hui Jinwoo 'Joseph' Suh Joe Cropper Joe Gordon Joe Heck Joe Julian Joe Mills Joe Talerico Joel Coffman Joel Moore Johannes Erdfelt Johannes Erdfelt John Bresnahan John Dewey John Garbutt John Garbutt John Garbutt John Griffith John Griffith John H. Tran John Haan John Herndon John Hua John Kennedy John L. Villalovos John Stanford John Tran John Tran John Warren Johnson koil raj Jolyon Brown Jon Bernard Jon Grimm Jonathan Bryce Jordan Pittier Jordan Rinke JordanP JordanP Jorge Niedbalski Joseph Suh Joseph W. 
Breu Josh Durgin Josh Durgin Josh Gachnang Josh Kearney Josh Kleinpeter Joshua Harlow Joshua Harlow Joshua Hesketh Joshua McKenty JuPing Juan Antonio Osorio Robles Juan Manuel Olle Juerg Haefliger Julia Varlamova Julien Danjou Julien Danjou Justin Hammond Justin Santa Barbara Justin Shepherd Jérôme Gallard KIYOHIRO ADACHI Kamil Rykowski Kanagaraj Manickam Karen Noel Kartik Bommepally Kashi Reddy Kashyap Chamarthy Kaushik Chandrashekar Kaushik Chandrashekar Kei Masumoto Keisuke Tagami Ken Igarashi Ken Pepple Ken'ichi Ohmichi Ken'ichi Ohmichi Kenji Yasui Kent Wang Kentaro Matsumoto Keshava Bharadwaj Kevin Benton Kevin Benton Kevin Benton Kevin Bringard Kevin L. Mitchell Kevin_Zheng Kiall Mac Innes Kieran Spear Kirill Shileev Kobi Samoray Koert van der Veer Koichi Yoshigoe Koji Iida Komei Shimamura Kost Kravchenko Pavel Krisztian Gacsal Kui Shi Kun Huang Kurt Taylor Kurt Taylor Kylin CG Lan Qi song Lance Bragstad Lance Bragstad Lars Kellogg-Stedman Launchpad Translations on behalf of nova-core <> Lauren Taylor Leander Bessa Beernaert Leandro I. Costantino Lee Yarwood Li Chen Liam Kelleher Liam Young Lianhao Lu Likitha Shetty Lin Hua Cheng Lin Tan Lingxian Kong LingxianKong LiuNanke Loganathan Parthipan Lorin Hochstein Lucas Alvares Gomes Lucian Petrut Lucian Petrut Ludovic Beliveau Luis A. Garcia Luis Fernandez Alvarez Luiz Capitulino Luo Gangyi Lvov Maxim MORITA Kazutaka Madhu Mohan Nelemane Madhuri Kumari Mahesh K P Mahesh Panchaksharaiah Maithem Major Hayden Malini Bhandaru Mana Kaneko Mandar Vaze Mandell Degerness Marcio Roberto Starke Marco Sinhoreli Marcos Lobo Marian Horban Maris Fogels Mark Doffman Mark Goddard Mark McClain Mark McLoughlin Mark T. 
Voelker Mark Washenberger Markus Zoeller Martin Kletzander Martin Packman Martin Schuppert Martins Jakubovics Maru Newby Masaki Matsushita Masanori Itoh Masayuki Igawa Mate Lakat Mathew Odden Mathieu GagneÌ Mathieu Mitchell Matt Dietz Matt Fischer Matt Joyce Matt Odden Matt Rabe Matt Riedemann Matt Stephenson Matt Thompson Matthew Booth Matthew Gilliard Matthew Hooker Matthew Macdonald-Wallace Matthew Oliver Matthew Sherborne Matthew Treinish Matthew Treinish Mauro S. M. Rodrigues Maxim Nestratov Maxim Nestratov Maxime Leroy Md Nadeem Mehdi Abaakouk Melanie Witt Michael Bayer Michael Davies Michael Gundlach Michael H Wilson Michael J Fork Michael Kerrin Michael Krotscheck Michael Still Michael Turek Michael Wilson Michal Dulko Michal Pryc Miguel Lavalle Miguel Lavalle Mike Bayer Mike Dorman Mike Durnosvistov Mike Lundy Mike Milner Mike Perez Mike Pittaro Mike Scherbakov Mike Spreitzer Mikhail Durnosvistov Mikyung Kang Ming Yang Mitsuhiko Yamazaki Mitsuhiro SHIGEMATSU Mitsuhiro Tanino Mohammed Naser Monsyne Dragon Monty Taylor Morgan Fainberg Moshe Levi MotoKen Muneyuki Noguchi NTT PF Lab. 
Nachi Ueno Naveed Massjouni Navneet Kumar Neil Jerram Newptone Nicholas Kuechler Nick Bartos Nicolas Simonds Nikhil Komawar Nikhil Komawar Nikita Gerasimov Nikola Dipanov Nikola Äipanov Nikolai Korablin Nikolay Sokolov Nirmal Ranganathan Nisha Agarwal Noorul Islam K M Numan Siddique OctopusZhang Oleg Bondarev Olga Kopilova Ollie Leahy OndÅ™ej Nový Oshrit Feder Pablo Fernando Cargnelutti Pallavi Patrick East Patrick Schaefer Paul Green Paul Griffin Paul McMillan Paul Murray Paul Murray Paul Voccio Pavel Kholkin Pavel Kirpichyov Pavel Kravchenco Pavlo Shchelokovskyy Pawel Koniszewski Pawel Palucki Pedro Navarro Perez Pekelny "I159" Ilya Peng Yong Pengfei Zhang Peter Feiner Peter Krempa Petrut Lucian Phil Day Philip Knouff Philip Schwartz Phong Ly Pranali Deore PranaliDeore Pranav Salunke Praveen Yalagandula Prem Karat Przemyslaw Czesnowicz Puneet Goyal Pádraig Brady Qiang Guan Qiaowei Ren Qin Zhao Qin Zhao QingXin Meng Qiu Yu Qiu Yu Racha Ben Ali Radomir Dopieralski Radoslav Gerganov Radoslaw Smigielski Rafael Folco Rafi Khardalian Rajesh Tailor Rakesh H S Ralf Haferkamp Ram Nalluri Ravi Shekhar Jethani Ray Chen Ray Sun Renier Morales Renuka Apte Ricardo Carrillo Cruz Richard Jones Richard W.M. 
Jones Rick Clark Rick Harris Ripal Nathuji Ripal Nathuji Rob Esker Robert Collins Robert Collins Robert Kukura Robert Li Robert Pothier Robert Tingirica Rodolfo Alonso Hernandez Rohan Kanade Rohan Kanade Rohan Kanade Rohan Rhishikesh Kanade Rohit Karajgi Roland Hochmuth Romain Chantereau Romain Hardouin Roman Bogorodskiy Roman Bogorodskiy Roman Dobosz Roman Podoliaka Roman Podolyaka Ronen Kat Rongze Zhu RongzeZhu Rosario Di Somma Ruby Loo Rui Chen Rushi Agrawal Russell Bryant Russell Cloran Russell Sim Ryan Hsu Ryan Lane Ryan Lucio Ryan McNair Ryan Moe Ryan Moore Ryan Rossiter Ryota MIBU Ryu Ishimoto Sabari Kumar Murugesan Sachi King Sagar Ratnakara Nikam Sahid Orentino Ferdjaoui Sahid Orentino Ferdjaoui Sahid Orentino Ferdjaoui Salvatore Orlando Sam Alba Sam Betts Sam Morrison Sam Morrison Sam Yaple Sampath Priyankara Samuel Matzek Sandy Walsh Santiago Baldassin Sascha Peilicke Sascha Peilicke Sascha Peilicke Sateesh Chodapuneedi Sathish Nagappan Satyanarayana Patibandla Satyanarayana Patibandla Scott Moser Scott Reeve Sean Chen Sean Dague Sean Dague Sean Dague Sean M. Collins Sean M. 
Collins Sean McGinnis Sean Mooney Seif Lotfy Senhua Huang Sergey Nikitin Sergey Skripnick Sergey Vilgelm Sergio Cazzolato Shane Wang ShaoHe Feng Shawn Harsock Shawn Hartsock Shawn Hartsock Shih-Hao Li Shilla Saebi Shlomi Sasson Shraddha Pandhe Shraddha Pandhe Shuangtai Tian Shunya Kitada Shuquan Huang Sidharth Surana Silvan Kaiser Simon Chang Simon Pasquier Simon Pasquier Simona Iuliana Toader Sirisha Devineni Sirushti Murugesan Solly Ross Somik Behera Soren Hansen Soren Hansen Spencer Krum Stanislaw Pitucha StanisÅ‚aw Pitucha Stef T Stefan Amann Stephanie Reese Stephen Finucane Stephen Gran StephenSun Steve Baker Steve Baker Steve Kowalik Steven Dake Steven Hardy Steven Kaufer Stuart McLaren Subashini Soundararajan Subhadeep De Sudarshan Acharya Sudipta Biswas Sujitha Sukhdev Kapur Sulochan Acharya Sumanth Nagadavalli Sumit Naiksatam Sunil Thaha Surojit Pathak Sven Anderson Svetlana Shturm Swami Reddy Sylvain Bauza Sylvain Bauza Takaaki Suzuki Takashi NATSUME Takashi Natsume Takashi Sogabe Takenori Yoshimatsu Tang Chen Tang Chen TaoBai Taylor Peoples Taylor Smith Teng Li Teran McKinney Thang Pham Thelo Gaultier Thierry Carrez Thomas Bechtold Thomas Bechtold Thomas Herve Thomas Maddox Thorsten Tarrach Tiago Mello Tianpeng Wang Tiantian Gao Tim Miller Tim Potter Tim Pownall Tim Pownall Tim Simpson Timofey Durakov Todd Willey Tom Cammann Tom Fifield Tom Fifield Tom Hancock Tomi Juvonen Tomoe Sugihara Tomofumi Hayashi Tomoki Sekiyama Tong Li Tony Breeds Tony NIU Tony Yang Toshiaki Higuchi Tracy Jones Travis Ankrom Trey Morris Tristan Cacqueray Tristan Cacqueray Troy Toman Trung Trinh Tushar Kalra Tushar Patil Unmesh Gurjar Unmesh Gurjar Unmesh Gurjar Vasiliy Shlykov Vasyl Saienko Venkateswarlu Pallamala Vic Howard Victor Sergeyev Victor Stinner Victor Stinner Vijaya Erukala Vikhyat Umrao Vilobh Meshram Vincent Hou Vincent Untz Vipin Balachandran Vishvananda Ishaya Vivek YS Vladan Popovic Vladik Romanovsky Vladik Romanovsky Vui Lam Vui Lam Waldemar Znoinski Walter A. 
Boring IV Wangpan Wei Jiangang Wen Zhi Yu Wen Zhi Yu Wenhao Xu Will Foster William Wolf Wu Wenxiang Xavier Queralt Xiang Hui Xiangyang Chu Xiao Chen Xiaowei Qian Xiaoyan Ding Xing Yang Xinyuan Huang Xu Han Peng Xurong Yang YAMAMOTO Takashi Yaguang Tang Yaguang Tang Yang Hongyang Yang Yu YangLei Yassine Lamgarchal Yasuaki Nagata Yingxin Yingxin Cheng Yixing Jia Yolanda Robla Yong Sheng Gong Yongli he Yoon Doyoul Yosef Berman Yoshiaki Tamura You Ji Yufang Zhang Yuiko Takada Yuiko Takada YuikoTakada Yukihiro KAWADA Yulia Portnova Yun Mao Yun Shen Yunhong Jiang Yunhong, Jiang Yuriy Taraday Yuriy Zveryanskyy Yuuichi Fujioka Yuzlikeev Eduard ZHU ZHU Zaina Afoulki Zane Bitter Zed Shaw Zhao Lei Zheng Yue Zhengguang Zhenguo Niu Zhenguo Niu Zhenzan Zhou Zhi Yan Liu Zhi Yan Liu ZhiQiang Fan ZhiQiang Fan Zhihai Song Zhilong.JI Zhiteng Huang Zhiteng Huang Zhongyue Luo Zhou ShaoYu ZhuRongze Ziad Sawalha Zoltan Arnold Nagy abhishek-kekane abhishek.talwar abhishekkekane alexpilotti andrewbogott ankitagrawal april armando-migliaccio armando-migliaccio ashoksingh aulbachj benjamin.grassart bhagyashris boh.ricky chaochin@gmail.com chenxiao chhagarw chinmay chris fattarsi daisy-ycguo daisy-ycguo david martin deevi rani dekehn dimtruck dineshbhor divakar-padiyar-nandavar dzyu eddie-sheffield eewayhsu ericzhou facundo Farias ftersin fujioka yuuichi galstrom21 garyk garyk gengjh gh159m ghanshyam git-harry gong yong sheng gongysh grace.yu gregory.cunha gseverina guillaume-thouvenin guohliu gustavo panizzo hartsocks heha heijlong hgangwx hill hua zhang huangpengtao huangtianhua huangtianhua hzguanqiang iccha.sethi isethi ivan-zhu jakedahn javeme jaypei jcooklin jenny-shieh jianghua wang jiangwt100 jiataotj jichen jichenjc jmeridth jokcylou jufeng jufeng julykobe kairoaraujo karimb kashivreddy kirankv kiwik-chenrui ladquin lapgoon lawrancejing lei zhang lianghuifei ling-yun linwwu liu-sheng liudong liyingjun liyingjun liyuanyuan lizheming lkhysy llg8212 lqslan lrqrun lvdongbing lyanchih 
m.benchchaoui@cloudbau.de maqi mark.sturdevant mathieu-rohon mathrock mathrock mbasnight melanie witt mingyan bao mjbright mkislinska msdubov oleksii park hei park hei parklong partys pcarlton pengyuwei piyush110786 pkholkin pmoosh pran1990 pyw rackerjoe ruichen ryo.kurahashi s iwata saradpatel sarvesh-ranjan sarvesh-ranjan scottda shihanzhang shreeduth-awasthi shuangtai sonu.kumar sridevik sridhargaddam stanzgy tanlin tilottama gaat uberj unicell vaddi-kiran venakata anil venkata anil venkatamahesh vijaya-erukala vladimir.p wangbo wanghao wangxiyuan warewang watanabe isao wingwj wuhao xhzhf xiaoding xiexs xushichao yangyapeng yatin ydoyeul yongiman yugsuo yunhong jiang yuntong yuntongjin yuntongjin zhang-jinnan zhang.yufei@99cloud.net <1004988384@qq.com> zhangchao010 zhangchunlong zhangchunlong1@huawei.com zhangfeng zhangtralon zhangyanzi zhhuabj zhiyanliu zhiyanliu zhiyuan_cai zhoudongshu Édouard Thuleau Édouard Thuleau Édouard Thuleau Émilien Macchi nova-13.0.0/tests-py3.txt0000664000567000056710000003432712701410011016307 0ustar jenkinsjenkins00000000000000nova.tests.unit.api.openstack.compute.legacy_v2.test_extensions.ActionExtensionTest nova.tests.unit.api.openstack.compute.legacy_v2.test_extensions.ControllerExtensionTest nova.tests.unit.api.openstack.compute.legacy_v2.test_extensions.ExtensionControllerIdFormatTest nova.tests.unit.api.openstack.compute.legacy_v2.test_extensions.ExtensionManagerTest nova.tests.unit.api.openstack.compute.legacy_v2.test_extensions.ResourceExtensionTest nova.tests.unit.api.openstack.compute.legacy_v2.test_servers.ServersControllerCreateTest nova.tests.unit.api.openstack.compute.legacy_v2.test_servers.ServersControllerTest nova.tests.unit.api.openstack.compute.test_api.APITest nova.tests.unit.api.openstack.compute.test_api.APITestV21 nova.tests.unit.api.openstack.compute.test_console_output.ConsoleOutputExtensionTestV2 nova.tests.unit.api.openstack.compute.test_console_output.ConsoleOutputExtensionTestV21 
nova.tests.unit.api.openstack.compute.test_createserverext.CreateserverextTest nova.tests.unit.api.openstack.compute.test_disk_config.DiskConfigTestCaseV2 nova.tests.unit.api.openstack.compute.test_disk_config.DiskConfigTestCaseV21 nova.tests.unit.api.openstack.compute.test_extended_availability_zone.ExtendedAvailabilityZoneTestV2 nova.tests.unit.api.openstack.compute.test_extended_availability_zone.ExtendedAvailabilityZoneTestV21 nova.tests.unit.api.openstack.compute.test_extended_ips.ExtendedIpsTestV2 nova.tests.unit.api.openstack.compute.test_extended_ips.ExtendedIpsTestV21 nova.tests.unit.api.openstack.compute.test_extended_ips_mac.ExtendedIpsMacTestV2 nova.tests.unit.api.openstack.compute.test_extended_ips_mac.ExtendedIpsMacTestV21 nova.tests.unit.api.openstack.compute.test_extended_server_attributes.ExtendedServerAttributesTestV2 nova.tests.unit.api.openstack.compute.test_extended_server_attributes.ExtendedServerAttributesTestV21 nova.tests.unit.api.openstack.compute.test_extended_server_attributes.ExtendedServerAttributesTestV23 nova.tests.unit.api.openstack.compute.test_floating_ip_dns.FloatingIPDNSDomainPolicyEnforcementV21 nova.tests.unit.api.openstack.compute.test_floating_ip_dns.FloatingIPDNSEntryPolicyEnforcementV21 nova.tests.unit.api.openstack.compute.test_floating_ip_dns.FloatingIpDNSTestV2 nova.tests.unit.api.openstack.compute.test_floating_ip_dns.FloatingIpDNSTestV21 nova.tests.unit.api.openstack.compute.test_hide_server_addresses.HideServerAddressesTestV2 nova.tests.unit.api.openstack.compute.test_hide_server_addresses.HideServerAddressesTestV21 nova.tests.unit.api.openstack.compute.test_keypairs.KeypairsTestV2 nova.tests.unit.api.openstack.compute.test_keypairs.KeypairsTestV21 nova.tests.unit.api.openstack.compute.test_keypairs.KeypairsTestV210 nova.tests.unit.api.openstack.compute.test_keypairs.KeypairsTestV22 nova.tests.unit.api.openstack.compute.test_limits.WsgiLimiterProxyTest 
nova.tests.unit.api.openstack.compute.test_neutron_security_groups.TestNeutronSecurityGroupRulesV2 nova.tests.unit.api.openstack.compute.test_neutron_security_groups.TestNeutronSecurityGroupRulesV21 nova.tests.unit.api.openstack.compute.test_neutron_security_groups.TestNeutronSecurityGroupsOutputTest nova.tests.unit.api.openstack.compute.test_scheduler_hints.SchedulerHintsTestCaseV2 nova.tests.unit.api.openstack.compute.test_scheduler_hints.SchedulerHintsTestCaseV21 nova.tests.unit.api.openstack.compute.test_security_group_default_rules.TestSecurityGroupDefaultRulesNeutronV21 nova.tests.unit.api.openstack.compute.test_security_group_default_rules.TestSecurityGroupDefaultRulesV21 nova.tests.unit.api.openstack.compute.test_security_groups.SecurityGroupsOutputTestV2 nova.tests.unit.api.openstack.compute.test_security_groups.SecurityGroupsOutputTestV21 nova.tests.unit.api.openstack.compute.test_security_groups.TestSecurityGroupRulesV2 nova.tests.unit.api.openstack.compute.test_security_groups.TestSecurityGroupRulesV21 nova.tests.unit.api.openstack.compute.test_server_actions.ServerActionsControllerTestV2 nova.tests.unit.api.openstack.compute.test_server_actions.ServerActionsControllerTestV21 nova.tests.unit.api.openstack.compute.test_serversV21.Base64ValidationTest nova.tests.unit.api.openstack.compute.test_serversV21.ServersControllerCreateTest nova.tests.unit.api.openstack.compute.test_serversV21.ServersControllerRebuildInstanceTest nova.tests.unit.api.openstack.compute.test_serversV21.ServersControllerRebuildTestV219 nova.tests.unit.api.openstack.compute.test_serversV21.ServersControllerTest nova.tests.unit.api.openstack.compute.test_serversV21.ServersControllerTestV29 nova.tests.unit.api.openstack.compute.test_simple_tenant_usage.SimpleTenantUsageTestV2 nova.tests.unit.api.openstack.compute.test_simple_tenant_usage.SimpleTenantUsageTestV21 nova.tests.unit.api.openstack.compute.test_urlmap.UrlmapTest 
nova.tests.unit.api.openstack.compute.test_user_data.ServersControllerCreateTest nova.tests.unit.api.openstack.compute.test_versions.VersionsTestV20 nova.tests.unit.api.openstack.compute.test_versions.VersionsTestV21 nova.tests.unit.api.openstack.compute.test_versions.VersionsTestV21WithV2CompatibleWrapper nova.tests.unit.api.openstack.compute.test_volumes.BootFromVolumeTest nova.tests.unit.api.openstack.compute.test_volumes.VolumeApiTestV2 nova.tests.unit.api.openstack.compute.test_volumes.VolumeApiTestV21 nova.tests.unit.api.test_compute_req_id.RequestIdTest nova.tests.unit.api.test_validator.ValidatorTestCase nova.tests.unit.api.test_wsgi.Test nova.tests.unit.compute.test_compute.ComputeAPITestCase.test_create_with_base64_user_data nova.tests.unit.compute.test_compute.ComputeInjectedFilesTestCase.test_injected_invalid nova.tests.unit.compute.test_compute.ComputeTestCase.test_finish_resize_with_volumes nova.tests.unit.compute.test_compute.ComputeVolumeTestCase.test_boot_volume_serial nova.tests.unit.compute.test_compute.ComputeVolumeTestCase.test_poll_bandwidth_usage_not_implemented nova.tests.unit.compute.test_compute.ComputeVolumeTestCase.test_prep_block_device_over_quota_failure nova.tests.unit.compute.test_compute.ComputeVolumeTestCase.test_prep_block_device_with_blanks nova.tests.unit.compute.test_compute_cells.CellsComputeAPITestCase.test_create_with_base64_user_data nova.tests.unit.compute.test_compute_mgr.ComputeManagerUnitTestCase.test_run_pending_deletes nova.tests.unit.compute.test_host_api.ComputeHostAPICellsTestCase nova.tests.unit.compute.test_resources.BaseTestCase nova.tests.unit.compute.test_tracker.TestMoveClaim nova.tests.unit.console.test_websocketproxy.NovaProxyRequestHandlerBaseTestCase nova.tests.unit.consoleauth.test_consoleauth.ControlauthMemcacheEncodingTestCase nova.tests.unit.db.test_migrations.TestNovaMigrationsMySQL nova.tests.unit.db.test_migrations.TestNovaMigrationsPostgreSQL 
nova.tests.unit.db.test_migrations.TestNovaMigrationsSQLite nova.tests.unit.image.test_fake.FakeImageServiceTestCase nova.tests.unit.keymgr.test_barbican.BarbicanKeyManagerTestCase nova.tests.unit.keymgr.test_conf_key_mgr.ConfKeyManagerTestCase nova.tests.unit.keymgr.test_key.SymmetricKeyTestCase nova.tests.unit.keymgr.test_mock_key_mgr.MockKeyManagerTestCase nova.tests.unit.keymgr.test_single_key_mgr.SingleKeyManagerTestCase nova.tests.unit.network.test_manager.LdapDNSTestCase nova.tests.unit.pci.test_manager.PciDevTrackerTestCase nova.tests.unit.pci.test_stats.PciDeviceStatsTestCase nova.tests.unit.pci.test_stats.PciDeviceStatsWithTagsTestCase nova.tests.unit.test_api_validation.Base64TestCase nova.tests.unit.test_bdm.BlockDeviceMappingEc2CloudTestCase nova.tests.unit.test_block_device.BlockDeviceTestCase nova.tests.unit.test_block_device.TestBlockDeviceDict nova.tests.unit.test_cinder.CinderTestCase nova.tests.unit.test_cinder.CinderV2TestCase nova.tests.unit.test_configdrive2.ConfigDriveTestCase nova.tests.unit.test_hacking.HackingTestCase nova.tests.unit.test_ipv6.IPv6AccountIdentiferTestCase nova.tests.unit.test_matchers.TestDictMatches nova.tests.unit.test_matchers.TestXMLMatches nova.tests.unit.test_metadata.MetadataHandlerTestCase nova.tests.unit.test_metadata.MetadataPasswordTestCase nova.tests.unit.test_metadata.MetadataTestCase nova.tests.unit.test_metadata.OpenStackMetadataTestCase nova.tests.unit.test_nova_manage.CellCommandsTestCase nova.tests.unit.test_pipelib.PipelibTest nova.tests.unit.test_policy.AdminRolePolicyTestCase nova.tests.unit.test_quota.QuotaIntegrationTestCase nova.tests.unit.test_test_utils.TestUtilsTestCase nova.tests.unit.test_wsgi.TestWSGIServerWithSSL nova.tests.unit.virt.disk.mount.test_nbd.NbdTestCase nova.tests.unit.virt.ironic.test_driver.IronicDriverTestCase nova.tests.unit.virt.ironic.test_patcher.IronicDriverFieldsTestCase nova.tests.unit.virt.libvirt.storage.test_lvm.LvmTestCase 
nova.tests.unit.virt.libvirt.storage.test_rbd.RbdTestCase nova.tests.unit.virt.libvirt.test_config.LibvirtConfigCPUFeatureTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigCPUTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigCapsTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestCPUFeatureTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestCPUNUMATest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestCPUTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestCPUTuneTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestChannelTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestClockTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestConsoleTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestControllerTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestDiskTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestFeatureTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestFilesysTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestGraphicsTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestHostdev nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestHostdevPCI nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestIDMap nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestInputTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestInterfaceTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestMemoryBackingTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestMemoryTuneTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestMetadataNovaTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestNUMATuneTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestRngTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestSMBIOSTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestSeclabel 
nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestSerialTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestSnapshotDiskTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestSnapshotTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestSysinfoTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestTimerTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestVideoTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigGuestWatchdogTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigMemoryBalloonTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigSecretTest nova.tests.unit.virt.libvirt.test_config.LibvirtConfigTest nova.tests.unit.virt.libvirt.test_driver.LibvirtConnTestCase nova.tests.unit.virt.libvirt.test_driver.LibvirtDriverTestCase nova.tests.unit.virt.libvirt.test_driver.LibvirtVolumeSnapshotTestCase nova.tests.unit.virt.libvirt.test_fakelibvirt.FakeLibvirtTests.test_numa_topology_generation nova.tests.unit.virt.libvirt.test_firewall.IptablesFirewallTestCase nova.tests.unit.virt.libvirt.test_imagebackend.EncryptedLvmTestCase nova.tests.unit.virt.libvirt.test_imagebackend.LvmTestCase nova.tests.unit.virt.libvirt.test_imagebackend.RawTestCase nova.tests.unit.virt.libvirt.test_imagebackend.RbdTestCase nova.tests.unit.virt.libvirt.test_imagecache.ImageCacheManagerTestCase nova.tests.unit.virt.libvirt.test_imagecache.VerifyChecksumTestCase nova.tests.unit.virt.libvirt.test_utils.LibvirtUtilsTestCase nova.tests.unit.virt.libvirt.test_vif.LibvirtVifTestCase nova.tests.unit.virt.test_hardware.CPUPinningCellTestCase nova.tests.unit.virt.test_hardware.CPUPinningTestCase nova.tests.unit.virt.test_virt_drivers.FakeConnectionTestCase nova.tests.unit.virt.test_virt_drivers.LibvirtConnTestCase nova.tests.unit.virt.vmwareapi.test_images.VMwareImagesTestCase nova.tests.unit.virt.vmwareapi.test_read_write_util.ReadWriteUtilTestCase 
nova.tests.unit.virt.vmwareapi.test_vmops.VMwareVMOpsTestCase nova.tests.unit.virt.xenapi.client.test_session.CallPluginTestCase nova.tests.unit.virt.xenapi.image.test_utils.RawTGZTestCase nova.tests.unit.virt.xenapi.image.test_vdi_through_dev.TestTarGzProducer nova.tests.unit.virt.xenapi.test_agent.FileInjectionTestCase nova.tests.unit.virt.xenapi.test_vm_utils.ResizeFunctionTestCase nova.tests.unit.virt.xenapi.test_vm_utils.ScanSrTestCase nova.tests.unit.virt.xenapi.test_vm_utils.UnplugVbdTestCase nova.tests.unit.virt.xenapi.test_vmops.GetConsoleOutputTestCase nova.tests.unit.virt.xenapi.test_volume_utils.ParseVolumeInfoTestCase nova.tests.unit.virt.xenapi.test_xenapi.HypervisorPoolTestCase nova.tests.unit.virt.xenapi.test_xenapi.XenAPIDiffieHellmanTestCase nova.tests.unit.virt.xenapi.test_xenapi.XenAPIDom0IptablesFirewallTestCase nova.tests.unit.virt.xenapi.test_xenapi.XenAPIVMTestCase nova.tests.unit.volume.encryptors.test_cryptsetup.CryptsetupEncryptorTestCase nova.tests.unit.volume.encryptors.test_luks.LuksEncryptorTestCase nova.tests.unit.volume.test_cinder.CinderApiTestCase ########################################################################## # NOTE(dims): The following tests randomly fail in the gate. Please be # careful before you re-enable them ########################################################################## nova.tests.unit.compute.test_compute_mgr.ComputeManagerUnitTestCase nova.tests.unit.network.test_neutronv2.TestNeutronv2 nova.tests.unit.virt.test_virt_drivers.AbstractDriverTestCase nova.tests.unit.virt.vmwareapi.test_configdrive.ConfigDriveTestCase nova.tests.unit.virt.vmwareapi.test_driver_api.VMwareAPIVMTestCase nova.tests.unit.virt.xenapi.test_vmops.BootableTestCase nova.tests.unit.virt.xenapi.test_vmops.SpawnTestCase # The XenAPI plugins run in a Python 2.4 environment, so avoid attempting # to run their unit tests in a Python 3 environment. 
nova.tests.unit.virt.xenapi.plugins nova-13.0.0/requirements.txt0000664000567000056710000000352712701410011017155 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr>=1.6 # Apache-2.0 SQLAlchemy<1.1.0,>=1.0.10 # MIT boto>=2.32.1 # MIT decorator>=3.4.0 # BSD eventlet!=0.18.3,>=0.18.2 # MIT Jinja2>=2.8 # BSD License (3 clause) keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0 lxml>=2.3 # BSD Routes!=2.0,!=2.1,>=1.12.3;python_version=='2.7' # MIT Routes!=2.0,>=1.12.3;python_version!='2.7' # MIT cryptography>=1.0 # BSD/Apache-2.0 WebOb>=1.2.3 # MIT greenlet>=0.3.2 # MIT PasteDeploy>=1.5.0 # MIT Paste # MIT PrettyTable<0.8,>=0.7 # BSD sqlalchemy-migrate>=0.9.6 # Apache-2.0 netaddr!=0.7.16,>=0.7.12 # BSD netifaces>=0.10.4 # MIT paramiko>=1.16.0 # LGPL Babel>=1.3 # BSD iso8601>=0.1.9 # MIT jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT python-cinderclient>=1.3.1 # Apache-2.0 keystoneauth1>=2.1.0 # Apache-2.0 python-neutronclient!=4.1.0,>=2.6.0 # Apache-2.0 python-glanceclient>=2.0.0 # Apache-2.0 requests!=2.9.0,>=2.8.1 # Apache-2.0 six>=1.9.0 # MIT stevedore>=1.5.0 # Apache-2.0 setuptools>=16.0 # PSF/ZPL websockify>=0.6.1 # LGPLv3 oslo.cache>=1.5.0 # Apache-2.0 oslo.concurrency>=3.5.0 # Apache-2.0 oslo.config>=3.7.0 # Apache-2.0 oslo.context>=0.2.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.reports>=0.6.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 oslo.messaging>=4.0.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.service>=1.0.0 # Apache-2.0 rfc3986>=0.2.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 psutil<2.0.0,>=1.1.1 # BSD oslo.versionedobjects>=1.5.0 # Apache-2.0 alembic>=0.8.0 # MIT os-brick>=1.0.0 # Apache-2.0 os-win>=0.2.3 # Apache-2.0 
castellan>=0.3.1 # Apache-2.0 nova-13.0.0/HACKING.rst0000664000567000056710000001446112701410011015466 0ustar jenkinsjenkins00000000000000Nova Style Commandments ======================= - Step 1: Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ - Step 2: Read on Nova Specific Commandments --------------------------- - ``nova.db`` imports are not allowed in ``nova/virt/*`` - [N309] no db session in public API methods (disabled) This enforces a guideline defined in ``oslo.db.sqlalchemy.session`` - [N310] timeutils.utcnow() wrapper must be used instead of direct calls to datetime.datetime.utcnow() to make it easy to override its return value in tests - [N311] importing code from other virt drivers forbidden Code that needs to be shared between virt drivers should be moved into a common module - [N312] using config vars from other virt drivers forbidden Config parameters that need to be shared between virt drivers should be moved into a common module - [N313] capitalize help string Config parameter help strings should have a capitalized first letter - [N314] vim configuration should not be kept in source files. - [N316] Change assertTrue(isinstance(A, B)) by optimal assert like assertIsInstance(A, B). - [N317] Change assertEqual(type(A), B) by optimal assert like assertIsInstance(A, B) - [N318] Change assertEqual(A, None) or assertEqual(None, A) by optimal assert like assertIsNone(A) - [N319] Validate that debug level logs are not translated. - [N320] Setting CONF.* attributes directly in tests is forbidden. Use self.flags(option=value) instead. - [N321] Validate that LOG messages, except debug ones, have translations - [N322] Method's default argument shouldn't be mutable - [N323] Ensure that the _() function is explicitly imported to ensure proper translations. - [N324] Ensure that jsonutils.%(fun)s must be used instead of json.%(fun)s - [N325] str() and unicode() cannot be used on an exception. 
Remove use or use six.text_type() - [N326] Translated messages cannot be concatenated. String should be included in translated message. - [N328] Validate that LOG.info messages use _LI. - [N329] Validate that LOG.exception messages use _LE. - [N330] Validate that LOG.warning and LOG.warn messages use _LW. - [N332] Check that the api_version decorator is the first decorator on a method - [N334] Change assertTrue/False(A in/not in B, message) to the more specific assertIn/NotIn(A, B, message) - [N335] Check for usage of deprecated assertRaisesRegexp - [N336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs. - [N337] Don't import translation in tests - [N338] Change assertEqual(A in B, True), assertEqual(True, A in B), assertEqual(A in B, False) or assertEqual(False, A in B) to the more specific assertIn/NotIn(A, B) - [N339] Check common raise_feature_not_supported() is used for v2.1 HTTPNotImplemented response. - [N340] Check nova.utils.spawn() is used instead of greenthread.spawn() and eventlet.spawn() - [N341] contextlib.nested is deprecated - [N342] Config options should be in the central location ``nova/conf/`` - [N343] Check for common double word typos - [N344] Python 3: do not use dict.iteritems. - [N345] Python 3: do not use dict.iterkeys. - [N346] Python 3: do not use dict.itervalues. - [N347] Provide enough help text for config options - [N348] Deprecated library function os.popen() Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. For more information on creating unit tests and utilizing the testing infrastructure in OpenStack Nova, please read ``nova/tests/unit/README.rst``. 
Running Tests ------------- The testing system is based on a combination of tox and testr. The canonical approach to running tests is to simply run the command ``tox``. This will create virtual environments, populate them with dependencies and run all of the tests that OpenStack CI systems run. Behind the scenes, tox is running ``testr run --parallel``, but is set up such that you can supply any additional testr arguments that are needed to tox. For example, you can run: ``tox -- --analyze-isolation`` to cause tox to tell testr to add --analyze-isolation to its argument list. Python packages may also have dependencies that are outside of tox's ability to install. Please refer to ``doc/source/development.environment.rst`` for a list of those packages on Ubuntu, Fedora and Mac OS X. To run a single or restricted set of tests, pass a regex that matches the class name containing the tests as an extra ``tox`` argument; e.g. ``tox -- TestWSGIServer`` (note the double-hypen) will test all WSGI server tests from ``nova/tests/unit/test_wsgi.py``; ``-- TestWSGIServer.test_uri_length_limit`` would run just that test, and ``-- TestWSGIServer|TestWSGIServerWithSSL`` would run tests from both classes. It is also possible to run the tests inside of a virtual environment you have created, or it is possible that you have all of the dependencies installed locally already. In this case, you can interact with the testr command directly. Running ``testr run`` will run the entire test suite. ``testr run --parallel`` will run it in parallel (this is the default incantation tox uses.) More information about testr can be found at: http://wiki.openstack.org/testr Building Docs ------------- Normal Sphinx docs can be built via the setuptools ``build_sphinx`` command. To do this via ``tox``, simply run ``tox -e docs``, which will cause a virtualenv with all of the needed dependencies to be created and then inside of the virtualenv, the docs will be created and put into doc/build/html. 
Building a PDF of the Documentation ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you'd like a PDF of the documentation, you'll need LaTeX and ImageMagick installed, and additionally some fonts. On Ubuntu systems, you can get what you need with:: apt-get install texlive-full imagemagick Then you can then use the ``build_latex_pdf.sh`` script in tools/ to take care of both the the sphinx latex generation and the latex compilation. For example:: tools/build_latex_pdf.sh The script must be run from the root of the Nova repository and it'll copy the output pdf to Nova.pdf in that directory. nova-13.0.0/CONTRIBUTING.rst0000664000567000056710000000102712701407773016350 0ustar jenkinsjenkins00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: http://docs.openstack.org/infra/manual/developers.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/nova nova-13.0.0/releasenotes/0000775000567000056710000000000012701410205016360 5ustar jenkinsjenkins00000000000000nova-13.0.0/releasenotes/notes/0000775000567000056710000000000012701410205017510 5ustar jenkinsjenkins00000000000000nova-13.0.0/releasenotes/notes/filters_use_reqspec-9f92b9c0ead76093.yaml0000664000567000056710000000066712701407773026723 0ustar jenkinsjenkins00000000000000--- upgrade: - | Filters internal interface changed using now the RequestSpec NovaObject instead of an old filter_properties dictionary. In case you run out-of-tree filters, you need to modify the host_passes() method to accept a new RequestSpec object and modify the filter internals to use that new object. 
You can see other in-tree filters for getting the logic or ask for help in #openstack-nova IRC channel. nova-13.0.0/releasenotes/notes/libvirt-live-migration-new-tunneled-option-d7ebb1eb1e95e683.yaml0000664000567000056710000000043212701407773033306 0ustar jenkinsjenkins00000000000000--- features: - The libvirt driver now has a live_migration_tunnelled configuration option which should be used where the VIR_MIGRATE_TUNNELLED flag would previously have been set or unset in the live_migration_flag and block_migration_flag configuration options. nova-13.0.0/releasenotes/notes/compute_upgrade_levels_auto-97acebc7b45b76df.yaml0000664000567000056710000000120512701407773030645 0ustar jenkinsjenkins00000000000000--- features: - | A new ``auto`` value for the configuration option ``upgrade_levels.compute`` is accepted, that allows automatic determination of the compute service version to use for RPC communication. By default, we still use the newest version if not set in the config, a specific version if asked, and only do this automatic behavior if 'auto' is configured. When 'auto' is used, sending a SIGHUP to the service will cause the value to be re-calculated. Thus, after an upgrade is complete, sending SIGHUP to all services will cause them to start sending messages compliant with the newer RPC version. nova-13.0.0/releasenotes/notes/1516578-628b417b372f4f0f.yaml0000664000567000056710000000116412701407773023415 0ustar jenkinsjenkins00000000000000--- features: - | Enables NUMA topology reporting on PowerPC architecture from the libvirt driver in Nova but with a caveat as mentioned below. NUMA cell affinity and dedicated cpu pinning code assumes that the host operating system is exposed to threads. PowerPC based hosts use core based scheduling for processes. Due to this, the cores on the PowerPC architecture are treated as threads. Since cores are always less than or equal to the threads on a system, this leads to non-optimal resource usage while pinning. 
This feature is supported from libvirt version 1.2.19 for PowerPC. nova-13.0.0/releasenotes/notes/live_migration_uri-dependent-on-virt_type-595c46c2310f45c3.yaml0000664000567000056710000000053312701407773032760 0ustar jenkinsjenkins00000000000000--- upgrade: - The libvirt driver has changed the default value of the 'live_migration_uri' flag, that now is dependent on the 'virt_type'. The old default 'qemu+tcp://%s/system' now is adjusted for each of the configured hypervisors. For Xen this will be 'xenmigr://%s/system', for kvm/qemu this will be 'qemu+tcp://%s/system'. nova-13.0.0/releasenotes/notes/service-status-notification-e137297f5d5aa45d.yaml0000664000567000056710000000055712701407773030312 0ustar jenkinsjenkins00000000000000--- features: - A new service.status versioned notification has been introduced. When the status of the Service object is changed nova will send a new service.update notification with versioned payload according to bp versioned-notification-api. The new notification is documented in http://docs.openstack.org/developer/nova/notifications.html nova-13.0.0/releasenotes/notes/reserved-hugepages-per-nodes-f36225d5fca807e4.yaml0000664000567000056710000000001712701407773030311 0ustar jenkinsjenkins00000000000000--- prelude: > nova-13.0.0/releasenotes/notes/libvirt-deprecate-migration-flags-config-4ba1e2d6c9ef09ff.yaml0000664000567000056710000000054612701407773033014 0ustar jenkinsjenkins00000000000000--- deprecations: - | The libvirt live_migration_flag and block_migration_flag config options are deprecated. These options gave too fine grained control over the flags used and, in some cases, misconfigurations could have dangerous side effects. Please note the availability of a new live_migration_tunnelled configuration option. 
nova-13.0.0/releasenotes/notes/bp-split-network-plane-for-live-migration-40bc127734173759.yaml0000664000567000056710000000062012701407773032356 0ustar jenkinsjenkins00000000000000--- features: - | A new option "live_migration_inbound_addr" has been added in the configuration file, set None as default value. If this option is present in pre_migration_data, the ip address/hostname provided will be used instead of the migration target compute node's hostname as the uri for live migration, if it's None, then the mechanism remains as it is before. nova-13.0.0/releasenotes/notes/bp-rbd-instance-snapshots-130e860b726ddc16.yaml0000664000567000056710000000123512701407773027534 0ustar jenkinsjenkins00000000000000--- features: - When RBD is used for ephemeral disks and image storage, make snapshot use Ceph directly, and update Glance with the new location. In case of failure, it will gracefully fallback to the "generic" snapshot method. This requires changing the typical permissions for the Nova Ceph user (if using authx) to allow writing to the pool where vm images are stored, and it also requires configuring Glance to provide a v2 endpoint with direct_url support enabled (there are security implications to doing this). See http://docs.ceph.com/docs/master/rbd/rbd-openstack/ for more information on configuring OpenStack with RBD. 
nova-13.0.0/releasenotes/notes/13.0.0-cve-bugs-fe43ef267a82f304.yaml0000664000567000056710000000144612701407773025072 0ustar jenkinsjenkins00000000000000--- security: - | [OSSA 2016-001] Nova host data leak through snapshot (CVE-2015-7548) * `Bug 1524274 `_ * `Announcement `__ [OSSA 2016-002] Xen connection password leak in logs via StorageError (CVE-2015-8749) * `Bug 1516765 `_ * `Announcement `__ [OSSA 2016-007] Host data leak during resize/migrate for raw-backed instances (CVE-2016-2140) * `Bug 1548450 `_ * `Announcement `__nova-13.0.0/releasenotes/notes/config_scheduler_host_manager_driver-a543a74ea70f5e90.yaml0000664000567000056710000000071112701407773032234 0ustar jenkinsjenkins00000000000000--- upgrade: - | The option ``scheduler_host_manager`` is now changed to use entrypoint instead of full class path. Set one of the entrypoints under the namespace 'nova.scheduler.host_manager' in 'setup.cfg'. Its default value is 'host_manager'. The full class path style is still supported in current release. But it is not recommended because class path can be changed and this support will be dropped in the next major release. nova-13.0.0/releasenotes/notes/add-aggregate-type-extra-specs-affinity-filter-79a2d3ee152b8ecd.yaml0000664000567000056710000000001712701407773033752 0ustar jenkinsjenkins00000000000000--- prelude: > nova-13.0.0/releasenotes/notes/bp-boot-from-uefi-b413b96017db76dd.yaml0000664000567000056710000000010312701407773026052 0ustar jenkinsjenkins00000000000000--- features: - Add support for enabling uefi boot with libvirt. nova-13.0.0/releasenotes/notes/deprecate_glance_opts-eab01aba5dcda38a.yaml0000664000567000056710000000031612701407773027475 0ustar jenkinsjenkins00000000000000--- deprecations: - The host, port, and protocol options in the [glance] configuration section are deprecated, and will be removed in the N release. The api_servers value should be used instead. 
nova-13.0.0/releasenotes/notes/bp-instance-crash-dump-7ccbba7799dc66f9.yaml0000664000567000056710000000016212701407773027260 0ustar jenkinsjenkins00000000000000--- features: - A new server action trigger_crash_dump has been added to the REST API in microversion 2.17. nova-13.0.0/releasenotes/notes/deprecate_db_driver-91c76ca8011d663c.yaml0000664000567000056710000000053112701407773026522 0ustar jenkinsjenkins00000000000000--- deprecations: - Deprecate the ``db_driver`` config option. Previously this let you replace our SQLAlchemy database layer with your own. This approach is deprecated. Deployments that felt the need to use the facility are encourage to work with upstream Nova to address db driver concerns in the main SQLAlchemy code paths. nova-13.0.0/releasenotes/notes/user-settable-server-description-89dcfc75677e31bc.yaml0000664000567000056710000000120012701407773031325 0ustar jenkinsjenkins00000000000000--- features: - In Nova Compute API microversion 2.19, you can specify a "description" attribute when creating, rebuilding, or updating a server instance. This description can be retrieved by getting server details, or list details for servers. Refer to the Nova Compute API documentation for more information. Note that the description attribute existed in prior Nova versions, but was set to the server name by Nova, and was not visible to the user. So, servers you created with microversions prior to 2.19 will return the description equals the name on server details microversion 2.19. 
././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.0.0/releasenotes/notes/instance-hostname-used-to-populate-ports-dns-name-08341ec73dc076c0.yamlnova-13.0.0/releasenotes/notes/instance-hostname-used-to-populate-ports-dns-name-08341ec73dc076c0.ya0000664000567000056710000000250112701407773033701 0ustar jenkinsjenkins00000000000000--- features: - When booting an instance, its sanitized 'hostname' attribute is now used to populate the 'dns_name' attribute of the Neutron ports the instance is attached to. This functionality enables the Neutron internal DNS service to know the ports by the instance's hostname. As a consequence, commands like 'hostname -f' will work as expected when executed in the instance. When a port's network has a non-blank 'dns_domain' attribute, the port's 'dns_name' combined with the network's 'dns_domain' will be published by Neutron in an external DNS as a service like Designate. As a consequence, the instance's hostname is published in the external DNS as a service. This functionality is added to Nova when the 'DNS Integration' extension is enabled in Neutron. The publication of 'dns_name' and 'dns_domain' combinations to an external DNS as a service additionaly requires the configuration of the appropriate driver in Neutron. When the 'Port Binding' extension is also enabled in Neutron, the publication of a 'dns_name' and 'dns_domain' combination to the external DNS as a service will require one additional update operation when Nova allocates the port during the instance boot. This may have a noticeable impact on the performance of the boot process. nova-13.0.0/releasenotes/notes/api-database-now-required-6245f39d36885d1c.yaml0000664000567000056710000000032112701407773027434 0ustar jenkinsjenkins00000000000000--- upgrade: - During an upgrade to Mitaka, operators must create and initialize a database for the API service. 
Configure this in [api_database]/connection, and then run ``nova-manage api_db sync`` nova-13.0.0/releasenotes/notes/deprecate_compute_stats_class-229abfcb8816bdbd.yaml0000664000567000056710000000052712701407773031146 0ustar jenkinsjenkins00000000000000--- deprecations: - Deprecate ``compute_stats_class`` config option. This allowed loading an alternate implementation for collecting statistics for the local compute host. Deployments that felt the need to use this facility are encoraged to propose additions upstream so we can create a stable and supported interface here. nova-13.0.0/releasenotes/notes/os-migrations-ef225e5b309d5497.yaml0000664000567000056710000000051112701407773025352 0ustar jenkinsjenkins00000000000000--- deprecations: - The old top-level resource `/os-migrations` is deprecated, it won't be extended anymore. And migration_type for /os-migrations, also add ref link to the /servers/{uuid}/migrations/{id} for it when the migration is an in-progress live-migration. This has been added in microversion 2.23. nova-13.0.0/releasenotes/notes/remove-on-shared-storage-flag-from-evacuate-api-76a3d58616479fe9.yaml0000664000567000056710000000054112701407773033547 0ustar jenkinsjenkins00000000000000--- features: - Remove ``onSharedStorage`` parameter from server's evacuate action in microversion 2.14. Nova will automatically detect if the instance is on shared storage. Also adminPass is removed from the response body which makes the response body empty. The user can get the password with the server's os-server-password action. nova-13.0.0/releasenotes/notes/disk-weight-scheduler-98647f9c6317d21d.yaml0000664000567000056710000000051012701407773026700 0ustar jenkinsjenkins00000000000000--- features: - A disk space scheduling filter is now available, which prefers compute nodes with the most available disk space. By default, free disk space is given equal importance to available RAM. 
To increase the priority of free disk space in scheduling, increase the disk_weight_multiplier option. nova-13.0.0/releasenotes/notes/libvirt_hardware_policy_from_libosinfo-19e261851d1ad93a.yaml0000664000567000056710000000102612701407773032543 0ustar jenkinsjenkins00000000000000--- features: - For the libvirt driver, by default hardware properties will be retrieved from the Glance image and if such haven't been provided, it will use a libosinfo database to get those values. If users want to force a specific guest OS ID for the image, they can now use a new glance image property ``os_distro`` (eg. ``--property os_distro=fedora21``). In order to use the libosinfo database, you need to separately install the related native package provided for your operating system distribution. nova-13.0.0/releasenotes/notes/cinder-backend-report-discard-1def1c28140def9b.yaml0000664000567000056710000000067712701407773030551 0ustar jenkinsjenkins00000000000000--- features: - Add support for enabling discard support for block devices with libvirt. This will be enabled for Cinder volume attachments that specify support for the feature in their connection properties. This requires support to be present in the version of libvirt (v1.0.6+) and qemu (v1.6.0+) used along with the configured virtual drivers for the instance. The virtio-blk driver does not support this functionality. nova-13.0.0/releasenotes/notes/deprecate_ert-449b16638c008457.yaml0000664000567000056710000000062112701407773025144 0ustar jenkinsjenkins00000000000000--- upgrade: - | The Extensible Resource Tracker is deprecated and will be removed in the 14.0.0 release. If you use this functionality and have custom resources that are managed by the Extensible Resource Tracker, please contact the Nova development team by posting to the openstack-dev mailing list. There is no future planned support for the tracking of custom resources. 
nova-13.0.0/releasenotes/notes/neutron-mtu-6a7edd9e396107d7.yaml0000664000567000056710000000121212701407773025140 0ustar jenkinsjenkins00000000000000--- deprecations: - The ``network_device_mtu`` option in Nova is deprecated for removal since network MTU should be specified when creating the network with nova-network. With Neutron networks, the MTU value comes from the ``segment_mtu`` configuration option in Neutron. other: - The Neutron network MTU value is now used when plugging virtual interfaces in nova-compute. If the value is 0, which is the default value for the ``segment_mtu`` configuration option in Neutron before Mitaka, then the (deprecated) ``network_device_mtu`` configuration option in Nova is used, which defaults to not setting an MTU value. nova-13.0.0/releasenotes/notes/lock_policy-75bea372036acbd5.yaml0000664000567000056710000000001712701407773025206 0ustar jenkinsjenkins00000000000000--- prelude: > nova-13.0.0/releasenotes/notes/.placeholder0000664000567000056710000000000012701407773022001 0ustar jenkinsjenkins00000000000000nova-13.0.0/releasenotes/notes/server_migrations-30519b35d3ea6763.yaml0000664000567000056710000000044012701407773026230 0ustar jenkinsjenkins00000000000000--- features: - | Add two new list/show API for server-migration. The list API will return the in progress live migratons information of a server. The show API will return a specified in progress live migration of a server. This has been added in microversion 2.23. nova-13.0.0/releasenotes/notes/remove-deprecated-neutron-options-5f3a782aa9082fb5.yaml0000664000567000056710000000035112701407773031406 0ustar jenkinsjenkins00000000000000--- upgrade: - The old neutron communication options that were slated for removal in Mitaka are no longer available. This means that going forward communication to neutron will need to be configured using auth plugins. 
nova-13.0.0/releasenotes/notes/xen_rename-03edd9b78f3e81e5.yaml0000664000567000056710000000030612701407773025044 0ustar jenkinsjenkins00000000000000--- upgrade: - XenServer hypervisor type has been changed from ``xen`` to ``XenServer``. It could impact your aggregate metadata or your flavor extra specs if you provide only the former. nova-13.0.0/releasenotes/notes/instance-actions-read-deleted-instances-18bbb327924b66c7.yaml0000664000567000056710000000060612701407773032321 0ustar jenkinsjenkins00000000000000--- features: - The os-instance-actions methods now read actions from deleted instances. This means that 'GET /v2.1/{tenant-id}/servers/{server-id}/os-instance-actions' and 'GET /v2.1/{tenant-id}/servers/{server-id}/os-instance-actions/{req-id}' will return instance-action items even if the instance corresponding to '{server-id}' has been deleted. nova-13.0.0/releasenotes/notes/deprecate_hooks-6f6d60ac206a6da6.yaml0000664000567000056710000000050012701407773026035 0ustar jenkinsjenkins00000000000000--- deprecations: - Deprecate the use of nova.hooks. This facility used to let arbitrary out of tree code be executed around certain internal actions, but is unsuitable for having a well maintained API. Anyone using this facility should bring forward their use cases in the Newton cycle as nova-specs. nova-13.0.0/releasenotes/notes/bp-virt-driver-cpu-thread-pinning-1aaeeb6648f8e009.yaml0000664000567000056710000000075612701407773031272 0ustar jenkinsjenkins00000000000000--- features: - Added support for CPU thread policies, which can be used to control how the libvirt virt driver places guests with respect to CPU SMT "threads". These are provided as instance and image metadata options, 'hw:cpu_thread_policy' and 'hw_cpu_thread_policy' respectively, and provide an additional level of control over CPU pinning policy, when compared to the existing CPU policy feature. These changes were introduced in commits '83cd67c' and 'aaaba4a'. 
nova-13.0.0/releasenotes/notes/mitaka_prelude-c8b955ed78a5ad65.yaml0000664000567000056710000000150412701407773025715 0ustar jenkinsjenkins00000000000000--- prelude: | Nova 13.0.0 release is including a lot of new features and bugfixes. It can be extremely hard to mention all the changes we introduced during that release but we beg you to read at least the upgrade section which describes the required modifications that you need to do for upgrading your cloud from 12.0.0 (Liberty) to 13.0.0 (Mitaka). That said, a few major changes are worth to notice here. This is not an exhaustive list of things to notice, rather just important things you need to know : - Latest API microversion supported for Mitaka is v2.25 - Nova now requires a second database (called 'API DB'). - A new nova-manage script allows you to perform all online DB migrations once you upgrade your cloud - EC2 API support is fully removed. nova-13.0.0/releasenotes/notes/remove-ec2-api-service-c17a35ed297355b8.yaml0000664000567000056710000000042212701407773026726 0ustar jenkinsjenkins00000000000000--- other: - Nova's EC2 API support which was deprecated in Kilo (https://wiki.openstack.org/wiki/ReleaseNotes/Kilo#Upgrade_Notes_2) is removed from Mitaka. This has been replaced by the new ec2-api project (http://git.openstack.org/cgit/openstack/ec2-api/). nova-13.0.0/releasenotes/notes/zookeeper-servicegroup-driver-removed-c3bcaa6f9fe976ed.yaml0000664000567000056710000000134412701407773032615 0ustar jenkinsjenkins00000000000000--- deprecations: - | The Zookeeper Service Group driver has been removed. The driver has no known users and is not actively mantained. A warning log message about the driver's state was added for the Kilo release. Also, evzookeeper library that the driver depends on is unmaintained and `incompatible with recent eventlet releases`_. A future release of Nova will `use the Tooz library to track service liveliness`_, and Tooz supports Zookeeper. .. 
_`incompatible with recent eventlet releases`: https://bugs.launchpad.net/nova/+bug/1443910 .. _`use the Tooz library to track service liveliness`: http://specs.openstack.org/openstack/nova-specs/specs/liberty/approved/service-group-using-tooz.html nova-13.0.0/releasenotes/notes/config_scheduler_driver-e751ae392bc1a1d0.yaml0000664000567000056710000000067512701407773027561 0ustar jenkinsjenkins00000000000000--- upgrade: - | The option ``scheduler_driver`` is now changed to use entrypoint instead of full class path. Set one of the entrypoints under the namespace 'nova.scheduler.driver' in 'setup.cfg'. Its default value is 'host_manager'. The full class path style is still supported in current release. But it is not recommended because class path can be changed and this support will be dropped in the next major release. nova-13.0.0/releasenotes/notes/drop_instancev1_obj-4447ddd2bea644fa.yaml0000664000567000056710000000043412701407773026721 0ustar jenkinsjenkins00000000000000--- upgrade: - | (Only if you do continuous deployment) 1337890ace918fa2555046c01c8624be014ce2d8 drops support for an instance major version, which means that you must have deployed at least commit 713d8cb0777afb9fe4f665b9a40cac894b04aacb before deploying this one. nova-13.0.0/releasenotes/notes/new-oslo-reports-option-619c3dbf3ae320fb.yaml0000664000567000056710000000001712701407773027524 0ustar jenkinsjenkins00000000000000--- prelude: > nova-13.0.0/releasenotes/notes/xenserver-glance-plugin-1.3-11c3b70b8c928263.yaml0000664000567000056710000000036012701407773027527 0ustar jenkinsjenkins00000000000000--- upgrade: - The glance xenserver plugin has been bumped to version 1.3 which includes new interfaces for referencing glance servers by url. All dom0 will need to be upgraded with this plugin before upgrading the nova code. 
nova-13.0.0/releasenotes/notes/disk_ratio_to_rt-b6224ab8c0272d86.yaml0000664000567000056710000000211712701407773026104 0ustar jenkinsjenkins00000000000000--- feature: - On Mitaka compute nodes, if you want to modify the default disk allocation ratio of 1.0, you should set that on every compute node, rather than setting it in the scheduler. This means the disk, RAM and CPU allocation ratios now all work in the same way. upgrade: - For Liberty compute nodes, the disk_allocation_ratio works as before, you must set it on the scheduler if you want to change it. For Mitaka compute nodes, the disk_allocation_ratio set on the compute nodes will be used only if the configuration is not set on the scheduler. This is to allow, for backwards compatibility, the ability to still override the disk allocation ratio by setting the configuration on the scheduler node. In Newton, we plan to remove the ability to set the disk allocation ratio on the scheduler, at which point the compute nodes will always define the disk allocation ratio, and pass that up to the scheduler. None of this changes the default disk allocation ratio of 1.0. This matches the behaviour of the RAM and CPU allocation ratios. nova-13.0.0/releasenotes/notes/add-novnc-proxy-config-to-vnc-group-f5bb68740f623744.yaml0000664000567000056710000000023112701407773031326 0ustar jenkinsjenkins00000000000000--- upgrade: - All noVNC proxy configuration options have been added to the 'vnc' group. They should no longer be included in the 'DEFAULT' group. 
././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.0.0/releasenotes/notes/attach-detach-vol-for-shelved-and-shelved-offloaded-instances-93f70cfd49299f05.yamlnova-13.0.0/releasenotes/notes/attach-detach-vol-for-shelved-and-shelved-offloaded-instances-93f70cf0000664000567000056710000000061712701407773034340 0ustar jenkinsjenkins00000000000000--- features: - It is possible to call attach and detach volume API operations for instances which are in shelved and shelved_offloaded state. For an instance in shelved_offloaded state Nova will set to None the value for the device_name field, the right value for that field will be set once the instance will be unshelved as it will be managed by a specific compute manager. nova-13.0.0/releasenotes/notes/vmware_integration_bridge-249567087da5ecb2.yaml0000664000567000056710000000031612701407773027777 0ustar jenkinsjenkins00000000000000--- upgrade: - For backward compatible support the setting ``CONF.vmware.integration_bridge`` needs to be set when using the Neutron NSX|MH plugin. The default value has been set to ``None``. nova-13.0.0/releasenotes/notes/add-xvp-config-to-vnc-group-349cca99f05fcfd3.yaml0000664000567000056710000000023112701407773030143 0ustar jenkinsjenkins00000000000000--- upgrade: - All VNC XVP configuration options have been added to the 'vnc' group. They should no longer be included in the 'DEFAULT' group. nova-13.0.0/releasenotes/notes/versioned-notifications-423f4d8d2a3992c6.yaml0000664000567000056710000000066012701407773027426 0ustar jenkinsjenkins00000000000000--- features: - As part of refactoring the notification interface of Nova a new config option 'notification_format' has been added to specifies which notification format shall be used by nova. The possible values are 'unversioned' (e.g. legacy), 'versioned', 'both'. The default value is 'both'. 
The new versioned notifications are documented in http://docs.openstack.org/developer/nova/notifications.html nova-13.0.0/releasenotes/notes/online-data-migrations-48dde6a1d8661e47.yaml0000664000567000056710000000063612701407773027215 0ustar jenkinsjenkins00000000000000--- features: - Added a `nova-manage db online_data_migrations` command for forcing online data migrations, which will run all registered migrations for the release, instead of there being a separate command for each logical data migration. Operators need to make sure all data is migrated before upgrading to the next release, and the new command provides a unified interface for doing it. nova-13.0.0/releasenotes/notes/policy-sample-defaults-changed-b5eea1daeb305251.yaml0000664000567000056710000000127612701407773030730 0ustar jenkinsjenkins00000000000000--- other: - The sample policy file shipped with Nova contained many policies set to ""(allow all) which was not the proper default for many of those checks. It was also a source of confusion as some people thought "" meant to use the default rule. These empty policies have been updated to be explicit in all cases. Many of them were changed to match the default rule of "admin_or_owner" which is a more restrictive policy check but does not change the restrictiveness of the API calls overall because there are similar checks in the database already. This does not affect any existing deployment, just the sample file included for use by new deployments. nova-13.0.0/releasenotes/notes/bp-get-valid-server-state-a817488f4c8d3822.yaml0000664000567000056710000000056412701407773027410 0ustar jenkinsjenkins00000000000000--- features: - | A new host_status attribute for servers/detail and servers/{server_id}. In order to use this new feature, user have to contain the header of request microversion v2.16 in the API request. A new policy ``os_compute_api:servers:show:host_status`` added to enable the feature. By default, this is only exposed to cloud administrators. 
nova-13.0.0/releasenotes/notes/deprecate_vendordata_driver-eefc745365a881c3.yaml0000664000567000056710000000066412701407773030366 0ustar jenkinsjenkins00000000000000--- deprecations: - Deprecate the ``vendordata_driver`` config option. This allowed creating a different class loader for defining vendordata metadata. The default driver loads from a json file that can be arbitrarily specified, so is still quite flexible. Deployments that felt the need to use this facility are encoraged to propose additions upstream so we can create a stable and supported interface here. nova-13.0.0/releasenotes/notes/vhost-user-mtu-23d0af36a8adfa56.yaml0000664000567000056710000000053412701407773025706 0ustar jenkinsjenkins00000000000000--- fixes: - When plugging virtual interfaces of type vhost-user the MTU value will not be applied to the interface by nova. vhost-user ports exist only in userspace and are not backed by kernel netdevs, for this reason it is not possible to set the mtu on a vhost-user interface using standard tools such as ifconfig or ip link. nova-13.0.0/releasenotes/notes/deprecate_pluggable_managers-ca0224bcd779454c.yaml0000664000567000056710000000121712701407773030461 0ustar jenkinsjenkins00000000000000--- deprecations: - | Nova used to support the concept that ``service managers`` were replaceable components. There are many config options where you can replace a manager by specifying a new class. This concept is deprecated in Mitaka as are the following config options. * [cells] manager * metadata_manager * compute_manager * console_manager * consoleauth_manager * cert_manager * scheduler_manager Many of these will be removed in Newton. Users of these options are encouraged to work with Nova upstream on any features missing in the default implementations that are needed. 
nova-13.0.0/releasenotes/notes/request-spec-api-db-b9cc6e0624d563c5.yaml0000664000567000056710000000162312701407773026407 0ustar jenkinsjenkins00000000000000--- upgrade: - | The commit with change-id Idd4bbbe8eea68b9e538fa1567efd304e9115a02a requires that the nova_api database is setup and Nova is configured to use it. Instructions on doing that are provided below. Nova now requires that two databases are available and configured. The existing nova database needs no changes, but a new nova_api database needs to be setup. It is configured and managed very similarly to the nova database. A new connection string configuration option is available in the api_database group. An example:: [api_database] connection = mysql+pymysql://user:secret@127.0.0.1/nova_api?charset=utf8 And a new nova-manage command has been added to manage db migrations for this database. "nova-manage api_db sync" and "nova-manage api_db version" are available and function like the parallel "nova-manage db ..." version. nova-13.0.0/releasenotes/notes/conductor_rpcapi_v2_drop-9893c27bb32d9786.yaml0000664000567000056710000000007212701407773027505 0ustar jenkinsjenkins00000000000000--- other: - Conductor RPC API no longer supports v2.x. nova-13.0.0/releasenotes/notes/neutron-ovs-bridge-name-7b3477103622f4cc.yaml0000664000567000056710000000020712701407773027133 0ustar jenkinsjenkins00000000000000--- features: - Add support for allowing Neutron to specify the bridge name for the OVS, Linux Bridge, and vhost-user VIF types. nova-13.0.0/releasenotes/notes/disable_ec2_api_by_default-0ec0946433fc7119.yaml0000664000567000056710000000001712701407773027652 0ustar jenkinsjenkins00000000000000--- prelude: > nova-13.0.0/releasenotes/notes/block-live-migrate-with-attached-volumes-ee02afbfe46937c7.yaml0000664000567000056710000000033712701407773032674 0ustar jenkinsjenkins00000000000000--- features: - It is possible to block live migrate instances with additional cinder volumes attached. 
This requires libvirt version to be >=1.2.17 and does not work when live_migration_tunnelled is set to True. nova-13.0.0/releasenotes/notes/optional_project_id-6aebf1cb394d498f.yaml0000664000567000056710000000104712701407773027033 0ustar jenkinsjenkins00000000000000--- features: - Provides API 2.18, which makes the use of project_ids in API urls optional. upgrade: - In order to make project_id optional in urls, we must constrain the set of allowed values for project_id in our urls. This defaults to a regex of ``[0-9a-f\-]+``, which will match hex uuids (with / without dashes), and integers. This covers all known project_id formats in the wild. If your site uses other values for project_id, you can set a site specific validation with ``project_id_regex`` config variable. nova-13.0.0/releasenotes/notes/hyperv_2k8_drop-fb309f811767c7c4.yaml0000664000567000056710000000047212701407773025612 0ustar jenkinsjenkins00000000000000--- upgrade: - Support for Windows / Hyper-V Server 2008 R2 has been deprecated in Liberty (12.0.0) and it is no longer supported in Mitaka (13.0.0). If you have compute nodes running that version, please consider moving the running instances to other compute nodes before upgrading those to Mitaka. nova-13.0.0/releasenotes/notes/ironic_api_version_opt_deprecated-50c9b0486e78fe6e.yaml0000664000567000056710000000111112701407773031565 0ustar jenkinsjenkins00000000000000--- deprecations: - The configuration option ``api_version`` in the ``ironic`` group was marked as deprecated and will be removed in the future. The only possible value for that configuration was "1" (because Ironic only has 1 API version) and the Ironic team came to an agreement that setting the API version via configuration option should not be supported anymore. As the Ironic driver in Nova requests the Ironic v1.8 API, that means that Nova 13.0.0 ("Mitaka") requires Ironic 4.0.0 ("Liberty") or newer if you want to use the Ironic driver. 
nova-13.0.0/releasenotes/notes/scheduling-to-disabled-hosts-79f5b5d20a42875a.yaml0000664000567000056710000000033312701407773030224 0ustar jenkinsjenkins00000000000000--- upgrade: - The FilterScheduler is now including disabled hosts. Make sure you include the ComputeFilter in the ``scheduler_default_filters`` config option to avoid placing instances on disabled hosts. nova-13.0.0/releasenotes/notes/deprecate_security_group_api-3d96d683a3723e2c.yaml0000664000567000056710000000044012701407773030504 0ustar jenkinsjenkins00000000000000--- deprecations: - Deprecate ``security_group_api`` configuration option. The current values are ``nova`` and ``neutron``. In future the correct security_group_api option will be chosen based on the value of ``use_neutron`` which provides a more coherent user experience. nova-13.0.0/releasenotes/notes/switch-to-oslo-cache-7114a0ab2dea52df.yaml0000664000567000056710000000047612701407773026712 0ustar jenkinsjenkins00000000000000--- prelude: > deprecations: - Option ``memcached_servers`` is deprecated in Mitaka. Operators should use oslo.cache configuration instead. Specifically ``enabled`` option under [cache] section should be set to True and the url(s) for the memcached servers should be in [cache]/memcache_servers option.nova-13.0.0/releasenotes/notes/min_libvirt_bump-d9916d9c4512dd11.yaml0000664000567000056710000000020612701407773026113 0ustar jenkinsjenkins00000000000000--- upgrade: - The minimum required libvirt is now version 0.10.2. The minimum libvirt for the N release has been set to 1.2.1. 
nova-13.0.0/releasenotes/notes/parallels_support_snapshot-29b4ffae300c1f05.yaml0000664000567000056710000000013612701407773030370 0ustar jenkinsjenkins00000000000000--- features: - Libvirt with Virtuozzo virtualisation type now supports snapshot operations nova-13.0.0/releasenotes/notes/disco_volume_libvirt_driver-916428b8bd852732.yaml0000664000567000056710000000012212701407773030213 0ustar jenkinsjenkins00000000000000--- features: - Libvirt driver in Nova now supports Cinder DISCO volume driver. nova-13.0.0/releasenotes/notes/ebtables-version-fde659fe18b0e0c0.yaml0000664000567000056710000000015012701407773026232 0ustar jenkinsjenkins00000000000000--- upgrade: - nova now requires ebtables 2.0.10 or later - nova recommends libvirt 1.2.11 or later nova-13.0.0/releasenotes/notes/aggregate-uuid-generation-1f029af7a9af519b.yaml0000664000567000056710000000047712701407773027746 0ustar jenkinsjenkins00000000000000--- upgrade: - Upon first startup of the scheduler service in Mitaka, all defined aggregates will have UUIDs generated and saved back to the database. If you have a significant number of aggregates, this may delay scheduler start as that work is completed, but it should be minor for most deployments.nova-13.0.0/releasenotes/notes/force_config_drive_opt-e087055e14c40d88.yaml0000664000567000056710000000043712701407773027176 0ustar jenkinsjenkins00000000000000--- upgrade: - | The ``force_config_drive`` configuration option provided an ``always`` value which was deprecated in the previous release. That ``always`` value is now no longer accepted and deployments using that value have to change it to ``True`` before upgrading. nova-13.0.0/releasenotes/notes/force-live-migration-be5a10cd9c8eb981.yaml0000664000567000056710000000015612701407773027017 0ustar jenkinsjenkins00000000000000--- features: - A new REST API to force live migration to complete has been added in microversion 2.22. 
nova-13.0.0/releasenotes/notes/bp-add-project-and-user-id-a560d087656157d4.yaml0000664000567000056710000000036612701407773027324 0ustar jenkinsjenkins00000000000000--- features: - | Project-id and user-id are now also returned in the return data of os-server-groups APIs. In order to use this new feature, user have to contain the header of request microversion v2.13 in the API request. nova-13.0.0/releasenotes/notes/rm_volume_manager-78fed5be43d285b3.yaml0000664000567000056710000000102112701410011026366 0ustar jenkinsjenkins00000000000000--- upgrade: - A new ``use_neutron`` option is introduced which replaces the obtuse ``network_api_class`` option. This defaults to 'False' to match existing defaults, however if ``network_api_class`` is set to the known Neutron value Neutron networking will still be used as before. deprecations: - Deprecate ``volume_api_class`` and ``network_api_class`` config options. We only have one sensible backend for either of these. These options will be removed and turned into constants in Newton. nova-13.0.0/releasenotes/notes/upgrade_rootwrap_compute_filters-428ca239f2e4e63d.yaml0000664000567000056710000000124212701407773031503 0ustar jenkinsjenkins00000000000000--- upgrade: - Upgrade the rootwrap configuration for the compute service, so that patches requiring new rootwrap configuration can be tested with grenade. fixes: - In a race condition if base image is deleted by ImageCacheManager while imagebackend is copying the image to instance path, then the instance goes in to error state. In this case when libvirt has changed the base file ownership to libvirt-qemu while imagebackend is copying the image, then we get permission denied error on updating the file access time using os.utime. 
Fixed this issue by updating the base file access time with root user privileges using 'touch' command.nova-13.0.0/releasenotes/notes/soft-affinity-for-server-group-f45e191bd8cdbd15.yaml0000664000567000056710000000050612701407773030776 0ustar jenkinsjenkins00000000000000--- features: - Two new policies soft-affinty and soft-anti-affinity have been implemented for the server-group feature of Nova. This means that POST /v2.1/{tenant_id}/os-server-groups API resource now accepts 'soft-affinity' and 'soft-anti-affinity' as value of the 'policies' key of the request body. nova-13.0.0/releasenotes/notes/deprecate-conductor-manager-class-03620676d939b0eb.yaml0000664000567000056710000000015412701407773031136 0ustar jenkinsjenkins00000000000000--- deprecations: - The conductor.manager configuration option is now deprecated and will be removed. nova-13.0.0/releasenotes/notes/remove_ec2_and_objectstore_api-4ccb539db1d171fa.yaml0000664000567000056710000000051412701407773031065 0ustar jenkinsjenkins00000000000000--- upgrade: - All code and tests for Nova's EC2 and ObjectStore API support which was deprecated in Kilo (https://wiki.openstack.org/wiki/ReleaseNotes/Kilo#Upgrade_Notes_2) has been completely removed in Mitaka. This has been replaced by the new ec2-api project (http://git.openstack.org/cgit/openstack/ec2-api/).nova-13.0.0/releasenotes/notes/deprecate-nova-manage-service-subcommand-7626f7692bd62e41.yaml0000664000567000056710000000035212701407773032413 0ustar jenkinsjenkins00000000000000--- other: - The service subcommand of nova-manage is deprecated. Use the nova service-* commands from python-novaclient instead or the os-services REST resource. The service subcommand will be removed in the 14.0 release. 
nova-13.0.0/releasenotes/notes/bp-making-live-migration-api-friendly-3b547f4e0958ee05.yaml0000664000567000056710000000062712701407773031741 0ustar jenkinsjenkins00000000000000--- feature: - Make block_migration to support 'auto' value, which means nova will decide the value of block_migration during live-migration, and remove disk_over_commit flag for os-migrateLive action in microversion 2.23. upgrade: - We can not use microversion 2.25 to do live-migration during upgrade, nova-api will raise bad request if there is still old compute node in the cluster. nova-13.0.0/releasenotes/notes/deprecate-local-conductor-9cb9f45728281eb0.yaml0000664000567000056710000000037212701407773027606 0ustar jenkinsjenkins00000000000000--- upgrade: - | The local conductor mode is now deprecated and may be removed as early as the 14.0.0 release. If you are using local conductor mode, plan on deploying remote conductor by the time you upgrade to the 14.0.0 release. nova-13.0.0/releasenotes/notes/vmware_limits-16edee7a9ad023bc.yaml0000664000567000056710000000464612701407773025737 0ustar jenkinsjenkins00000000000000--- features: - | For the VMware driver, the flavor extra specs for quotas has been extended to support: - quota:cpu_limit - The cpu of a virtual machine will not exceed this limit, even if there are available resources. This is typically used to ensure a consistent performance of virtual machines independent of available resources. Units are MHz. - quota:cpu_reservation - guaranteed minimum reservation (MHz) - quota:cpu_shares_level - the allocation level. This can be 'custom', 'high', 'normal' or 'low'. - quota:cpu_shares_share - in the event that 'custom' is used, this is the number of shares. - quota:memory_limit - The memory utilization of a virtual machine will not exceed this limit, even if there are available resources. This is typically used to ensure a consistent performance of virtual machines independent of available resources. Units are MB. 
- quota:memory_reservation - guaranteed minimum reservation (MB) - quota:memory_shares_level - the allocation level. This can be 'custom', 'high', 'normal' or 'low'. - quota:memory_shares_share - in the event that 'custom' is used, this is the number of shares. - quota:disk_io_limit - The I/O utilization of a virtual machine will not exceed this limit. The unit is number of I/O per second. - quota:disk_io_reservation - Reservation control is used to provide guaranteed allocation in terms of IOPS - quota:disk_io_shares_level - the allocation level. This can be 'custom', 'high', 'normal' or 'low'. - quota:disk_io_shares_share - in the event that 'custom' is used, this is the number of shares. - quota:vif_limit - The bandwidth limit for the virtual network adapter. The utilization of the virtual network adapter will not exceed this limit, even if there are available resources. Units in Mbits/sec. - quota:vif_reservation - Amount of network bandwidth that is guaranteed to the virtual network adapter. If utilization is less than reservation, the resource can be used by other virtual network adapters. Reservation is not allowed to exceed the value of limit if limit is set. Units in Mbits/sec. - quota:vif_shares_level - the allocation level. This can be 'custom', 'high', 'normal' or 'low'. - quota:vif_shares_share - in the event that 'custom' is used, this is the number of shares. nova-13.0.0/releasenotes/notes/libvirt-live-migration-flags-mangling-a2407a31ddf17427.yaml0000664000567000056710000000101512701407773032017 0ustar jenkinsjenkins00000000000000--- upgrade: - The libvirt driver will now correct unsafe and invalid values for the live_migration_flag and block_migration_flag configuration options. The live_migration_flag must not contain VIR_MIGRATE_SHARED_INC but block_migration_flag must contain it. Both options must contain the VIR_MIGRATE_PEER2PEER, except when using the 'xen' virt type this flag is not supported. 
Both flags must contain the VIR_MIGRATE_UNDEFINE_SOURCE flag and not contain the VIR_MIGRATE_PERSIST_DEST flag. nova-13.0.0/releasenotes/notes/abort-live-migration-cb902bb0754b11b6.yaml0000664000567000056710000000027012701407773026643 0ustar jenkinsjenkins00000000000000--- features: - A new REST API to cancel an ongoing live migration has been added in microversion 2.24. Initially this operation will only work with the libvirt virt driver. nova-13.0.0/releasenotes/notes/api_servers_no_scheme-e4aa216d251022f2.yaml0000664000567000056710000000040612701407773027067 0ustar jenkinsjenkins00000000000000--- prelude: > deprecations: - It is now deprecated to use [glance] api_servers without a protocol scheme (http / https). This is required to support urls throughout the system. Update any api_servers list with fully qualified https / http urls. nova-13.0.0/releasenotes/source/0000775000567000056710000000000012701410205017660 5ustar jenkinsjenkins00000000000000nova-13.0.0/releasenotes/source/index.rst0000664000567000056710000000036212701410011021515 0ustar jenkinsjenkins00000000000000Welcome to Nova Release Notes documentation! ============================================== Contents ======== .. toctree:: :maxdepth: 2 liberty unreleased Indices and tables ================== * :ref:`genindex` * :ref:`search` nova-13.0.0/releasenotes/source/_templates/0000775000567000056710000000000012701410205022015 5ustar jenkinsjenkins00000000000000nova-13.0.0/releasenotes/source/_templates/.placeholder0000664000567000056710000000000012701407773024306 0ustar jenkinsjenkins00000000000000nova-13.0.0/releasenotes/source/unreleased.rst0000664000567000056710000000015312701407773022560 0ustar jenkinsjenkins00000000000000============================ Current Series Release Notes ============================ .. 
release-notes:: nova-13.0.0/releasenotes/source/liberty.rst0000664000567000056710000000022212701407773022100 0ustar jenkinsjenkins00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty nova-13.0.0/releasenotes/source/conf.py0000664000567000056710000002027712701407773021207 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # Nova Release Notes documentation build configuration file, created by # sphinx-quickstart on Thu Nov 5 11:50:32 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. 
project = u'Nova Release Notes' copyright = u'2015, Nova developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # from nova.version import version_info as nova_version # The short X.Y version. version = nova_version.canonical_version_string() # The full version, including alpha/beta/rc tags. release = nova_version.version_string_with_vcs() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. 
#html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'NovaReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'NovaReleaseNotes.tex', u'Nova Release Notes Documentation', u'Nova developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. 
List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'novareleasenotes', u'Nova Release Notes Documentation', [u'Nova developers'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'NovaReleaseNotes', u'Nova Release Notes Documentation', u'Nova developers', 'NovaReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False nova-13.0.0/releasenotes/source/_static/0000775000567000056710000000000012701410205021306 5ustar jenkinsjenkins00000000000000nova-13.0.0/releasenotes/source/_static/.placeholder0000664000567000056710000000000012701407773023577 0ustar jenkinsjenkins00000000000000nova-13.0.0/README.rst0000664000567000056710000000411512701407773015377 0ustar jenkinsjenkins00000000000000OpenStack Nova README ===================== OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of virtualization technologies, including KVM, Xen, LXC, VMware, and more. In addition to its native API, it includes compatibility with the commonly encountered Amazon EC2 and S3 APIs. OpenStack Nova is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. 
Nova primarily consists of a set of Python daemons, though it requires and integrates with a number of native system components for databases, messaging and virtualization capabilities. To keep updated with new developments in the OpenStack project follow `@openstack `_ on Twitter. To learn how to deploy OpenStack Nova, consult the documentation available online at: http://docs.openstack.org For information about the different compute (hypervisor) drivers supported by Nova, read this page on the wiki: https://wiki.openstack.org/wiki/HypervisorSupportMatrix In the unfortunate event that bugs are discovered, they should be reported to the appropriate bug tracker. If you obtained the software from a 3rd party operating system vendor, it is often wise to use their own bug tracker for reporting problems. In all other cases use the master OpenStack bug tracker, available at: http://bugs.launchpad.net/nova Developers wishing to work on the OpenStack Nova project should always base their work on the latest Nova code, available from the master GIT repository at: https://git.openstack.org/cgit/openstack/nova Developers should also join the discussion on the mailing list, at: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev Any new code must follow the development guidelines detailed in the HACKING.rst file, and pass all unit tests. Further developer focused documentation is available at: http://docs.openstack.org/developer/nova/ For information on how to contribute to Nova, please see the contents of the CONTRIBUTING.rst file. 
-- End of broadcast nova-13.0.0/.mailmap0000664000567000056710000001726712701407773015345 0ustar jenkinsjenkins00000000000000# Format is: # # Alvaro Lopez Garcia Alvaro Lopez Andrew Bogott Andrew Bogott Andy Smith Andy Smith Andy Smith andy Andy Smith termie Andy Smith termie Anne Gentle annegentle Anthony Young Anthony Young Sleepsonthefloor Arvind Somya Arvind Somya Arvind Somya asomya@cisco.com <> Brad McConnell Brad McConnell bmcconne@rackspace.com <> Brian Lamar Brian Lamar brian-lamar Dan Wendlandt danwent Dan Wendlandt danwent Dan Wendlandt danwent@gmail.com <> Dan Wendlandt danwent@gmail.com Davanum Srinivas Davanum Srinivas Édouard Thuleau Thuleau Édouard Ethan Chu Guohui Liu Jake Dahn jakedahn Jason Koelker Jason Kölker Jay Pipes jaypipes@gmail.com <> Jiajun Liu Jian Wen Jian Wen Joe Gordon Joel Moore Joel Moore joelbm24@gmail.com <> John Griffith john-griffith John Tran John Tran Joshua Hesketh Joshua Hesketh Justin Santa Barbara Justin Santa Barbara Justin SB Justin Santa Barbara Superstack Kei Masumoto Kei Masumoto Kei masumoto Kei Masumoto masumotok Kun Huang lawrancejing Matt Dietz Matt Dietz Cerberus Matt Dietz Matthew Dietz Matt Dietz matt.dietz@rackspace.com <> Matt Dietz mdietz NTT PF Lab. NTT PF Lab. NTT PF Lab. Nachi Ueno NTT PF Lab. nova Nikolay Sokolov Nickolay Sokolov Paul Voccio paul@openstack.org <> Philip Knouff Phlip Knouff Renuka Apte renukaapte Sandy Walsh SandyWalsh Sateesh Chodapuneedi sateesh Tiantian Gao Tiantian Gao Vishvananda Ishaya Vishvananda Ishaya Vivek YS Vivek YS vivek.ys@gmail.com <> Yaguang Tang Yolanda Robla yolanda.robla Zhenguo Niu Zhongyue Luo nova-13.0.0/setup.py0000664000567000056710000000200412701407773015415 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=1.8'], pbr=True) nova-13.0.0/api-guide/0000775000567000056710000000000012701410205015533 5ustar jenkinsjenkins00000000000000nova-13.0.0/api-guide/source/0000775000567000056710000000000012701410205017033 5ustar jenkinsjenkins00000000000000nova-13.0.0/api-guide/source/versions.rst0000664000567000056710000001000612701407773021452 0ustar jenkinsjenkins00000000000000======== Versions ======== The OpenStack Compute API uses both a URI and a MIME type versioning scheme. In the URI scheme, the first element of the path contains the target version identifier (e.g. https://servers.api.openstack.org/ v2.1/...). The MIME type versioning scheme uses HTTP content negotiation where the ``Accept`` or ``Content-Type`` headers contains a MIME type that identifies the version (application/vnd.openstack.compute.v2.1+json). A version MIME type is always linked to a base MIME type, such as application/json. If conflicting versions are specified using both an HTTP header and a URI, the URI takes precedence. **Example: Request with MIME type versioning** .. 
code:: GET /214412/images HTTP/1.1 Host: servers.api.openstack.org Accept: application/vnd.openstack.compute.v2.1+json X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb **Example: Request with URI versioning** .. code:: GET /v2.1/214412/images HTTP/1.1 Host: servers.api.openstack.org Accept: application/json X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb Permanent Links ~~~~~~~~~~~~~~~ The MIME type versioning approach allows for creating of permanent links, because the version scheme is not specified in the URI path: https://api.servers.openstack.org/224532/servers/123. If a request is made without a version specified in the URI or via HTTP headers, then a multiple-choices response (300) follows that provides links and MIME types to available versions. **Example: Multiple choices: JSON response** .. code:: { "choices": [ { "id": "v2.0", "links": [ { "href": "http://servers.api.openstack.org/v2/7f5b2214547e4e71970e329ccf0b257c/servers/detail", "rel": "self" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2" } ], "status": "SUPPORTED" }, { "id": "v2.1", "links": [ { "href": "http://servers.api.openstack.org/v2.1/7f5b2214547e4e71970e329ccf0b257c/servers/detail", "rel": "self" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2.1" } ], "status": "CURRENT" } ] } The API with ``CURRENT`` status is the newest API and continues to be improved by the Nova project. The API with ``SUPPORTED`` status is the old API, where new features are frozen. The API with ``DEPRECATED`` status is the API that will be removed in the foreseeable future. Providers should work with developers and partners to ensure there is adequate time to migrate to the new version before deprecated versions are discontinued. For any API which is under development but isn't released as yet, the API status is ``EXPERIMENTAL``. 
Your application can programmatically determine available API versions by performing a **GET** on the root URL (i.e. with the version and everything following that truncated) returned from the authentication system. You can also obtain additional information about a specific version by performing a **GET** on the base version URL (such as, ``https://servers.api.openstack.org/v2.1/``). Version request URLs must always end with a trailing slash (``/``). If you omit the slash, the server might respond with a 302 redirection request. For examples of the list versions and get version details requests and responses, see `*API versions* `__. The detailed version response contains pointers to both a human-readable and a machine-processable description of the API service. The machine-processable description is written in the Web Application Description Language (WADL). nova-13.0.0/api-guide/source/index.rst0000664000567000056710000000554112701410011020674 0ustar jenkinsjenkins00000000000000.. Copyright 2009-2015 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =========== Compute API =========== The nova project has a RESTful HTTP service called the OpenStack Compute API. Through this API, the service provides massively scalable, on demand, self-service access to compute resources. Depending on the deployment those compute resources might be Virtual Machines, Physical Machines or Containers. This guide covers the concepts in the OpenStack Compute API. 
For a full reference listing, please see: `Compute API Reference `__. We welcome feedback, comments, and bug reports at `bugs.launchpad.net/nova `__. Intended audience ================= This guide assists software developers who want to develop applications using the OpenStack Compute API. To use this information, you should have access to an account from an OpenStack Compute provider, or have access to your own deployment, and you should also be familiar with the following concepts: * OpenStack Compute service * RESTful HTTP services * HTTP/1.1 * JSON data serialization formats End User and Operator APIs ========================== The Compute API includes all end user and operator API calls. The API works with keystone and oslo.policy to deliver RBAC (Role-based access control). The default policy file gives suggestions on what APIs should not be made available to most end users but this is fully configurable. API Versions ============ Following the Liberty release, every Nova deployment should have the following endpoints: * / - list of available versions * /v2 - the first version of the Compute API, uses extensions (we call this Compute API v2.0) * /v1.1 - an alias for v2.0 for backwards compatibility * /v2.1 - same API, except uses microversions While this guide concentrates on documenting the v2.1 API, please note that the v2.0 and v1.1 API are (almost) identical to first microversion of the v2.1 API and are also covered by this guide. Contents ======== .. toctree:: :maxdepth: 2 users versions extensions microversions general_info server_concepts authentication faults limits links_and_references paginated_collections polling_changes-since_parameter request_and_response_formats nova-13.0.0/api-guide/source/general_info.rst0000664000567000056710000002525212701410011022216 0ustar jenkinsjenkins00000000000000======================== Key Compute API Concepts ======================== The OpenStack Compute API is defined as a RESTful HTTP service. 
The API takes advantage of all aspects of the HTTP protocol (methods, URIs, media types, response codes, etc.) and providers are free to use existing features of the protocol such as caching, persistent connections, and content compression among others. Providers can return information identifying requests in HTTP response headers, for example, to facilitate communication between the provider and client applications. OpenStack Compute is a compute service that provides server capacity in the cloud. Compute Servers come in different flavors of memory, cores, disk space, and CPU, and can be provisioned in minutes. Interactions with Compute Servers can happen programmatically with the OpenStack Compute API. User Concepts ============= To use the OpenStack Compute API effectively, you should understand several key concepts: - **Server** A virtual machine (VM) instance, physical machine or a container in the compute system. Flavor and image are requisite elements when creating a server. A name for the server is also required. For more details, such as server actions and server metadata, please see: :doc:`server_concepts` - **Flavor** Virtual hardware configuration for the requested server. Each flavor has a unique combination of disk space, memory capacity and priority for CPU time. - **Flavor Extra Specs** TODO: Short description at here. The detail reference to :doc:`extra_specs_and_properties` - **Image** A collection of files used to create or rebuild a server. Operators provide a number of pre-built OS images by default. You may also create custom images from cloud servers you have launched. These custom images are useful for backup purposes or for producing “gold†server images if you plan to deploy a particular server configuration frequently. - **Image Properties** TODO: Short description at here. The detail reference to :doc:`extra_specs_and_properties` - **Key Pair** An ssh or x509 keypair that can be injected into a server at it's boot time. 
This allows you to connect to your server once it has been created without having to use a password. If you don't specify a key pair, Nova will create a root password for you, and return it in plain text in the server create response. - **Volume** A block storage device that Nova can use as permanent storage. When a server is created it has some disk storage available, but that is considered ephemeral, as it is destroyed when the server is destroyed. A volume can be attached to a server, then later detached and used by another server. Volumes are created and managed by the Cinder service, though the Nova API can proxy some of these calls. - **Quotas** An upper limit on the amount of resources any individual tenant may consume. Quotas can be used to limit the number of servers a tenant creates, or the amount of disk space consumed, so that no one tenant can overwhelm the system and prevent normal operation for others. Changing quotas is an administrator-level action. - **Rate Limiting** Please see :doc:`limits` - **Availability zone** A grouping of host machines that can be used to control where a new server is created. There is some confusion about this, as the name "availability zone" is used in other clouds, such as Amazon Web Services, to denote a physical separation of server locations that can be used to distribute cloud resources for fault tolerance in case one zone is unavailable for any reason. Such a separation is possible in Nova if an administrator carefully sets up availability zones for that, but it is not the default. Networking Concepts ------------------- In this section we focus on this related to networking. - **Port** TODO - **Floating IPs, Pools and DNS** TODO - **Security Groups** TODO - **Cloudpipe** TODO - **Extended Networks** TODO Administrator Concepts ====================== Some APIs are largely focused on administration of Nova, and generally focus on compute hosts rather than servers. 
- **Services** Services are provided by Nova components. Normally, the Nova component runs as a process on the controller/compute node to provide the service. These services may be end-user facing, such as the the OpenStack Compute REST API service, but most just work with other Nova services. The status of each service is monitored by Nova, and if it is not responding normally, Nova will update its status so that requests are not sent to that service anymore. The service can also be controlled by an Administrator in order to run maintenance or upgrades, or in response to changing workloads. - **nova-osapi_compute** This service provides the OpenStack Compute REST API to end users and application clients. - **nova-metadata** This service provides the OpenStack Metadata API to servers. The metadata is used to configure the running servers. - **nova-scheduler** This service provides compute request scheduling by tracking available resources, and finding the host that can best fulfill the request. - **nova-conductor** This service provides database access for Nova and the other OpenStack services, and handles internal version compatibility when different services are running different versions of code. The conductor service also handles long-running requests. - **nova-compute** This service runs on every compute node, and communicates with a hypervisor for managing compute resources on that node. - **nova-network** This service handles networking of virtual servers. It is no longer under active development, and is being replaced by Neutron. - **nova-ec2(deprecated)** This service provides AWS EC2 API compatibility. - **nova-consoleauth** This service provides authorization for compute instances consoles. - **nova-cert** This service handles the management of X509 certificates. - **Services Actions** - **enable, disable, disable-log-reason** The service can be disabled to indicate the service is not available anymore. 
This is used by administrator to stop service for maintenance. For example, when Administrator wants to maintain a specific compute node, Administrator can disable nova-compute service on that compute node. Then nova won't dispatch any new compute request to that compute node anymore. Administrator also can add note for disable reason. - **forced-down** This action allows you set the state of service down immediately. Actually Nova only provides the health monitor of service status, there isn't any guarantee about health status of other parts of infrastructure, like the health status of data network, storage network and other components. The more complete health monitor of infrastructure is provided by external system normally. An external health monitor system can mark the service down for notifying the fault. `(This action is enabled in Microversion 2.11)` - **Hosts** Hosts are the *physical machines* that provide the resources for the virtual servers created in Nova. They run a ``hypervisor`` (see definition below) that handles the actual creation and management of the virtual servers. Hosts also run the ``Nova compute service``, which receives requests from Nova to interact with the virtual servers on that machine. When compute service receives a request, it calls the appropriate methods of the driver for that hypervisor in order to carry out the request. The driver acts as the translator from generic Nova requests to hypervisor-specific calls. Hosts report their current state back to Nova, where it is tracked by the scheduler service, so that the scheduler can place requests for new virtual servers on the hosts that can best fit them. - **Host Actions** A *host action* is one that affects the physical host machine, as opposed to actions that only affect the virtual servers running on that machine. There are three 'power' actions that are supported: *startup*, *shutdown*, and *reboot*. 
There are also two 'state' actions: enabling/disabling the host, and setting the host into or out of maintenance mode. Of course, carrying out these actions can affect running virtual servers on that host, so their state will need to be considered before carrying out the host action. For example, if you want to call the 'shutdown' action to turn off a host machine, you might want to migrate any virtual servers on that host before shutting down the host machine so that the virtual servers continue to be available without interruption. - **Hypervisors** A hypervisor, or virtual machine monitor (VMM), is a piece of computer software, firmware or hardware that creates and runs virtual machines. In nova, each Host (see `Hosts`) runs a hypervisor. Administrators are able to query the hypervisor for information, such as all the virtual servers currently running, as well as detailed info about the hypervisor, such as CPU, memory, or disk related configuration. Currently nova-compute also supports Ironic and LXC, but they don't have a hypervisor running. - **Aggregates** Please see :doc:`aggregates.rst` - **Migrations** Migrations are the process where a virtual server is moved from one host to another. Please see :doc:`server_concepts` for details about moving servers. Administrators are able to query the records in database for information about migrations. For example, they can determine the source and destination hosts, type of migration, or changes in the server's flavor. - **Certificates** Nova service "nova-cert" handles the management of X509 certificates which are used to generate certificates for euca-bundle-image. Relationship with Volume API ============================ Here we discuss about Cinder's API and how Nova users volume UUIDs. TODO - add more details. Relationship with Image API =========================== Here we discuss about Glance's API and how Nova uses image UUIDs. We also discuss how Nova proxies setting image metadata. TODO - add more details. 
Interactions with Neutron and Nova-Network ========================================== We talk about how networking can be provided be either Nova or Neutron. Here we discuss about Neutron's API and how Nova users port UUIDs. We also discuss Nova automatically creating ports, proxying security groups, and proxying floating IPs. Also talk about the APIs we do not proxy. TODO - add more details. nova-13.0.0/api-guide/source/faults.rst0000664000567000056710000001143212701407773021104 0ustar jenkinsjenkins00000000000000====== Faults ====== This doc explains how to understand what has happened to your API request. Every HTTP request has a status code. 2xx codes signify the API was a success. However, that is often not the end of the story. That generally only means the request to start the operation has been accepted. It does not mean the action you requested has successfully completed. Tracking Errors by Request ID ============================= Every request made has a unique Request ID. This is returned in a response header. Here is an example response header: X-Compute-Request-ID: req-4b9e5c04-c40f-4b4f-960e-6ac0858dca6c Server Actions -------------- There is an API for end users to list the outcome of Server Actions, referencing the requested action by request id. For more details, please see: http://developer.openstack.org/api-ref-compute-v2.1.html#os-instance-actions-v2.1 Logs ---- All logs on the system, by default, include the request-id when available. This allows an administrator to track the API request processing as it transitions between all the different nova services. Instance Faults --------------- Nova often adds an instance fault DB entry for an exception that happens while processing an API request. This often includes more administrator focused information, such as a stack trace. However, there is currently no API to retrieve this information. Notifications ------------- In many cases there are also notifications emitted that describe the error. 
This is an administrator focused API, that works best when treated as structured logging. Synchronous Faults ================== If an error occurs while processing our API request, you get a non 2xx API status code. The system also returns additional information about the fault in the body of the response. **Example: Fault: JSON response** .. code:: { "itemNotFound":{ "code": 404, "message":"Aggregate agg_h1 could not be found." } } The error ``code`` is returned in the body of the response for convenience. The ``message`` section returns a human-readable message that is appropriate for display to the end user. The ``details`` section is optional and may contain information—for example, a stack trace—to assist in tracking down an error. The ``details`` section might or might not be appropriate for display to an end user. The root element of the fault (such as, computeFault) might change depending on the type of error. The following link contains a list of possible elements along with their associated error codes. For more information on possible error code, please see: http://specs.openstack.org/openstack/api-wg/guidelines/http.html#http-response-codes Asynchronous faults =================== An error may occur in the background while a server is being built or while a server is executing an action. In these cases, the server is usually placed in an ``ERROR`` state. For some operations, like resize, its possible that the operations fails but the instance gracefully returned to its original state before attempting the operation. In both of these cases, you should be able to find out more from the Server Actions API described above. When a server is placed into an ``ERROR`` state, a fault is embedded in the offending server. Note that these asynchronous faults follow the same format as the synchronous ones. The fault contains an error code, a human readable message, and optional details about the error. 
Additionally, asynchronous faults may also contain a ``created`` timestamp that specifies when the fault occurred. **Example: Server in error state: JSON response** .. code:: { "server": { "id": "52415800-8b69-11e0-9b19-734f0000ffff", "tenant_id": "1234", "user_id": "5678", "name": "sample-server", "created": "2010-08-10T12:00:00Z", "hostId": "e4d909c290d0fb1ca068ffafff22cbd0", "status": "ERROR", "progress": 66, "image" : { "id": "52415800-8b69-11e0-9b19-734f6f007777" }, "flavor" : { "id": "52415800-8b69-11e0-9b19-734f216543fd" }, "fault" : { "code" : 500, "created": "2010-08-10T11:59:59Z", "message": "No valid host was found. There are not enough hosts available.", "details": [snip] }, "links": [ { "rel": "self", "href": "http://servers.api.openstack.org/v2/1234/servers/52415800-8b69-11e0-9b19-734f000004d2" }, { "rel": "bookmark", "href": "http://servers.api.openstack.org/1234/servers/52415800-8b69-11e0-9b19-734f000004d2" } ] } } nova-13.0.0/api-guide/source/users.rst0000664000567000056710000000453512701407773020755 0ustar jenkinsjenkins00000000000000.. Copyright 2015 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ===== Users ===== The Compute API includes all end user and administrator API calls. Role based access control ========================= Keystone middleware is used to authenticate users and identify their roles. The Compute API uses these roles, along with oslo.policy, to decide what the user is authorized to do. TODO - link to compute admin guide for details. 
Personas used in this guide =========================== While the policy can be configured in many ways, to make it easy to understand the most common use cases the API have been designed for, we should standardize on the following types of user: * application deployer: creates/deletes servers, directly or indirectly via API * application developer: creates images and applications that run on the cloud * cloud administrator: deploys, operates and maintains the cloud Now in reality the picture is much more complex. Specifically, there are likely to be different roles for observer, creator and administrator roles for the application developer. Similarly, there are likely to be various levels of cloud administrator permissions, such as a read-only role that is able to view a lists of servers for a specific tenant but is not able to perform any actions on any of them. Note: this is not attempting to be an exhaustive set of personas that consider various facets of the different users but instead aims to be a minimal set of users such that we use a consistent terminology throughout this document. TODO - could assign names to these users, or similar, to make it more "real". Discovering Policy ================== An API to discover what actions you are authorized to perform is still a work in progress. Currently this reported by a HTTP 403 error. TODO - link to the doc on errors. nova-13.0.0/api-guide/source/conf.py0000664000567000056710000002344312701407773020360 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Compute API documentation build configuration file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import os
import sys
import subprocess

import openstackdocstheme

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Compute API Guide'
bug_tag = u'api-guide'
copyright = u'2015, OpenStack contributors'

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = '2.1.0'
# The full version, including alpha/beta/rc tags.
release = '2.1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# A few variables have to be set for the log-a-bug feature.
#   giturl: The location of conf.py on Git. Must be set manually.
#   gitsha: The SHA checksum of the bug description. Extracted from git log.
#   bug_tag: Tag for categorizing the bug. Must be set manually.
#   bug_project: Launchpad project to file bugs against.
# These variables are passed to the logabug code via html_context.
giturl = u'http://git.openstack.org/cgit/openstack/nova/tree/api-guide/source'
git_cmd = ["/usr/bin/git", "rev-parse", "HEAD"]
# universal_newlines=True makes communicate() return text instead of bytes
# under Python 3 (no effect on the captured sha under Python 2 on Linux).
gitsha = subprocess.Popen(
    git_cmd, stdout=subprocess.PIPE,
    universal_newlines=True).communicate()[0]
# Source tree location. os.getcwd() replaces the previous
# `subprocess.Popen("pwd", ...)` call: it avoids spawning an external
# process and returns a str with no trailing newline on both Python 2
# and Python 3 (bytes.strip('\n') would raise TypeError on Python 3).
pwd = os.getcwd()
# html_context allows us to pass arbitrary values into the html template.
html_context = {"pwd": pwd, "gitsha": gitsha, "bug_tag": bug_tag,
                "giturl": giturl, "bug_project": "nova"}

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
# keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [openstackdocstheme.get_html_theme_path()]

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = []

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option must
# be the base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'compute-api-guide'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'ComputeAPI.tex', u'Compute API Documentation',
     u'OpenStack contributors', 'manual'),
]

# The name of an image file (relative to this directory) to place at the
# top of the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are
# parts, not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'computeapi', u'Compute API Documentation', [u'OpenStack contributors'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'ComputeAPIGuide', u'Compute API Guide', u'OpenStack contributors', 'APIGuide', 'This guide teaches OpenStack Compute service users concepts about ' 'managing resources in an OpenStack cloud with the Compute API.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] # -- Options for PDF output -------------------------------------------------- pdf_documents = [ ('index', u'ComputeAPIGuide', u'Compute API Guide', u'OpenStack ' 'contributors') ] nova-13.0.0/api-guide/source/request_and_response_formats.rst0000664000567000056710000000230412701407773025567 0ustar jenkinsjenkins00000000000000============================ Request and response formats ============================ The OpenStack Compute API only supports JSON request and response formats, with a mime-type of ``application/json``. 
As there is only one supported content type, all content is assumed to be ``application/json`` in both request and response formats. Request and response examples ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The examples below show a request body in JSON format. **Example: JSON request with headers** | POST /v2/010101/servers HTTP/1.1 | Host: servers.api.openstack.org | X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb .. code:: { "server": { "name": "server-test-1", "imageRef": "b5660a6e-4b46-4be3-9707-6b47221b454f", "flavorRef": "2", "max_count": 1, "min_count": 1, "networks": [ { "uuid": "d32019d3-bc6e-4319-9c1d-6722fc136a22" } ], "security_groups": [ { "name": "default" }, { "name": "another-secgroup-name" } ] } } nova-13.0.0/api-guide/source/links_and_references.rst0000664000567000056710000001007412701407773023752 0ustar jenkinsjenkins00000000000000==================== Links and references ==================== Often resources need to refer to other resources. For example, when creating a server, you must specify the image from which to build the server. You can specify the image by providing an ID or a URL to a remote image. When providing an ID, it is assumed that the resource exists in the current OpenStack deployment. **Example: ID image reference: JSON request** .. 
code:: { "server":{ "flavorRef":"http://openstack.example.com/openstack/flavors/1", "imageRef":"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", "metadata":{ "My Server Name":"Apache1" }, "name":"new-server-test", "personality":[ { "contents":"ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA==", "path":"/etc/banner.txt" } ] } } **Example: Full image reference: JSON request** .. code:: { "server": { "name": "server-test-1", "imageRef": "b5660a6e-4b46-4be3-9707-6b47221b454f", "flavorRef": "2", "max_count": 1, "min_count": 1, "networks": [ { "uuid": "d32019d3-bc6e-4319-9c1d-6722fc136a22" } ], "security_groups": [ { "name": "default" }, { "name": "another-secgroup-name" } ] } } For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: - A ``self`` link contains a versioned link to the resource. Use these links when the link is followed immediately. - A ``bookmark`` link provides a permanent link to a resource that is appropriate for long term storage. - An ``alternate`` link can contain an alternate representation of the resource. For example, an OpenStack Compute image might have an alternate representation in the OpenStack Image service. .. note:: The ``type`` attribute provides a hint as to the type of representation to expect when following the link. **Example: Server with self links: JSON** .. 
code:: { "server":{ "id":"52415800-8b69-11e0-9b19-734fcece0043", "name":"my-server", "links":[ { "rel":"self", "href":"http://servers.api.openstack.org/v2/1234/servers/52415800-8b69-11e0-9b19-734fcece0043" }, { "rel":"bookmark", "href":"http://servers.api.openstack.org/1234/servers/52415800-8b69-11e0-9b19-734fcece0043" } ] } } **Example: Server with alternate link: JSON** .. code:: { "image" : { "id" : "52415800-8b69-11e0-9b19-734f5736d2a2", "name" : "My Server Backup", "links": [ { "rel" : "self", "href" : "http://servers.api.openstack.org/v2/1234/images/52415800-8b69-11e0-9b19-734f5736d2a2" }, { "rel" : "bookmark", "href" : "http://servers.api.openstack.org/1234/images/52415800-8b69-11e0-9b19-734f5736d2a2" }, { "rel" : "alternate", "type" : "application/vnd.openstack.image", "href" : "http://glance.api.openstack.org/1234/images/52415800-8b69-11e0-9b19-734f5736d2a2" } ] } } nova-13.0.0/api-guide/source/server_concepts.rst0000664000567000056710000007576112701410011023004 0ustar jenkinsjenkins00000000000000=============== Server concepts =============== For the OpenStack Compute API, a server is a virtual machine (VM) instance, a physical machine or a container. Server status ~~~~~~~~~~~~~ TODO: This section's content is old, we need to update the status list. The task_state and vm_state which expose to Administrator need description to help user to understand the difference. You can filter the list of servers by image, flavor, name, and status through the respective query parameters. Server contains a status attribute that indicates the current server state. You can filter on the server status when you complete a list servers request. The server status is returned in the response body. The server status is one of the following values: **Server status values** - ``ACTIVE``: The server is active. - ``BUILD``: The server has not yet finished the original build process. - ``DELETED``: The server is deleted. - ``ERROR``: The server is in error. 
- ``HARD_REBOOT``: The server is hard rebooting. This is equivalent to pulling the power plug on a physical server, plugging it back in, and rebooting it. - ``MIGRATING``: The server is migrating. This is caused by a live migration (moving a server that is active) action. - ``PASSWORD``: The password is being reset on the server. - ``PAUSED``: The server is paused. - ``REBOOT``: The server is in a soft reboot state. A reboot command was passed to the operating system. - ``REBUILD``: The server is currently being rebuilt from an image. - ``RESCUE``: The server is in rescue mode. - ``RESIZE``: Server is performing the differential copy of data that changed during its initial copy. Server is down for this stage. - ``REVERT_RESIZE``: The resize or migration of a server failed for some reason. The destination server is being cleaned up and the original source server is restarting. - ``SHELVED``: The server is in shelved state. Depends on the shelve offload time, the server will be automatically shelved off loaded. - ``SHELVED_OFFLOADED``: The shelved server is offloaded (removed from the compute host) and it needs unshelved action to be used again. - ``SHUTOFF``: The server was powered down by the user, but not through the OpenStack Compute API. For example, the user issued a ``shutdown -h`` command from within the server. If the OpenStack Compute manager detects that the VM was powered down, it transitions the server to the SHUTOFF status. If you use the OpenStack Compute API to restart the server, it might be deleted first, depending on the value in the *``shutdown_terminate``* database field on the Instance model. - ``SOFT_DELETED``: The server is marked as deleted while will keep in the cloud for some time(configurable), during the period authorized user can restore the server back to normal state. When the time expires, the server will be deleted permanently. - ``SUSPENDED``: The server is suspended, either by request or necessity. 
This status appears for only the following hypervisors: XenServer/XCP, KVM, and ESXi. Administrative users may suspend a server if it is infrequently used or to perform system maintenance. When you suspend a server, its state is stored on disk, all memory is written to disk, and the server is stopped. Suspending a server is similar to placing a device in hibernation; memory and vCPUs become available to create other servers. - ``UNKNOWN``: The state of the server is unknown. Contact your cloud provider. - ``VERIFY_RESIZE``: System is awaiting confirmation that the server is operational after a move or resize. The compute provisioning algorithm has an anti-affinity property that attempts to spread customer VMs across hosts. Under certain situations, VMs from the same customer might be placed on the same host. hostId represents the host your server runs on and can be used to determine this scenario if it is relevant to your application. .. note:: HostId is unique *per account* and is not globally unique. Server creation ~~~~~~~~~~~~~~~ Status Transition: ``BUILD`` ``ACTIVE`` ``ERROR`` (on error) When you create a server, the operation asynchronously provisions a new server. The progress of this operation depends on several factors including location of the requested image, network I/O, host load, and the selected flavor. The progress of the request can be checked by performing a **GET** on /servers/*``id``*, which returns a progress attribute (from 0% to 100% complete). The full URL to the newly created server is returned through the ``Location`` header and is available as a ``self`` and ``bookmark`` link in the server representation. Note that when creating a server, only the server ID, its links, and the administrative password are guaranteed to be returned in the request. You can retrieve additional attributes by performing subsequent **GET** operations on the server. 
Server query ~~~~~~~~~~~~ Nova allows both general user and administrator to filter the server query result by using query options. For general user, ``reservation_id``, ``name``, ``status``, ``image``, ``flavor``, ``ip``, ``changes-since``, ``ip6 (microversion 2.5)`` are supported options to be used. The other options will be ignored by nova silently only with a debug log. For administrator, there are more fields can be used. The ``all_tenants`` option allows the servers owned by all tenants to be reported (otherwise only the servers associated with the calling tenant are included in the response). Additionally, the filter is applied to the database schema definition of ``class Instance``, e.g there is a field named 'locked' in the schema then the filter can use 'locked' as search options to filter servers. Also, there are some special options such as ``changes-since`` can be used and interpreted by nova. - **General user & Administrator supported options** General user supported options are listed above and administrator can use almost all the options except the options parameters for sorting and pagination. .. code:: Precondition: there are 2 servers existing in cloud with following info: "servers":[ { "name": "t1", "locked": "true", ... } { "name":"t2", "locked": "false", ... } **Example: General user query server with administrator only options** .. code:: Request with non-administrator context: GET /servers/detail?locked=1 Note that 'locked' is not returned through API layer Response: { "servers":[ { "name": "t1", ... } { "name":"t2", ... } ] } **Example: Administrator query server with administrator only options** .. code:: Request with administrator context: GET /servers/detail?locked=1 Response: { "servers":[ { "name": "t1", ... } ] } - **Exact matching and regex matching of the search options** Depending on the name of a filter, matching for that filter is performed using either exact matching or as regular expression matching. 
``project_id``, ``user_id``, ``image_ref``, ``vm_state``, ``instance_type_id``, ``uuid``, ``metadata``, ``host``, ``system_metadata`` are the options that are applied by exact matching when filtering. **Example: User query server using exact matching on host** .. code:: Precondition: Request with administrator context: GET /servers/detail Response: { "servers":[ { "name": "t1", "OS-EXT-SRV-ATTR:host": "devstack" ... } { "name": "t2", "OS-EXT-SRV-ATTR:host": "devstack1" ... } ] } Request with administrator context: GET /servers/detail?host=devstack Response: { "servers":[ { "name": "t1", "OS-EXT-SRV-ATTR:host": "devstack" ... } ] } **Example: Query server using regex matching on name** .. code:: Precondition: Request with administrator context: GET /servers/detail Response: { "servers":[ { "name": "test11", ... } { "name": "test21", ... } { "name": "t1", ... } { "name": "t14", ... } ] } Request with administrator context: GET /servers/detail?name=t1 Response: { "servers":[ { "name": "test11", ... } { "name": "t1", ... } { "name": "t14", ... } ] } **Example: User query server using exact matching on host and regex matching on name** .. code:: Precondition: Request with administrator context: GET /servers/detail Response: { "servers":[ { "name": "test1", "OS-EXT-SRV-ATTR:host": "devstack" ... } { "name": "t2", "OS-EXT-SRV-ATTR:host": "devstack1" ... } { "name": "test3", "OS-EXT-SRV-ATTR:host": "devstack1" ... } ] } Request with administrator context: GET /servers/detail?host=devstack1&name=test Response: { "servers":[ { "name": "test3", "OS-EXT-SRV-ATTR:host": "devstack1" ... } ] } - **Speical keys are used to tweek the query** ``changes-since`` returns instances updated after the given time, ``deleted`` return (or exclude) deleted instances and ``soft_deleted`` modify behavior of 'deleted' to either include or exclude instances whose vm_state is SOFT_DELETED. 
Please see: :doc:`polling_changes-since_parameter` **Example: User query server with special keys changes-since** .. code:: Precondition: GET /servers/detail Response: { "servers":[ { "name": "t1" "updated": "2015-12-15T15:55:52Z" ... } { "name": "t2", "updated": "2015-12-17T15:55:52Z" ... } } GET /servers/detail?changes-since='2015-12-16T15:55:52Z' Response: { { "name": "t2", "updated": "2015-12-17T15:55:52Z" ... } } Server actions ~~~~~~~~~~~~~~ - **Reboot** Use this function to perform either a soft or hard reboot of a server. With a soft reboot, the operating system is signaled to restart, which allows for a graceful shutdown of all processes. A hard reboot is the equivalent of power cycling the server. The virtualization platform should ensure that the reboot action has completed successfully even in cases in which the underlying domain/VM is paused or halted/stopped. - **Rebuild** Use this function to remove all data on the server and replaces it with the specified image. Server ID and IP addresses remain the same. - **Evacuate** Should a nova-compute service actually go offline, it can no longer report status about any of the servers on it. This means they'll be listed in an 'ACTIVE' state forever. Evacuate is a work around for this that lets an administrator forcibly rebuild these servers on another node. It makes no guarantees that the host was actually down, so fencing is left as an exercise to the deployer. - **Resize** (including **Confirm resize**, **Revert resize**) Use this function to convert an existing server to a different flavor, in essence, scaling the server up or down. The original server is saved for a period of time to allow rollback if there is a problem. All resizes should be tested and explicitly confirmed, at which time the original server is removed. All resizes are automatically confirmed after 24 hours if you do not confirm or revert them. Confirm resize action will delete the old server in the virt layer. 
The spawned server in the virt layer will be used from then on. On the contrary, Revert resize action will delete the new server spawned in the virt layer and revert all changes. The original server will be used from then on. Also, there is a periodic task configured by configuration option resize_confirm_window(in seconds), if this value is not 0, nova compute will check whether the server is in resized state longer than value of resize_confirm_window, it will automatically confirm the resize of the server. - **Pause**, **Unpause** You can pause a server by making a pause request. This request stores the state of the VM in RAM. A paused server continues to run in a frozen state. Unpause returns a paused server back to an active state. - **Suspend**, **Resume** Administrative users might want to suspend a server if it is infrequently used or to perform system maintenance. When you suspend a server, its VM state is stored on disk, all memory is written to disk, and the virtual machine is stopped. Suspending a server is similar to placing a device in hibernation; memory and vCPUs become available to create other servers. Resume will resume a suspended server to an active state. - **Snapshot** You can store the current state of the server root disk to be saved and uploaded back into the glance image repository. Then a server can later be booted again using this saved image. - **Backup** You can use backup method to store server's current state in the glance repository, in the mean time, old snapshots will be removed based on the given 'daily' or 'weekly' type. - **Start** Power on the server. - **Stop** Power off the server. - **Delete**, **Restore** Power off the given server first then detach all the resources associated to the server such as network and volumes, then delete the server. The configuration option 'reclaim_instance_interval' (in seconds) decides whether the server to be deleted will still be in the system. 
If this value is greater than 0, the deleted server will not be deleted immediately, instead it will be put into a queue until it's too old (deleted time greater than the value of reclaim_instance_interval). The administrator is able to use the Restore action to recover the server from the delete queue. If the deleted server remains longer than the value of reclaim_instance_interval, it will be deleted by the compute service automatically. - **Shelve**, **Shelve offload**, **Unshelve** Shelving a server indicates it will not be needed for some time and may be temporarily removed from the hypervisors. This allows its resources to be freed up for use by someone else. By default the configuration option 'shelved_offload_time' is 0 and the shelved server will be removed from the hypervisor immediately after the shelve operation; otherwise, the resource will be kept for the value of 'shelved_offload_time' (in seconds) so that during the time period the unshelve action will be faster, then the periodic task will remove the server from the hypervisor after 'shelved_offload_time' time passes. Setting the option 'shelved_offload_time' to -1 makes it never offload. Shelve will power off the given server and take a snapshot if it is booted from image. The server can then be offloaded from the compute host and its resources deallocated. Offloading is done immediately if booted from volume, but if booted from image the offload can be delayed for some time or infinitely, leaving the image on disk and the resources still allocated. Shelve offload is used to explicitly remove a shelved server that has been left on a host. This action can only be used on a shelved server and is usually performed by an administrator. Unshelve is the reverse operation of Shelve. It builds and boots the server again, on a new scheduled host if it was offloaded, using the shelved image in the glance repository if booted from image. - **Lock**, **Unlock** Lock a server so no further actions are allowed to the server. 
This can be done by either administrator or the server's owner. By default, only the owner or administrator can lock the server, and the administrator can overwrite the owner's lock. Unlock will unlock a server in locked state so additional operations can be performed on the server. By default, only the owner or administrator can unlock the server. - **Rescue**, **Unrescue** The rescue operation starts a server in a special configuration whereby it is booted from a special root disk image. This enables the tenant to try and restore a broken guest system. Unrescue is the reverse action of Rescue. The server spawned from the special root image will be deleted. - **Set administrator password** Sets the root/administrator password for the given server. It uses an optionally installed agent to set the administrator password. - **Migrate**, **Live migrate** Migrate is usually utilized by the administrator; it will move a server to another host. It utilizes the 'resize' action but with the same flavor, so during migration, the server will be powered off and rebuilt on another host. Live migrate also moves a server from one host to another, but it won't power off the server in general so the server will not suffer a down time. Administrators may use this to evacuate servers from a host that needs to undergo maintenance tasks. - **Trigger crash dump** Trigger crash dump is usually utilized by either the administrator or the server's owner; it will dump the memory image as a dump file into the given server, and then reboot the kernel again. This feature depends on the setting for the trigger (e.g. NMI) in the server. Server passwords ~~~~~~~~~~~~~~~~ You can specify a password when you create the server through the optional adminPass attribute. The specified password must meet the complexity requirements set by your OpenStack Compute provider. The server might enter an ``ERROR`` state if the complexity requirements are not met. 
In this case, a client can issue a change password action to reset the server password. If a password is not specified, a randomly generated password is assigned and returned in the response object. This password is guaranteed to meet the security requirements set by the compute provider. For security reasons, the password is not returned in subsequent **GET** calls. Server metadata ~~~~~~~~~~~~~~~ Custom server metadata can also be supplied at launch time. The maximum size of the metadata key and value is 255 bytes each. The maximum number of key-value pairs that can be supplied per server is determined by the compute provider and may be queried via the maxServerMeta absolute limit. Block Device Mapping ~~~~~~~~~~~~~~~~~~~~ TODO: Add some description about BDM. Scheduler Hints ~~~~~~~~~~~~~~~ TODO: Add description about how to custom scheduling policy for server booting. Server Consoles ~~~~~~~~~~~~~~~ TODO: We have multiple endpoints about consoles, we should explain that. Server networks ~~~~~~~~~~~~~~~ Networks to which the server connects can also be supplied at launch time. One or more networks can be specified. User can also specify a specific port on the network or the fixed IP address to assign to the server interface. Considerations ~~~~~~~~~~~~~~ - The maximum limit refers to the number of bytes in the decoded data and not the number of characters in the encoded data. - The maximum number of file path/content pairs that you can supply is also determined by the compute provider and is defined by the maxPersonality absolute limit. - The absolute limit, maxPersonalitySize, is a byte limit that is guaranteed to apply to all images in the deployment. Providers can set additional per-image personality limits. - The file injection might not occur until after the server is built and booted. - After file injection, personality files are accessible by only system administrators. 
For example, on Linux, all files have root and the root group as the owner and group owner, respectively, and allow user and group read access only (octal 440). Server access addresses ~~~~~~~~~~~~~~~~~~~~~~~ In a hybrid environment, the IP address of a server might not be controlled by the underlying implementation. Instead, the access IP address might be part of the dedicated hardware; for example, a router/NAT device. In this case, the addresses provided by the implementation cannot actually be used to access the server (from outside the local LAN). Here, a separate *access address* may be assigned at creation time to provide access to the server. This address may not be directly bound to a network interface on the server and may not necessarily appear when a server's addresses are queried. Nonetheless, clients that must access the server directly are encouraged to do so via an access address. In the example below, an IPv4 address is assigned at creation time. **Example: Create server with access IP: JSON request** .. code:: { "server":{ "name":"new-server-test", "imageRef":"52415800-8b69-11e0-9b19-734f6f006e54", "flavorRef":"52415800-8b69-11e0-9b19-734f1195ff37", "accessIPv4":"67.23.10.132" } } .. note:: Both IPv4 and IPv6 addresses may be used as access addresses and both addresses may be assigned simultaneously as illustrated below. Access addresses may be updated after a server has been created. **Example: Create server with multiple access IPs: JSON request** .. code:: { "server":{ "name":"new-server-test", "imageRef":"52415800-8b69-11e0-9b19-734f6f006e54", "flavorRef":"52415800-8b69-11e0-9b19-734f1195ff37", "accessIPv4":"67.23.10.132", "accessIPv6":"::babe:67.23.10.132" } } Moving servers ~~~~~~~~~~~~~~ There are several actions that may result in a server moving from one compute host to another including shelve, resize, migrations and evacuate. The following use cases demonstrate the intention of the actions and the consequence for operational procedures. 
Cloud operator needs to move a server ------------------------------------- Sometimes a cloud operator may need to redistribute work loads for operational purposes. For example, the operator may need to remove a compute host for maintenance or deploy a kernel security patch that requires the host to be rebooted. The operator has two actions available for deliberately moving work loads: cold migration (moving a server that is not active) and live migration (moving a server that is active). Cold migration moves a server from one host to another by copying its state, local storage and network configuration to new resources allocated on a new host selected by scheduling policies. The operation is relatively quick as the server is not changing its state during the copy process. The user does not have access to the server during the operation. Live migration moves a server from one host to another while it is active, so it is constantly changing its state during the action. As a result it can take considerably longer than cold migration. During the action the server is online and accessible, but only a limited set of management actions are available to the user. The following are common patterns for employing migrations in a cloud: - **Host maintenance** If a compute host is to be removed from the cloud all its servers will need to be moved to other hosts. In this case it is normal for the rest of the cloud to absorb the work load, redistributing the servers by rescheduling them. To prepare the host it will be disabled so it does not receive any further servers. Then each server will be migrated to a new host by cold or live migration, depending on the state of the server. When complete, the host is ready to be removed. - **Rolling updates** Often it is necessary to perform an update on all compute hosts which requires them to be rebooted. In this case it is not strictly necessary to move inactive servers because they will be available after the reboot. 
However, active servers would be impacted by the reboot. Live migration will allow them to continue operation. In this case a rolling approach can be taken by starting with an empty compute host that has been updated and rebooted. Another host that has not yet been updated is disabled and all its servers are migrated to the new host. When the migrations are complete the new host continues normal operation. The old host will be empty and can be updated and rebooted. It then becomes the new target for another round of migrations. This process can be repeated until the whole cloud has been updated, usually using a pool of empty hosts instead of just one. - **Resource Optimization** To reduce energy usage, some cloud operators will try and move servers so they fit into the minimum number of hosts, allowing some servers to be turned off. Sometimes higher performance might be wanted, so servers are spread out between the hosts to minimize resource contention. Migrating a server is not normally a choice that is available to the cloud user because the user is not normally aware of compute hosts. Management of the cloud and how servers are provisioned in it is the responsibility of the cloud operator. Recover from a failed compute host ---------------------------------- Sometimes a compute host may fail. This is a rare occurrence, but when it happens during normal operation the servers running on the host may be lost. In this case the operator may recreate the servers on the remaining compute hosts using the evacuate action. Failure detection can be proved to be impossible in compute systems with asynchronous communication, so true failure detection cannot be achieved. Usually when a host is considered to have failed it should be excluded from the cloud and any virtual networking or storage associated with servers on the failed host should be isolated from it. These steps are called fencing the host. Initiating these action is outside the scope of Nova. 
Once the host has been fenced its servers can be recreated on other hosts without worry of the old incarnations reappearing and trying to access shared resources. It is usual to redistribute the servers from a failed host by rescheduling them. Please note, this operation can result in data loss for the user's server. As there is no access to the original server, if there were any disks stored on local storage, that data will be lost. Evacuate does the same operation as a rebuild. It downloads any images from glance and creates new blank ephemeral disks. Any disks that were volumes, or on shared storage, are reconnected. There should be no data loss for those disks. This is why fencing the host is important, to ensure volumes and shared storage are not corrupted by two servers writing simultaneously. Evacuating a server is solely in the domain of the cloud operator because it must be performed in coordination with other operational procedures to be safe. A user is not normally aware of compute hosts but is adversely affected by their failure. User resizes server to get more resources ----------------------------------------- Sometimes a user may want to change the flavor of a server, e.g. change the quantity of cpus, disk, memory or any other resource. This is done by restarting the server with a new flavor. As the server is being moved, it is normal to reschedule the server to another host (although resize to the same host is an option for the operator). Resize involves shutting down the server, finding a host that has the correct resources for the new flavor size, moving the current server (including all storage) to the new host. Once the server has been given the appropriate resources to match the new flavor, the server is started again. After the resize operation, when the user is happy their server is working correctly after the resize, the user calls Confirm Resize. This deletes the 'before-the-resize' server that was kept on the source host. 
Alternatively, the user can call Revert Resize to delete the new resized server and restore the old that was stored on the source host. If the user does not manually confirm the resize within a configured time period, the resize is automatically confirmed, to free up the space the old is using on the source host. As with shelving, resize provides the cloud operator with an opportunity to redistribute work loads across the cloud according to the operators scheduling policy, providing the same benefits as above. Resizing a server is not normally a choice that is available to the cloud operator because it changes the nature of the server being provided to the user. User doesn't want to be charged when not using a server ------------------------------------------------------- Sometimes a user does not require a server to be active for a while, perhaps over a weekend or at certain times of day. Ideally they don't want to be billed for those resources. Just powering down a server does not free up any resources, but shelving a server does free up resources to be used by other users. This makes it feasible for a cloud operator to offer a discount when a server is shelved. When the user shelves a server the operator can choose to remove it from the compute hosts, i.e. the operator can offload the shelved server. When the user's server is unshelved, it is scheduled to a new host according to the operators policies for distributing work loads across the compute hosts, including taking disabled hosts into account. This will contribute to increased overall capacity, freeing hosts that are ear-marked for maintenance and providing contiguous blocks of resources on single hosts due to moving out old servers. Shelving a server is not normally a choice that is available to the cloud operator because it affects the availability of the server being provided to the user. 
Configure Guest OS ~~~~~~~~~~~~~~~~~~ Metadata API ------------ TODO Config Drive ------------ TODO User data --------- A user data file is a special key in the metadata service that holds a file that cloud-aware applications in the server can access. Nova has two ways to send user data to the deployed server: one is the metadata service, which lets the server access its metadata through a predefined IP address (169.254.169.254); the other way is to use a config drive, which wraps the metadata into an iso9660 or vfat format disk so that the deployed server can consume it via engines such as cloud-init during its boot process. Server personality ------------------ You can customize the personality of a server by injecting data into its file system. For example, you might want to insert ssh keys, set configuration files, or store data that you want to retrieve from inside the server. This feature provides a minimal amount of launch-time personalization. If you require significant customization, create a custom image. Follow these guidelines when you inject files: - The maximum size of the file path data is 255 bytes. - Encode the file contents as a Base64 string. The maximum size of the file contents is determined by the compute provider and may vary based on the image that is used to create the server. nova-13.0.0/api-guide/source/extensions.rst0000664000567000056710000000104412701407773022003 0ustar jenkinsjenkins00000000000000========== Extensions ========== Extensions are a deprecated concept in Nova. Support for extensions will be removed in a future release. In order to keep backwards-compatibility with legacy V2 API users, the ``extension_info`` API will remain as part of the Compute API. However, API extensions will not be supported anymore; there is only one standard API now. For the current V2.1 API, ``Microversions`` are the new mechanism for implementing API features and changes. 
For more detail about microversions, please refer to :doc:`microversions`. nova-13.0.0/api-guide/source/polling_changes-since_parameter.rst0000664000567000056710000000312612701407773026102 0ustar jenkinsjenkins00000000000000================================================== Efficient polling with the Changes-Since parameter ================================================== The REST API allows you to poll for the status of certain operations by performing a **GET** on various elements. Rather than re-downloading and re-parsing the full status at each polling interval, your REST client may use the *``changes-since``* parameter to check for changes since a previous request. The *``changes-since``* time is specified as an `ISO 8601 `__ dateTime (2011-01-24T17:08Z). The form for the timestamp is CCYY-MM-DDThh:mm:ss. An optional time zone may be written in by appending the form ±hh:mm which describes the timezone as an offset from UTC. When the timezone is not specified (2011-01-24T17:08), the UTC timezone is assumed. If nothing has changed since the *``changes-since``* time, an empty list is returned. If data has changed, only the items changed since the specified time are returned in the response. For example, performing a **GET** against https://api.servers.openstack.org/v2/224532/servers?\ *``changes-since``*\ =2015-01-24T17:08Z would list all servers that have changed since Mon, 24 Jan 2015 17:08:00 UTC. To allow clients to keep track of changes, the changes-since filter displays items that have been *recently* deleted. Both images and servers contain a ``DELETED`` status that indicates that the resource has been removed. Implementations are not required to keep track of deleted resources indefinitely, so sending a changes since time in the distant past may miss deletions. 
nova-13.0.0/api-guide/source/paginated_collections.rst0000664000567000056710000000766512701407773024155 0ustar jenkinsjenkins00000000000000===================== Paginated collections ===================== To reduce load on the service, list operations return a maximum number of items at a time. The maximum number of items returned is determined by the compute provider. To navigate the collection, the *``limit``* and *``marker``* parameters can be set in the URI. For example: .. code:: ?limit=100&marker=1234 The *``marker``* parameter is the ID of the last item in the previous list. By default, the service sorts items by create time in descending order. When the service cannot identify a create time, it sorts items by ID. The *``limit``* parameter sets the page size. Both parameters are optional. If the client requests a *``limit``* beyond one that is supported by the deployment an overLimit (413) fault may be thrown. A marker with an invalid ID returns a badRequest (400) fault. For convenience, collections should contain atom ``next`` links. They may optionally also contain ``previous`` links but the current implementation does not contain ``previous`` links. The last page in the list does not contain a link to "next" page. The following examples illustrate three pages in a collection of images. The first page was retrieved through a **GET** to ``http://servers.api.openstack.org/v2/1234/servers?limit=1``. In these examples, the *``limit``* parameter sets the page size to a single item. Subsequent links honor the initial page size. Thus, a client can follow links to traverse a paginated collection without having to input the *``marker``* parameter. **Example: Servers collection: JSON (first page)** .. 
code:: { "servers_links":[ { "href":"https://servers.api.openstack.org/v2/1234/servers?limit=1&marker=fc45ace4-3398-447b-8ef9-72a22086d775", "rel":"next" } ], "servers":[ { "id":"fc55acf4-3398-447b-8ef9-72a42086d775", "links":[ { "href":"https://servers.api.openstack.org/v2/1234/servers/fc45ace4-3398-447b-8ef9-72a22086d775", "rel":"self" }, { "href":"https://servers.api.openstack.org/v2/1234/servers/fc45ace4-3398-447b-8ef9-72a22086d775", "rel":"bookmark" } ], "name":"elasticsearch-0" } ] } In JSON, members in a paginated collection are stored in a JSON array named after the collection. A JSON object may also be used to hold members in cases where using an associative array is more practical. Properties about the collection itself, including links, are contained in an array with the name of the entity an underscore (\_) and ``links``. The combination of the objects and arrays that start with the name of the collection and an underscore represent the collection in JSON. The approach allows for extensibility of paginated collections by allowing them to be associated with arbitrary properties. It also allows collections to be embedded in other objects as illustrated below. Here, a subset of metadata items are presented within the image. Clients must keep following the ``next`` link to retrieve the full set of metadata. **Example: Paginated metadata: JSON** .. 
code:: { "server": { "id": "52415800-8b69-11e0-9b19-734f6f006e54", "name": "Elastic", "metadata": { "Version": "1.3", "ServiceType": "Bronze" }, "metadata_links": [ { "rel": "next", "href": "https://servers.api.openstack.org/v2/1234/servers/fc55acf4-3398-447b-8ef9-72a42086d775/meta?marker=ServiceType" } ], "links": [ { "rel": "self", "href": "https://servers.api.openstack.org/v2/1234/servers/fc55acf4-3398-447b-8ef9-72a42086d775" } ] } } nova-13.0.0/api-guide/source/extra_specs_and_properties.rst0000664000567000056710000000101012701407773025213 0ustar jenkinsjenkins00000000000000======================================= Flavor Extra Specs and Image Properties ======================================= TODO: Generic description about Flavor Extra Specs and Image Properties. Flavor Extra Specs ================== TODO: List the extra specs which we supported at here. The best is the extra specs can auto-gen from the nova code. Image Properties ================ TODO: List the properties which affect the server creation. The best is the properties can auto-gen from the image properties object. nova-13.0.0/api-guide/source/authentication.rst0000664000567000056710000000102312701407773022620 0ustar jenkinsjenkins00000000000000============== Authentication ============== Each HTTP request against the OpenStack Compute system requires the inclusion of specific authentication credentials. A single deployment may support multiple authentication schemes (OAuth, Basic Auth, Token). The authentication scheme is provided by the OpenStack Identity service. You can contact your provider to determine the best way to authenticate against the Compute API. .. note:: Some authentication schemes may require that the API operate using SSL over HTTP (HTTPS). nova-13.0.0/api-guide/source/microversions.rst0000664000567000056710000001310712701407773022511 0ustar jenkinsjenkins00000000000000.. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============= Microversions ============= API v2.1 supports microversions: small, documented changes to the API. A user can use microversions to discover the latest API microversion supported in their cloud. A cloud that is upgraded to support newer microversions will still support all older microversions to maintain the backward compatibility for those users who depend on older microversions. Users can also discover new features easily with microversions, so that they can benefit from all the advantages and improvements of the current cloud. There are multiple cases which you can resolve with microversions: - **Older clients with new cloud** Before using an old client to talk to a newer cloud, the old client can check the minimum version of microversions to verify whether the cloud is compatible with the old API. This prevents the old client from breaking with backwards incompatible API changes. Currently the minimum version of microversions is `2.1`, which is a microversion compatible with the legacy v2 API. That means the legacy v2 API user doesn't need to worry that their older client software will be broken when their cloud is upgraded with new versions. And the cloud operator doesn't need to worry that upgrading their cloud to newer versions will break any user with older clients that don't expect these changes. - **User discovery of available features between clouds** The new features can be discovered by microversions. 
The user client should check the microversions firstly, and new features are only enabled when clouds support. In this way, the user client can work with clouds that have deployed different microversions simultaneously. Version Discovery ================= The Version API will return the minimum and maximum microversions. These values are used by the client to discover the API's supported microversion(s). Requests to '/' will get version info for all endpoints. A response would look as follows:: { "versions": [ { "id": "v2.0", "links": [ { "href": "http://openstack.example.com/v2/", "rel": "self" } ], "status": "SUPPORTED", "version": "", "min_version": "", "updated": "2011-01-21T11:33:21Z" }, { "id": "v2.1", "links": [ { "href": "http://openstack.example.com/v2.1/", "rel": "self" } ], "status": "CURRENT", "version": "2.14", "min_version": "2.1", "updated": "2013-07-23T11:33:21Z" } ] } "version" is the maximum microversion, "min_version" is the minimum microversion. If the value is the empty string, it means this endpoint doesn't support microversions; it is a legacy v2 API endpoint -- for example, the endpoint `http://openstack.example.com/v2/` in the above sample. The endpoint `http://openstack.example.com/v2.1/` supports microversions; the maximum microversion is '2.14', and the minimum microversion is '2.1'. The client should specify a microversion between (and including) the minimum and maximum microversion to access the endpoint. You can also obtain specific endpoint version information by performing a GET on the base version URL (e.g., `http://openstack.example.com/v2.1/`). You can get more information about the version API at :doc:`versions`. Client Interaction ================== A client specifies the microversion of the API they want by using the following HTTP header:: X-OpenStack-Nova-API-Version: 2.4 This acts conceptually like the "Accept" header. 
Semantically this means: * If `X-OpenStack-Nova-API-Version` is not provided, act as if the minimum supported microversion was specified. * If `X-OpenStack-Nova-API-Version` is provided, respond with the API at that microversion. If that's outside of the range of microversions supported, return 406 Not Acceptable. * If `X-OpenStack-Nova-API-Version` is ``latest`` (special keyword), act as if maximum was specified. .. warning:: The ``latest`` value is mostly meant for integration testing and would be dangerous to rely on in client code since microversions are not following semver and therefore backward compatibility is not guaranteed. Clients should always require a specific microversion but limit what is acceptable to the microversion range that it understands at the time. This means that out of the box, an old client without any knowledge of microversions can work with an OpenStack installation with microversions support. Two extra headers are always returned in the response: * X-OpenStack-Nova-API-Version: microversion_number * Vary: X-OpenStack-Nova-API-Version The first header specifies the microversion number of the API which was executed. The second header is used as a hint to caching proxies that the response is also dependent on the X-OpenStack-Nova-API-Version and not just the body and query parameters. See :rfc:`2616` section 14.44 for details. nova-13.0.0/api-guide/source/limits.rst0000664000567000056710000001134312701407773021110 0ustar jenkinsjenkins00000000000000====== Limits ====== Accounts may be pre-configured with a set of thresholds (or limits) to manage capacity and prevent abuse of the system. The system recognizes two kinds of limits: *rate limits* and *absolute limits*. Rate limits are thresholds that are reset after a certain amount of time passes. Absolute limits are fixed. Limits are configured by operators and may differ from one deployment of the OpenStack Compute service to another. 
Please contact your provider to determine the limits that apply to your account. Your provider may be able to adjust your account's limits if they are too low. Also see the API Reference for `*Limits* `__. Rate limits ~~~~~~~~~~~ Rate limits are specified in terms of both a human-readable wild-card URI and a machine-processable regular expression. The human-readable limit is intended for displaying in graphical user interfaces. The machine-processable form is intended to be used directly by client applications. The regular expression boundary matcher "^" for the rate limit takes effect after the root URI path. For example, the regular expression ^/servers would match the bolded portion of the following URI: https://servers.api.openstack.org/v2/3542812\ **/servers**. **Table: Sample rate limits** +------------+-------------------+----------------------+----------+ | Verb | URI | RegEx | Default | +------------+-------------------+----------------------+----------+ | **POST** | \* | .\* | 120/min | +------------+-------------------+----------------------+----------+ | **POST** | \*/servers | ^/servers | 120/min | +------------+-------------------+----------------------+----------+ | **PUT** | \* | .\* | 120/min | +------------+-------------------+----------------------+----------+ | **GET** | \*changes-since\* | .\*changes-since.\* | 120/min | +------------+-------------------+----------------------+----------+ | **DELETE** | \* | .\* | 120/min | +------------+-------------------+----------------------+----------+ | **GET** | \*/os-fping\* | ^/os-fping | 12/min | +------------+-------------------+----------------------+----------+ Rate limits are applied in order relative to the verb, going from least to most specific. In the event a request exceeds the thresholds established for your account, a 413 HTTP response is returned with a ``Retry-After`` header to notify the client when they can attempt to try again. 
Absolute limits ~~~~~~~~~~~~~~~ Absolute limits are specified as name/value pairs. The name of the absolute limit uniquely identifies the limit within a deployment. Please consult your provider for an exhaustive list of absolute limits names. An absolute limit value is always specified as an integer. The name of the absolute limit determines the unit type of the integer value. For example, the name maxServerMeta implies that the value is in terms of server metadata items. **Table: Sample absolute limits** +-------------------+-------------------+------------------------------------+ | Name | Value | Description | +-------------------+-------------------+------------------------------------+ | maxTotalRAMSize | 51200 | Maximum total amount of RAM (MB) | +-------------------+-------------------+------------------------------------+ | maxServerMeta | 5 | Maximum number of metadata items | | | | associated with a server. | +-------------------+-------------------+------------------------------------+ | maxImageMeta | 5 | Maximum number of metadata items | | | | associated with an image. | +-------------------+-------------------+------------------------------------+ | maxPersonality | 5 | The maximum number of file | | | | path/content pairs that can be | | | | supplied on server build. | +-------------------+-------------------+------------------------------------+ | maxPersonalitySize| 10240 | The maximum size, in bytes, for | | | | each personality file. | +-------------------+-------------------+------------------------------------+ Determine limits programmatically ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Applications can programmatically determine current account limits. For information, see `*Limits* `__. nova-13.0.0/test-requirements.txt0000664000567000056710000000174512701407773020157 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. 
Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. hacking<0.11,>=0.10.0 coverage>=3.6 # Apache-2.0 fixtures>=1.3.1 # Apache-2.0/BSD mock>=1.2 # BSD mox3>=0.7.0 # Apache-2.0 psycopg2>=2.5 # LGPL/ZPL PyMySQL>=0.6.2 # MIT License python-barbicanclient>=3.3.0 # Apache-2.0 python-ironicclient>=1.1.0 # Apache-2.0 python-subunit>=0.0.18 # Apache-2.0/BSD requests-mock>=0.7.0 # Apache-2.0 sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 oslotest>=1.10.0 # Apache-2.0 os-testr>=0.4.1 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD testresources>=0.2.4 # Apache-2.0/BSD testtools>=1.4.0 # MIT tempest-lib>=0.14.0 # Apache-2.0 bandit>=0.17.3 # Apache-2.0 openstackdocstheme>=1.0.3 # Apache-2.0 # vmwareapi driver specific dependencies oslo.vmware>=1.16.0 # Apache-2.0 # releasenotes reno>=0.1.1 # Apache2 nova-13.0.0/nova.egg-info/0000775000567000056710000000000012701410205016324 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova.egg-info/requires.txt0000664000567000056710000000213312701410204020722 0ustar jenkinsjenkins00000000000000pbr>=1.6 SQLAlchemy<1.1.0,>=1.0.10 boto>=2.32.1 decorator>=3.4.0 eventlet!=0.18.3,>=0.18.2 Jinja2>=2.8 keystonemiddleware!=4.1.0,>=4.0.0 lxml>=2.3 cryptography>=1.0 WebOb>=1.2.3 greenlet>=0.3.2 PasteDeploy>=1.5.0 Paste PrettyTable<0.8,>=0.7 sqlalchemy-migrate>=0.9.6 netaddr!=0.7.16,>=0.7.12 netifaces>=0.10.4 paramiko>=1.16.0 Babel>=1.3 iso8601>=0.1.9 jsonschema!=2.5.0,<3.0.0,>=2.0.0 python-cinderclient>=1.3.1 keystoneauth1>=2.1.0 python-neutronclient!=4.1.0,>=2.6.0 python-glanceclient>=2.0.0 requests!=2.9.0,>=2.8.1 six>=1.9.0 stevedore>=1.5.0 setuptools>=16.0 websockify>=0.6.1 oslo.cache>=1.5.0 oslo.concurrency>=3.5.0 oslo.config>=3.7.0 oslo.context>=0.2.0 oslo.log>=1.14.0 oslo.reports>=0.6.0 oslo.serialization>=1.10.0 oslo.utils>=3.5.0 oslo.db>=4.1.0 oslo.rootwrap>=2.0.0 oslo.messaging>=4.0.0 oslo.policy>=0.5.0 oslo.i18n>=2.1.0 oslo.service>=1.0.0 
rfc3986>=0.2.0 oslo.middleware>=3.0.0 psutil<2.0.0,>=1.1.1 oslo.versionedobjects>=1.5.0 alembic>=0.8.0 os-brick>=1.0.0 os-win>=0.2.3 castellan>=0.3.1 [:(python_version!='2.7')] Routes!=2.0,>=1.12.3 [:(python_version=='2.7')] Routes!=2.0,!=2.1,>=1.12.3 nova-13.0.0/nova.egg-info/dependency_links.txt0000664000567000056710000000000112701410204022371 0ustar jenkinsjenkins00000000000000 nova-13.0.0/nova.egg-info/not-zip-safe0000664000567000056710000000000112701410146020556 0ustar jenkinsjenkins00000000000000 nova-13.0.0/nova.egg-info/SOURCES.txt0000664000567000056710000044013112701410205020213 0ustar jenkinsjenkins00000000000000.coveragerc .mailmap .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MAINTAINERS README.rst babel.cfg bandit.yaml openstack-common.conf requirements.txt run_tests.sh setup.cfg setup.py test-requirements.txt tests-py3.txt tox.ini api-guide/source/authentication.rst api-guide/source/conf.py api-guide/source/extensions.rst api-guide/source/extra_specs_and_properties.rst api-guide/source/faults.rst api-guide/source/general_info.rst api-guide/source/index.rst api-guide/source/limits.rst api-guide/source/links_and_references.rst api-guide/source/microversions.rst api-guide/source/paginated_collections.rst api-guide/source/polling_changes-since_parameter.rst api-guide/source/request_and_response_formats.rst api-guide/source/server_concepts.rst api-guide/source/users.rst api-guide/source/versions.rst contrib/profile_caching_scheduler.sh contrib/xen/vif-openstack devstack/tempest-dsvm-cells-rc devstack/tempest-dsvm-lxc-rc doc/README.rst doc/api_samples/all_extensions/extensions-list-resp-v2.json doc/api_samples/all_extensions/extensions-list-resp-v21-compatible.json doc/api_samples/all_extensions/extensions-list-resp.json doc/api_samples/all_extensions/flavor-get-resp.json doc/api_samples/all_extensions/flavors-detail-resp.json doc/api_samples/all_extensions/flavors-list-resp.json 
doc/api_samples/all_extensions/server-action-confirm-resize.json doc/api_samples/all_extensions/server-action-create-image.json doc/api_samples/all_extensions/server-action-reboot.json doc/api_samples/all_extensions/server-action-rebuild-resp.json doc/api_samples/all_extensions/server-action-rebuild.json doc/api_samples/all_extensions/server-action-resize.json doc/api_samples/all_extensions/server-action-revert-resize.json doc/api_samples/all_extensions/server-create-req.json doc/api_samples/all_extensions/server-create-resp.json doc/api_samples/all_extensions/server-get-resp.json doc/api_samples/all_extensions/servers-details-resp.json doc/api_samples/all_extensions/servers-list-resp.json doc/api_samples/consoles/consoles-get-resp.json doc/api_samples/consoles/consoles-list-get-resp.json doc/api_samples/extension-info/extensions-get-resp-v2.json doc/api_samples/extension-info/extensions-get-resp.json doc/api_samples/flavor-access/flavor-access-add-tenant-req.json doc/api_samples/flavor-access/flavor-access-add-tenant-resp.json doc/api_samples/flavor-access/flavor-access-create-req.json doc/api_samples/flavor-access/flavor-access-create-resp.json doc/api_samples/flavor-access/flavor-access-detail-resp.json doc/api_samples/flavor-access/flavor-access-list-resp.json doc/api_samples/flavor-access/flavor-access-remove-tenant-req.json doc/api_samples/flavor-access/flavor-access-remove-tenant-resp.json doc/api_samples/flavor-access/flavor-access-show-resp.json doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json doc/api_samples/flavor-manage/flavor-create-post-req.json 
doc/api_samples/flavor-manage/flavor-create-post-resp.json doc/api_samples/flavors/flavor-get-resp.json doc/api_samples/flavors/flavors-detail-resp.json doc/api_samples/flavors/flavors-list-resp.json doc/api_samples/images/image-get-resp.json doc/api_samples/images/image-meta-key-get.json doc/api_samples/images/image-meta-key-put-req.json doc/api_samples/images/image-meta-key-put-resp.json doc/api_samples/images/image-metadata-get-resp.json doc/api_samples/images/image-metadata-post-req.json doc/api_samples/images/image-metadata-post-resp.json doc/api_samples/images/image-metadata-put-req.json doc/api_samples/images/image-metadata-put-resp.json doc/api_samples/images/images-details-get-resp.json doc/api_samples/images/images-list-get-resp.json doc/api_samples/keypairs/keypairs-get-resp.json doc/api_samples/keypairs/keypairs-import-post-req.json doc/api_samples/keypairs/keypairs-import-post-resp.json doc/api_samples/keypairs/keypairs-list-resp.json doc/api_samples/keypairs/keypairs-post-req.json doc/api_samples/keypairs/keypairs-post-resp.json doc/api_samples/keypairs/v2.10/keypairs-get-resp.json doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json doc/api_samples/keypairs/v2.10/keypairs-list-resp.json doc/api_samples/keypairs/v2.10/keypairs-post-req.json doc/api_samples/keypairs/v2.10/keypairs-post-resp.json doc/api_samples/keypairs/v2.2/keypairs-get-resp.json doc/api_samples/keypairs/v2.2/keypairs-import-post-req.json doc/api_samples/keypairs/v2.2/keypairs-import-post-resp.json doc/api_samples/keypairs/v2.2/keypairs-list-resp.json doc/api_samples/keypairs/v2.2/keypairs-post-req.json doc/api_samples/keypairs/v2.2/keypairs-post-resp.json doc/api_samples/limits/limit-get-resp.json doc/api_samples/limits/v2-limit-get-resp.json doc/api_samples/os-access-ips/server-action-rebuild-resp.json doc/api_samples/os-access-ips/server-action-rebuild.json doc/api_samples/os-access-ips/server-get-resp.json 
doc/api_samples/os-access-ips/server-post-req.json doc/api_samples/os-access-ips/server-post-resp.json doc/api_samples/os-access-ips/servers-details-resp.json doc/api_samples/os-admin-actions/admin-actions-inject-network-info.json doc/api_samples/os-admin-actions/admin-actions-reset-network.json doc/api_samples/os-admin-actions/admin-actions-reset-server-state.json doc/api_samples/os-admin-password/admin-password-change-password.json doc/api_samples/os-agents/agent-post-req.json doc/api_samples/os-agents/agent-post-resp.json doc/api_samples/os-agents/agent-update-put-req.json doc/api_samples/os-agents/agent-update-put-resp.json doc/api_samples/os-agents/agents-get-resp.json doc/api_samples/os-aggregates/aggregate-add-host-post-req.json doc/api_samples/os-aggregates/aggregate-metadata-post-req.json doc/api_samples/os-aggregates/aggregate-post-req.json doc/api_samples/os-aggregates/aggregate-post-resp.json doc/api_samples/os-aggregates/aggregate-remove-host-post-req.json doc/api_samples/os-aggregates/aggregate-update-post-req.json doc/api_samples/os-aggregates/aggregate-update-post-resp.json doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json doc/api_samples/os-aggregates/aggregates-get-resp.json doc/api_samples/os-aggregates/aggregates-list-get-resp.json doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.json doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json doc/api_samples/os-availability-zone/availability-zone-detail-resp.json 
doc/api_samples/os-availability-zone/availability-zone-list-resp.json doc/api_samples/os-availability-zone/availability-zone-post-req.json doc/api_samples/os-availability-zone/availability-zone-post-resp.json doc/api_samples/os-baremetal-nodes/baremetal-node-get-resp.json doc/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json doc/api_samples/os-cells/cells-capacities-resp.json doc/api_samples/os-cells/cells-get-resp.json doc/api_samples/os-cells/cells-list-empty-resp.json doc/api_samples/os-cells/cells-list-resp.json doc/api_samples/os-certificates/certificate-create-resp.json doc/api_samples/os-certificates/certificate-get-root-resp.json doc/api_samples/os-cloudpipe/cloud-pipe-create-req.json doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.json doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json doc/api_samples/os-cloudpipe/cloud-pipe-update-req.json doc/api_samples/os-config-drive/server-config-drive-get-resp.json doc/api_samples/os-config-drive/server-post-req.json doc/api_samples/os-config-drive/server-post-resp.json doc/api_samples/os-config-drive/servers-config-drive-details-resp.json doc/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json doc/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json doc/api_samples/os-console-output/console-output-post-req.json doc/api_samples/os-console-output/console-output-post-resp.json doc/api_samples/os-create-backup/create-backup-req.json doc/api_samples/os-deferred-delete/force-delete-post-req.json doc/api_samples/os-deferred-delete/restore-post-req.json doc/api_samples/os-disk-config/list-servers-detail-get.json doc/api_samples/os-disk-config/server-action-rebuild-req.json doc/api_samples/os-disk-config/server-action-rebuild-resp.json doc/api_samples/os-disk-config/server-get-resp.json doc/api_samples/os-disk-config/server-post-req.json doc/api_samples/os-disk-config/server-post-resp.json doc/api_samples/os-disk-config/server-resize-post-req.json 
doc/api_samples/os-evacuate/server-evacuate-find-host-req.json doc/api_samples/os-evacuate/server-evacuate-find-host-resp.json doc/api_samples/os-evacuate/server-evacuate-req.json doc/api_samples/os-evacuate/server-evacuate-resp.json doc/api_samples/os-evacuate/v2.14/server-evacuate-find-host-req.json doc/api_samples/os-evacuate/v2.14/server-evacuate-req.json doc/api_samples/os-extended-availability-zone/server-get-resp.json doc/api_samples/os-extended-availability-zone/servers-detail-resp.json doc/api_samples/os-extended-server-attributes/server-get-resp.json doc/api_samples/os-extended-server-attributes/server-post-resp.json doc/api_samples/os-extended-server-attributes/servers-detail-resp.json doc/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json doc/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json doc/api_samples/os-extended-server-attributes/v2.3/server-get-resp.json doc/api_samples/os-extended-server-attributes/v2.3/servers-detail-resp.json doc/api_samples/os-extended-status/server-get-resp.json doc/api_samples/os-extended-status/servers-detail-resp.json doc/api_samples/os-extended-volumes/server-get-resp.json doc/api_samples/os-extended-volumes/servers-detail-resp.json doc/api_samples/os-extended-volumes/v2.3/server-get-resp.json doc/api_samples/os-extended-volumes/v2.3/servers-detail-resp.json doc/api_samples/os-fixed-ips/fixedip-post-req.json doc/api_samples/os-fixed-ips/fixedips-get-resp.json doc/api_samples/os-fixed-ips/v2.4/fixedip-post-req.json doc/api_samples/os-fixed-ips/v2.4/fixedips-get-resp.json doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json 
doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.json doc/api_samples/os-floating-ips/floating-ips-create-req.json doc/api_samples/os-floating-ips/floating-ips-create-resp.json doc/api_samples/os-floating-ips/floating-ips-get-resp.json doc/api_samples/os-floating-ips/floating-ips-list-empty-resp.json doc/api_samples/os-floating-ips/floating-ips-list-resp.json doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json doc/api_samples/os-fping/fping-get-details-resp.json doc/api_samples/os-fping/fping-get-resp.json doc/api_samples/os-hide-server-addresses/server-get-resp.json doc/api_samples/os-hide-server-addresses/servers-details-resp.json doc/api_samples/os-hide-server-addresses/servers-list-resp.json doc/api_samples/os-hosts/host-get-reboot.json doc/api_samples/os-hosts/host-get-resp.json doc/api_samples/os-hosts/host-get-shutdown.json doc/api_samples/os-hosts/host-get-startup.json doc/api_samples/os-hosts/host-put-maintenance-req.json doc/api_samples/os-hosts/host-put-maintenance-resp.json doc/api_samples/os-hosts/hosts-list-resp.json doc/api_samples/os-hypervisors/hypervisors-detail-resp.json doc/api_samples/os-hypervisors/hypervisors-list-resp.json doc/api_samples/os-hypervisors/hypervisors-search-resp.json 
doc/api_samples/os-hypervisors/hypervisors-show-resp.json doc/api_samples/os-hypervisors/hypervisors-statistics-resp.json doc/api_samples/os-hypervisors/hypervisors-uptime-resp.json doc/api_samples/os-hypervisors/hypervisors-with-servers-resp.json doc/api_samples/os-hypervisors/hypervisors-without-servers-resp.json doc/api_samples/os-instance-actions/instance-action-get-resp.json doc/api_samples/os-instance-actions/instance-actions-list-resp.json doc/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json doc/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json doc/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-index-get-resp.json doc/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-show-get-resp.json doc/api_samples/os-lock-server/lock-server.json doc/api_samples/os-lock-server/unlock-server.json doc/api_samples/os-migrate-server/live-migrate-server.json doc/api_samples/os-migrate-server/migrate-server.json doc/api_samples/os-migrate-server/v2.25/live-migrate-server.json doc/api_samples/os-migrations/migrations-get.json doc/api_samples/os-migrations/v2.23/migrations-get.json doc/api_samples/os-multinic/multinic-add-fixed-ip-req.json doc/api_samples/os-multinic/multinic-remove-fixed-ip-req.json doc/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json doc/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json doc/api_samples/os-multiple-create/multiple-create-post-req.json doc/api_samples/os-multiple-create/multiple-create-post-resp.json doc/api_samples/os-networks/network-add-req.json doc/api_samples/os-networks/network-create-req.json doc/api_samples/os-networks/network-create-resp.json doc/api_samples/os-networks/network-show-resp.json doc/api_samples/os-networks/networks-disassociate-req.json doc/api_samples/os-networks/networks-list-resp.json doc/api_samples/os-networks-associate/network-associate-host-req.json 
doc/api_samples/os-networks-associate/network-disassociate-host-req.json doc/api_samples/os-networks-associate/network-disassociate-project-req.json doc/api_samples/os-networks-associate/network-disassociate-req.json doc/api_samples/os-pause-server/pause-server.json doc/api_samples/os-pause-server/unpause-server.json doc/api_samples/os-pci/hypervisors-pci-detail-resp.json doc/api_samples/os-pci/hypervisors-pci-show-resp.json doc/api_samples/os-pci/pci-detail-resp.json doc/api_samples/os-pci/pci-index-resp.json doc/api_samples/os-pci/pci-show-resp.json doc/api_samples/os-pci/server-get-resp.json doc/api_samples/os-pci/servers-detail-resp.json doc/api_samples/os-personality/server-action-rebuild-req.json doc/api_samples/os-personality/server-action-rebuild-resp.json doc/api_samples/os-personality/server-post-req.json doc/api_samples/os-personality/server-post-resp.json doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral.json doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json doc/api_samples/os-quota-sets/quotas-show-get-resp.json doc/api_samples/os-quota-sets/quotas-update-force-post-req.json doc/api_samples/os-quota-sets/quotas-update-force-post-resp.json doc/api_samples/os-quota-sets/quotas-update-post-req.json doc/api_samples/os-quota-sets/quotas-update-post-resp.json doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json doc/api_samples/os-quota-sets/user-quotas-update-post-req.json doc/api_samples/os-quota-sets/user-quotas-update-post-resp.json doc/api_samples/os-remote-consoles/get-rdp-console-post-req.json doc/api_samples/os-remote-consoles/get-rdp-console-post-resp.json 
doc/api_samples/os-remote-consoles/get-serial-console-post-req.json doc/api_samples/os-remote-consoles/get-serial-console-post-resp.json doc/api_samples/os-remote-consoles/get-spice-console-post-req.json doc/api_samples/os-remote-consoles/get-spice-console-post-resp.json doc/api_samples/os-remote-consoles/get-vnc-console-post-req.json doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-req.json doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json doc/api_samples/os-remote-consoles/v2.8/create-mks-console-req.json doc/api_samples/os-remote-consoles/v2.8/create-mks-console-resp.json doc/api_samples/os-rescue/server-get-resp-rescue.json doc/api_samples/os-rescue/server-get-resp-unrescue.json doc/api_samples/os-rescue/server-rescue-req-with-image-ref.json doc/api_samples/os-rescue/server-rescue-req.json doc/api_samples/os-rescue/server-rescue.json doc/api_samples/os-rescue/server-unrescue-req.json doc/api_samples/os-scheduler-hints/scheduler-hints-post-req.json doc/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json doc/api_samples/os-security-groups/security-group-add-post-req.json doc/api_samples/os-security-groups/security-group-post-req.json doc/api_samples/os-security-groups/security-group-remove-post-req.json doc/api_samples/os-security-groups/security-groups-create-resp.json doc/api_samples/os-security-groups/security-groups-get-resp.json doc/api_samples/os-security-groups/security-groups-list-get-resp.json doc/api_samples/os-security-groups/server-get-resp.json 
doc/api_samples/os-security-groups/server-post-req.json doc/api_samples/os-security-groups/server-post-resp.json doc/api_samples/os-security-groups/server-security-groups-list-resp.json doc/api_samples/os-security-groups/servers-detail-resp.json doc/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json doc/api_samples/os-server-external-events/event-create-req.json doc/api_samples/os-server-external-events/event-create-resp.json doc/api_samples/os-server-groups/server-groups-get-resp.json doc/api_samples/os-server-groups/server-groups-list-resp.json doc/api_samples/os-server-groups/server-groups-post-req.json doc/api_samples/os-server-groups/server-groups-post-resp.json doc/api_samples/os-server-groups/v2.13/server-groups-get-resp.json doc/api_samples/os-server-groups/v2.13/server-groups-list-resp.json doc/api_samples/os-server-groups/v2.13/server-groups-post-req.json doc/api_samples/os-server-groups/v2.13/server-groups-post-resp.json doc/api_samples/os-server-password/get-password-resp.json doc/api_samples/os-server-usage/server-get-resp.json doc/api_samples/os-server-usage/servers-detail-resp.json doc/api_samples/os-services/service-disable-log-put-req.json doc/api_samples/os-services/service-disable-log-put-resp.json doc/api_samples/os-services/service-disable-put-req.json doc/api_samples/os-services/service-disable-put-resp.json doc/api_samples/os-services/service-enable-put-req.json doc/api_samples/os-services/service-enable-put-resp.json doc/api_samples/os-services/services-list-get-resp.json doc/api_samples/os-services/v2.11/service-disable-log-put-req.json doc/api_samples/os-services/v2.11/service-disable-log-put-resp.json doc/api_samples/os-services/v2.11/service-disable-put-req.json doc/api_samples/os-services/v2.11/service-disable-put-resp.json doc/api_samples/os-services/v2.11/service-enable-put-req.json doc/api_samples/os-services/v2.11/service-enable-put-resp.json doc/api_samples/os-services/v2.11/service-force-down-put-req.json 
doc/api_samples/os-services/v2.11/service-force-down-put-resp.json doc/api_samples/os-services/v2.11/services-list-get-resp.json doc/api_samples/os-shelve/os-shelve-offload.json doc/api_samples/os-shelve/os-shelve.json doc/api_samples/os-shelve/os-unshelve.json doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json doc/api_samples/os-suspend-server/server-resume.json doc/api_samples/os-suspend-server/server-suspend.json doc/api_samples/os-tenant-networks/networks-list-res.json doc/api_samples/os-tenant-networks/networks-post-req.json doc/api_samples/os-tenant-networks/networks-post-res.json doc/api_samples/os-used-limits/usedlimits-get-resp.json doc/api_samples/os-used-limits/v2-usedlimits-get-resp.json doc/api_samples/os-user-data/userdata-post-req.json doc/api_samples/os-user-data/userdata-post-resp.json doc/api_samples/os-virtual-interfaces/vifs-list-resp-v2.json doc/api_samples/os-virtual-interfaces/vifs-list-resp.json doc/api_samples/os-virtual-interfaces/v2.12/vifs-list-resp.json doc/api_samples/os-volumes/attach-volume-to-server-req.json doc/api_samples/os-volumes/attach-volume-to-server-resp.json doc/api_samples/os-volumes/list-volume-attachments-resp.json doc/api_samples/os-volumes/os-volumes-detail-resp.json doc/api_samples/os-volumes/os-volumes-get-resp.json doc/api_samples/os-volumes/os-volumes-index-resp.json doc/api_samples/os-volumes/os-volumes-post-req.json doc/api_samples/os-volumes/os-volumes-post-resp.json doc/api_samples/os-volumes/snapshot-create-req.json doc/api_samples/os-volumes/snapshot-create-resp.json doc/api_samples/os-volumes/snapshots-detail-resp.json doc/api_samples/os-volumes/snapshots-list-resp.json doc/api_samples/os-volumes/snapshots-show-resp.json doc/api_samples/os-volumes/update-volume-req.json doc/api_samples/os-volumes/volume-attachment-detail-resp.json doc/api_samples/server-ips/server-ips-network-resp.json 
doc/api_samples/server-ips/server-ips-resp.json doc/api_samples/server-metadata/server-metadata-all-req.json doc/api_samples/server-metadata/server-metadata-all-resp.json doc/api_samples/server-metadata/server-metadata-req.json doc/api_samples/server-metadata/server-metadata-resp.json doc/api_samples/server-migrations/force_complete.json doc/api_samples/server-migrations/live-migrate-server.json doc/api_samples/server-migrations/v2.23/migrations-get.json doc/api_samples/server-migrations/v2.23/migrations-index.json doc/api_samples/servers/server-action-confirm-resize.json doc/api_samples/servers/server-action-create-image.json doc/api_samples/servers/server-action-reboot.json doc/api_samples/servers/server-action-rebuild-preserve-ephemeral.json doc/api_samples/servers/server-action-rebuild-resp.json doc/api_samples/servers/server-action-rebuild.json doc/api_samples/servers/server-action-resize.json doc/api_samples/servers/server-action-revert-resize.json doc/api_samples/servers/server-action-start.json doc/api_samples/servers/server-action-stop.json doc/api_samples/servers/server-create-req.json doc/api_samples/servers/server-create-resp.json doc/api_samples/servers/server-get-resp.json doc/api_samples/servers/server-post-req.json doc/api_samples/servers/server-post-resp.json doc/api_samples/servers/server-update-req.json doc/api_samples/servers/server-update-resp.json doc/api_samples/servers/servers-details-resp.json doc/api_samples/servers/servers-list-resp.json doc/api_samples/servers-sort/server-sort-keys-list-resp.json doc/api_samples/servers/v2.16/server-get-resp.json doc/api_samples/servers/v2.16/servers-details-resp.json doc/api_samples/servers/v2.17/server-action-trigger-crash-dump.json doc/api_samples/servers/v2.19/server-action-rebuild-resp.json doc/api_samples/servers/v2.19/server-action-rebuild.json doc/api_samples/servers/v2.19/server-get-resp.json doc/api_samples/servers/v2.19/server-post-req.json doc/api_samples/servers/v2.19/server-post-resp.json 
doc/api_samples/servers/v2.19/server-put-req.json doc/api_samples/servers/v2.19/server-put-resp.json doc/api_samples/servers/v2.19/servers-details-resp.json doc/api_samples/servers/v2.19/servers-list-resp.json doc/api_samples/servers/v2.9/server-get-resp.json doc/api_samples/servers/v2.9/servers-details-resp.json doc/api_samples/servers/v2.9/servers-list-resp.json doc/api_samples/versions/v2-version-get-resp.json doc/api_samples/versions/v21-version-get-resp.json doc/api_samples/versions/versions-get-resp.json doc/ext/__init__.py doc/ext/nova_todo.py doc/ext/support_matrix.py doc/ext/versioned_notifications.py doc/notification_samples/service-update.json doc/source/addmethod.openstackapi.rst doc/source/aggregates.rst doc/source/api_microversion_dev.rst doc/source/api_microversion_history.rst doc/source/api_plugins.rst doc/source/architecture.rst doc/source/block_device_mapping.rst doc/source/blueprints.rst doc/source/cells.rst doc/source/code-review.rst doc/source/conductor.rst doc/source/conf.py doc/source/development.environment.rst doc/source/feature_classification.rst doc/source/filter_scheduler.rst doc/source/gmr.rst doc/source/how_to_get_involved.rst doc/source/i18n.rst doc/source/index.rst doc/source/notifications.rst doc/source/policies.rst doc/source/policy_enforcement.rst doc/source/process.rst doc/source/project_scope.rst doc/source/rpc.rst doc/source/sample_config.rst doc/source/scheduler_evolution.rst doc/source/services.rst doc/source/stable_api.rst doc/source/support-matrix.ini doc/source/support-matrix.rst doc/source/test_strategy.rst doc/source/threading.rst doc/source/upgrade.rst doc/source/vmstates.rst doc/source/_ga/layout.html doc/source/_static/support-matrix.css doc/source/image_src/Nova_spec_process.graphml doc/source/image_src/PowerStates.odp doc/source/image_src/architecture.dia doc/source/image_src/create_vm_states.diag doc/source/images/Nova_spec_process.svg doc/source/images/PowerStates1.png doc/source/images/PowerStates2.png 
doc/source/images/architecture.svg doc/source/images/create_vm_states.svg doc/source/images/filteringWorkflow1.png doc/source/images/filteringWorkflow2.png doc/source/images/run_instance_walkthrough.png doc/source/images/rpc/arch.png doc/source/images/rpc/arch.svg doc/source/images/rpc/flow1.png doc/source/images/rpc/flow1.svg doc/source/images/rpc/flow2.png doc/source/images/rpc/flow2.svg doc/source/images/rpc/rabt.png doc/source/images/rpc/rabt.svg doc/source/images/rpc/state.png doc/source/man/index.rst doc/source/man/nova-all.rst doc/source/man/nova-api-metadata.rst doc/source/man/nova-api-os-compute.rst doc/source/man/nova-api.rst doc/source/man/nova-cells.rst doc/source/man/nova-cert.rst doc/source/man/nova-compute.rst doc/source/man/nova-conductor.rst doc/source/man/nova-console.rst doc/source/man/nova-consoleauth.rst doc/source/man/nova-dhcpbridge.rst doc/source/man/nova-idmapshift.rst doc/source/man/nova-manage.rst doc/source/man/nova-network.rst doc/source/man/nova-novncproxy.rst doc/source/man/nova-rootwrap.rst doc/source/man/nova-scheduler.rst doc/source/man/nova-serialproxy.rst doc/source/man/nova-spicehtml5proxy.rst doc/source/man/nova-xvpvncproxy.rst doc/source/testing/libvirt-numa.rst doc/source/testing/serial-console.rst etc/nova/README-nova.conf.txt etc/nova/api-paste.ini etc/nova/cells.json etc/nova/logging_sample.conf etc/nova/nova-config-generator.conf etc/nova/policy.json etc/nova/release.sample etc/nova/rootwrap.conf etc/nova/rootwrap.d/api-metadata.filters etc/nova/rootwrap.d/compute.filters etc/nova/rootwrap.d/network.filters nova/__init__.py nova/availability_zones.py nova/baserpc.py nova/block_device.py nova/cache_utils.py nova/config.py nova/context.py nova/crypto.py nova/debugger.py nova/exception.py nova/filters.py nova/hooks.py nova/i18n.py nova/loadables.py nova/manager.py nova/netconf.py nova/notifications.py nova/opts.py nova/paths.py nova/policy.py nova/quota.py nova/rpc.py nova/safe_utils.py nova/service.py 
nova/signature_utils.py nova/test.py nova/utils.py nova/version.py nova/weights.py nova/wsgi.py nova.egg-info/PKG-INFO nova.egg-info/SOURCES.txt nova.egg-info/dependency_links.txt nova.egg-info/entry_points.txt nova.egg-info/not-zip-safe nova.egg-info/pbr.json nova.egg-info/requires.txt nova.egg-info/top_level.txt nova/CA/.gitignore nova/CA/geninter.sh nova/CA/genrootca.sh nova/CA/openssl.cnf.tmpl nova/CA/newcerts/.placeholder nova/CA/private/.placeholder nova/CA/projects/.gitignore nova/CA/projects/.placeholder nova/CA/reqs/.gitignore nova/CA/reqs/.placeholder nova/api/__init__.py nova/api/auth.py nova/api/compute_req_id.py nova/api/manager.py nova/api/opts.py nova/api/sizelimit.py nova/api/validator.py nova/api/ec2/__init__.py nova/api/ec2/cloud.py nova/api/ec2/ec2utils.py nova/api/metadata/__init__.py nova/api/metadata/base.py nova/api/metadata/handler.py nova/api/metadata/password.py nova/api/metadata/vendordata_json.py nova/api/openstack/__init__.py nova/api/openstack/api_version_request.py nova/api/openstack/auth.py nova/api/openstack/common.py nova/api/openstack/extensions.py nova/api/openstack/rest_api_version_history.rst nova/api/openstack/urlmap.py nova/api/openstack/versioned_method.py nova/api/openstack/wsgi.py nova/api/openstack/compute/__init__.py nova/api/openstack/compute/access_ips.py nova/api/openstack/compute/admin_actions.py nova/api/openstack/compute/admin_password.py nova/api/openstack/compute/agents.py nova/api/openstack/compute/aggregates.py nova/api/openstack/compute/assisted_volume_snapshots.py nova/api/openstack/compute/attach_interfaces.py nova/api/openstack/compute/availability_zone.py nova/api/openstack/compute/baremetal_nodes.py nova/api/openstack/compute/block_device_mapping.py nova/api/openstack/compute/block_device_mapping_v1.py nova/api/openstack/compute/cells.py nova/api/openstack/compute/certificates.py nova/api/openstack/compute/cloudpipe.py nova/api/openstack/compute/config_drive.py 
nova/api/openstack/compute/console_auth_tokens.py nova/api/openstack/compute/console_output.py nova/api/openstack/compute/consoles.py nova/api/openstack/compute/create_backup.py nova/api/openstack/compute/deferred_delete.py nova/api/openstack/compute/disk_config.py nova/api/openstack/compute/evacuate.py nova/api/openstack/compute/extended_availability_zone.py nova/api/openstack/compute/extended_server_attributes.py nova/api/openstack/compute/extended_status.py nova/api/openstack/compute/extended_volumes.py nova/api/openstack/compute/extension_info.py nova/api/openstack/compute/fixed_ips.py nova/api/openstack/compute/flavor_access.py nova/api/openstack/compute/flavor_manage.py nova/api/openstack/compute/flavor_rxtx.py nova/api/openstack/compute/flavors.py nova/api/openstack/compute/flavors_extraspecs.py nova/api/openstack/compute/floating_ip_dns.py nova/api/openstack/compute/floating_ip_pools.py nova/api/openstack/compute/floating_ips.py nova/api/openstack/compute/floating_ips_bulk.py nova/api/openstack/compute/fping.py nova/api/openstack/compute/hide_server_addresses.py nova/api/openstack/compute/hosts.py nova/api/openstack/compute/hypervisors.py nova/api/openstack/compute/image_metadata.py nova/api/openstack/compute/image_size.py nova/api/openstack/compute/images.py nova/api/openstack/compute/instance_actions.py nova/api/openstack/compute/instance_usage_audit_log.py nova/api/openstack/compute/ips.py nova/api/openstack/compute/keypairs.py nova/api/openstack/compute/limits.py nova/api/openstack/compute/lock_server.py nova/api/openstack/compute/migrate_server.py nova/api/openstack/compute/migrations.py nova/api/openstack/compute/multinic.py nova/api/openstack/compute/multiple_create.py nova/api/openstack/compute/networks.py nova/api/openstack/compute/networks_associate.py nova/api/openstack/compute/pause_server.py nova/api/openstack/compute/pci.py nova/api/openstack/compute/personality.py nova/api/openstack/compute/preserve_ephemeral_rebuild.py 
nova/api/openstack/compute/quota_classes.py nova/api/openstack/compute/quota_sets.py nova/api/openstack/compute/remote_consoles.py nova/api/openstack/compute/rescue.py nova/api/openstack/compute/scheduler_hints.py nova/api/openstack/compute/security_group_default_rules.py nova/api/openstack/compute/security_groups.py nova/api/openstack/compute/server_diagnostics.py nova/api/openstack/compute/server_external_events.py nova/api/openstack/compute/server_groups.py nova/api/openstack/compute/server_metadata.py nova/api/openstack/compute/server_migrations.py nova/api/openstack/compute/server_password.py nova/api/openstack/compute/server_usage.py nova/api/openstack/compute/servers.py nova/api/openstack/compute/services.py nova/api/openstack/compute/shelve.py nova/api/openstack/compute/simple_tenant_usage.py nova/api/openstack/compute/suspend_server.py nova/api/openstack/compute/tenant_networks.py nova/api/openstack/compute/used_limits.py nova/api/openstack/compute/user_data.py nova/api/openstack/compute/versions.py nova/api/openstack/compute/versionsV21.py nova/api/openstack/compute/virtual_interfaces.py nova/api/openstack/compute/volumes.py nova/api/openstack/compute/legacy_v2/__init__.py nova/api/openstack/compute/legacy_v2/consoles.py nova/api/openstack/compute/legacy_v2/extensions.py nova/api/openstack/compute/legacy_v2/flavors.py nova/api/openstack/compute/legacy_v2/image_metadata.py nova/api/openstack/compute/legacy_v2/images.py nova/api/openstack/compute/legacy_v2/ips.py nova/api/openstack/compute/legacy_v2/limits.py nova/api/openstack/compute/legacy_v2/server_metadata.py nova/api/openstack/compute/legacy_v2/servers.py nova/api/openstack/compute/legacy_v2/versions.py nova/api/openstack/compute/legacy_v2/contrib/__init__.py nova/api/openstack/compute/legacy_v2/contrib/admin_actions.py nova/api/openstack/compute/legacy_v2/contrib/agents.py nova/api/openstack/compute/legacy_v2/contrib/aggregates.py 
nova/api/openstack/compute/legacy_v2/contrib/assisted_volume_snapshots.py nova/api/openstack/compute/legacy_v2/contrib/attach_interfaces.py nova/api/openstack/compute/legacy_v2/contrib/availability_zone.py nova/api/openstack/compute/legacy_v2/contrib/baremetal_ext_status.py nova/api/openstack/compute/legacy_v2/contrib/baremetal_nodes.py nova/api/openstack/compute/legacy_v2/contrib/block_device_mapping_v2_boot.py nova/api/openstack/compute/legacy_v2/contrib/cell_capacities.py nova/api/openstack/compute/legacy_v2/contrib/cells.py nova/api/openstack/compute/legacy_v2/contrib/certificates.py nova/api/openstack/compute/legacy_v2/contrib/cloudpipe.py nova/api/openstack/compute/legacy_v2/contrib/cloudpipe_update.py nova/api/openstack/compute/legacy_v2/contrib/config_drive.py nova/api/openstack/compute/legacy_v2/contrib/console_auth_tokens.py nova/api/openstack/compute/legacy_v2/contrib/console_output.py nova/api/openstack/compute/legacy_v2/contrib/consoles.py nova/api/openstack/compute/legacy_v2/contrib/createserverext.py nova/api/openstack/compute/legacy_v2/contrib/deferred_delete.py nova/api/openstack/compute/legacy_v2/contrib/disk_config.py nova/api/openstack/compute/legacy_v2/contrib/evacuate.py nova/api/openstack/compute/legacy_v2/contrib/extended_availability_zone.py nova/api/openstack/compute/legacy_v2/contrib/extended_evacuate_find_host.py nova/api/openstack/compute/legacy_v2/contrib/extended_floating_ips.py nova/api/openstack/compute/legacy_v2/contrib/extended_hypervisors.py nova/api/openstack/compute/legacy_v2/contrib/extended_ips.py nova/api/openstack/compute/legacy_v2/contrib/extended_ips_mac.py nova/api/openstack/compute/legacy_v2/contrib/extended_networks.py nova/api/openstack/compute/legacy_v2/contrib/extended_quotas.py nova/api/openstack/compute/legacy_v2/contrib/extended_rescue_with_image.py nova/api/openstack/compute/legacy_v2/contrib/extended_server_attributes.py nova/api/openstack/compute/legacy_v2/contrib/extended_services.py 
nova/api/openstack/compute/legacy_v2/contrib/extended_services_delete.py nova/api/openstack/compute/legacy_v2/contrib/extended_status.py nova/api/openstack/compute/legacy_v2/contrib/extended_virtual_interfaces_net.py nova/api/openstack/compute/legacy_v2/contrib/extended_volumes.py nova/api/openstack/compute/legacy_v2/contrib/fixed_ips.py nova/api/openstack/compute/legacy_v2/contrib/flavor_access.py nova/api/openstack/compute/legacy_v2/contrib/flavor_disabled.py nova/api/openstack/compute/legacy_v2/contrib/flavor_rxtx.py nova/api/openstack/compute/legacy_v2/contrib/flavor_swap.py nova/api/openstack/compute/legacy_v2/contrib/flavorextradata.py nova/api/openstack/compute/legacy_v2/contrib/flavorextraspecs.py nova/api/openstack/compute/legacy_v2/contrib/flavormanage.py nova/api/openstack/compute/legacy_v2/contrib/floating_ip_dns.py nova/api/openstack/compute/legacy_v2/contrib/floating_ip_pools.py nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py nova/api/openstack/compute/legacy_v2/contrib/floating_ips_bulk.py nova/api/openstack/compute/legacy_v2/contrib/fping.py nova/api/openstack/compute/legacy_v2/contrib/hide_server_addresses.py nova/api/openstack/compute/legacy_v2/contrib/hosts.py nova/api/openstack/compute/legacy_v2/contrib/hypervisor_status.py nova/api/openstack/compute/legacy_v2/contrib/hypervisors.py nova/api/openstack/compute/legacy_v2/contrib/image_size.py nova/api/openstack/compute/legacy_v2/contrib/instance_actions.py nova/api/openstack/compute/legacy_v2/contrib/instance_usage_audit_log.py nova/api/openstack/compute/legacy_v2/contrib/keypairs.py nova/api/openstack/compute/legacy_v2/contrib/migrations.py nova/api/openstack/compute/legacy_v2/contrib/multinic.py nova/api/openstack/compute/legacy_v2/contrib/multiple_create.py nova/api/openstack/compute/legacy_v2/contrib/networks_associate.py nova/api/openstack/compute/legacy_v2/contrib/os_networks.py nova/api/openstack/compute/legacy_v2/contrib/os_tenant_networks.py 
nova/api/openstack/compute/legacy_v2/contrib/preserve_ephemeral_rebuild.py nova/api/openstack/compute/legacy_v2/contrib/quota_classes.py nova/api/openstack/compute/legacy_v2/contrib/quotas.py nova/api/openstack/compute/legacy_v2/contrib/rescue.py nova/api/openstack/compute/legacy_v2/contrib/scheduler_hints.py nova/api/openstack/compute/legacy_v2/contrib/security_group_default_rules.py nova/api/openstack/compute/legacy_v2/contrib/security_groups.py nova/api/openstack/compute/legacy_v2/contrib/server_diagnostics.py nova/api/openstack/compute/legacy_v2/contrib/server_external_events.py nova/api/openstack/compute/legacy_v2/contrib/server_group_quotas.py nova/api/openstack/compute/legacy_v2/contrib/server_groups.py nova/api/openstack/compute/legacy_v2/contrib/server_list_multi_status.py nova/api/openstack/compute/legacy_v2/contrib/server_password.py nova/api/openstack/compute/legacy_v2/contrib/server_sort_keys.py nova/api/openstack/compute/legacy_v2/contrib/server_start_stop.py nova/api/openstack/compute/legacy_v2/contrib/server_usage.py nova/api/openstack/compute/legacy_v2/contrib/services.py nova/api/openstack/compute/legacy_v2/contrib/shelve.py nova/api/openstack/compute/legacy_v2/contrib/simple_tenant_usage.py nova/api/openstack/compute/legacy_v2/contrib/used_limits.py nova/api/openstack/compute/legacy_v2/contrib/used_limits_for_admin.py nova/api/openstack/compute/legacy_v2/contrib/user_data.py nova/api/openstack/compute/legacy_v2/contrib/user_quotas.py nova/api/openstack/compute/legacy_v2/contrib/virtual_interfaces.py nova/api/openstack/compute/legacy_v2/contrib/volume_attachment_update.py nova/api/openstack/compute/legacy_v2/contrib/volumes.py nova/api/openstack/compute/schemas/__init__.py nova/api/openstack/compute/schemas/access_ips.py nova/api/openstack/compute/schemas/admin_password.py nova/api/openstack/compute/schemas/agents.py nova/api/openstack/compute/schemas/aggregates.py nova/api/openstack/compute/schemas/assisted_volume_snapshots.py 
nova/api/openstack/compute/schemas/attach_interfaces.py nova/api/openstack/compute/schemas/availability_zone.py nova/api/openstack/compute/schemas/block_device_mapping.py nova/api/openstack/compute/schemas/block_device_mapping_v1.py nova/api/openstack/compute/schemas/cells.py nova/api/openstack/compute/schemas/cloudpipe.py nova/api/openstack/compute/schemas/config_drive.py nova/api/openstack/compute/schemas/console_output.py nova/api/openstack/compute/schemas/create_backup.py nova/api/openstack/compute/schemas/disk_config.py nova/api/openstack/compute/schemas/evacuate.py nova/api/openstack/compute/schemas/fixed_ips.py nova/api/openstack/compute/schemas/flavor_access.py nova/api/openstack/compute/schemas/flavor_manage.py nova/api/openstack/compute/schemas/flavors_extraspecs.py nova/api/openstack/compute/schemas/floating_ip_dns.py nova/api/openstack/compute/schemas/floating_ips.py nova/api/openstack/compute/schemas/floating_ips_bulk.py nova/api/openstack/compute/schemas/hosts.py nova/api/openstack/compute/schemas/image_metadata.py nova/api/openstack/compute/schemas/keypairs.py nova/api/openstack/compute/schemas/migrate_server.py nova/api/openstack/compute/schemas/multinic.py nova/api/openstack/compute/schemas/multiple_create.py nova/api/openstack/compute/schemas/networks.py nova/api/openstack/compute/schemas/networks_associate.py nova/api/openstack/compute/schemas/personality.py nova/api/openstack/compute/schemas/preserve_ephemeral_rebuild.py nova/api/openstack/compute/schemas/quota_classes.py nova/api/openstack/compute/schemas/quota_sets.py nova/api/openstack/compute/schemas/remote_consoles.py nova/api/openstack/compute/schemas/rescue.py nova/api/openstack/compute/schemas/reset_server_state.py nova/api/openstack/compute/schemas/scheduler_hints.py nova/api/openstack/compute/schemas/security_groups.py nova/api/openstack/compute/schemas/server_external_events.py nova/api/openstack/compute/schemas/server_groups.py nova/api/openstack/compute/schemas/server_metadata.py 
nova/api/openstack/compute/schemas/server_migrations.py nova/api/openstack/compute/schemas/servers.py nova/api/openstack/compute/schemas/services.py nova/api/openstack/compute/schemas/tenant_networks.py nova/api/openstack/compute/schemas/user_data.py nova/api/openstack/compute/schemas/volumes.py nova/api/openstack/compute/views/__init__.py nova/api/openstack/compute/views/addresses.py nova/api/openstack/compute/views/flavors.py nova/api/openstack/compute/views/images.py nova/api/openstack/compute/views/limits.py nova/api/openstack/compute/views/servers.py nova/api/openstack/compute/views/versions.py nova/api/validation/__init__.py nova/api/validation/parameter_types.py nova/api/validation/validators.py nova/cells/__init__.py nova/cells/driver.py nova/cells/manager.py nova/cells/messaging.py nova/cells/opts.py nova/cells/rpc_driver.py nova/cells/rpcapi.py nova/cells/scheduler.py nova/cells/state.py nova/cells/utils.py nova/cells/filters/__init__.py nova/cells/filters/different_cell.py nova/cells/filters/image_properties.py nova/cells/filters/target_cell.py nova/cells/weights/__init__.py nova/cells/weights/mute_child.py nova/cells/weights/ram_by_instance_type.py nova/cells/weights/weight_offset.py nova/cert/__init__.py nova/cert/manager.py nova/cert/rpcapi.py nova/cloudpipe/__init__.py nova/cloudpipe/bootscript.template nova/cloudpipe/client.ovpn.template nova/cloudpipe/pipelib.py nova/cmd/__init__.py nova/cmd/all.py nova/cmd/api.py nova/cmd/api_metadata.py nova/cmd/api_os_compute.py nova/cmd/baseproxy.py nova/cmd/cells.py nova/cmd/cert.py nova/cmd/compute.py nova/cmd/conductor.py nova/cmd/console.py nova/cmd/consoleauth.py nova/cmd/dhcpbridge.py nova/cmd/idmapshift.py nova/cmd/manage.py nova/cmd/network.py nova/cmd/novnc.py nova/cmd/novncproxy.py nova/cmd/scheduler.py nova/cmd/serialproxy.py nova/cmd/spicehtml5proxy.py nova/cmd/xvpvncproxy.py nova/common/__init__.py nova/common/config.py nova/compute/__init__.py nova/compute/api.py nova/compute/arch.py 
nova/compute/build_results.py nova/compute/cells_api.py nova/compute/claims.py nova/compute/cpumodel.py nova/compute/flavors.py nova/compute/hv_type.py nova/compute/instance_actions.py nova/compute/manager.py nova/compute/opts.py nova/compute/power_state.py nova/compute/resource_tracker.py nova/compute/rpcapi.py nova/compute/stats.py nova/compute/task_states.py nova/compute/utils.py nova/compute/vm_mode.py nova/compute/vm_states.py nova/compute/monitors/__init__.py nova/compute/monitors/base.py nova/compute/monitors/cpu/__init__.py nova/compute/monitors/cpu/virt_driver.py nova/compute/resources/__init__.py nova/compute/resources/base.py nova/conductor/__init__.py nova/conductor/api.py nova/conductor/manager.py nova/conductor/rpcapi.py nova/conductor/tasks/__init__.py nova/conductor/tasks/base.py nova/conductor/tasks/live_migrate.py nova/conductor/tasks/migrate.py nova/conf/__init__.py nova/conf/availability_zone.py nova/conf/cells.py nova/conf/cert.py nova/conf/compute.py nova/conf/conductor.py nova/conf/ephemeral_storage.py nova/conf/ironic.py nova/conf/opts.py nova/conf/pci.py nova/conf/scheduler.py nova/conf/serial_console.py nova/conf/virt.py nova/conf/vnc.py nova/conf/wsgi.py nova/console/__init__.py nova/console/api.py nova/console/fake.py nova/console/manager.py nova/console/rpcapi.py nova/console/serial.py nova/console/type.py nova/console/websocketproxy.py nova/console/xvp.conf.template nova/console/xvp.py nova/consoleauth/__init__.py nova/consoleauth/manager.py nova/consoleauth/rpcapi.py nova/db/__init__.py nova/db/api.py nova/db/base.py nova/db/migration.py nova/db/sqlalchemy/__init__.py nova/db/sqlalchemy/api.py nova/db/sqlalchemy/api_models.py nova/db/sqlalchemy/migration.py nova/db/sqlalchemy/models.py nova/db/sqlalchemy/types.py nova/db/sqlalchemy/utils.py nova/db/sqlalchemy/api_migrations/__init__.py nova/db/sqlalchemy/api_migrations/migrate_repo/README nova/db/sqlalchemy/api_migrations/migrate_repo/__init__.py 
nova/db/sqlalchemy/api_migrations/migrate_repo/migrate.cfg nova/db/sqlalchemy/api_migrations/migrate_repo/versions/001_cell_mapping.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/002_instance_mapping.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/003_host_mapping.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/004_add_request_spec.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/005_flavors.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/006_build_request.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/007_instance_mapping_nullable_cellid.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/__init__.py nova/db/sqlalchemy/migrate_repo/README nova/db/sqlalchemy/migrate_repo/__init__.py nova/db/sqlalchemy/migrate_repo/manage.py nova/db/sqlalchemy/migrate_repo/migrate.cfg nova/db/sqlalchemy/migrate_repo/versions/216_havana.py nova/db/sqlalchemy/migrate_repo/versions/217_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/218_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/219_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/220_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/221_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/222_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/223_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/224_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/225_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/226_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/227_fix_project_user_quotas_resource_length.py nova/db/sqlalchemy/migrate_repo/versions/228_add_metrics_in_compute_nodes.py nova/db/sqlalchemy/migrate_repo/versions/229_add_extra_resources_in_compute_nodes.py nova/db/sqlalchemy/migrate_repo/versions/230_add_details_column_to_instance_actions_events.py nova/db/sqlalchemy/migrate_repo/versions/231_add_ephemeral_key_uuid.py nova/db/sqlalchemy/migrate_repo/versions/232_drop_dump_tables.py 
nova/db/sqlalchemy/migrate_repo/versions/233_add_stats_in_compute_nodes.py nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py nova/db/sqlalchemy/migrate_repo/versions/235_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/236_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/237_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/238_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/239_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/240_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/241_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/242_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/243_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/244_increase_user_id_length_volume_usage_cache.py nova/db/sqlalchemy/migrate_repo/versions/245_add_mtu_and_dhcp_server.py nova/db/sqlalchemy/migrate_repo/versions/246_add_compute_node_id_fk.py nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_upgrade.sql nova/db/sqlalchemy/migrate_repo/versions/247_nullable_mismatch.py nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py nova/db/sqlalchemy/migrate_repo/versions/249_remove_duplicate_index.py nova/db/sqlalchemy/migrate_repo/versions/250_remove_instance_groups_metadata.py nova/db/sqlalchemy/migrate_repo/versions/251_add_numa_topology_to_comput_nodes.py nova/db/sqlalchemy/migrate_repo/versions/252_add_instance_extra_table.py nova/db/sqlalchemy/migrate_repo/versions/253_add_pci_requests_to_instance_extra_table.py nova/db/sqlalchemy/migrate_repo/versions/254_add_request_id_in_pci_devices.py nova/db/sqlalchemy/migrate_repo/versions/255_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/256_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/257_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/258_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/259_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/260_placeholder.py 
nova/db/sqlalchemy/migrate_repo/versions/261_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/262_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/263_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/264_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/265_remove_duplicated_index.py nova/db/sqlalchemy/migrate_repo/versions/266_add_instance_tags.py nova/db/sqlalchemy/migrate_repo/versions/267_instance_uuid_non_nullable.py nova/db/sqlalchemy/migrate_repo/versions/268_add_host_in_compute_node.py nova/db/sqlalchemy/migrate_repo/versions/269_add_numa_node_column.py nova/db/sqlalchemy/migrate_repo/versions/270_flavor_data_in_extra.py nova/db/sqlalchemy/migrate_repo/versions/271_sqlite_postgresql_indexes.py nova/db/sqlalchemy/migrate_repo/versions/272_add_keypair_type.py nova/db/sqlalchemy/migrate_repo/versions/273_sqlite_foreign_keys.py nova/db/sqlalchemy/migrate_repo/versions/274_update_instances_project_id_index.py nova/db/sqlalchemy/migrate_repo/versions/275_add_keypair_type.py nova/db/sqlalchemy/migrate_repo/versions/276_vcpu_model.py nova/db/sqlalchemy/migrate_repo/versions/277_add_fixed_ip_updated_index.py nova/db/sqlalchemy/migrate_repo/versions/278_remove_service_fk_in_compute_nodes.py nova/db/sqlalchemy/migrate_repo/versions/279_fix_unique_constraint_for_compute_node.py nova/db/sqlalchemy/migrate_repo/versions/280_add_nullable_false_to_keypairs_name.py nova/db/sqlalchemy/migrate_repo/versions/281_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/282_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/283_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/284_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/285_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/286_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/287_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/288_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/289_placeholder.py 
nova/db/sqlalchemy/migrate_repo/versions/290_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/291_enforce_flavors_migrated.py nova/db/sqlalchemy/migrate_repo/versions/292_drop_nova_volumes_tables.py nova/db/sqlalchemy/migrate_repo/versions/293_add_migration_type.py nova/db/sqlalchemy/migrate_repo/versions/294_add_service_heartbeat.py nova/db/sqlalchemy/migrate_repo/versions/295_add_virtual_interfaces_uuid_index.py nova/db/sqlalchemy/migrate_repo/versions/296_add_missing_db2_fkeys.py nova/db/sqlalchemy/migrate_repo/versions/297_add_forced_down_for_services.py nova/db/sqlalchemy/migrate_repo/versions/298_mysql_extra_specs_binary_collation.py nova/db/sqlalchemy/migrate_repo/versions/299_service_version_number.py nova/db/sqlalchemy/migrate_repo/versions/300_migration_context.py nova/db/sqlalchemy/migrate_repo/versions/301_add_cpu_and_ram_ratios_for_compute_nodes.py nova/db/sqlalchemy/migrate_repo/versions/302_pgsql_add_instance_system_metadata_index.py nova/db/sqlalchemy/migrate_repo/versions/303_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/304_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/305_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/306_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/307_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/308_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/309_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/310_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/311_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/312_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/313_add_parent_id_column.py nova/db/sqlalchemy/migrate_repo/versions/314_add_resource_provider_tables.py nova/db/sqlalchemy/migrate_repo/versions/315_add_migration_progresss_detail.py nova/db/sqlalchemy/migrate_repo/versions/316_add_disk_ratio_for_compute_nodes.py nova/db/sqlalchemy/migrate_repo/versions/317_add_aggregate_uuid.py 
nova/db/sqlalchemy/migrate_repo/versions/318_resource_provider_name_aggregates.py nova/db/sqlalchemy/migrate_repo/versions/319_add_instances_deleted_created_at_index.py nova/db/sqlalchemy/migrate_repo/versions/__init__.py nova/hacking/__init__.py nova/hacking/checks.py nova/image/__init__.py nova/image/api.py nova/image/glance.py nova/image/s3.py nova/image/download/__init__.py nova/image/download/base.py nova/image/download/file.py nova/ipv6/__init__.py nova/ipv6/account_identifier.py nova/ipv6/api.py nova/ipv6/rfc2462.py nova/keymgr/__init__.py nova/keymgr/barbican.py nova/keymgr/conf_key_mgr.py nova/keymgr/key.py nova/keymgr/key_mgr.py nova/keymgr/mock_key_mgr.py nova/keymgr/not_implemented_key_mgr.py nova/keymgr/single_key_mgr.py nova/locale/nova-log-critical.pot nova/locale/nova-log-error.pot nova/locale/nova-log-info.pot nova/locale/nova-log-warning.pot nova/locale/nova.pot nova/locale/cs/LC_MESSAGES/nova-log-critical.po nova/locale/cs/LC_MESSAGES/nova-log-error.po nova/locale/cs/LC_MESSAGES/nova-log-info.po nova/locale/cs/LC_MESSAGES/nova-log-warning.po nova/locale/cs/LC_MESSAGES/nova.po nova/locale/de/LC_MESSAGES/nova-log-critical.po nova/locale/de/LC_MESSAGES/nova-log-error.po nova/locale/de/LC_MESSAGES/nova-log-info.po nova/locale/de/LC_MESSAGES/nova.po nova/locale/es/LC_MESSAGES/nova-log-critical.po nova/locale/es/LC_MESSAGES/nova-log-error.po nova/locale/es/LC_MESSAGES/nova-log-info.po nova/locale/es/LC_MESSAGES/nova-log-warning.po nova/locale/es/LC_MESSAGES/nova.po nova/locale/es_MX/LC_MESSAGES/nova-log-critical.po nova/locale/fr/LC_MESSAGES/nova-log-critical.po nova/locale/fr/LC_MESSAGES/nova-log-error.po nova/locale/fr/LC_MESSAGES/nova-log-info.po nova/locale/fr/LC_MESSAGES/nova-log-warning.po nova/locale/fr/LC_MESSAGES/nova.po nova/locale/hr/LC_MESSAGES/nova-log-critical.po nova/locale/it/LC_MESSAGES/nova-log-error.po nova/locale/it/LC_MESSAGES/nova-log-info.po nova/locale/it/LC_MESSAGES/nova.po nova/locale/ja/LC_MESSAGES/nova-log-critical.po 
nova/locale/ja/LC_MESSAGES/nova.po nova/locale/ko_KR/LC_MESSAGES/nova-log-critical.po nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po nova/locale/ko_KR/LC_MESSAGES/nova-log-warning.po nova/locale/ko_KR/LC_MESSAGES/nova.po nova/locale/pa_IN/LC_MESSAGES/nova-log-critical.po nova/locale/pt_BR/LC_MESSAGES/nova-log-critical.po nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po nova/locale/pt_BR/LC_MESSAGES/nova.po nova/locale/ru/LC_MESSAGES/nova.po nova/locale/tr_TR/LC_MESSAGES/nova-log-critical.po nova/locale/tr_TR/LC_MESSAGES/nova-log-error.po nova/locale/tr_TR/LC_MESSAGES/nova-log-info.po nova/locale/tr_TR/LC_MESSAGES/nova-log-warning.po nova/locale/tr_TR/LC_MESSAGES/nova.po nova/locale/zh_CN/LC_MESSAGES/nova-log-critical.po nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po nova/locale/zh_CN/LC_MESSAGES/nova-log-warning.po nova/locale/zh_CN/LC_MESSAGES/nova.po nova/locale/zh_TW/LC_MESSAGES/nova-log-critical.po nova/locale/zh_TW/LC_MESSAGES/nova.po nova/mks/__init__.py nova/network/__init__.py nova/network/api.py nova/network/base_api.py nova/network/dns_driver.py nova/network/driver.py nova/network/floating_ips.py nova/network/l3.py nova/network/ldapdns.py nova/network/linux_net.py nova/network/manager.py nova/network/minidns.py nova/network/model.py nova/network/noop_dns_driver.py nova/network/opts.py nova/network/rpcapi.py nova/network/neutronv2/__init__.py nova/network/neutronv2/api.py nova/network/neutronv2/constants.py nova/network/security_group/__init__.py nova/network/security_group/neutron_driver.py nova/network/security_group/openstack_driver.py nova/network/security_group/security_group_base.py nova/objects/__init__.py nova/objects/agent.py nova/objects/aggregate.py nova/objects/bandwidth_usage.py nova/objects/base.py nova/objects/block_device.py nova/objects/build_request.py nova/objects/cell_mapping.py 
nova/objects/compute_node.py nova/objects/dns_domain.py nova/objects/ec2.py nova/objects/external_event.py nova/objects/fields.py nova/objects/fixed_ip.py nova/objects/flavor.py nova/objects/floating_ip.py nova/objects/host_mapping.py nova/objects/hv_spec.py nova/objects/image_meta.py nova/objects/instance.py nova/objects/instance_action.py nova/objects/instance_fault.py nova/objects/instance_group.py nova/objects/instance_info_cache.py nova/objects/instance_mapping.py nova/objects/instance_numa_topology.py nova/objects/instance_pci_requests.py nova/objects/keypair.py nova/objects/migrate_data.py nova/objects/migration.py nova/objects/migration_context.py nova/objects/monitor_metric.py nova/objects/network.py nova/objects/network_request.py nova/objects/notification.py nova/objects/numa.py nova/objects/pci_device.py nova/objects/pci_device_pool.py nova/objects/quotas.py nova/objects/request_spec.py nova/objects/resource_provider.py nova/objects/security_group.py nova/objects/security_group_rule.py nova/objects/service.py nova/objects/tag.py nova/objects/task_log.py nova/objects/vcpu_model.py nova/objects/virt_cpu_topology.py nova/objects/virtual_interface.py nova/objects/volume_usage.py nova/openstack/__init__.py nova/openstack/common/README nova/openstack/common/__init__.py nova/openstack/common/_i18n.py nova/openstack/common/cliutils.py nova/pci/__init__.py nova/pci/devspec.py nova/pci/manager.py nova/pci/request.py nova/pci/stats.py nova/pci/utils.py nova/pci/whitelist.py nova/rdp/__init__.py nova/scheduler/__init__.py nova/scheduler/caching_scheduler.py nova/scheduler/chance.py nova/scheduler/driver.py nova/scheduler/filter_scheduler.py nova/scheduler/host_manager.py nova/scheduler/ironic_host_manager.py nova/scheduler/manager.py nova/scheduler/rpcapi.py nova/scheduler/scheduler_options.py nova/scheduler/utils.py nova/scheduler/client/__init__.py nova/scheduler/client/query.py nova/scheduler/client/report.py nova/scheduler/filters/__init__.py 
nova/scheduler/filters/affinity_filter.py nova/scheduler/filters/aggregate_image_properties_isolation.py nova/scheduler/filters/aggregate_instance_extra_specs.py nova/scheduler/filters/aggregate_multitenancy_isolation.py nova/scheduler/filters/all_hosts_filter.py nova/scheduler/filters/availability_zone_filter.py nova/scheduler/filters/compute_capabilities_filter.py nova/scheduler/filters/compute_filter.py nova/scheduler/filters/core_filter.py nova/scheduler/filters/disk_filter.py nova/scheduler/filters/exact_core_filter.py nova/scheduler/filters/exact_disk_filter.py nova/scheduler/filters/exact_ram_filter.py nova/scheduler/filters/extra_specs_ops.py nova/scheduler/filters/image_props_filter.py nova/scheduler/filters/io_ops_filter.py nova/scheduler/filters/isolated_hosts_filter.py nova/scheduler/filters/json_filter.py nova/scheduler/filters/metrics_filter.py nova/scheduler/filters/num_instances_filter.py nova/scheduler/filters/numa_topology_filter.py nova/scheduler/filters/pci_passthrough_filter.py nova/scheduler/filters/ram_filter.py nova/scheduler/filters/retry_filter.py nova/scheduler/filters/trusted_filter.py nova/scheduler/filters/type_filter.py nova/scheduler/filters/utils.py nova/scheduler/weights/__init__.py nova/scheduler/weights/affinity.py nova/scheduler/weights/disk.py nova/scheduler/weights/io_ops.py nova/scheduler/weights/metrics.py nova/scheduler/weights/ram.py nova/servicegroup/__init__.py nova/servicegroup/api.py nova/servicegroup/drivers/__init__.py nova/servicegroup/drivers/base.py nova/servicegroup/drivers/db.py nova/servicegroup/drivers/mc.py nova/spice/__init__.py nova/tests/__init__.py nova/tests/fixtures.py nova/tests/uuidsentinel.py nova/tests/functional/__init__.py nova/tests/functional/api_paste_fixture.py nova/tests/functional/api_samples_test_base.py nova/tests/functional/integrated_helpers.py nova/tests/functional/test_extensions.py nova/tests/functional/test_instance_actions.py 
nova/tests/functional/test_legacy_v2_compatible_wrapper.py nova/tests/functional/test_login.py nova/tests/functional/test_middleware.py nova/tests/functional/test_server_group.py nova/tests/functional/test_servers.py nova/tests/functional/api/__init__.py nova/tests/functional/api/client.py nova/tests/functional/api_sample_tests/README.rst nova/tests/functional/api_sample_tests/__init__.py nova/tests/functional/api_sample_tests/api_sample_base.py nova/tests/functional/api_sample_tests/test_access_ips.py nova/tests/functional/api_sample_tests/test_admin_actions.py nova/tests/functional/api_sample_tests/test_admin_password.py nova/tests/functional/api_sample_tests/test_agents.py nova/tests/functional/api_sample_tests/test_aggregates.py nova/tests/functional/api_sample_tests/test_assisted_volume_snapshots.py nova/tests/functional/api_sample_tests/test_attach_interfaces.py nova/tests/functional/api_sample_tests/test_availability_zone.py nova/tests/functional/api_sample_tests/test_baremetal_nodes.py nova/tests/functional/api_sample_tests/test_block_device_mapping_boot.py nova/tests/functional/api_sample_tests/test_cells.py nova/tests/functional/api_sample_tests/test_certificates.py nova/tests/functional/api_sample_tests/test_cloudpipe.py nova/tests/functional/api_sample_tests/test_config_drive.py nova/tests/functional/api_sample_tests/test_console_auth_tokens.py nova/tests/functional/api_sample_tests/test_console_output.py nova/tests/functional/api_sample_tests/test_consoles.py nova/tests/functional/api_sample_tests/test_create_backup.py nova/tests/functional/api_sample_tests/test_deferred_delete.py nova/tests/functional/api_sample_tests/test_disk_config.py nova/tests/functional/api_sample_tests/test_evacuate.py nova/tests/functional/api_sample_tests/test_extended_availability_zone.py nova/tests/functional/api_sample_tests/test_extended_server_attributes.py nova/tests/functional/api_sample_tests/test_extended_status.py 
nova/tests/functional/api_sample_tests/test_extended_volumes.py nova/tests/functional/api_sample_tests/test_extension_info.py nova/tests/functional/api_sample_tests/test_fixed_ips.py nova/tests/functional/api_sample_tests/test_flavor_access.py nova/tests/functional/api_sample_tests/test_flavor_extraspecs.py nova/tests/functional/api_sample_tests/test_flavor_manage.py nova/tests/functional/api_sample_tests/test_flavor_rxtx.py nova/tests/functional/api_sample_tests/test_flavors.py nova/tests/functional/api_sample_tests/test_floating_ip_dns.py nova/tests/functional/api_sample_tests/test_floating_ip_pools.py nova/tests/functional/api_sample_tests/test_floating_ips.py nova/tests/functional/api_sample_tests/test_floating_ips_bulk.py nova/tests/functional/api_sample_tests/test_fping.py nova/tests/functional/api_sample_tests/test_hide_server_addresses.py nova/tests/functional/api_sample_tests/test_hosts.py nova/tests/functional/api_sample_tests/test_hypervisors.py nova/tests/functional/api_sample_tests/test_images.py nova/tests/functional/api_sample_tests/test_instance_actions.py nova/tests/functional/api_sample_tests/test_instance_usage_audit_log.py nova/tests/functional/api_sample_tests/test_keypairs.py nova/tests/functional/api_sample_tests/test_limits.py nova/tests/functional/api_sample_tests/test_lock_server.py nova/tests/functional/api_sample_tests/test_migrate_server.py nova/tests/functional/api_sample_tests/test_migrations.py nova/tests/functional/api_sample_tests/test_multinic.py nova/tests/functional/api_sample_tests/test_multiple_create.py nova/tests/functional/api_sample_tests/test_networks.py nova/tests/functional/api_sample_tests/test_networks_associate.py nova/tests/functional/api_sample_tests/test_pause_server.py nova/tests/functional/api_sample_tests/test_pci.py nova/tests/functional/api_sample_tests/test_personality.py nova/tests/functional/api_sample_tests/test_preserve_ephemeral_rebuild.py nova/tests/functional/api_sample_tests/test_quota_classes.py 
nova/tests/functional/api_sample_tests/test_quota_sets.py nova/tests/functional/api_sample_tests/test_remote_consoles.py nova/tests/functional/api_sample_tests/test_rescue.py nova/tests/functional/api_sample_tests/test_scheduler_hints.py nova/tests/functional/api_sample_tests/test_security_group_default_rules.py nova/tests/functional/api_sample_tests/test_security_groups.py nova/tests/functional/api_sample_tests/test_server_diagnostics.py nova/tests/functional/api_sample_tests/test_server_external_events.py nova/tests/functional/api_sample_tests/test_server_groups.py nova/tests/functional/api_sample_tests/test_server_metadata.py nova/tests/functional/api_sample_tests/test_server_migrations.py nova/tests/functional/api_sample_tests/test_server_password.py nova/tests/functional/api_sample_tests/test_server_usage.py nova/tests/functional/api_sample_tests/test_servers.py nova/tests/functional/api_sample_tests/test_servers_ips.py nova/tests/functional/api_sample_tests/test_services.py nova/tests/functional/api_sample_tests/test_shelve.py nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py nova/tests/functional/api_sample_tests/test_suspend_server.py nova/tests/functional/api_sample_tests/test_tenant_networks.py nova/tests/functional/api_sample_tests/test_used_limits.py nova/tests/functional/api_sample_tests/test_user_data.py nova/tests/functional/api_sample_tests/test_versions.py nova/tests/functional/api_sample_tests/test_virtual_interfaces.py nova/tests/functional/api_sample_tests/test_volumes.py nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp-v2.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp-v21-compatible.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/flavor-get-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/all_extensions/flavors-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/flavors-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-confirm-resize.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-create-image.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-reboot.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-rebuild-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-rebuild.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-resize.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-revert-resize.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/servers-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/servers-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/consoles/consoles-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/consoles/consoles-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/consoles/consoles-list-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/extension-info/extensions-get-resp-v2.json.tpl nova/tests/functional/api_sample_tests/api_samples/extension-info/extensions-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-add-tenant-req.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-manage/flavor-create-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-manage/flavor-create-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavors/flavor-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavors/flavors-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavors/flavors-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-get-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/images/image-meta-key-get.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-meta-key-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-meta-key-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-req.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/limits/limit-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/limits/v2-limit-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-action-rebuild.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/servers-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/servers-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-admin-password/admin-password-change-password.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-update-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-update-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-agents/agents-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-update-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zone-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zone-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zone-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-baremetal-nodes/baremetal-node-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-capacities-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-list-empty-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-certificates/certificate-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-certificates/certificate-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-certificates/certificate-get-root-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-config-drive/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-config-drive/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-console-output/console-output-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-console-output/console-output-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-create-backup/create-backup-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-deferred-delete/force-delete-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-deferred-delete/restore-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/list-servers-detail-get.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-action-rebuild-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-action-rebuild-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-resize-post-req.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.14/server-evacuate-find-host-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.14/server-evacuate-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-availability-zone/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-status/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-status/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-volumes/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-volumes/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/fixedip-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/v2.4/fixedip-post-req.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/v2.4/fixedips-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-fping/fping-get-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-fping/fping-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-get-reboot.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-get-shutdown.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-get-startup.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-put-maintenance-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-put-maintenance-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/hosts-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-with-servers-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-without-servers-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/instance-action-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-index-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-show-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-lock-server/lock-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-lock-server/unlock-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/live-migrate-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/migrate-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/v2.25/live-migrate-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-migrations/migrations-get.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-migrations/v2.23/migrations-get.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks/network-add-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks/network-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks/network-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks/network-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks/networks-disassociate-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks/networks-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-associate-host-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-disassociate-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pause-server/pause-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pause-server/unpause-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/pci-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/pci-index-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/pci-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-personality/server-action-rebuild-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-personality/server-action-rebuild-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-personality/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-personality/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-force-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-force-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/user-quotas-update-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-serial-console-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-spice-console-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.6/create-vnc-console-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.8/create-mks-console-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.8/create-mks-console-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-get-resp-rescue.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-get-resp-unrescue.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-rescue-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-rescue.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-unrescue-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-group-add-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-group-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-group-remove-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-groups-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-groups-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-post-req.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-external-events/event-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-external-events/event-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-password/get-password-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-usage/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-usage/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-log-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-log-put-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/service-enable-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/service-enable-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/services-list-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-log-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-log-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-enable-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-enable-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-force-down-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-force-down-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/services-list-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-shelve/os-shelve-offload.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-shelve/os-shelve.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-shelve/os-unshelve.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-suspend-server/server-resume.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-suspend-server/server-suspend.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/networks-list-res.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/networks-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/networks-post-res.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-used-limits/usedlimits-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-used-limits/v2-usedlimits-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-user-data/userdata-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-user-data/userdata-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/vifs-list-resp-v2.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/vifs-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/v2.12/vifs-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/attach-volume-to-server-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/attach-volume-to-server-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/list-volume-attachments-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-index-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshot-create-req.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshot-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshots-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshots-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshots-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/update-volume-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/volume-attachment-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-ips/server-ips-network-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-ips/server-ips-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-all-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-all-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-migrations/force_complete.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-migrations/live-migrate-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-migrations/v2.23/migrations-get.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-migrations/v2.23/migrations-index.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-confirm-resize.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-create-image.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-reboot.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-rebuild-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-rebuild.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/servers/server-action-resize.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-revert-resize.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-start.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-stop.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-update-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-update-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/servers-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/servers-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers-sort/server-sort-keys-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.17/server-action-trigger-crash-dump.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.9/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.9/servers-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.9/servers-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/versions/v2-version-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/versions/v21-version-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/versions/versions-get-resp.json.tpl nova/tests/functional/db/__init__.py nova/tests/functional/db/test_archive.py nova/tests/functional/db/test_build_request.py nova/tests/functional/db/test_cell_mapping.py nova/tests/functional/db/test_connection_switch.py nova/tests/functional/db/test_flavor_model.py nova/tests/functional/db/test_host_mapping.py nova/tests/functional/db/test_instance_mapping.py nova/tests/functional/db/test_request_spec.py nova/tests/functional/db/test_resource_provider.py nova/tests/functional/db/api/__init__.py nova/tests/functional/db/api/test_migrations.py nova/tests/functional/libvirt/__init__.py nova/tests/functional/libvirt/test_numa_servers.py nova/tests/functional/libvirt/test_rt_servers.py nova/tests/functional/notification_sample_tests/__init__.py nova/tests/functional/notification_sample_tests/notification_sample_base.py nova/tests/functional/notification_sample_tests/test_service_update.py nova/tests/functional/regressions/README.rst nova/tests/functional/regressions/__init__.py nova/tests/functional/regressions/test_bug_1522536.py nova/tests/functional/regressions/test_bug_1541691.py nova/tests/functional/regressions/test_bug_1548980.py nova/tests/functional/regressions/test_bug_1552888.py nova/tests/functional/wsgi/__init__.py 
nova/tests/functional/wsgi/test_flavor_manage.py nova/tests/functional/wsgi/test_secgroup.py nova/tests/live_migration/hooks/ceph.sh nova/tests/live_migration/hooks/nfs.sh nova/tests/live_migration/hooks/run_tests.sh nova/tests/live_migration/hooks/utils.sh nova/tests/unit/README.rst nova/tests/unit/__init__.py nova/tests/unit/cast_as_call.py nova/tests/unit/conf_fixture.py nova/tests/unit/fake_block_device.py nova/tests/unit/fake_build_request.py nova/tests/unit/fake_crypto.py nova/tests/unit/fake_flavor.py nova/tests/unit/fake_hosts.py nova/tests/unit/fake_instance.py nova/tests/unit/fake_ldap.py nova/tests/unit/fake_network.py nova/tests/unit/fake_network_cache_model.py nova/tests/unit/fake_notifier.py nova/tests/unit/fake_pci_device_pools.py nova/tests/unit/fake_policy.py nova/tests/unit/fake_processutils.py nova/tests/unit/fake_request_spec.py nova/tests/unit/fake_server_actions.py nova/tests/unit/fake_utils.py nova/tests/unit/fake_volume.py nova/tests/unit/image_fixtures.py nova/tests/unit/matchers.py nova/tests/unit/policy_fixture.py nova/tests/unit/test_api_validation.py nova/tests/unit/test_availability_zones.py nova/tests/unit/test_baserpc.py nova/tests/unit/test_block_device.py nova/tests/unit/test_cache.py nova/tests/unit/test_cinder.py nova/tests/unit/test_configdrive2.py nova/tests/unit/test_context.py nova/tests/unit/test_crypto.py nova/tests/unit/test_exception.py nova/tests/unit/test_fixtures.py nova/tests/unit/test_flavors.py nova/tests/unit/test_hacking.py nova/tests/unit/test_hooks.py nova/tests/unit/test_instance_types_extra_specs.py nova/tests/unit/test_iptables_network.py nova/tests/unit/test_ipv6.py nova/tests/unit/test_loadables.py nova/tests/unit/test_matchers.py nova/tests/unit/test_metadata.py nova/tests/unit/test_notifications.py nova/tests/unit/test_notifier.py nova/tests/unit/test_nova_manage.py nova/tests/unit/test_pipelib.py nova/tests/unit/test_policy.py nova/tests/unit/test_quota.py nova/tests/unit/test_rpc.py 
nova/tests/unit/test_safeutils.py nova/tests/unit/test_service.py nova/tests/unit/test_signature_utils.py nova/tests/unit/test_test.py nova/tests/unit/test_test_utils.py nova/tests/unit/test_utils.py nova/tests/unit/test_uuid_sentinels.py nova/tests/unit/test_versions.py nova/tests/unit/test_weights.py nova/tests/unit/test_wsgi.py nova/tests/unit/utils.py nova/tests/unit/api/__init__.py nova/tests/unit/api/test_auth.py nova/tests/unit/api/test_compute_req_id.py nova/tests/unit/api/test_validator.py nova/tests/unit/api/test_wsgi.py nova/tests/unit/api/openstack/__init__.py nova/tests/unit/api/openstack/common.py nova/tests/unit/api/openstack/fakes.py nova/tests/unit/api/openstack/test_api_version_request.py nova/tests/unit/api/openstack/test_common.py nova/tests/unit/api/openstack/test_faults.py nova/tests/unit/api/openstack/test_legacy_v2_compatible_wrapper.py nova/tests/unit/api/openstack/test_mapper.py nova/tests/unit/api/openstack/test_wsgi.py nova/tests/unit/api/openstack/compute/__init__.py nova/tests/unit/api/openstack/compute/admin_only_action_common.py nova/tests/unit/api/openstack/compute/basic.py nova/tests/unit/api/openstack/compute/dummy_schema.py nova/tests/unit/api/openstack/compute/microversions.py nova/tests/unit/api/openstack/compute/test_access_ips.py nova/tests/unit/api/openstack/compute/test_admin_actions.py nova/tests/unit/api/openstack/compute/test_admin_password.py nova/tests/unit/api/openstack/compute/test_agents.py nova/tests/unit/api/openstack/compute/test_aggregates.py nova/tests/unit/api/openstack/compute/test_api.py nova/tests/unit/api/openstack/compute/test_attach_interfaces.py nova/tests/unit/api/openstack/compute/test_auth.py nova/tests/unit/api/openstack/compute/test_availability_zone.py nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py nova/tests/unit/api/openstack/compute/test_block_device_mapping.py nova/tests/unit/api/openstack/compute/test_block_device_mapping_v1.py 
nova/tests/unit/api/openstack/compute/test_cells.py nova/tests/unit/api/openstack/compute/test_certificates.py nova/tests/unit/api/openstack/compute/test_cloudpipe.py nova/tests/unit/api/openstack/compute/test_cloudpipe_update.py nova/tests/unit/api/openstack/compute/test_config_drive.py nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py nova/tests/unit/api/openstack/compute/test_console_output.py nova/tests/unit/api/openstack/compute/test_consoles.py nova/tests/unit/api/openstack/compute/test_create_backup.py nova/tests/unit/api/openstack/compute/test_createserverext.py nova/tests/unit/api/openstack/compute/test_deferred_delete.py nova/tests/unit/api/openstack/compute/test_disk_config.py nova/tests/unit/api/openstack/compute/test_evacuate.py nova/tests/unit/api/openstack/compute/test_extended_availability_zone.py nova/tests/unit/api/openstack/compute/test_extended_hypervisors.py nova/tests/unit/api/openstack/compute/test_extended_ips.py nova/tests/unit/api/openstack/compute/test_extended_ips_mac.py nova/tests/unit/api/openstack/compute/test_extended_rescue_with_image.py nova/tests/unit/api/openstack/compute/test_extended_server_attributes.py nova/tests/unit/api/openstack/compute/test_extended_status.py nova/tests/unit/api/openstack/compute/test_extended_virtual_interfaces_net.py nova/tests/unit/api/openstack/compute/test_extended_volumes.py nova/tests/unit/api/openstack/compute/test_extension_info.py nova/tests/unit/api/openstack/compute/test_extensions.py nova/tests/unit/api/openstack/compute/test_fixed_ips.py nova/tests/unit/api/openstack/compute/test_flavor_access.py nova/tests/unit/api/openstack/compute/test_flavor_disabled.py nova/tests/unit/api/openstack/compute/test_flavor_manage.py nova/tests/unit/api/openstack/compute/test_flavor_rxtx.py nova/tests/unit/api/openstack/compute/test_flavor_swap.py nova/tests/unit/api/openstack/compute/test_flavorextradata.py nova/tests/unit/api/openstack/compute/test_flavors.py 
nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py nova/tests/unit/api/openstack/compute/test_floating_ip_dns.py nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py nova/tests/unit/api/openstack/compute/test_floating_ips.py nova/tests/unit/api/openstack/compute/test_floating_ips_bulk.py nova/tests/unit/api/openstack/compute/test_fping.py nova/tests/unit/api/openstack/compute/test_hide_server_addresses.py nova/tests/unit/api/openstack/compute/test_hosts.py nova/tests/unit/api/openstack/compute/test_hypervisor_status.py nova/tests/unit/api/openstack/compute/test_hypervisors.py nova/tests/unit/api/openstack/compute/test_image_metadata.py nova/tests/unit/api/openstack/compute/test_image_size.py nova/tests/unit/api/openstack/compute/test_images.py nova/tests/unit/api/openstack/compute/test_instance_actions.py nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py nova/tests/unit/api/openstack/compute/test_keypairs.py nova/tests/unit/api/openstack/compute/test_limits.py nova/tests/unit/api/openstack/compute/test_lock_server.py nova/tests/unit/api/openstack/compute/test_microversions.py nova/tests/unit/api/openstack/compute/test_migrate_server.py nova/tests/unit/api/openstack/compute/test_migrations.py nova/tests/unit/api/openstack/compute/test_multinic.py nova/tests/unit/api/openstack/compute/test_multiple_create.py nova/tests/unit/api/openstack/compute/test_networks.py nova/tests/unit/api/openstack/compute/test_neutron_security_groups.py nova/tests/unit/api/openstack/compute/test_pause_server.py nova/tests/unit/api/openstack/compute/test_pci.py nova/tests/unit/api/openstack/compute/test_plugin_framework.py nova/tests/unit/api/openstack/compute/test_quota_classes.py nova/tests/unit/api/openstack/compute/test_quotas.py nova/tests/unit/api/openstack/compute/test_remote_consoles.py nova/tests/unit/api/openstack/compute/test_rescue.py nova/tests/unit/api/openstack/compute/test_scheduler_hints.py 
nova/tests/unit/api/openstack/compute/test_security_group_default_rules.py nova/tests/unit/api/openstack/compute/test_security_groups.py nova/tests/unit/api/openstack/compute/test_server_actions.py nova/tests/unit/api/openstack/compute/test_server_diagnostics.py nova/tests/unit/api/openstack/compute/test_server_external_events.py nova/tests/unit/api/openstack/compute/test_server_group_quotas.py nova/tests/unit/api/openstack/compute/test_server_groups.py nova/tests/unit/api/openstack/compute/test_server_metadata.py nova/tests/unit/api/openstack/compute/test_server_migrations.py nova/tests/unit/api/openstack/compute/test_server_password.py nova/tests/unit/api/openstack/compute/test_server_reset_state.py nova/tests/unit/api/openstack/compute/test_server_start_stop.py nova/tests/unit/api/openstack/compute/test_server_usage.py nova/tests/unit/api/openstack/compute/test_serversV21.py nova/tests/unit/api/openstack/compute/test_services.py nova/tests/unit/api/openstack/compute/test_shelve.py nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py nova/tests/unit/api/openstack/compute/test_snapshots.py nova/tests/unit/api/openstack/compute/test_suspend_server.py nova/tests/unit/api/openstack/compute/test_tenant_networks.py nova/tests/unit/api/openstack/compute/test_urlmap.py nova/tests/unit/api/openstack/compute/test_used_limits.py nova/tests/unit/api/openstack/compute/test_user_data.py nova/tests/unit/api/openstack/compute/test_versions.py nova/tests/unit/api/openstack/compute/test_virtual_interfaces.py nova/tests/unit/api/openstack/compute/test_volumes.py nova/tests/unit/api/openstack/compute/legacy_v2/__init__.py nova/tests/unit/api/openstack/compute/legacy_v2/test_auth.py nova/tests/unit/api/openstack/compute/legacy_v2/test_extensions.py nova/tests/unit/api/openstack/compute/legacy_v2/test_servers.py nova/tests/unit/api/openstack/compute/legacy_v2/extensions/__init__.py nova/tests/unit/api/openstack/compute/legacy_v2/extensions/foxinsocks.py 
nova/tests/unit/api_samples_test_base/__init__.py nova/tests/unit/api_samples_test_base/test_compare_result.py nova/tests/unit/cells/__init__.py nova/tests/unit/cells/fakes.py nova/tests/unit/cells/test_cells_filters.py nova/tests/unit/cells/test_cells_manager.py nova/tests/unit/cells/test_cells_messaging.py nova/tests/unit/cells/test_cells_rpc_driver.py nova/tests/unit/cells/test_cells_rpcapi.py nova/tests/unit/cells/test_cells_scheduler.py nova/tests/unit/cells/test_cells_state_manager.py nova/tests/unit/cells/test_cells_utils.py nova/tests/unit/cells/test_cells_weights.py nova/tests/unit/cert/__init__.py nova/tests/unit/cert/test_rpcapi.py nova/tests/unit/cmd/__init__.py nova/tests/unit/cmd/test_baseproxy.py nova/tests/unit/cmd/test_idmapshift.py nova/tests/unit/cmd/test_manage.py nova/tests/unit/compute/__init__.py nova/tests/unit/compute/eventlet_utils.py nova/tests/unit/compute/fake_resource_tracker.py nova/tests/unit/compute/test_arch.py nova/tests/unit/compute/test_claims.py nova/tests/unit/compute/test_compute.py nova/tests/unit/compute/test_compute_api.py nova/tests/unit/compute/test_compute_cells.py nova/tests/unit/compute/test_compute_mgr.py nova/tests/unit/compute/test_compute_utils.py nova/tests/unit/compute/test_compute_xen.py nova/tests/unit/compute/test_flavors.py nova/tests/unit/compute/test_host_api.py nova/tests/unit/compute/test_hvtype.py nova/tests/unit/compute/test_keypairs.py nova/tests/unit/compute/test_multiple_nodes.py nova/tests/unit/compute/test_resource_tracker.py nova/tests/unit/compute/test_resources.py nova/tests/unit/compute/test_rpcapi.py nova/tests/unit/compute/test_shelve.py nova/tests/unit/compute/test_stats.py nova/tests/unit/compute/test_tracker.py nova/tests/unit/compute/test_virtapi.py nova/tests/unit/compute/test_vmmode.py nova/tests/unit/compute/monitors/__init__.py nova/tests/unit/compute/monitors/test_monitors.py nova/tests/unit/compute/monitors/cpu/__init__.py nova/tests/unit/compute/monitors/cpu/test_virt_driver.py 
nova/tests/unit/conductor/__init__.py nova/tests/unit/conductor/test_conductor.py nova/tests/unit/conductor/tasks/__init__.py nova/tests/unit/conductor/tasks/test_base.py nova/tests/unit/conductor/tasks/test_live_migrate.py nova/tests/unit/conductor/tasks/test_migrate.py nova/tests/unit/console/__init__.py nova/tests/unit/console/test_console.py nova/tests/unit/console/test_rpcapi.py nova/tests/unit/console/test_serial.py nova/tests/unit/console/test_type.py nova/tests/unit/console/test_websocketproxy.py nova/tests/unit/consoleauth/__init__.py nova/tests/unit/consoleauth/test_consoleauth.py nova/tests/unit/consoleauth/test_rpcapi.py nova/tests/unit/db/__init__.py nova/tests/unit/db/fakes.py nova/tests/unit/db/test_db_api.py nova/tests/unit/db/test_migration_utils.py nova/tests/unit/db/test_migrations.py nova/tests/unit/db/test_models.py nova/tests/unit/db/test_sqlalchemy_migration.py nova/tests/unit/fake_loadables/__init__.py nova/tests/unit/fake_loadables/fake_loadable1.py nova/tests/unit/fake_loadables/fake_loadable2.py nova/tests/unit/image/__init__.py nova/tests/unit/image/abs.tar.gz nova/tests/unit/image/fake.py nova/tests/unit/image/rel.tar.gz nova/tests/unit/image/test_fake.py nova/tests/unit/image/test_glance.py nova/tests/unit/image/test_transfer_modules.py nova/tests/unit/keymgr/__init__.py nova/tests/unit/keymgr/fake.py nova/tests/unit/keymgr/test_barbican.py nova/tests/unit/keymgr/test_conf_key_mgr.py nova/tests/unit/keymgr/test_key.py nova/tests/unit/keymgr/test_key_mgr.py nova/tests/unit/keymgr/test_mock_key_mgr.py nova/tests/unit/keymgr/test_not_implemented_key_mgr.py nova/tests/unit/keymgr/test_single_key_mgr.py nova/tests/unit/monkey_patch_example/__init__.py nova/tests/unit/monkey_patch_example/example_a.py nova/tests/unit/monkey_patch_example/example_b.py nova/tests/unit/network/__init__.py nova/tests/unit/network/interfaces-override.template nova/tests/unit/network/test_api.py nova/tests/unit/network/test_config.py 
nova/tests/unit/network/test_l3.py nova/tests/unit/network/test_linux_net.py nova/tests/unit/network/test_manager.py nova/tests/unit/network/test_network_info.py nova/tests/unit/network/test_neutronv2.py nova/tests/unit/network/test_rpcapi.py nova/tests/unit/network/security_group/__init__.py nova/tests/unit/network/security_group/test_neutron_driver.py nova/tests/unit/objects/__init__.py nova/tests/unit/objects/test_agent.py nova/tests/unit/objects/test_aggregate.py nova/tests/unit/objects/test_bandwidth_usage.py nova/tests/unit/objects/test_block_device.py nova/tests/unit/objects/test_build_request.py nova/tests/unit/objects/test_cell_mapping.py nova/tests/unit/objects/test_compute_node.py nova/tests/unit/objects/test_dns_domain.py nova/tests/unit/objects/test_ec2.py nova/tests/unit/objects/test_external_event.py nova/tests/unit/objects/test_fields.py nova/tests/unit/objects/test_fixed_ip.py nova/tests/unit/objects/test_flavor.py nova/tests/unit/objects/test_floating_ip.py nova/tests/unit/objects/test_host_mapping.py nova/tests/unit/objects/test_hv_spec.py nova/tests/unit/objects/test_image_meta.py nova/tests/unit/objects/test_instance.py nova/tests/unit/objects/test_instance_action.py nova/tests/unit/objects/test_instance_fault.py nova/tests/unit/objects/test_instance_group.py nova/tests/unit/objects/test_instance_info_cache.py nova/tests/unit/objects/test_instance_mapping.py nova/tests/unit/objects/test_instance_numa_topology.py nova/tests/unit/objects/test_instance_pci_requests.py nova/tests/unit/objects/test_keypair.py nova/tests/unit/objects/test_migrate_data.py nova/tests/unit/objects/test_migration.py nova/tests/unit/objects/test_migration_context.py nova/tests/unit/objects/test_monitor_metric.py nova/tests/unit/objects/test_network.py nova/tests/unit/objects/test_network_request.py nova/tests/unit/objects/test_notification.py nova/tests/unit/objects/test_numa.py nova/tests/unit/objects/test_objects.py nova/tests/unit/objects/test_pci_device.py 
nova/tests/unit/objects/test_pci_device_pool.py nova/tests/unit/objects/test_quotas.py nova/tests/unit/objects/test_request_spec.py nova/tests/unit/objects/test_resource_provider.py nova/tests/unit/objects/test_security_group.py nova/tests/unit/objects/test_security_group_rule.py nova/tests/unit/objects/test_service.py nova/tests/unit/objects/test_tag.py nova/tests/unit/objects/test_task_log.py nova/tests/unit/objects/test_vcpu_model.py nova/tests/unit/objects/test_virt_cpu_topology.py nova/tests/unit/objects/test_virtual_interface.py nova/tests/unit/objects/test_volume_usage.py nova/tests/unit/pci/__init__.py nova/tests/unit/pci/fakes.py nova/tests/unit/pci/test_devspec.py nova/tests/unit/pci/test_manager.py nova/tests/unit/pci/test_request.py nova/tests/unit/pci/test_stats.py nova/tests/unit/pci/test_utils.py nova/tests/unit/pci/test_whitelist.py nova/tests/unit/scheduler/__init__.py nova/tests/unit/scheduler/fakes.py nova/tests/unit/scheduler/ironic_fakes.py nova/tests/unit/scheduler/test_caching_scheduler.py nova/tests/unit/scheduler/test_chance_scheduler.py nova/tests/unit/scheduler/test_client.py nova/tests/unit/scheduler/test_filter_scheduler.py nova/tests/unit/scheduler/test_filters.py nova/tests/unit/scheduler/test_host_filters.py nova/tests/unit/scheduler/test_host_manager.py nova/tests/unit/scheduler/test_ironic_host_manager.py nova/tests/unit/scheduler/test_rpcapi.py nova/tests/unit/scheduler/test_scheduler.py nova/tests/unit/scheduler/test_scheduler_options.py nova/tests/unit/scheduler/test_scheduler_utils.py nova/tests/unit/scheduler/filters/__init__.py nova/tests/unit/scheduler/filters/test_affinity_filters.py nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py nova/tests/unit/scheduler/filters/test_availability_zone_filters.py 
nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py nova/tests/unit/scheduler/filters/test_compute_filters.py nova/tests/unit/scheduler/filters/test_core_filters.py nova/tests/unit/scheduler/filters/test_disk_filters.py nova/tests/unit/scheduler/filters/test_exact_core_filter.py nova/tests/unit/scheduler/filters/test_exact_disk_filter.py nova/tests/unit/scheduler/filters/test_exact_ram_filter.py nova/tests/unit/scheduler/filters/test_extra_specs_ops.py nova/tests/unit/scheduler/filters/test_image_props_filters.py nova/tests/unit/scheduler/filters/test_io_ops_filters.py nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py nova/tests/unit/scheduler/filters/test_json_filters.py nova/tests/unit/scheduler/filters/test_metrics_filters.py nova/tests/unit/scheduler/filters/test_num_instances_filters.py nova/tests/unit/scheduler/filters/test_numa_topology_filters.py nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py nova/tests/unit/scheduler/filters/test_ram_filters.py nova/tests/unit/scheduler/filters/test_retry_filters.py nova/tests/unit/scheduler/filters/test_trusted_filters.py nova/tests/unit/scheduler/filters/test_type_filters.py nova/tests/unit/scheduler/filters/test_utils.py nova/tests/unit/scheduler/weights/__init__.py nova/tests/unit/scheduler/weights/test_weights_affinity.py nova/tests/unit/scheduler/weights/test_weights_disk.py nova/tests/unit/scheduler/weights/test_weights_hosts.py nova/tests/unit/scheduler/weights/test_weights_ioopsweight.py nova/tests/unit/scheduler/weights/test_weights_metrics.py nova/tests/unit/scheduler/weights/test_weights_ram.py nova/tests/unit/servicegroup/__init__.py nova/tests/unit/servicegroup/test_api.py nova/tests/unit/servicegroup/test_db_servicegroup.py nova/tests/unit/servicegroup/test_mc_servicegroup.py nova/tests/unit/ssl_cert/ca.crt nova/tests/unit/ssl_cert/ca.key nova/tests/unit/ssl_cert/certificate.crt nova/tests/unit/ssl_cert/privatekey.key nova/tests/unit/virt/__init__.py 
nova/tests/unit/virt/fakelibosinfo.py nova/tests/unit/virt/test_block_device.py nova/tests/unit/virt/test_configdrive.py nova/tests/unit/virt/test_diagnostics.py nova/tests/unit/virt/test_driver.py nova/tests/unit/virt/test_events.py nova/tests/unit/virt/test_fake.py nova/tests/unit/virt/test_hardware.py nova/tests/unit/virt/test_imagecache.py nova/tests/unit/virt/test_images.py nova/tests/unit/virt/test_osinfo.py nova/tests/unit/virt/test_virt.py nova/tests/unit/virt/test_virt_drivers.py nova/tests/unit/virt/test_volumeutils.py nova/tests/unit/virt/disk/__init__.py nova/tests/unit/virt/disk/test_api.py nova/tests/unit/virt/disk/test_inject.py nova/tests/unit/virt/disk/mount/__init__.py nova/tests/unit/virt/disk/mount/test_api.py nova/tests/unit/virt/disk/mount/test_block.py nova/tests/unit/virt/disk/mount/test_loop.py nova/tests/unit/virt/disk/mount/test_nbd.py nova/tests/unit/virt/disk/vfs/__init__.py nova/tests/unit/virt/disk/vfs/fakeguestfs.py nova/tests/unit/virt/disk/vfs/test_guestfs.py nova/tests/unit/virt/disk/vfs/test_localfs.py nova/tests/unit/virt/hyperv/__init__.py nova/tests/unit/virt/hyperv/test_base.py nova/tests/unit/virt/hyperv/test_driver.py nova/tests/unit/virt/hyperv/test_eventhandler.py nova/tests/unit/virt/hyperv/test_hostops.py nova/tests/unit/virt/hyperv/test_imagecache.py nova/tests/unit/virt/hyperv/test_livemigrationops.py nova/tests/unit/virt/hyperv/test_migrationops.py nova/tests/unit/virt/hyperv/test_pathutils.py nova/tests/unit/virt/hyperv/test_rdpconsoleops.py nova/tests/unit/virt/hyperv/test_snapshotops.py nova/tests/unit/virt/hyperv/test_vif.py nova/tests/unit/virt/hyperv/test_vmops.py nova/tests/unit/virt/hyperv/test_volumeops.py nova/tests/unit/virt/image/__init__.py nova/tests/unit/virt/image/test_model.py nova/tests/unit/virt/ironic/__init__.py nova/tests/unit/virt/ironic/test_client_wrapper.py nova/tests/unit/virt/ironic/test_driver.py nova/tests/unit/virt/ironic/test_patcher.py nova/tests/unit/virt/ironic/utils.py 
nova/tests/unit/virt/libvirt/__init__.py nova/tests/unit/virt/libvirt/fake_imagebackend.py nova/tests/unit/virt/libvirt/fake_libvirt_utils.py nova/tests/unit/virt/libvirt/fake_os_brick_connector.py nova/tests/unit/virt/libvirt/fakelibvirt.py nova/tests/unit/virt/libvirt/test_blockinfo.py nova/tests/unit/virt/libvirt/test_compat.py nova/tests/unit/virt/libvirt/test_config.py nova/tests/unit/virt/libvirt/test_designer.py nova/tests/unit/virt/libvirt/test_driver.py nova/tests/unit/virt/libvirt/test_fakelibvirt.py nova/tests/unit/virt/libvirt/test_firewall.py nova/tests/unit/virt/libvirt/test_guest.py nova/tests/unit/virt/libvirt/test_host.py nova/tests/unit/virt/libvirt/test_imagebackend.py nova/tests/unit/virt/libvirt/test_imagecache.py nova/tests/unit/virt/libvirt/test_utils.py nova/tests/unit/virt/libvirt/test_vif.py nova/tests/unit/virt/libvirt/storage/__init__.py nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py nova/tests/unit/virt/libvirt/storage/test_lvm.py nova/tests/unit/virt/libvirt/storage/test_rbd.py nova/tests/unit/virt/libvirt/volume/__init__.py nova/tests/unit/virt/libvirt/volume/test_aoe.py nova/tests/unit/virt/libvirt/volume/test_disco.py nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py nova/tests/unit/virt/libvirt/volume/test_fs.py nova/tests/unit/virt/libvirt/volume/test_glusterfs.py nova/tests/unit/virt/libvirt/volume/test_gpfs.py nova/tests/unit/virt/libvirt/volume/test_hgst.py nova/tests/unit/virt/libvirt/volume/test_iscsi.py nova/tests/unit/virt/libvirt/volume/test_iser.py nova/tests/unit/virt/libvirt/volume/test_net.py nova/tests/unit/virt/libvirt/volume/test_nfs.py nova/tests/unit/virt/libvirt/volume/test_quobyte.py nova/tests/unit/virt/libvirt/volume/test_remotefs.py nova/tests/unit/virt/libvirt/volume/test_scaleio.py nova/tests/unit/virt/libvirt/volume/test_scality.py nova/tests/unit/virt/libvirt/volume/test_smbfs.py nova/tests/unit/virt/libvirt/volume/test_volume.py nova/tests/unit/virt/vmwareapi/__init__.py 
nova/tests/unit/virt/vmwareapi/fake.py nova/tests/unit/virt/vmwareapi/ovf.xml nova/tests/unit/virt/vmwareapi/stubs.py nova/tests/unit/virt/vmwareapi/test_configdrive.py nova/tests/unit/virt/vmwareapi/test_driver_api.py nova/tests/unit/virt/vmwareapi/test_ds_util.py nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py nova/tests/unit/virt/vmwareapi/test_imagecache.py nova/tests/unit/virt/vmwareapi/test_images.py nova/tests/unit/virt/vmwareapi/test_io_util.py nova/tests/unit/virt/vmwareapi/test_network_util.py nova/tests/unit/virt/vmwareapi/test_read_write_util.py nova/tests/unit/virt/vmwareapi/test_vif.py nova/tests/unit/virt/vmwareapi/test_vim_util.py nova/tests/unit/virt/vmwareapi/test_vm_util.py nova/tests/unit/virt/vmwareapi/test_vmops.py nova/tests/unit/virt/vmwareapi/test_volumeops.py nova/tests/unit/virt/xenapi/__init__.py nova/tests/unit/virt/xenapi/stubs.py nova/tests/unit/virt/xenapi/test_agent.py nova/tests/unit/virt/xenapi/test_driver.py nova/tests/unit/virt/xenapi/test_network_utils.py nova/tests/unit/virt/xenapi/test_vif.py nova/tests/unit/virt/xenapi/test_vm_utils.py nova/tests/unit/virt/xenapi/test_vmops.py nova/tests/unit/virt/xenapi/test_volume_utils.py nova/tests/unit/virt/xenapi/test_volumeops.py nova/tests/unit/virt/xenapi/test_xenapi.py nova/tests/unit/virt/xenapi/vm_rrd.xml nova/tests/unit/virt/xenapi/client/__init__.py nova/tests/unit/virt/xenapi/client/test_objects.py nova/tests/unit/virt/xenapi/client/test_session.py nova/tests/unit/virt/xenapi/image/__init__.py nova/tests/unit/virt/xenapi/image/test_bittorrent.py nova/tests/unit/virt/xenapi/image/test_glance.py nova/tests/unit/virt/xenapi/image/test_utils.py nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py nova/tests/unit/virt/xenapi/plugins/__init__.py nova/tests/unit/virt/xenapi/plugins/plugin_test.py nova/tests/unit/virt/xenapi/plugins/test_nova_plugin_version.py nova/tests/unit/volume/__init__.py nova/tests/unit/volume/test_cinder.py 
nova/tests/unit/volume/encryptors/__init__.py nova/tests/unit/volume/encryptors/test_base.py nova/tests/unit/volume/encryptors/test_cryptsetup.py nova/tests/unit/volume/encryptors/test_luks.py nova/tests/unit/volume/encryptors/test_nop.py nova/virt/__init__.py nova/virt/block_device.py nova/virt/configdrive.py nova/virt/diagnostics.py nova/virt/driver.py nova/virt/event.py nova/virt/fake.py nova/virt/firewall.py nova/virt/hardware.py nova/virt/imagecache.py nova/virt/images.py nova/virt/interfaces.template nova/virt/netutils.py nova/virt/opts.py nova/virt/osinfo.py nova/virt/storage_users.py nova/virt/virtapi.py nova/virt/volumeutils.py nova/virt/watchdog_actions.py nova/virt/disk/__init__.py nova/virt/disk/api.py nova/virt/disk/mount/__init__.py nova/virt/disk/mount/api.py nova/virt/disk/mount/block.py nova/virt/disk/mount/loop.py nova/virt/disk/mount/nbd.py nova/virt/disk/vfs/__init__.py nova/virt/disk/vfs/api.py nova/virt/disk/vfs/guestfs.py nova/virt/disk/vfs/localfs.py nova/virt/hyperv/README.rst nova/virt/hyperv/__init__.py nova/virt/hyperv/constants.py nova/virt/hyperv/driver.py nova/virt/hyperv/eventhandler.py nova/virt/hyperv/hostops.py nova/virt/hyperv/imagecache.py nova/virt/hyperv/livemigrationops.py nova/virt/hyperv/migrationops.py nova/virt/hyperv/pathutils.py nova/virt/hyperv/rdpconsoleops.py nova/virt/hyperv/snapshotops.py nova/virt/hyperv/vif.py nova/virt/hyperv/vmops.py nova/virt/hyperv/volumeops.py nova/virt/image/__init__.py nova/virt/image/model.py nova/virt/ironic/__init__.py nova/virt/ironic/client_wrapper.py nova/virt/ironic/driver.py nova/virt/ironic/ironic_states.py nova/virt/ironic/patcher.py nova/virt/libvirt/__init__.py nova/virt/libvirt/blockinfo.py nova/virt/libvirt/compat.py nova/virt/libvirt/config.py nova/virt/libvirt/designer.py nova/virt/libvirt/driver.py nova/virt/libvirt/firewall.py nova/virt/libvirt/guest.py nova/virt/libvirt/host.py nova/virt/libvirt/imagebackend.py nova/virt/libvirt/imagecache.py 
nova/virt/libvirt/instancejobtracker.py nova/virt/libvirt/utils.py nova/virt/libvirt/vif.py nova/virt/libvirt/storage/__init__.py nova/virt/libvirt/storage/dmcrypt.py nova/virt/libvirt/storage/lvm.py nova/virt/libvirt/storage/rbd_utils.py nova/virt/libvirt/volume/__init__.py nova/virt/libvirt/volume/aoe.py nova/virt/libvirt/volume/disco.py nova/virt/libvirt/volume/fibrechannel.py nova/virt/libvirt/volume/fs.py nova/virt/libvirt/volume/glusterfs.py nova/virt/libvirt/volume/gpfs.py nova/virt/libvirt/volume/hgst.py nova/virt/libvirt/volume/iscsi.py nova/virt/libvirt/volume/iser.py nova/virt/libvirt/volume/net.py nova/virt/libvirt/volume/nfs.py nova/virt/libvirt/volume/quobyte.py nova/virt/libvirt/volume/remotefs.py nova/virt/libvirt/volume/scaleio.py nova/virt/libvirt/volume/scality.py nova/virt/libvirt/volume/smbfs.py nova/virt/libvirt/volume/volume.py nova/virt/vmwareapi/__init__.py nova/virt/vmwareapi/constants.py nova/virt/vmwareapi/driver.py nova/virt/vmwareapi/ds_util.py nova/virt/vmwareapi/error_util.py nova/virt/vmwareapi/host.py nova/virt/vmwareapi/imagecache.py nova/virt/vmwareapi/images.py nova/virt/vmwareapi/io_util.py nova/virt/vmwareapi/network_util.py nova/virt/vmwareapi/read_write_util.py nova/virt/vmwareapi/vif.py nova/virt/vmwareapi/vim_util.py nova/virt/vmwareapi/vm_util.py nova/virt/vmwareapi/vmops.py nova/virt/vmwareapi/volumeops.py nova/virt/xenapi/__init__.py nova/virt/xenapi/agent.py nova/virt/xenapi/driver.py nova/virt/xenapi/fake.py nova/virt/xenapi/firewall.py nova/virt/xenapi/host.py nova/virt/xenapi/network_utils.py nova/virt/xenapi/pool.py nova/virt/xenapi/pool_states.py nova/virt/xenapi/vif.py nova/virt/xenapi/vm_utils.py nova/virt/xenapi/vmops.py nova/virt/xenapi/volume_utils.py nova/virt/xenapi/volumeops.py nova/virt/xenapi/client/__init__.py nova/virt/xenapi/client/objects.py nova/virt/xenapi/client/session.py nova/virt/xenapi/image/__init__.py nova/virt/xenapi/image/bittorrent.py nova/virt/xenapi/image/glance.py 
nova/virt/xenapi/image/utils.py nova/virt/xenapi/image/vdi_through_dev.py nova/vnc/__init__.py nova/vnc/xvp_proxy.py nova/volume/__init__.py nova/volume/cinder.py nova/volume/encryptors/__init__.py nova/volume/encryptors/base.py nova/volume/encryptors/cryptsetup.py nova/volume/encryptors/luks.py nova/volume/encryptors/nop.py nova/wsgi/nova-api.py nova/wsgi/nova-metadata.py plugins/xenserver/doc/networking.rst plugins/xenserver/networking/etc/init.d/host-rules plugins/xenserver/networking/etc/init.d/openvswitch-nova plugins/xenserver/networking/etc/sysconfig/openvswitch-nova plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules plugins/xenserver/networking/etc/xensource/scripts/novalib.py plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py plugins/xenserver/xenapi/README plugins/xenserver/xenapi/etc/xapi.d/plugins/_bittorrent_seeder plugins/xenserver/xenapi/etc/xapi.d/plugins/agent plugins/xenserver/xenapi/etc/xapi.d/plugins/bandwidth plugins/xenserver/xenapi/etc/xapi.d/plugins/bittorrent plugins/xenserver/xenapi/etc/xapi.d/plugins/config_file plugins/xenserver/xenapi/etc/xapi.d/plugins/console plugins/xenserver/xenapi/etc/xapi.d/plugins/glance plugins/xenserver/xenapi/etc/xapi.d/plugins/ipxe plugins/xenserver/xenapi/etc/xapi.d/plugins/kernel plugins/xenserver/xenapi/etc/xapi.d/plugins/migration plugins/xenserver/xenapi/etc/xapi.d/plugins/nova_plugin_version plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py plugins/xenserver/xenapi/etc/xapi.d/plugins/utils.py plugins/xenserver/xenapi/etc/xapi.d/plugins/workarounds plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py releasenotes/notes/.placeholder 
releasenotes/notes/13.0.0-cve-bugs-fe43ef267a82f304.yaml releasenotes/notes/1516578-628b417b372f4f0f.yaml releasenotes/notes/abort-live-migration-cb902bb0754b11b6.yaml releasenotes/notes/add-aggregate-type-extra-specs-affinity-filter-79a2d3ee152b8ecd.yaml releasenotes/notes/add-novnc-proxy-config-to-vnc-group-f5bb68740f623744.yaml releasenotes/notes/add-xvp-config-to-vnc-group-349cca99f05fcfd3.yaml releasenotes/notes/aggregate-uuid-generation-1f029af7a9af519b.yaml releasenotes/notes/api-database-now-required-6245f39d36885d1c.yaml releasenotes/notes/api_servers_no_scheme-e4aa216d251022f2.yaml releasenotes/notes/attach-detach-vol-for-shelved-and-shelved-offloaded-instances-93f70cfd49299f05.yaml releasenotes/notes/block-live-migrate-with-attached-volumes-ee02afbfe46937c7.yaml releasenotes/notes/bp-add-project-and-user-id-a560d087656157d4.yaml releasenotes/notes/bp-boot-from-uefi-b413b96017db76dd.yaml releasenotes/notes/bp-get-valid-server-state-a817488f4c8d3822.yaml releasenotes/notes/bp-instance-crash-dump-7ccbba7799dc66f9.yaml releasenotes/notes/bp-making-live-migration-api-friendly-3b547f4e0958ee05.yaml releasenotes/notes/bp-rbd-instance-snapshots-130e860b726ddc16.yaml releasenotes/notes/bp-split-network-plane-for-live-migration-40bc127734173759.yaml releasenotes/notes/bp-virt-driver-cpu-thread-pinning-1aaeeb6648f8e009.yaml releasenotes/notes/cinder-backend-report-discard-1def1c28140def9b.yaml releasenotes/notes/compute_upgrade_levels_auto-97acebc7b45b76df.yaml releasenotes/notes/conductor_rpcapi_v2_drop-9893c27bb32d9786.yaml releasenotes/notes/config_scheduler_driver-e751ae392bc1a1d0.yaml releasenotes/notes/config_scheduler_host_manager_driver-a543a74ea70f5e90.yaml releasenotes/notes/deprecate-conductor-manager-class-03620676d939b0eb.yaml releasenotes/notes/deprecate-local-conductor-9cb9f45728281eb0.yaml releasenotes/notes/deprecate-nova-manage-service-subcommand-7626f7692bd62e41.yaml releasenotes/notes/deprecate_compute_stats_class-229abfcb8816bdbd.yaml 
releasenotes/notes/deprecate_db_driver-91c76ca8011d663c.yaml releasenotes/notes/deprecate_ert-449b16638c008457.yaml releasenotes/notes/deprecate_glance_opts-eab01aba5dcda38a.yaml releasenotes/notes/deprecate_hooks-6f6d60ac206a6da6.yaml releasenotes/notes/deprecate_pluggable_managers-ca0224bcd779454c.yaml releasenotes/notes/deprecate_security_group_api-3d96d683a3723e2c.yaml releasenotes/notes/deprecate_vendordata_driver-eefc745365a881c3.yaml releasenotes/notes/disable_ec2_api_by_default-0ec0946433fc7119.yaml releasenotes/notes/disco_volume_libvirt_driver-916428b8bd852732.yaml releasenotes/notes/disk-weight-scheduler-98647f9c6317d21d.yaml releasenotes/notes/disk_ratio_to_rt-b6224ab8c0272d86.yaml releasenotes/notes/drop_instancev1_obj-4447ddd2bea644fa.yaml releasenotes/notes/ebtables-version-fde659fe18b0e0c0.yaml releasenotes/notes/filters_use_reqspec-9f92b9c0ead76093.yaml releasenotes/notes/force-live-migration-be5a10cd9c8eb981.yaml releasenotes/notes/force_config_drive_opt-e087055e14c40d88.yaml releasenotes/notes/hyperv_2k8_drop-fb309f811767c7c4.yaml releasenotes/notes/instance-actions-read-deleted-instances-18bbb327924b66c7.yaml releasenotes/notes/instance-hostname-used-to-populate-ports-dns-name-08341ec73dc076c0.yaml releasenotes/notes/ironic_api_version_opt_deprecated-50c9b0486e78fe6e.yaml releasenotes/notes/libvirt-deprecate-migration-flags-config-4ba1e2d6c9ef09ff.yaml releasenotes/notes/libvirt-live-migration-flags-mangling-a2407a31ddf17427.yaml releasenotes/notes/libvirt-live-migration-new-tunneled-option-d7ebb1eb1e95e683.yaml releasenotes/notes/libvirt_hardware_policy_from_libosinfo-19e261851d1ad93a.yaml releasenotes/notes/live_migration_uri-dependent-on-virt_type-595c46c2310f45c3.yaml releasenotes/notes/lock_policy-75bea372036acbd5.yaml releasenotes/notes/min_libvirt_bump-d9916d9c4512dd11.yaml releasenotes/notes/mitaka_prelude-c8b955ed78a5ad65.yaml releasenotes/notes/neutron-mtu-6a7edd9e396107d7.yaml 
releasenotes/notes/neutron-ovs-bridge-name-7b3477103622f4cc.yaml releasenotes/notes/new-oslo-reports-option-619c3dbf3ae320fb.yaml releasenotes/notes/online-data-migrations-48dde6a1d8661e47.yaml releasenotes/notes/optional_project_id-6aebf1cb394d498f.yaml releasenotes/notes/os-migrations-ef225e5b309d5497.yaml releasenotes/notes/parallels_support_snapshot-29b4ffae300c1f05.yaml releasenotes/notes/policy-sample-defaults-changed-b5eea1daeb305251.yaml releasenotes/notes/remove-deprecated-neutron-options-5f3a782aa9082fb5.yaml releasenotes/notes/remove-ec2-api-service-c17a35ed297355b8.yaml releasenotes/notes/remove-on-shared-storage-flag-from-evacuate-api-76a3d58616479fe9.yaml releasenotes/notes/remove_ec2_and_objectstore_api-4ccb539db1d171fa.yaml releasenotes/notes/request-spec-api-db-b9cc6e0624d563c5.yaml releasenotes/notes/reserved-hugepages-per-nodes-f36225d5fca807e4.yaml releasenotes/notes/rm_volume_manager-78fed5be43d285b3.yaml releasenotes/notes/scheduling-to-disabled-hosts-79f5b5d20a42875a.yaml releasenotes/notes/server_migrations-30519b35d3ea6763.yaml releasenotes/notes/service-status-notification-e137297f5d5aa45d.yaml releasenotes/notes/soft-affinity-for-server-group-f45e191bd8cdbd15.yaml releasenotes/notes/switch-to-oslo-cache-7114a0ab2dea52df.yaml releasenotes/notes/upgrade_rootwrap_compute_filters-428ca239f2e4e63d.yaml releasenotes/notes/user-settable-server-description-89dcfc75677e31bc.yaml releasenotes/notes/versioned-notifications-423f4d8d2a3992c6.yaml releasenotes/notes/vhost-user-mtu-23d0af36a8adfa56.yaml releasenotes/notes/vmware_integration_bridge-249567087da5ecb2.yaml releasenotes/notes/vmware_limits-16edee7a9ad023bc.yaml releasenotes/notes/xen_rename-03edd9b78f3e81e5.yaml releasenotes/notes/xenserver-glance-plugin-1.3-11c3b70b8c928263.yaml releasenotes/notes/zookeeper-servicegroup-driver-removed-c3bcaa6f9fe976ed.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/unreleased.rst 
releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/abandon_old_reviews.sh tools/build_latex_pdf.sh tools/clean-vlans tools/colorizer.py tools/ebtables.workaround tools/enable-pre-commit-hook.sh tools/flake8wrap.sh tools/install_venv.py tools/install_venv_common.py tools/nova-manage.bash_completion tools/pretty_tox.sh tools/regression_tester.py tools/reserve-migrations.py tools/with_venv.sh tools/db/schema_diff.py tools/xenserver/cleanup_sm_locks.py tools/xenserver/destroy_cached_images.py tools/xenserver/populate_other_config.py tools/xenserver/rotate_xen_guest_logs.sh tools/xenserver/stress_test.py tools/xenserver/vdi_chain_cleanup.py tools/xenserver/vm_vdi_cleaner.pynova-13.0.0/nova.egg-info/PKG-INFO0000664000567000056710000000632112701410204017422 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: nova Version: 13.0.0 Summary: Cloud computing fabric controller Home-page: http://docs.openstack.org/developer/nova/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: OpenStack Nova README ===================== OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of virtualization technologies, including KVM, Xen, LXC, VMware, and more. In addition to its native API, it includes compatibility with the commonly encountered Amazon EC2 and S3 APIs. OpenStack Nova is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. Nova primarily consists of a set of Python daemons, though it requires and integrates with a number of native system components for databases, messaging and virtualization capabilities. To keep updated with new developments in the OpenStack project follow `@openstack `_ on Twitter. 
To learn how to deploy OpenStack Nova, consult the documentation available online at: http://docs.openstack.org For information about the different compute (hypervisor) drivers supported by Nova, read this page on the wiki: https://wiki.openstack.org/wiki/HypervisorSupportMatrix In the unfortunate event that bugs are discovered, they should be reported to the appropriate bug tracker. If you obtained the software from a 3rd party operating system vendor, it is often wise to use their own bug tracker for reporting problems. In all other cases use the master OpenStack bug tracker, available at: http://bugs.launchpad.net/nova Developers wishing to work on the OpenStack Nova project should always base their work on the latest Nova code, available from the master GIT repository at: https://git.openstack.org/cgit/openstack/nova Developers should also join the discussion on the mailing list, at: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev Any new code must follow the development guidelines detailed in the HACKING.rst file, and pass all unit tests. Further developer focused documentation is available at: http://docs.openstack.org/developer/nova/ For information on how to contribute to Nova, please see the contents of the CONTRIBUTING.rst file. 
-- End of broadcast Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 nova-13.0.0/nova.egg-info/top_level.txt0000664000567000056710000000000512701410204021050 0ustar jenkinsjenkins00000000000000nova nova-13.0.0/nova.egg-info/pbr.json0000664000567000056710000000005612701410204020002 0ustar jenkinsjenkins00000000000000{"is_release": true, "git_version": "7105f88"}nova-13.0.0/nova.egg-info/entry_points.txt0000664000567000056710000002271412701410204021627 0ustar jenkinsjenkins00000000000000[console_scripts] nova-all = nova.cmd.all:main nova-api = nova.cmd.api:main nova-api-metadata = nova.cmd.api_metadata:main nova-api-os-compute = nova.cmd.api_os_compute:main nova-cells = nova.cmd.cells:main nova-cert = nova.cmd.cert:main nova-compute = nova.cmd.compute:main nova-conductor = nova.cmd.conductor:main nova-console = nova.cmd.console:main nova-consoleauth = nova.cmd.consoleauth:main nova-dhcpbridge = nova.cmd.dhcpbridge:main nova-idmapshift = nova.cmd.idmapshift:main nova-manage = nova.cmd.manage:main nova-network = nova.cmd.network:main nova-novncproxy = nova.cmd.novncproxy:main nova-rootwrap = oslo_rootwrap.cmd:main nova-rootwrap-daemon = oslo_rootwrap.cmd:daemon nova-scheduler = nova.cmd.scheduler:main nova-serialproxy = nova.cmd.serialproxy:main nova-spicehtml5proxy = nova.cmd.spicehtml5proxy:main nova-xvpvncproxy = nova.cmd.xvpvncproxy:main [nova.api.v21.extensions] access_ips = nova.api.openstack.compute.access_ips:AccessIPs admin_actions = nova.api.openstack.compute.admin_actions:AdminActions admin_password = nova.api.openstack.compute.admin_password:AdminPassword agents = 
nova.api.openstack.compute.agents:Agents aggregates = nova.api.openstack.compute.aggregates:Aggregates assisted_volume_snapshots = nova.api.openstack.compute.assisted_volume_snapshots:AssistedVolumeSnapshots attach_interfaces = nova.api.openstack.compute.attach_interfaces:AttachInterfaces availability_zone = nova.api.openstack.compute.availability_zone:AvailabilityZone baremetal_nodes = nova.api.openstack.compute.baremetal_nodes:BareMetalNodes block_device_mapping = nova.api.openstack.compute.block_device_mapping:BlockDeviceMapping cells = nova.api.openstack.compute.cells:Cells certificates = nova.api.openstack.compute.certificates:Certificates cloudpipe = nova.api.openstack.compute.cloudpipe:Cloudpipe config_drive = nova.api.openstack.compute.config_drive:ConfigDrive console_auth_tokens = nova.api.openstack.compute.console_auth_tokens:ConsoleAuthTokens console_output = nova.api.openstack.compute.console_output:ConsoleOutput consoles = nova.api.openstack.compute.consoles:Consoles create_backup = nova.api.openstack.compute.create_backup:CreateBackup deferred_delete = nova.api.openstack.compute.deferred_delete:DeferredDelete disk_config = nova.api.openstack.compute.disk_config:DiskConfig evacuate = nova.api.openstack.compute.evacuate:Evacuate extended_availability_zone = nova.api.openstack.compute.extended_availability_zone:ExtendedAvailabilityZone extended_server_attributes = nova.api.openstack.compute.extended_server_attributes:ExtendedServerAttributes extended_status = nova.api.openstack.compute.extended_status:ExtendedStatus extended_volumes = nova.api.openstack.compute.extended_volumes:ExtendedVolumes extension_info = nova.api.openstack.compute.extension_info:ExtensionInfo fixed_ips = nova.api.openstack.compute.fixed_ips:FixedIps flavor_access = nova.api.openstack.compute.flavor_access:FlavorAccess flavor_manage = nova.api.openstack.compute.flavor_manage:FlavorManage flavor_rxtx = nova.api.openstack.compute.flavor_rxtx:FlavorRxtx flavors = 
nova.api.openstack.compute.flavors:Flavors flavors_extraspecs = nova.api.openstack.compute.flavors_extraspecs:FlavorsExtraSpecs floating_ip_dns = nova.api.openstack.compute.floating_ip_dns:FloatingIpDns floating_ip_pools = nova.api.openstack.compute.floating_ip_pools:FloatingIpPools floating_ips = nova.api.openstack.compute.floating_ips:FloatingIps floating_ips_bulk = nova.api.openstack.compute.floating_ips_bulk:FloatingIpsBulk fping = nova.api.openstack.compute.fping:Fping hide_server_addresses = nova.api.openstack.compute.hide_server_addresses:HideServerAddresses hosts = nova.api.openstack.compute.hosts:Hosts hypervisors = nova.api.openstack.compute.hypervisors:Hypervisors image_metadata = nova.api.openstack.compute.image_metadata:ImageMetadata image_size = nova.api.openstack.compute.image_size:ImageSize images = nova.api.openstack.compute.images:Images instance_actions = nova.api.openstack.compute.instance_actions:InstanceActions instance_usage_audit_log = nova.api.openstack.compute.instance_usage_audit_log:InstanceUsageAuditLog ips = nova.api.openstack.compute.ips:IPs keypairs = nova.api.openstack.compute.keypairs:Keypairs limits = nova.api.openstack.compute.limits:Limits lock_server = nova.api.openstack.compute.lock_server:LockServer migrate_server = nova.api.openstack.compute.migrate_server:MigrateServer migrations = nova.api.openstack.compute.migrations:Migrations multinic = nova.api.openstack.compute.multinic:Multinic multiple_create = nova.api.openstack.compute.multiple_create:MultipleCreate networks = nova.api.openstack.compute.networks:Networks networks_associate = nova.api.openstack.compute.networks_associate:NetworksAssociate pause_server = nova.api.openstack.compute.pause_server:PauseServer personality = nova.api.openstack.compute.personality:Personality preserve_ephemeral_rebuild = nova.api.openstack.compute.preserve_ephemeral_rebuild:PreserveEphemeralRebuild quota_classes = nova.api.openstack.compute.quota_classes:QuotaClasses quota_sets = 
nova.api.openstack.compute.quota_sets:QuotaSets remote_consoles = nova.api.openstack.compute.remote_consoles:RemoteConsoles rescue = nova.api.openstack.compute.rescue:Rescue scheduler_hints = nova.api.openstack.compute.scheduler_hints:SchedulerHints security_group_default_rules = nova.api.openstack.compute.security_group_default_rules:SecurityGroupDefaultRules security_groups = nova.api.openstack.compute.security_groups:SecurityGroups server_diagnostics = nova.api.openstack.compute.server_diagnostics:ServerDiagnostics server_external_events = nova.api.openstack.compute.server_external_events:ServerExternalEvents server_groups = nova.api.openstack.compute.server_groups:ServerGroups server_metadata = nova.api.openstack.compute.server_metadata:ServerMetadata server_migrations = nova.api.openstack.compute.server_migrations:ServerMigrations server_password = nova.api.openstack.compute.server_password:ServerPassword server_usage = nova.api.openstack.compute.server_usage:ServerUsage servers = nova.api.openstack.compute.servers:Servers services = nova.api.openstack.compute.services:Services shelve = nova.api.openstack.compute.shelve:Shelve simple_tenant_usage = nova.api.openstack.compute.simple_tenant_usage:SimpleTenantUsage suspend_server = nova.api.openstack.compute.suspend_server:SuspendServer tenant_networks = nova.api.openstack.compute.tenant_networks:TenantNetworks used_limits = nova.api.openstack.compute.used_limits:UsedLimits user_data = nova.api.openstack.compute.user_data:UserData versions = nova.api.openstack.compute.versionsV21:Versions virtual_interfaces = nova.api.openstack.compute.virtual_interfaces:VirtualInterfaces volumes = nova.api.openstack.compute.volumes:Volumes [nova.api.v21.extensions.server.create] access_ips = nova.api.openstack.compute.access_ips:AccessIPs availability_zone = nova.api.openstack.compute.availability_zone:AvailabilityZone block_device_mapping = nova.api.openstack.compute.block_device_mapping:BlockDeviceMapping 
block_device_mapping_v1 = nova.api.openstack.compute.block_device_mapping_v1:BlockDeviceMappingV1 config_drive = nova.api.openstack.compute.config_drive:ConfigDrive disk_config = nova.api.openstack.compute.disk_config:DiskConfig keypairs_create = nova.api.openstack.compute.keypairs:Keypairs multiple_create = nova.api.openstack.compute.multiple_create:MultipleCreate personality = nova.api.openstack.compute.personality:Personality scheduler_hints = nova.api.openstack.compute.scheduler_hints:SchedulerHints security_groups = nova.api.openstack.compute.security_groups:SecurityGroups user_data = nova.api.openstack.compute.user_data:UserData [nova.api.v21.extensions.server.rebuild] access_ips = nova.api.openstack.compute.access_ips:AccessIPs disk_config = nova.api.openstack.compute.disk_config:DiskConfig personality = nova.api.openstack.compute.personality:Personality preserve_ephemeral_rebuild = nova.api.openstack.compute.preserve_ephemeral_rebuild:PreserveEphemeralRebuild [nova.api.v21.extensions.server.resize] disk_config = nova.api.openstack.compute.disk_config:DiskConfig [nova.api.v21.extensions.server.update] access_ips = nova.api.openstack.compute.access_ips:AccessIPs disk_config = nova.api.openstack.compute.disk_config:DiskConfig [nova.api.v21.test_extensions] basic = nova.tests.unit.api.openstack.compute.basic:Basic microversions = nova.tests.unit.api.openstack.compute.microversions:Microversions [nova.compute.monitors.cpu] virt_driver = nova.compute.monitors.cpu.virt_driver:Monitor [nova.compute.resources] [nova.image.download.modules] file = nova.image.download.file [nova.ipv6_backend] account_identifier = nova.ipv6.account_identifier rfc2462 = nova.ipv6.rfc2462 [nova.scheduler.driver] caching_scheduler = nova.scheduler.caching_scheduler:CachingScheduler chance_scheduler = nova.scheduler.chance:ChanceScheduler fake_scheduler = nova.tests.unit.scheduler.fakes:FakeScheduler filter_scheduler = nova.scheduler.filter_scheduler:FilterScheduler 
[nova.scheduler.host_manager] host_manager = nova.scheduler.host_manager:HostManager ironic_host_manager = nova.scheduler.ironic_host_manager:IronicHostManager [oslo.config.opts] nova = nova.opts:list_opts nova.api = nova.api.opts:list_opts nova.cache_utils = nova.cache_utils:list_opts nova.cells = nova.cells.opts:list_opts nova.compute = nova.compute.opts:list_opts nova.conf = nova.conf.opts:list_opts nova.network = nova.network.opts:list_opts nova.network.neutronv2 = nova.network.neutronv2.api:list_opts nova.virt = nova.virt.opts:list_opts [oslo.config.opts.defaults] nova.api = nova.common.config:set_middleware_defaults nova-13.0.0/nova/0000775000567000056710000000000012701410205014632 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/test.py0000664000567000056710000004021312701410011016156 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for our unit tests. Allows overriding of flags for use of fakes, and some black magic for inline callbacks. 
""" import contextlib import datetime import eventlet eventlet.monkey_patch(os=False) import copy import inspect import mock import os import fixtures from oslo_cache import core as cache from oslo_concurrency import lockutils from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_log.fixture import logging_error as log_fixture from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import timeutils from oslotest import moxstubout import six import testtools from nova import context from nova import db from nova.network import manager as network_manager from nova.network.security_group import openstack_driver from nova.objects import base as objects_base from nova.tests import fixtures as nova_fixtures from nova.tests.unit import conf_fixture from nova.tests.unit import policy_fixture from nova import utils CONF = cfg.CONF CONF.import_opt('enabled', 'nova.api.openstack', group='osapi_v21') logging.register_options(CONF) CONF.set_override('use_stderr', False) logging.setup(CONF, 'nova') cache.configure(CONF) _TRUE_VALUES = ('True', 'true', '1', 'yes') if six.PY2: nested = contextlib.nested else: @contextlib.contextmanager def nested(*contexts): with contextlib.ExitStack() as stack: yield [stack.enter_context(c) for c in contexts] class SampleNetworks(fixtures.Fixture): """Create sample networks in the database.""" def __init__(self, host=None): self.host = host def setUp(self): super(SampleNetworks, self).setUp() ctxt = context.get_admin_context() network = network_manager.VlanManager(host=self.host) bridge_interface = CONF.flat_interface or CONF.vlan_interface network.create_networks(ctxt, label='test', cidr='10.0.0.0/8', multi_host=CONF.multi_host, num_networks=CONF.num_networks, network_size=CONF.network_size, cidr_v6=CONF.fixed_range_v6, gateway=CONF.gateway, gateway_v6=CONF.gateway_v6, bridge=CONF.flat_network_bridge, bridge_interface=bridge_interface, vpn_start=CONF.vpn_start, 
vlan_start=CONF.vlan_start, dns1=CONF.flat_network_dns) for net in db.network_get_all(ctxt): network.set_network_host(ctxt, net) class TestingException(Exception): pass class skipIf(object): def __init__(self, condition, reason): self.condition = condition self.reason = reason def __call__(self, func_or_cls): condition = self.condition reason = self.reason if inspect.isfunction(func_or_cls): @six.wraps(func_or_cls) def wrapped(*args, **kwargs): if condition: raise testtools.TestCase.skipException(reason) return func_or_cls(*args, **kwargs) return wrapped elif inspect.isclass(func_or_cls): orig_func = getattr(func_or_cls, 'setUp') @six.wraps(orig_func) def new_func(self, *args, **kwargs): if condition: raise testtools.TestCase.skipException(reason) orig_func(self, *args, **kwargs) func_or_cls.setUp = new_func return func_or_cls else: raise TypeError('skipUnless can be used only with functions or ' 'classes') def _patch_mock_to_raise_for_invalid_assert_calls(): def raise_for_invalid_assert_calls(wrapped): def wrapper(_self, name): valid_asserts = [ 'assert_called_with', 'assert_called_once_with', 'assert_has_calls', 'assert_any_calls'] if name.startswith('assert') and name not in valid_asserts: raise AttributeError('%s is not a valid mock assert method' % name) return wrapped(_self, name) return wrapper mock.Mock.__getattr__ = raise_for_invalid_assert_calls( mock.Mock.__getattr__) # NOTE(gibi): needs to be called only once at import time # to patch the mock lib _patch_mock_to_raise_for_invalid_assert_calls() class TestCase(testtools.TestCase): """Test case base class for all unit tests. Due to the slowness of DB access, please consider deriving from `NoDBTestCase` first. 
""" USES_DB = True REQUIRES_LOCKING = False TIMEOUT_SCALING_FACTOR = 1 def setUp(self): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() self.useFixture(nova_fixtures.Timeout( os.environ.get('OS_TEST_TIMEOUT', 0), self.TIMEOUT_SCALING_FACTOR)) self.useFixture(fixtures.NestedTempfile()) self.useFixture(fixtures.TempHomeDir()) self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.useFixture(nova_fixtures.OutputStreamCapture()) self.useFixture(nova_fixtures.StandardLogging()) # NOTE(sdague): because of the way we were using the lock # wrapper we eneded up with a lot of tests that started # relying on global external locking being set up for them. We # consider all of these to be *bugs*. Tests should not require # global external locking, or if they do, they should # explicitly set it up themselves. # # The following REQUIRES_LOCKING class parameter is provided # as a bridge to get us there. No new tests should be added # that require it, and existing classes and tests should be # fixed to not need it. if self.REQUIRES_LOCKING: lock_path = self.useFixture(fixtures.TempDir()).path self.fixture = self.useFixture( config_fixture.Config(lockutils.CONF)) self.fixture.config(lock_path=lock_path, group='oslo_concurrency') self.useFixture(conf_fixture.ConfFixture(CONF)) self.useFixture(nova_fixtures.RPCFixture('nova.test')) if self.USES_DB: self.useFixture(nova_fixtures.Database()) self.useFixture(nova_fixtures.Database(database='api')) # NOTE(blk-u): WarningsFixture must be after the Database fixture # because sqlalchemy-migrate messes with the warnings filters. self.useFixture(nova_fixtures.WarningsFixture()) # NOTE(danms): Make sure to reset us back to non-remote objects # for each test to avoid interactions. Also, backup the object # registry. 
objects_base.NovaObject.indirection_api = None self._base_test_obj_backup = copy.copy( objects_base.NovaObjectRegistry._registry._obj_classes) self.addCleanup(self._restore_obj_registry) self.useFixture(nova_fixtures.StableObjectJsonFixture()) # NOTE(mnaser): All calls to utils.is_neutron() are cached in # nova.utils._IS_NEUTRON. We set it to None to avoid any # caching of that value. utils._IS_NEUTRON = None mox_fixture = self.useFixture(moxstubout.MoxStubout()) self.mox = mox_fixture.mox self.stubs = mox_fixture.stubs self.addCleanup(self._clear_attrs) self.useFixture(fixtures.EnvironmentVariable('http_proxy')) self.policy = self.useFixture(policy_fixture.PolicyFixture()) self.useFixture(nova_fixtures.PoisonFunctions()) openstack_driver.DRIVER_CACHE = {} self.useFixture(nova_fixtures.ForbidNewLegacyNotificationFixture()) def _restore_obj_registry(self): objects_base.NovaObjectRegistry._registry._obj_classes = \ self._base_test_obj_backup def _clear_attrs(self): # Delete attributes that don't start with _ so they don't pin # memory around unnecessarily for the duration of the test # suite for key in [k for k in self.__dict__.keys() if k[0] != '_']: # NOTE(gmann): Skip attribute 'id' because if tests are being # generated using testscenarios then, 'id' attribute is being # added during cloning the tests. And later that 'id' attribute # is being used by test suite to generate the results for each # newly generated tests by testscenarios. if key != 'id': del self.__dict__[key] def stub_out(self, old, new): """Replace a function for the duration of the test. Use the monkey patch fixture to replace a function for the duration of a test. Useful when you want to provide fake methods instead of mocks during testing. This should be used instead of self.stubs.Set (which is based on mox) going forward. 
""" self.useFixture(fixtures.MonkeyPatch(old, new)) def flags(self, **kw): """Override flag variables for a test.""" group = kw.pop('group', None) for k, v in six.iteritems(kw): CONF.set_override(k, v, group) def start_service(self, name, host=None, **kwargs): svc = self.useFixture( nova_fixtures.ServiceFixture(name, host, **kwargs)) return svc.service def assertJsonEqual(self, expected, observed): """Asserts that 2 complex data structures are json equivalent. We use data structures which serialize down to json throughout the code, and often times we just need to know that these are json equivalent. This means that list order is not important, and should be sorted. Because this is a recursive set of assertions, when failure happens we want to expose both the local failure and the global view of the 2 data structures being compared. So a MismatchError which includes the inner failure as the mismatch, and the passed in expected / observed as matchee / matcher. """ if isinstance(expected, six.string_types): expected = jsonutils.loads(expected) if isinstance(observed, six.string_types): observed = jsonutils.loads(observed) def sort_key(x): if isinstance(x, (set, list)) or isinstance(x, datetime.datetime): return str(x) if isinstance(x, dict): items = ((sort_key(key), sort_key(value)) for key, value in x.items()) return sorted(items) return x def inner(expected, observed): if isinstance(expected, dict) and isinstance(observed, dict): self.assertEqual(len(expected), len(observed)) expected_keys = sorted(expected) observed_keys = sorted(observed) self.assertEqual(expected_keys, observed_keys) for key in list(six.iterkeys(expected)): inner(expected[key], observed[key]) elif (isinstance(expected, (list, tuple, set)) and isinstance(observed, (list, tuple, set))): self.assertEqual(len(expected), len(observed)) expected_values_iter = iter(sorted(expected, key=sort_key)) observed_values_iter = iter(sorted(observed, key=sort_key)) for i in range(len(expected)): 
inner(next(expected_values_iter), next(observed_values_iter)) else: self.assertEqual(expected, observed) try: inner(expected, observed) except testtools.matchers.MismatchError as e: inner_mismatch = e.mismatch # inverting the observed / expected because testtools # error messages assume expected is second. Possibly makes # reading the error messages less confusing. raise testtools.matchers.MismatchError(observed, expected, inner_mismatch, verbose=True) def assertPublicAPISignatures(self, baseinst, inst): def get_public_apis(inst): methods = {} def findmethods(object): return inspect.ismethod(object) or inspect.isfunction(object) for (name, value) in inspect.getmembers(inst, findmethods): if name.startswith("_"): continue methods[name] = value return methods baseclass = baseinst.__class__.__name__ basemethods = get_public_apis(baseinst) implmethods = get_public_apis(inst) extranames = [] for name in sorted(implmethods.keys()): if name not in basemethods: extranames.append(name) self.assertEqual([], extranames, "public APIs not listed in base class %s" % baseclass) for name in sorted(implmethods.keys()): baseargs = inspect.getargspec(basemethods[name]) implargs = inspect.getargspec(implmethods[name]) self.assertEqual(baseargs, implargs, "%s args don't match base class %s" % (name, baseclass)) class APICoverage(object): cover_api = None def test_api_methods(self): self.assertTrue(self.cover_api is not None) api_methods = [x for x in dir(self.cover_api) if not x.startswith('_')] test_methods = [x[5:] for x in dir(self) if x.startswith('test_')] self.assertThat( test_methods, testtools.matchers.ContainsAll(api_methods)) class TimeOverride(fixtures.Fixture): """Fixture to start and remove time override.""" def setUp(self): super(TimeOverride, self).setUp() timeutils.set_time_override() self.addCleanup(timeutils.clear_time_override) class NoDBTestCase(TestCase): """`NoDBTestCase` differs from TestCase in that DB access is not supported. 
This makes tests run significantly faster. If possible, all new tests should derive from this class. """ USES_DB = False class BaseHookTestCase(NoDBTestCase): def assert_has_hook(self, expected_name, func): self.assertTrue(hasattr(func, '__hook_name__')) self.assertEqual(expected_name, func.__hook_name__) class MatchType(object): """Matches any instance of a specified type The MatchType class is a helper for use with the mock.assert_called_with() method that lets you assert that a particular parameter has a specific data type. It enables strict check than the built in mock.ANY helper, and is the equivalent of the mox.IsA() function from the legacy mox library Example usage could be: mock_some_method.assert_called_once_with( "hello", MatchType(objects.Instance), mock.ANY, "world", MatchType(objects.KeyPair)) """ def __init__(self, wanttype): self.wanttype = wanttype def __eq__(self, other): return type(other) == self.wanttype def __ne__(self, other): return type(other) != self.wanttype def __repr__(self): return "" nova-13.0.0/nova/quota.py0000664000567000056710000020226012701410011016332 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Quotas for instances, and floating ips.""" import datetime from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import timeutils import six from nova import db from nova import exception from nova.i18n import _LE from nova import objects LOG = logging.getLogger(__name__) quota_opts = [ cfg.IntOpt('quota_instances', default=10, help='Number of instances allowed per project'), cfg.IntOpt('quota_cores', default=20, help='Number of instance cores allowed per project'), cfg.IntOpt('quota_ram', default=50 * 1024, help='Megabytes of instance RAM allowed per project'), cfg.IntOpt('quota_floating_ips', default=10, help='Number of floating IPs allowed per project'), cfg.IntOpt('quota_fixed_ips', default=-1, help='Number of fixed IPs allowed per project (this should be ' 'at least the number of instances allowed)'), cfg.IntOpt('quota_metadata_items', default=128, help='Number of metadata items allowed per instance'), cfg.IntOpt('quota_injected_files', default=5, help='Number of injected files allowed'), cfg.IntOpt('quota_injected_file_content_bytes', default=10 * 1024, help='Number of bytes allowed per injected file'), cfg.IntOpt('quota_injected_file_path_length', default=255, help='Length of injected file path'), cfg.IntOpt('quota_security_groups', default=10, help='Number of security groups per project'), cfg.IntOpt('quota_security_group_rules', default=20, help='Number of security rules per security group'), cfg.IntOpt('quota_key_pairs', default=100, help='Number of key pairs per user'), cfg.IntOpt('quota_server_groups', default=10, help='Number of server groups per project'), cfg.IntOpt('quota_server_group_members', default=10, help='Number of servers per server group'), cfg.IntOpt('reservation_expire', default=86400, help='Number of seconds until a reservation expires'), cfg.IntOpt('until_refresh', default=0, help='Count of reservations until usage is refreshed. 
This ' 'defaults to 0(off) to avoid additional load but it is ' 'useful to turn on to help keep quota usage up to date ' 'and reduce the impact of out of sync usage issues.'), cfg.IntOpt('max_age', default=0, help='Number of seconds between subsequent usage refreshes. ' 'This defaults to 0(off) to avoid additional load but it ' 'is useful to turn on to help keep quota usage up to date ' 'and reduce the impact of out of sync usage issues. ' 'Note that quotas are not updated on a periodic task, ' 'they will update on a new reservation if max_age has ' 'passed since the last reservation'), cfg.StrOpt('quota_driver', default='nova.quota.DbQuotaDriver', help='Default driver to use for quota checks'), ] CONF = cfg.CONF CONF.register_opts(quota_opts) class DbQuotaDriver(object): """Driver to perform necessary checks to enforce quotas and obtain quota information. The default driver utilizes the local database. """ UNLIMITED_VALUE = -1 def get_by_project_and_user(self, context, project_id, user_id, resource): """Get a specific quota by project and user.""" return db.quota_get(context, project_id, resource, user_id=user_id) def get_by_project(self, context, project_id, resource): """Get a specific quota by project.""" return db.quota_get(context, project_id, resource) def get_by_class(self, context, quota_class, resource): """Get a specific quota by quota class.""" return db.quota_class_get(context, quota_class, resource) def get_defaults(self, context, resources): """Given a list of resources, retrieve the default quotas. Use the class quotas named `_DEFAULT_QUOTA_NAME` as default quotas, if it exists. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. 
""" quotas = {} default_quotas = db.quota_class_get_default(context) for resource in resources.values(): quotas[resource.name] = default_quotas.get(resource.name, resource.default) return quotas def get_class_quotas(self, context, resources, quota_class, defaults=True): """Given a list of resources, retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param quota_class: The name of the quota class to return quotas for. :param defaults: If True, the default value will be reported if there is no specific value for the resource. """ quotas = {} class_quotas = db.quota_class_get_all_by_name(context, quota_class) for resource in resources.values(): if defaults or resource.name in class_quotas: quotas[resource.name] = class_quotas.get(resource.name, resource.default) return quotas def _process_quotas(self, context, resources, project_id, quotas, quota_class=None, defaults=True, usages=None, remains=False): modified_quotas = {} # Get the quotas for the appropriate class. If the project ID # matches the one in the context, we use the quota_class from # the context, otherwise, we use the provided quota_class (if # any) if project_id == context.project_id: quota_class = context.quota_class if quota_class: class_quotas = db.quota_class_get_all_by_name(context, quota_class) else: class_quotas = {} default_quotas = self.get_defaults(context, resources) for resource in resources.values(): # Omit default/quota class values if not defaults and resource.name not in quotas: continue limit = quotas.get(resource.name, class_quotas.get( resource.name, default_quotas[resource.name])) modified_quotas[resource.name] = dict(limit=limit) # Include usages if desired. This is optional because one # internal consumer of this interface wants to access the # usages directly from inside a transaction. 
if usages: usage = usages.get(resource.name, {}) modified_quotas[resource.name].update( in_use=usage.get('in_use', 0), reserved=usage.get('reserved', 0), ) # Initialize remains quotas. if remains: modified_quotas[resource.name].update(remains=limit) if remains: all_quotas = db.quota_get_all(context, project_id) for quota in all_quotas: if quota.resource in modified_quotas: modified_quotas[quota.resource]['remains'] -= \ quota.hard_limit return modified_quotas def get_user_quotas(self, context, resources, project_id, user_id, quota_class=None, defaults=True, usages=True, project_quotas=None, user_quotas=None): """Given a list of resources, retrieve the quotas for the given user and project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. :param project_quotas: Quotas dictionary for the specified project. :param user_quotas: Quotas dictionary for the specified project and user. """ if user_quotas: user_quotas = user_quotas.copy() else: user_quotas = db.quota_get_all_by_project_and_user(context, project_id, user_id) # Use the project quota for default user quota. 
proj_quotas = project_quotas or db.quota_get_all_by_project( context, project_id) for key, value in six.iteritems(proj_quotas): if key not in user_quotas.keys(): user_quotas[key] = value user_usages = None if usages: user_usages = db.quota_usage_get_all_by_project_and_user(context, project_id, user_id) return self._process_quotas(context, resources, project_id, user_quotas, quota_class, defaults=defaults, usages=user_usages) def get_project_quotas(self, context, resources, project_id, quota_class=None, defaults=True, usages=True, remains=False, project_quotas=None): """Given a list of resources, retrieve the quotas for the given project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. :param remains: If True, the current remains of the project will will be returned. :param project_quotas: Quotas dictionary for the specified project. """ project_quotas = project_quotas or db.quota_get_all_by_project( context, project_id) project_usages = None if usages: LOG.debug('Getting all quota usages for project: %s', project_id) project_usages = db.quota_usage_get_all_by_project(context, project_id) return self._process_quotas(context, resources, project_id, project_quotas, quota_class, defaults=defaults, usages=project_usages, remains=remains) def _is_unlimited_value(self, v): """A helper method to check for unlimited value. 
""" return v <= self.UNLIMITED_VALUE def _sum_quota_values(self, v1, v2): """A helper method that handles unlimited values when performing sum operation. """ if self._is_unlimited_value(v1) or self._is_unlimited_value(v2): return self.UNLIMITED_VALUE return v1 + v2 def _sub_quota_values(self, v1, v2): """A helper method that handles unlimited values when performing subtraction operation. """ if self._is_unlimited_value(v1) or self._is_unlimited_value(v2): return self.UNLIMITED_VALUE return v1 - v2 def get_settable_quotas(self, context, resources, project_id, user_id=None): """Given a list of resources, retrieve the range of settable quotas for the given user or project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. """ settable_quotas = {} db_proj_quotas = db.quota_get_all_by_project(context, project_id) project_quotas = self.get_project_quotas(context, resources, project_id, remains=True, project_quotas=db_proj_quotas) if user_id: setted_quotas = db.quota_get_all_by_project_and_user(context, project_id, user_id) user_quotas = self.get_user_quotas(context, resources, project_id, user_id, project_quotas=db_proj_quotas, user_quotas=setted_quotas) for key, value in user_quotas.items(): maximum = \ self._sum_quota_values(project_quotas[key]['remains'], setted_quotas.get(key, 0)) minimum = value['in_use'] + value['reserved'] settable_quotas[key] = {'minimum': minimum, 'maximum': maximum} else: for key, value in project_quotas.items(): minimum = \ max(int(self._sub_quota_values(value['limit'], value['remains'])), int(value['in_use'] + value['reserved'])) settable_quotas[key] = {'minimum': minimum, 'maximum': -1} return settable_quotas def _get_quotas(self, context, resources, keys, has_sync, project_id=None, user_id=None, project_quotas=None): """A helper method which retrieves the 
quotas for the specific resources identified by keys, and which apply to the current context. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param keys: A list of the desired quotas to retrieve. :param has_sync: If True, indicates that the resource must have a sync function; if False, indicates that the resource must NOT have a sync function. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. :param project_quotas: Quotas dictionary for the specified project. """ # Filter resources if has_sync: sync_filt = lambda x: hasattr(x, 'sync') else: sync_filt = lambda x: not hasattr(x, 'sync') desired = set(keys) sub_resources = {k: v for k, v in resources.items() if k in desired and sync_filt(v)} # Make sure we accounted for all of them... if len(keys) != len(sub_resources): unknown = desired - set(sub_resources.keys()) raise exception.QuotaResourceUnknown(unknown=sorted(unknown)) if user_id: LOG.debug('Getting quotas for user %(user_id)s and project ' '%(project_id)s. Resources: %(keys)s', {'user_id': user_id, 'project_id': project_id, 'keys': keys}) # Grab and return the quotas (without usages) quotas = self.get_user_quotas(context, sub_resources, project_id, user_id, context.quota_class, usages=False, project_quotas=project_quotas) else: LOG.debug('Getting quotas for project %(project_id)s. Resources: ' '%(keys)s', {'project_id': project_id, 'keys': keys}) # Grab and return the quotas (without usages) quotas = self.get_project_quotas(context, sub_resources, project_id, context.quota_class, usages=False, project_quotas=project_quotas) return {k: v['limit'] for k, v in quotas.items()} def limit_check(self, context, resources, values, project_id=None, user_id=None): """Check simple quota limits. 
For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ _valid_method_call_check_resources(values, 'check') # Ensure no value is less than zero unders = [key for key, val in values.items() if val < 0] if unders: raise exception.InvalidQuotaValue(unders=sorted(unders)) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user id is None, then we use the user_id in context if user_id is None: user_id = context.user_id # Get the applicable quotas project_quotas = db.quota_get_all_by_project(context, project_id) quotas = self._get_quotas(context, resources, values.keys(), has_sync=False, project_id=project_id, project_quotas=project_quotas) user_quotas = self._get_quotas(context, resources, values.keys(), has_sync=False, project_id=project_id, user_id=user_id, project_quotas=project_quotas) # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in values.items() if quotas[key] >= 0 and quotas[key] < val or (user_quotas[key] >= 0 and user_quotas[key] < val)] if overs: headroom = {} for key 
in overs: headroom[key] = min( val for val in (quotas.get(key), project_quotas.get(key)) if val is not None ) raise exception.OverQuota(overs=sorted(overs), quotas=quotas, usages={}, headroom=headroom) def reserve(self, context, resources, deltas, expire=None, project_id=None, user_id=None): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param deltas: A dictionary of the proposed delta changes. :param expire: An optional parameter specifying an expiration time for the reservations. If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. 
""" _valid_method_call_check_resources(deltas, 'reserve') # Set up the reservation expiration if expire is None: expire = CONF.reservation_expire if isinstance(expire, six.integer_types): expire = datetime.timedelta(seconds=expire) if isinstance(expire, datetime.timedelta): expire = timeutils.utcnow() + expire if not isinstance(expire, datetime.datetime): raise exception.InvalidReservationExpiration(expire=expire) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id LOG.debug('Reserving resources using context.project_id: %s', project_id) # If user_id is None, then we use the project_id in context if user_id is None: user_id = context.user_id LOG.debug('Reserving resources using context.user_id: %s', user_id) LOG.debug('Attempting to reserve resources for project %(project_id)s ' 'and user %(user_id)s. Deltas: %(deltas)s', {'project_id': project_id, 'user_id': user_id, 'deltas': deltas}) # Get the applicable quotas. # NOTE(Vek): We're not worried about races at this point. # Yes, the admin may be in the process of reducing # quotas, but that's a pretty rare thing. 
project_quotas = db.quota_get_all_by_project(context, project_id) LOG.debug('Quota limits for project %(project_id)s: ' '%(project_quotas)s', {'project_id': project_id, 'project_quotas': project_quotas}) quotas = self._get_quotas(context, resources, deltas.keys(), has_sync=True, project_id=project_id, project_quotas=project_quotas) LOG.debug('Quotas for project %(project_id)s after resource sync: ' '%(quotas)s', {'project_id': project_id, 'quotas': quotas}) user_quotas = self._get_quotas(context, resources, deltas.keys(), has_sync=True, project_id=project_id, user_id=user_id, project_quotas=project_quotas) LOG.debug('Quotas for project %(project_id)s and user %(user_id)s ' 'after resource sync: %(quotas)s', {'project_id': project_id, 'user_id': user_id, 'quotas': quotas}) # NOTE(Vek): Most of the work here has to be done in the DB # API, because we have to do it in a transaction, # which means access to the session. Since the # session isn't available outside the DBAPI, we # have to do the work there. return db.quota_reserve(context, resources, quotas, user_quotas, deltas, expire, CONF.until_refresh, CONF.max_age, project_id=project_id, user_id=user_id) def commit(self, context, reservations, project_id=None, user_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. 
""" # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user_id is None, then we use the user_id in context if user_id is None: user_id = context.user_id db.reservation_commit(context, reservations, project_id=project_id, user_id=user_id) def rollback(self, context, reservations, project_id=None, user_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user_id is None, then we use the user_id in context if user_id is None: user_id = context.user_id db.reservation_rollback(context, reservations, project_id=project_id, user_id=user_id) def usage_reset(self, context, resources): """Reset the usage records for a particular user on a list of resources. This will force that user's usage records to be refreshed the next time a reservation is made. Note: this does not affect the currently outstanding reservations the user has; those reservations must be committed or rolled back (or expired). :param context: The request context, for access checks. :param resources: A list of the resource names for which the usage must be reset. 
""" # We need an elevated context for the calls to # quota_usage_update() elevated = context.elevated() for resource in resources: try: # Reset the usage to -1, which will force it to be # refreshed db.quota_usage_update(elevated, context.project_id, context.user_id, resource, in_use=-1) except exception.QuotaUsageNotFound: # That means it'll be refreshed anyway pass def destroy_all_by_project_and_user(self, context, project_id, user_id): """Destroy all quotas, usages, and reservations associated with a project and user. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. :param user_id: The ID of the user being deleted. """ db.quota_destroy_all_by_project_and_user(context, project_id, user_id) def destroy_all_by_project(self, context, project_id): """Destroy all quotas, usages, and reservations associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ db.quota_destroy_all_by_project(context, project_id) def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. """ db.reservation_expire(context) class NoopQuotaDriver(object): """Driver that turns quotas calls into no-ops and pretends that quotas for all resources are unlimited. This can be used if you do not wish to have any quota checking. For instance, with nova compute cells, the parent cell should do quota checking, but the child cell should not. 
""" def get_by_project_and_user(self, context, project_id, user_id, resource): """Get a specific quota by project and user.""" # Unlimited return -1 def get_by_project(self, context, project_id, resource): """Get a specific quota by project.""" # Unlimited return -1 def get_by_class(self, context, quota_class, resource): """Get a specific quota by quota class.""" # Unlimited return -1 def get_defaults(self, context, resources): """Given a list of resources, retrieve the default quotas. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. """ quotas = {} for resource in resources.values(): quotas[resource.name] = -1 return quotas def get_class_quotas(self, context, resources, quota_class, defaults=True): """Given a list of resources, retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param quota_class: The name of the quota class to return quotas for. :param defaults: If True, the default value will be reported if there is no specific value for the resource. """ quotas = {} for resource in resources.values(): quotas[resource.name] = -1 return quotas def _get_noop_quotas(self, resources, usages=None, remains=False): quotas = {} for resource in resources.values(): quotas[resource.name] = {} quotas[resource.name]['limit'] = -1 if usages: quotas[resource.name]['in_use'] = -1 quotas[resource.name]['reserved'] = -1 if remains: quotas[resource.name]['remains'] = -1 return quotas def get_user_quotas(self, context, resources, project_id, user_id, quota_class=None, defaults=True, usages=True): """Given a list of resources, retrieve the quotas for the given user and project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. 
:param user_id: The ID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. """ return self._get_noop_quotas(resources, usages=usages) def get_project_quotas(self, context, resources, project_id, quota_class=None, defaults=True, usages=True, remains=False): """Given a list of resources, retrieve the quotas for the given project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. :param remains: If True, the current remains of the project will will be returned. """ return self._get_noop_quotas(resources, usages=usages, remains=remains) def get_settable_quotas(self, context, resources, project_id, user_id=None): """Given a list of resources, retrieve the range of settable quotas for the given user or project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. 
""" quotas = {} for resource in resources.values(): quotas[resource.name] = {'minimum': 0, 'maximum': -1} return quotas def limit_check(self, context, resources, values, project_id=None, user_id=None): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ pass def reserve(self, context, resources, deltas, expire=None, project_id=None, user_id=None): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param deltas: A dictionary of the proposed delta changes. 
:param expire: An optional parameter specifying an expiration time for the reservations. If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ return [] def commit(self, context, reservations, project_id=None, user_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ pass def rollback(self, context, reservations, project_id=None, user_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ pass def usage_reset(self, context, resources): """Reset the usage records for a particular user on a list of resources. This will force that user's usage records to be refreshed the next time a reservation is made. 
Note: this does not affect the currently outstanding reservations the user has; those reservations must be committed or rolled back (or expired). :param context: The request context, for access checks. :param resources: A list of the resource names for which the usage must be reset. """ pass def destroy_all_by_project_and_user(self, context, project_id, user_id): """Destroy all quotas, usages, and reservations associated with a project and user. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. :param user_id: The ID of the user being deleted. """ pass def destroy_all_by_project(self, context, project_id): """Destroy all quotas, usages, and reservations associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ pass def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. """ pass class BaseResource(object): """Describe a single resource for quota checking.""" def __init__(self, name, flag=None): """Initializes a Resource. :param name: The name of the resource, i.e., "instances". :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ self.name = name self.flag = flag def quota(self, driver, context, **kwargs): """Given a driver and context, obtain the quota for this resource. :param driver: A quota driver. :param context: The request context. :param project_id: The project to obtain the quota value for. If not provided, it is taken from the context. If it is given as None, no project-specific quota will be searched for. :param quota_class: The quota class corresponding to the project, or for which the quota is to be looked up. If not provided, it is taken from the context. 
If it is given as None, no quota class-specific quota will be searched for. Note that the quota class defaults to the value in the context, which may not correspond to the project if project_id is not the same as the one in the context. """ # Get the project ID project_id = kwargs.get('project_id', context.project_id) # Ditto for the quota class quota_class = kwargs.get('quota_class', context.quota_class) # Look up the quota for the project if project_id: try: return driver.get_by_project(context, project_id, self.name) except exception.ProjectQuotaNotFound: pass # Try for the quota class if quota_class: try: return driver.get_by_class(context, quota_class, self.name) except exception.QuotaClassNotFound: pass # OK, return the default return self.default @property def default(self): """Return the default value of the quota.""" return CONF[self.flag] if self.flag else -1 class ReservableResource(BaseResource): """Describe a reservable resource.""" valid_method = 'reserve' def __init__(self, name, sync, flag=None): """Initializes a ReservableResource. Reservable resources are those resources which directly correspond to objects in the database, i.e., instances, cores, etc. Usage synchronization function must be associated with each object. This function will be called to determine the current counts of one or more resources. This association is done in database backend. The usage synchronization function will be passed three arguments: an admin context, the project ID, and an opaque session object, which should in turn be passed to the underlying database function. Synchronization functions should return a dictionary mapping resource names to the current in_use count for those resources; more than one resource and resource count may be returned. Note that synchronization functions may be associated with more than one ReservableResource. :param name: The name of the resource, i.e., "volumes". 
:param sync: A dbapi methods name which returns a dictionary to resynchronize the in_use count for one or more resources, as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ super(ReservableResource, self).__init__(name, flag=flag) self.sync = sync class AbsoluteResource(BaseResource): """Describe a non-reservable resource.""" valid_method = 'check' class CountableResource(AbsoluteResource): """Describe a resource where the counts aren't based solely on the project ID. """ def __init__(self, name, count, flag=None): """Initializes a CountableResource. Countable resources are those resources which directly correspond to objects in the database, i.e., instances, cores, etc., but for which a count by project ID is inappropriate. A CountableResource must be constructed with a counting function, which will be called to determine the current counts of the resource. The counting function will be passed the context, along with the extra positional and keyword arguments that are passed to Quota.count(). It should return an integer specifying the count. Note that this counting is not performed in a transaction-safe manner. This resource class is a temporary measure to provide required functionality, until a better approach to solving this problem can be evolved. :param name: The name of the resource, i.e., "instances". :param count: A callable which returns the count of the resource. The arguments passed are as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. 
""" super(CountableResource, self).__init__(name, flag=flag) self.count = count class QuotaEngine(object): """Represent the set of recognized quotas.""" def __init__(self, quota_driver_class=None): """Initialize a Quota object.""" self._resources = {} self._driver_cls = quota_driver_class self.__driver = None @property def _driver(self): if self.__driver: return self.__driver if not self._driver_cls: self._driver_cls = CONF.quota_driver if isinstance(self._driver_cls, six.string_types): self._driver_cls = importutils.import_object(self._driver_cls) self.__driver = self._driver_cls return self.__driver def __contains__(self, resource): return resource in self._resources def __getitem__(self, key): if key in self._resources: return self._resources[key] def register_resource(self, resource): """Register a resource.""" self._resources[resource.name] = resource def register_resources(self, resources): """Register a list of resources.""" for resource in resources: self.register_resource(resource) def get_by_project_and_user(self, context, project_id, user_id, resource): """Get a specific quota by project and user.""" return self._driver.get_by_project_and_user(context, project_id, user_id, resource) def get_by_project(self, context, project_id, resource): """Get a specific quota by project.""" return self._driver.get_by_project(context, project_id, resource) def get_by_class(self, context, quota_class, resource): """Get a specific quota by quota class.""" return self._driver.get_by_class(context, quota_class, resource) def get_defaults(self, context): """Retrieve the default quotas. :param context: The request context, for access checks. """ return self._driver.get_defaults(context, self._resources) def get_class_quotas(self, context, quota_class, defaults=True): """Retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param quota_class: The name of the quota class to return quotas for. 
:param defaults: If True, the default value will be reported if there is no specific value for the resource. """ return self._driver.get_class_quotas(context, self._resources, quota_class, defaults=defaults) def get_user_quotas(self, context, project_id, user_id, quota_class=None, defaults=True, usages=True): """Retrieve the quotas for the given user and project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. """ return self._driver.get_user_quotas(context, self._resources, project_id, user_id, quota_class=quota_class, defaults=defaults, usages=usages) def get_project_quotas(self, context, project_id, quota_class=None, defaults=True, usages=True, remains=False): """Retrieve the quotas for the given project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. :param remains: If True, the current remains of the project will will be returned. 
""" return self._driver.get_project_quotas(context, self._resources, project_id, quota_class=quota_class, defaults=defaults, usages=usages, remains=remains) def get_settable_quotas(self, context, project_id, user_id=None): """Given a list of resources, retrieve the range of settable quotas for the given user or project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. """ return self._driver.get_settable_quotas(context, self._resources, project_id, user_id=user_id) def count(self, context, resource, *args, **kwargs): """Count a resource. For countable resources, invokes the count() function and returns its result. Arguments following the context and resource are passed directly to the count function declared by the resource. :param context: The request context, for access checks. :param resource: The name of the resource, as a string. """ # Get the resource res = self._resources.get(resource) if not res or not hasattr(res, 'count'): raise exception.QuotaResourceUnknown(unknown=[resource]) return res.count(context, *args, **kwargs) def limit_check(self, context, project_id=None, user_id=None, **values): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. The values to check are given as keyword arguments, where the key identifies the specific quota limit to check, and the value is the proposed value. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. 
:param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ return self._driver.limit_check(context, self._resources, values, project_id=project_id, user_id=user_id) def reserve(self, context, expire=None, project_id=None, user_id=None, **deltas): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. The deltas are given as keyword arguments, and current usage and other reservations are factored into the quota check. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param expire: An optional parameter specifying an expiration time for the reservations. If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. 
""" reservations = self._driver.reserve(context, self._resources, deltas, expire=expire, project_id=project_id, user_id=user_id) LOG.debug("Created reservations %s", reservations) return reservations def commit(self, context, reservations, project_id=None, user_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ try: self._driver.commit(context, reservations, project_id=project_id, user_id=user_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception(_LE("Failed to commit reservations %s"), reservations) return LOG.debug("Committed reservations %s", reservations) def rollback(self, context, reservations, project_id=None, user_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ try: self._driver.rollback(context, reservations, project_id=project_id, user_id=user_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception(_LE("Failed to roll back reservations %s"), reservations) return LOG.debug("Rolled back reservations %s", reservations) def usage_reset(self, context, resources): """Reset the usage records for a particular user on a list of resources. 
This will force that user's usage records to be refreshed the next time a reservation is made. Note: this does not affect the currently outstanding reservations the user has; those reservations must be committed or rolled back (or expired). :param context: The request context, for access checks. :param resources: A list of the resource names for which the usage must be reset. """ self._driver.usage_reset(context, resources) def destroy_all_by_project_and_user(self, context, project_id, user_id): """Destroy all quotas, usages, and reservations associated with a project and user. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. :param user_id: The ID of the user being deleted. """ self._driver.destroy_all_by_project_and_user(context, project_id, user_id) def destroy_all_by_project(self, context, project_id): """Destroy all quotas, usages, and reservations associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ self._driver.destroy_all_by_project(context, project_id) def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. 
""" self._driver.expire(context) @property def resources(self): return sorted(self._resources.keys()) def _keypair_get_count_by_user(*args, **kwargs): """Helper method to avoid referencing objects.KeyPairList on import.""" return objects.KeyPairList.get_count_by_user(*args, **kwargs) def _server_group_count_members_by_user(context, group, user_id): """Helper method to avoid referencing objects.InstanceGroup on import.""" return group.count_members_by_user(user_id) QUOTAS = QuotaEngine() resources = [ ReservableResource('instances', '_sync_instances', 'quota_instances'), ReservableResource('cores', '_sync_instances', 'quota_cores'), ReservableResource('ram', '_sync_instances', 'quota_ram'), ReservableResource('security_groups', '_sync_security_groups', 'quota_security_groups'), ReservableResource('floating_ips', '_sync_floating_ips', 'quota_floating_ips'), ReservableResource('fixed_ips', '_sync_fixed_ips', 'quota_fixed_ips'), AbsoluteResource('metadata_items', 'quota_metadata_items'), AbsoluteResource('injected_files', 'quota_injected_files'), AbsoluteResource('injected_file_content_bytes', 'quota_injected_file_content_bytes'), AbsoluteResource('injected_file_path_bytes', 'quota_injected_file_path_length'), CountableResource('security_group_rules', db.security_group_rule_count_by_group, 'quota_security_group_rules'), CountableResource('key_pairs', _keypair_get_count_by_user, 'quota_key_pairs'), ReservableResource('server_groups', '_sync_server_groups', 'quota_server_groups'), CountableResource('server_group_members', _server_group_count_members_by_user, 'quota_server_group_members'), ] QUOTAS.register_resources(resources) def _valid_method_call_check_resource(name, method): if name not in QUOTAS: raise exception.InvalidQuotaMethodUsage(method=method, res=name) res = QUOTAS[name] if res.valid_method != method: raise exception.InvalidQuotaMethodUsage(method=method, res=name) def _valid_method_call_check_resources(resource, method): """A method to check whether the 
resource can use the quota method.""" for name in resource.keys(): _valid_method_call_check_resource(name, method) nova-13.0.0/nova/consoleauth/0000775000567000056710000000000012701410205017156 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/consoleauth/__init__.py0000664000567000056710000000165112701410011021265 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module to authenticate Consoles.""" from oslo_config import cfg consoleauth_topic_opt = cfg.StrOpt('consoleauth_topic', default='consoleauth', help='The topic console auth proxy nodes listen on') CONF = cfg.CONF CONF.register_opt(consoleauth_topic_opt) nova-13.0.0/nova/consoleauth/manager.py0000664000567000056710000001336012701410011021140 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Auth Components for Consoles.""" import time from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from nova import cache_utils from nova.cells import rpcapi as cells_rpcapi from nova.compute import rpcapi as compute_rpcapi import nova.conf from nova.i18n import _LI, _LW from nova import manager from nova import objects LOG = logging.getLogger(__name__) consoleauth_opts = [ cfg.IntOpt('console_token_ttl', default=600, help='How many seconds before deleting tokens') ] CONF = nova.conf.CONF CONF.register_opts(consoleauth_opts) class ConsoleAuthManager(manager.Manager): """Manages token based authentication.""" target = messaging.Target(version='2.1') def __init__(self, scheduler_driver=None, *args, **kwargs): super(ConsoleAuthManager, self).__init__(service_name='consoleauth', *args, **kwargs) self._mc = None self._mc_instance = None self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.cells_rpcapi = cells_rpcapi.CellsAPI() @property def mc(self): if self._mc is None: self._mc = cache_utils.get_client(CONF.console_token_ttl) return self._mc @property def mc_instance(self): if self._mc_instance is None: self._mc_instance = cache_utils.get_client() return self._mc_instance def reset(self): LOG.info(_LI('Reloading compute RPC API')) compute_rpcapi.LAST_VERSION = None self.compute_rpcapi = compute_rpcapi.ComputeAPI() def _get_tokens_for_instance(self, instance_uuid): tokens_str = self.mc_instance.get(instance_uuid.encode('UTF-8')) if not tokens_str: tokens = [] else: tokens = jsonutils.loads(tokens_str) return tokens def authorize_console(self, context, token, console_type, host, port, internal_access_path, instance_uuid, access_url=None): token_dict = {'token': token, 'instance_uuid': instance_uuid, 'console_type': console_type, 'host': host, 'port': port, 'internal_access_path': internal_access_path, 'access_url': access_url, 'last_activity_at': time.time()} data = 
jsonutils.dumps(token_dict) # We need to log the warning message if the token is not cached # successfully, because the failure will cause the console for # instance to not be usable. if not self.mc.set(token.encode('UTF-8'), data): LOG.warning(_LW("Token: %(token)s failed to save into memcached."), {'token': token}) tokens = self._get_tokens_for_instance(instance_uuid) # Remove the expired tokens from cache. token_values = self.mc.get_multi( [tok.encode('UTF-8') for tok in tokens]) tokens = [name for name, value in zip(tokens, token_values) if value is not None] tokens.append(token) if not self.mc_instance.set(instance_uuid.encode('UTF-8'), jsonutils.dumps(tokens)): LOG.warning(_LW("Instance: %(instance_uuid)s failed to save " "into memcached"), {'instance_uuid': instance_uuid}) LOG.info(_LI("Received Token: %(token)s, %(token_dict)s"), {'token': token, 'token_dict': token_dict}) def _validate_token(self, context, token): instance_uuid = token['instance_uuid'] if instance_uuid is None: return False # NOTE(comstud): consoleauth was meant to run in API cells. So, # if cells is enabled, we must call down to the child cell for # the instance. 
if CONF.cells.enable: return self.cells_rpcapi.validate_console_port(context, instance_uuid, token['port'], token['console_type']) instance = objects.Instance.get_by_uuid(context, instance_uuid) return self.compute_rpcapi.validate_console_port(context, instance, token['port'], token['console_type']) def check_token(self, context, token): token_str = self.mc.get(token.encode('UTF-8')) token_valid = (token_str is not None) LOG.info(_LI("Checking Token: %(token)s, %(token_valid)s"), {'token': token, 'token_valid': token_valid}) if token_valid: token = jsonutils.loads(token_str) if self._validate_token(context, token): return token def delete_tokens_for_instance(self, context, instance_uuid): tokens = self._get_tokens_for_instance(instance_uuid) self.mc.delete_multi( [tok.encode('UTF-8') for tok in tokens]) self.mc_instance.delete(instance_uuid.encode('UTF-8')) nova-13.0.0/nova/consoleauth/rpcapi.py0000664000567000056710000000722112701410011021003 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the consoleauth RPC API. """ from oslo_config import cfg import oslo_messaging as messaging from nova import rpc CONF = cfg.CONF rpcapi_cap_opt = cfg.StrOpt('consoleauth', help='Set a version cap for messages sent to consoleauth services') CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels') class ConsoleAuthAPI(object): '''Client side of the consoleauth rpc API. API version history: * 1.0 - Initial version. 
* 1.1 - Added get_backdoor_port() * 1.2 - Added instance_uuid to authorize_console, and delete_tokens_for_instance ... Grizzly and Havana support message version 1.2. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 1.2. * 2.0 - Major API rev for Icehouse ... Icehouse and Juno support message version 2.0. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.0. * 2.1 - Added access_url to authorize_console ... Kilo and Liberty support message version 2.1. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.1. ''' VERSION_ALIASES = { 'grizzly': '1.2', 'havana': '1.2', 'icehouse': '2.0', 'juno': '2.0', 'kilo': '2.1', 'liberty': '2.1', } def __init__(self): super(ConsoleAuthAPI, self).__init__() target = messaging.Target(topic=CONF.consoleauth_topic, version='2.1') version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.consoleauth, CONF.upgrade_levels.consoleauth) self.client = rpc.get_client(target, version_cap=version_cap) def authorize_console(self, ctxt, token, console_type, host, port, internal_access_path, instance_uuid, access_url): # The remote side doesn't return anything, but we want to block # until it completes.' 
msg_args = dict(token=token, console_type=console_type, host=host, port=port, internal_access_path=internal_access_path, instance_uuid=instance_uuid, access_url=access_url) version = '2.1' if not self.client.can_send_version('2.1'): version = '2.0' del msg_args['access_url'] cctxt = self.client.prepare(version=version) return cctxt.call(ctxt, 'authorize_console', **msg_args) def check_token(self, ctxt, token): cctxt = self.client.prepare() return cctxt.call(ctxt, 'check_token', token=token) def delete_tokens_for_instance(self, ctxt, instance_uuid): cctxt = self.client.prepare() return cctxt.cast(ctxt, 'delete_tokens_for_instance', instance_uuid=instance_uuid) nova-13.0.0/nova/netconf.py0000664000567000056710000000320112701407773016654 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket from oslo_config import cfg from oslo_utils import netutils CONF = cfg.CONF netconf_opts = [ cfg.StrOpt('my_ip', default=netutils.get_my_ipv4(), help='IP address of this host'), cfg.StrOpt('my_block_storage_ip', default='$my_ip', help='Block storage IP address of this host'), cfg.StrOpt('host', default=socket.gethostname(), help='Name of this node. This can be an opaque identifier. ' 'It is not necessarily a hostname, FQDN, or IP address. 
' 'However, the node name must be valid within ' 'an AMQP key, and if using ZeroMQ, a valid ' 'hostname, FQDN, or IP address'), cfg.BoolOpt('use_ipv6', default=False, help='Use IPv6'), ] CONF.register_opts(netconf_opts) nova-13.0.0/nova/wsgi/0000775000567000056710000000000012701410205015603 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/wsgi/nova-api.py0000664000567000056710000000234412701407773017712 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """WSGI script for Nova API EXPERIMENTAL support script for running Nova API under Apache2 etc. """ from oslo_config import cfg from oslo_log import log as logging from paste import deploy from nova import config from nova import objects from nova import service # noqa from nova import utils CONF = cfg.CONF config_files = ['/etc/nova/api-paste.ini', '/etc/nova/nova.conf'] config.parse_args([], default_config_files=config_files) logging.setup(CONF, "nova") utils.monkey_patch() objects.register_all() conf = config_files[0] name = "osapi_compute" options = deploy.appconfig('config:%s' % conf, name=name) application = deploy.loadapp('config:%s' % conf, name=name) nova-13.0.0/nova/wsgi/nova-metadata.py0000664000567000056710000000235112701407773020717 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """WSGI script for Nova metadata EXPERIMENTAL support script for running Nova metadata under Apache2 etc. """ from oslo_config import cfg from oslo_log import log as logging from paste import deploy from nova import config from nova import objects from nova import service # noqa from nova import utils CONF = cfg.CONF config_files = ['/etc/nova/api-paste.ini', '/etc/nova/nova.conf'] config.parse_args([], default_config_files=config_files) logging.setup(CONF, "nova") utils.monkey_patch() objects.register_all() conf = config_files[0] name = "metadata" options = deploy.appconfig('config:%s' % conf, name=name) application = deploy.loadapp('config:%s' % conf, name=name) nova-13.0.0/nova/CA/0000775000567000056710000000000012701410205015115 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/CA/projects/0000775000567000056710000000000012701410205016746 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/CA/projects/.placeholder0000664000567000056710000000000012701407773021237 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/CA/projects/.gitignore0000664000567000056710000000000212701407773020746 0ustar jenkinsjenkins00000000000000* nova-13.0.0/nova/CA/geninter.sh0000775000567000056710000000337312701407773017315 0ustar jenkinsjenkins00000000000000#!/bin/bash # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # $1 is the id of the project and $2 is the subject of the cert NAME=$1 SUBJ=$2 mkdir -p projects/$NAME cd projects/$NAME cp "$(dirname $0)/openssl.cnf.tmpl" openssl.cnf sed -i -e s/%USERNAME%/$NAME/g openssl.cnf mkdir -p certs crl newcerts private openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes echo "10" > serial touch index.txt # NOTE(vish): Disabling intermediate ca's because we don't actually need them. # It makes more sense to have each project have its own root ca. # openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes # openssl req -new -sha256 -key private/cakey.pem -out ../../reqs/inter$NAME.csr -batch -subj "$SUBJ" openssl ca -gencrl -config ./openssl.cnf -out crl.pem if [ "`id -u`" != "`grep nova /etc/passwd | cut -d':' -f3`" ]; then sudo chown -R nova:nogroup . fi # cd ../../ # openssl ca -extensions v3_ca -days 365 -out INTER/$NAME/cacert.pem -in reqs/inter$NAME.csr -config openssl.cnf -batch nova-13.0.0/nova/CA/openssl.cnf.tmpl0000664000567000056710000000553212701407773020270 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # OpenSSL configuration file. # # Establish working directory. dir = . [ ca ] default_ca = CA_default [ CA_default ] serial = $dir/serial database = $dir/index.txt new_certs_dir = $dir/newcerts certificate = $dir/cacert.pem private_key = $dir/private/cakey.pem unique_subject = no default_crl_days = 365 default_days = 365 default_md = sha256 preserve = no email_in_dn = no nameopt = default_ca certopt = default_ca policy = policy_match # NOTE(dprince): stateOrProvinceName must be 'supplied' or 'optional' to # work around a stateOrProvince printable string UTF8 mismatch on # RHEL 6 and Fedora 14 (using openssl-1.0.0-4.el6.x86_64 or # openssl-1.0.0d-1.fc14.x86_64) [ policy_match ] countryName = supplied stateOrProvinceName = supplied organizationName = optional organizationalUnitName = optional commonName = supplied emailAddress = optional [ req ] default_bits = 1024 # Size of keys default_keyfile = key.pem # name of generated keys default_md = sha256 # message digest algorithm string_mask = nombstr # permitted characters distinguished_name = req_distinguished_name [ req_distinguished_name ] # Variable name Prompt string #---------------------- ---------------------------------- 0.organizationName = Organization Name (company) organizationalUnitName = Organizational Unit Name (department, division) emailAddress = Email Address emailAddress_max = 40 localityName = Locality Name (city, district) stateOrProvinceName = State or Province Name (full name) countryName = Country Name (2 letter code) countryName_min = 2 countryName_max = 2 commonName = Common Name (hostname, 
IP, or your name) commonName_max = 64 # Default values for the above, for consistency and less typing. # Variable name Value #------------------------------ ------------------------------ 0.organizationName_default = NOVA %USERNAME% localityName_default = Mountain View stateOrProvinceName_default = California countryName_default = US [ v3_ca ] basicConstraints = CA:TRUE subjectKeyIdentifier = hash authorityKeyIdentifier = keyid:always,issuer:always [ v3_req ] basicConstraints = CA:FALSE subjectKeyIdentifier = hash nova-13.0.0/nova/CA/private/0000775000567000056710000000000012701410205016567 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/CA/private/.placeholder0000664000567000056710000000000012701407773021060 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/CA/genrootca.sh0000775000567000056710000000226012701407773017455 0ustar jenkinsjenkins00000000000000#!/bin/bash # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. if [ -f "cacert.pem" ]; then echo "Not installing, it's already done." 
else cp "$(dirname $0)/openssl.cnf.tmpl" openssl.cnf sed -i -e s/%USERNAME%/ROOT/g openssl.cnf mkdir -p certs crl newcerts private openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes touch index.txt echo "10" > serial openssl ca -gencrl -config ./openssl.cnf -out crl.pem fi nova-13.0.0/nova/CA/newcerts/0000775000567000056710000000000012701410205016747 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/CA/newcerts/.placeholder0000664000567000056710000000000012701407773021240 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/CA/reqs/0000775000567000056710000000000012701410205016067 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/CA/reqs/.placeholder0000664000567000056710000000000012701407773020360 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/CA/reqs/.gitignore0000664000567000056710000000000212701407773020067 0ustar jenkinsjenkins00000000000000* nova-13.0.0/nova/CA/.gitignore0000664000567000056710000000017112701407773017124 0ustar jenkinsjenkins00000000000000index.txt index.txt.old index.txt.attr index.txt.attr.old cacert.pem serial serial.old openssl.cnf private/* newcerts/* nova-13.0.0/nova/tests/0000775000567000056710000000000012701410205015774 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/uuidsentinel.py0000664000567000056710000000202512701407773021075 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys class UUIDSentinels(object): def __init__(self): from oslo_utils import uuidutils self._uuid_module = uuidutils self._sentinels = {} def __getattr__(self, name): if name.startswith('_'): raise ValueError('Sentinels must not start with _') if name not in self._sentinels: self._sentinels[name] = str(self._uuid_module.generate_uuid()) return self._sentinels[name] sys.modules[__name__] = UUIDSentinels() nova-13.0.0/nova/tests/live_migration/0000775000567000056710000000000012701410205021004 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/live_migration/hooks/0000775000567000056710000000000012701410205022127 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/live_migration/hooks/utils.sh0000775000567000056710000000417012701407773023650 0ustar jenkinsjenkins00000000000000#!/bin/bash function run_tempest { local message=$1 sudo -H -u tempest tox -eall -- --concurrency=$TEMPEST_CONCURRENCY live_migration exitcode=$? if [[ $exitcode -ne 0 ]]; then die $LINENO "$message failure" fi } function populate_start_script { SCREEN_NAME=${SCREEN_NAME:-stack} DEST=${DEST:-/opt/stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} ENABLED_SERVICES=${ENABLED_SERVICES:-n-cpu,g-api,c-vol} LIBVIRT_GROUP=${LIBVIRT_GROUP:-libvirtd} TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} LOGDAYS=${LOGDAYS:-7} CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT") #creates script for starting process without screen and copies it to all # nodes # # args: # $1 - service name to start # $2 - command to execute # $3 - group to run under cat > /tmp/start_process.sh <&"\$REAL_LOG_FILE" 2>&1 ln -sf "\$REAL_LOG_FILE" \$LOGDIR/\$service.log export PYTHONUNBUFFERED=1 fi if [[ -n "\$sg" ]]; then setsid sg \$sg -c "\$command" & echo \$! >\$SERVICE_DIR/\$SCREEN_NAME/\$service.pid else setsid \$command & echo \$! 
>\$SERVICE_DIR/\$SCREEN_NAME/\$service.pid fi exit 0 EOF chmod +x /tmp/start_process.sh $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/start_process.sh dest=/tmp/start_process.sh owner=$STACK_USER group=$STACK_USER mode=0777" $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ls -la /tmp/start_process.sh" } function stop { local target=$1 local service=$2 $ANSIBLE $target --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a " executable=/bin/bash BASE\=$BASE source $BASE/new/devstack/functions-common ENABLED_SERVICES\=$ENABLED_SERVICES SCREEN_NAME\=$SCREEN_NAME SERVICE_DIR\=$SERVICE_DIR stop_process $service " } nova-13.0.0/nova/tests/live_migration/hooks/run_tests.sh0000775000567000056710000000311412701407773024533 0ustar jenkinsjenkins00000000000000#!/bin/bash # Live migration dedicated ci job will be responsible for testing different # environments based on underlying storage, used for ephemerals. # This hook allows to inject logic of environment reconfiguration in ci job. # Base scenario for this would be: # # 1. test with all local storage (use default for volumes) # 2. test with NFS for root + ephemeral disks # 3. test with Ceph for root + ephemeral disks # 4. test with Ceph for volumes and root + ephemeral disk set -xe cd $BASE/new/tempest source $BASE/new/devstack/functions source $BASE/new/devstack/functions-common source $WORKSPACE/devstack-gate/functions.sh source $BASE/new/nova/nova/tests/live_migration/hooks/utils.sh source $BASE/new/nova/nova/tests/live_migration/hooks/nfs.sh source $BASE/new/nova/nova/tests/live_migration/hooks/ceph.sh primary_node=$(cat /etc/nodepool/primary_node_private) SUBNODES=$(cat /etc/nodepool/sub_nodes_private) SERVICE_HOST=$primary_node STACK_USER=${STACK_USER:-stack} populate_start_script echo '1. test with all local storage (use default for volumes)' run_tempest "block migration test" echo '2. 
test with NFS for root + ephemeral disks' nfs_setup nfs_configure_tempest nfs_verify_setup run_tempest "NFS shared storage test" nfs_teardown echo '3. test with Ceph for root + ephemeral disks' source $BASE/new/devstack/lib/ceph #reset output set -xe setup_ceph_cluster configure_and_start_glance configure_and_start_nova run_tempest "Ceph nova&glance test" echo '4. test with Ceph for volumes and root + ephemeral disk' configure_and_start_cinder run_tempest "Ceph nova&glance&cinder test"nova-13.0.0/nova/tests/live_migration/hooks/ceph.sh0000775000567000056710000004031112701407773023424 0ustar jenkinsjenkins00000000000000#!/bin/bash CEPH_REPLICAS=2 function setup_ceph_cluster { install_ceph_full configure_ceph_local echo "copy ceph.conf and admin keyring to compute only nodes" ls -la /etc/ceph sudo cp /etc/ceph/ceph.conf /tmp/ceph.conf sudo chown ${STACK_USER}:${STACK_USER} /tmp/ceph.conf $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.conf dest=/etc/ceph/ceph.conf owner=root group=root" sudo rm -f /tmp/ceph.conf sudo cp /etc/ceph/ceph.client.admin.keyring /tmp/ceph.client.admin.keyring sudo chown ${STACK_USER}:${STACK_USER} /tmp/ceph.client.admin.keyring sudo chmod 644 /tmp/ceph.client.admin.keyring ls -la /tmp $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.admin.keyring dest=/etc/ceph/ceph.client.admin.keyring owner=root group=root" sudo rm -f /tmp/ceph.client.admin.keyring echo "check result of copying files" $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ls -la /etc/ceph" echo "start ceph-mon" sudo initctl emit ceph-mon id=$(hostname) echo "start ceph-osd" sudo start ceph-osd id=${OSD_ID} echo "check ceph-osd before second node addition" wait_for_ceph_up configure_ceph_remote echo "check ceph-osd tree" wait_for_ceph_up } function install_ceph_full { if uses_debs; then $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m apt \ -a "name=ceph state=present" elif 
is_fedora; then $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m yum \ -a "name=ceph state=present" fi } function configure_ceph_local { sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp} # create ceph monitor initial key and directory sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) \ --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) \ --cap mon 'allow *' sudo mkdir /var/lib/ceph/mon/ceph-$(hostname) # create a default ceph configuration file sudo tee ${CEPH_CONF_FILE} > /dev/null < /dev/null sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring #copy cinder keyring to compute only node sudo cp /etc/ceph/ceph.client.cinder.keyring /tmp/ceph.client.cinder.keyring sudo chown stack:stack /tmp/ceph.client.cinder.keyring $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.cinder.keyring dest=/etc/ceph/ceph.client.cinder.keyring" sudo rm -f /tmp/ceph.client.cinder.keyring sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS} if [[ $CEPH_REPLICAS -ne 1 ]]; then sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID} fi } function configure_and_start_nova { _ceph_configure_nova #import secret to libvirt _populate_libvirt_secret echo 'check compute processes before restart' $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute" #stop nova-compute stop 'all' 'n-cpu' echo 'check processes after compute stop' $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute" # restart local nova-compute sudo -H -u $STACK_USER bash -c "/tmp/start_process.sh n-cpu '/usr/local/bin/nova-compute --config-file /etc/nova/nova.conf' libvirtd" # restart remote nova-compute for SUBNODE in $SUBNODES ; do ssh $SUBNODE "sudo -H -u $STACK_USER bash -c '/tmp/start_process.sh n-cpu \"/usr/local/bin/nova-compute --config-file 
/etc/nova/nova.conf\" libvirtd'" done $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute" } function _ceph_configure_cinder { sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP} sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS} if [[ $CEPH_REPLICAS -ne 1 ]]; then sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID} fi CINDER_CONF=${CINDER_CONF:-/etc/cinder/cinder.conf} $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=volume_backend_name value=ceph" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=volume_driver value=cinder.volume.drivers.rbd.RBDDriver" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_ceph_conf value=$CEPH_CONF_FILE" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_pool value=$CINDER_CEPH_POOL" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_user value=$CINDER_CEPH_USER" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_uuid value=$CINDER_CEPH_UUID" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_flatten_volume_from_snapshot value=False" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_max_clone_depth value=5" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=glance_api_version value=2" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=default_volume_type value=ceph" 
$ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=enabled_backends value=ceph" } function configure_and_start_cinder { _ceph_configure_cinder stop 'primary' 'c-vol' sudo -H -u $STACK_USER bash -c "/tmp/start_process.sh c-vol '/usr/local/bin/cinder-volume --config-file /etc/cinder/cinder.conf'" source $BASE/new/devstack/openrc export OS_USERNAME=admin export OS_PROJECT_NAME=admin lvm_type=$(cinder type-list | awk -F "|" 'NR==4{ print $2}') cinder type-delete $lvm_type openstack volume type create --os-volume-api-version 1 --property volume_backend_name="ceph" ceph } function _populate_libvirt_secret { cat > /tmp/secret.xml < ${CINDER_CEPH_UUID} client.${CINDER_CEPH_USER} secret EOF $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/secret.xml dest=/tmp/secret.xml" $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "virsh secret-define --file /tmp/secret.xml" local secret=$(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER}) # TODO(tdurakov): remove this escaping as https://github.com/ansible/ansible/issues/13862 fixed secret=${secret//=/'\='} $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $secret" $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/tmp/secret.xml state=absent" } nova-13.0.0/nova/tests/live_migration/hooks/nfs.sh0000775000567000056710000000515412701407773023301 0ustar jenkinsjenkins00000000000000#!/bin/bash function nfs_setup { if uses_debs; then module=apt elif is_fedora; then module=yum fi $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m $module \ -a "name=nfs-common state=present" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m $module \ -a "name=nfs-kernel-server state=present" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-User value=nova" 
$ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-Group value=nova" for SUBNODE in $SUBNODES ; do $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m lineinfile -a "dest=/etc/exports line='/opt/stack/data/nova/instances $SUBNODE(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash)'" done $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "exportfs -a" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=restarted" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m service -a "name=idmapd state=restarted" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 111 -j ACCEPT" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 111 -j ACCEPT" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 2049 -j ACCEPT" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 2049 -j ACCEPT" $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "mount -t nfs4 -o proto\=tcp,port\=2049 $primary_node:/ /opt/stack/data/nova/instances/" } function nfs_configure_tempest { $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$BASE/new/tempest/etc/tempest.conf section=compute-feature-enabled option=block_migration_for_live_migration value=False" } function nfs_verify_setup { $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/opt/stack/data/nova/instances/test_file state=touch" if [ ! 
-e '/opt/stack/data/nova/instances/test_file' ]; then die $LINENO "NFS configuration failure" fi } function nfs_teardown { #teardown nfs shared storage $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "umount -t nfs4 /opt/stack/data/nova/instances/" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=stopped" }nova-13.0.0/nova/tests/__init__.py0000664000567000056710000000000012701407773020113 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/0000775000567000056710000000000012701410205016753 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/consoleauth/0000775000567000056710000000000012701410205021277 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/consoleauth/__init__.py0000664000567000056710000000000012701407773023416 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/consoleauth/test_consoleauth.py0000664000567000056710000002164612701407773025265 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Consoleauth Code. 
""" import mock from mox3 import mox from oslo_utils import timeutils from nova.consoleauth import manager from nova import context from nova import test class ConsoleauthTestCase(test.NoDBTestCase): """Test Case for consoleauth.""" def setUp(self): super(ConsoleauthTestCase, self).setUp() self.manager_api = self.manager = manager.ConsoleAuthManager() self.context = context.get_admin_context() self.instance_uuid = '00000000-0000-0000-0000-000000000000' def test_reset(self): with mock.patch('nova.compute.rpcapi.ComputeAPI') as mock_rpc: old_rpcapi = self.manager_api.compute_rpcapi self.manager_api.reset() mock_rpc.assert_called_once_with() self.assertNotEqual(old_rpcapi, self.manager_api.compute_rpcapi) @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_tokens_expire(self, mock_get): mock_get.return_value = None # Test that tokens expire correctly. self.useFixture(test.TimeOverride()) token = u'mytok' self.flags(console_token_ttl=1) self._stub_validate_console_port(True) self.manager_api.authorize_console(self.context, token, 'novnc', '127.0.0.1', '8080', 'host', self.instance_uuid) self.assertIsNotNone(self.manager_api.check_token(self.context, token)) timeutils.advance_time_seconds(1) self.assertIsNone(self.manager_api.check_token(self.context, token)) def _stub_validate_console_port(self, result): def fake_validate_console_port(ctxt, instance, port, console_type): return result self.stubs.Set(self.manager.compute_rpcapi, 'validate_console_port', fake_validate_console_port) @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_multiple_tokens_for_instance(self, mock_get): mock_get.return_value = None tokens = [u"token" + str(i) for i in range(10)] self._stub_validate_console_port(True) for token in tokens: self.manager_api.authorize_console(self.context, token, 'novnc', '127.0.0.1', '8080', 'host', self.instance_uuid) for token in tokens: self.assertIsNotNone( self.manager_api.check_token(self.context, token)) def 
test_delete_tokens_for_instance(self): tokens = [u"token" + str(i) for i in range(10)] for token in tokens: self.manager_api.authorize_console(self.context, token, 'novnc', '127.0.0.1', '8080', 'host', self.instance_uuid) self.manager_api.delete_tokens_for_instance(self.context, self.instance_uuid) stored_tokens = self.manager._get_tokens_for_instance( self.instance_uuid) self.assertEqual(len(stored_tokens), 0) for token in tokens: self.assertIsNone( self.manager_api.check_token(self.context, token)) @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_wrong_token_has_port(self, mock_get): mock_get.return_value = None token = u'mytok' self._stub_validate_console_port(False) self.manager_api.authorize_console(self.context, token, 'novnc', '127.0.0.1', '8080', 'host', instance_uuid=self.instance_uuid) self.assertIsNone(self.manager_api.check_token(self.context, token)) def test_delete_expired_tokens(self): self.useFixture(test.TimeOverride()) token = u'mytok' self.flags(console_token_ttl=1) self._stub_validate_console_port(True) self.manager_api.authorize_console(self.context, token, 'novnc', '127.0.0.1', '8080', 'host', self.instance_uuid) timeutils.advance_time_seconds(1) self.assertIsNone(self.manager_api.check_token(self.context, token)) token1 = u'mytok2' self.manager_api.authorize_console(self.context, token1, 'novnc', '127.0.0.1', '8080', 'host', self.instance_uuid) stored_tokens = self.manager._get_tokens_for_instance( self.instance_uuid) # when trying to store token1, expired token is removed fist. 
self.assertEqual(len(stored_tokens), 1) self.assertEqual(stored_tokens[0], token1) class ControlauthMemcacheEncodingTestCase(test.NoDBTestCase): def setUp(self): super(ControlauthMemcacheEncodingTestCase, self).setUp() self.manager = manager.ConsoleAuthManager() self.context = context.get_admin_context() self.u_token = u"token" self.u_instance = u"instance" def test_authorize_console_encoding(self): with test.nested( mock.patch.object(self.manager.mc_instance, 'set', return_value=True), mock.patch.object(self.manager.mc_instance, 'get', return_value='["token"]'), mock.patch.object(self.manager.mc, 'set', return_value=True), mock.patch.object(self.manager.mc, 'get', return_value=None), mock.patch.object(self.manager.mc, 'get_multi', return_value=["token1"]), ) as ( mock_instance_set, mock_instance_get, mock_set, mock_get, mock_get_multi): self.manager.authorize_console(self.context, self.u_token, 'novnc', '127.0.0.1', '8080', 'host', self.u_instance) mock_set.assert_has_calls([mock.call('token', mock.ANY)]) mock_instance_get.assert_has_calls([mock.call('instance')]) mock_get_multi.assert_has_calls([mock.call(['token'])]) mock_instance_set.assert_has_calls( [mock.call('instance', mock.ANY)]) def test_check_token_encoding(self): self.mox.StubOutWithMock(self.manager.mc, "get") self.manager.mc.get(mox.IsA(str)).AndReturn(None) self.mox.ReplayAll() self.manager.check_token(self.context, self.u_token) def test_delete_tokens_for_instance_encoding(self): with test.nested( mock.patch.object(self.manager.mc_instance, 'get', return_value='["token"]'), mock.patch.object(self.manager.mc_instance, 'delete', return_value=True), mock.patch.object(self.manager.mc, 'get'), mock.patch.object(self.manager.mc, 'delete_multi', return_value=True), ) as ( mock_instance_get, mock_instance_delete, mock_get, mock_delete_multi): self.manager.delete_tokens_for_instance(self.context, self.u_instance) mock_instance_get.assert_has_calls([mock.call('instance')]) 
mock_instance_delete.assert_has_calls([mock.call('instance')]) mock_delete_multi.assert_has_calls([mock.call(['token'])]) class CellsConsoleauthTestCase(ConsoleauthTestCase): """Test Case for consoleauth w/ cells enabled.""" def setUp(self): super(CellsConsoleauthTestCase, self).setUp() self.flags(enable=True, group='cells') def _stub_validate_console_port(self, result): def fake_validate_console_port(ctxt, instance_uuid, console_port, console_type): return result self.stubs.Set(self.manager.cells_rpcapi, 'validate_console_port', fake_validate_console_port) nova-13.0.0/nova/tests/unit/consoleauth/test_rpcapi.py0000664000567000056710000000661112701407773024212 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit Tests for nova.consoleauth.rpcapi """ import mock from oslo_config import cfg import six from nova.consoleauth import rpcapi as consoleauth_rpcapi from nova import context from nova import test CONF = cfg.CONF class ConsoleAuthRpcAPITestCase(test.NoDBTestCase): DROPPED_ARG = object() def _test_consoleauth_api(self, method, **kwargs): do_cast = kwargs.pop('_do_cast', False) ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = consoleauth_rpcapi.ConsoleAuthAPI() self.assertIsNotNone(rpcapi.client) self.assertEqual(rpcapi.client.target.topic, CONF.consoleauth_topic) orig_prepare = rpcapi.client.prepare version = kwargs.pop('version', None) rpc_kwargs = {k: v for k, v in six.iteritems(kwargs) if v is not self.DROPPED_ARG} with test.nested( mock.patch.object(rpcapi.client, 'cast' if do_cast else 'call'), mock.patch.object(rpcapi.client, 'prepare'), mock.patch.object(rpcapi.client, 'can_send_version'), ) as ( rpc_mock, prepare_mock, csv_mock ): prepare_mock.return_value = rpcapi.client rpc_mock.return_value = None if do_cast else 'foo' def fake_csv(v): if version: return orig_prepare( version_cap=version).can_send_version(version=v) else: return orig_prepare().can_send_version() csv_mock.side_effect = fake_csv retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(retval, rpc_mock.return_value) if version: prepare_mock.assert_called_once_with(version=version) else: prepare_mock.assert_called_once_with() rpc_mock.assert_called_once_with(ctxt, method, **rpc_kwargs) def test_authorize_console(self): self._test_consoleauth_api('authorize_console', token='token', console_type='ctype', host='h', port='p', internal_access_path='iap', instance_uuid="instance", access_url=self.DROPPED_ARG, version='2.0') def test_authorize_console_access_url(self): self._test_consoleauth_api('authorize_console', token='token', console_type='ctype', host='h', port='p', internal_access_path='iap', instance_uuid="instance", access_url="fake_access_url", 
version='2.1') def test_check_token(self): self._test_consoleauth_api('check_token', token='t') def test_delete_tokens_for_instnace(self): self._test_consoleauth_api('delete_tokens_for_instance', _do_cast=True, instance_uuid="instance") nova-13.0.0/nova/tests/unit/test_api_validation.py0000664000567000056710000014276512701407773023406 0ustar jenkinsjenkins00000000000000# Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import re import fixtures from jsonschema import exceptions as jsonschema_exc import six from nova.api.openstack import api_version_request as api_version from nova.api import validation from nova.api.validation import parameter_types from nova.api.validation import validators from nova import exception from nova import test class FakeRequest(object): api_version_request = api_version.APIVersionRequest("2.1") environ = {} legacy_v2 = False def is_legacy_v2(self): return self.legacy_v2 class ValidationRegex(test.NoDBTestCase): def test_cell_names(self): cellre = re.compile(parameter_types.valid_cell_name_regex.regex) self.assertTrue(cellre.search('foo')) self.assertFalse(cellre.search('foo.bar')) self.assertFalse(cellre.search('foo@bar')) self.assertFalse(cellre.search('foo!bar')) self.assertFalse(cellre.search(' foo!bar')) self.assertFalse(cellre.search('\nfoo!bar')) def test_build_regex_range(self): # this is much easier to think about if we only use the ascii # subset because it's a printable range we 
can think # about. The algorithm works for all ranges. def _get_all_chars(): for i in range(0x7F): yield six.unichr(i) self.useFixture(fixtures.MonkeyPatch( 'nova.api.validation.parameter_types._get_all_chars', _get_all_chars)) r = parameter_types._build_regex_range(ws=False) self.assertEqual(r, re.escape('!') + '-' + re.escape('~')) # if we allow whitespace the range starts earlier r = parameter_types._build_regex_range(ws=True) self.assertEqual(r, re.escape(' ') + '-' + re.escape('~')) # excluding a character will give us 2 ranges r = parameter_types._build_regex_range(ws=True, exclude=['A']) self.assertEqual(r, re.escape(' ') + '-' + re.escape('@') + 'B' + '-' + re.escape('~')) # inverting which gives us all the initial unprintable characters. r = parameter_types._build_regex_range(ws=False, invert=True) self.assertEqual(r, re.escape('\x00') + '-' + re.escape(' ')) # excluding characters that create a singleton. Naively this would be: # ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural. r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C']) self.assertEqual(r, re.escape(' ') + '-' + re.escape('@') + 'B' + 'D' + '-' + re.escape('~')) # ws=True means the positive regex has printable whitespaces, # so the inverse will not. The inverse will include things we # exclude. 
r = parameter_types._build_regex_range( ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True) self.assertEqual(r, re.escape('\x00') + '-' + re.escape('\x1f') + 'A-CZ') class APIValidationTestCase(test.NoDBTestCase): def check_validation_error(self, method, body, expected_detail, req=None): if not req: req = FakeRequest() try: method(body=body, req=req,) except exception.ValidationError as ex: self.assertEqual(400, ex.kwargs['code']) if not re.match(expected_detail, ex.kwargs['detail']): self.assertEqual(expected_detail, ex.kwargs['detail'], 'Exception details did not match expected') except Exception as ex: self.fail('An unexpected exception happens: %s' % ex) else: self.fail('Any exception does not happen.') class FormatCheckerTestCase(test.NoDBTestCase): def test_format_checker_failed(self): format_checker = validators.FormatChecker() exc = self.assertRaises(jsonschema_exc.FormatError, format_checker.check, " ", "name") self.assertIsInstance(exc.cause, exception.InvalidName) self.assertEqual("An invalid 'name' value was provided. The name must " "be: printable characters. " "Can not start or end with whitespace.", exc.cause.format_message()) def test_format_checker_failed_with_non_string(self): checks = ["name", "name_with_leading_trailing_spaces", "cell_name", "cell_name_with_leading_trailing_spaces"] format_checker = validators.FormatChecker() for check in checks: exc = self.assertRaises(jsonschema_exc.FormatError, format_checker.check, None, "name") self.assertIsInstance(exc.cause, exception.InvalidName) self.assertEqual("An invalid 'name' value was provided. The name " "must be: printable characters. 
" "Can not start or end with whitespace.", exc.cause.format_message()) class MicroversionsSchemaTestCase(APIValidationTestCase): def setUp(self): super(MicroversionsSchemaTestCase, self).setUp() schema_v21_int = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', } } } schema_v20_str = copy.deepcopy(schema_v21_int) schema_v20_str['properties']['foo'] = {'type': 'string'} @validation.schema(schema_v20_str, '2.0', '2.0') @validation.schema(schema_v21_int, '2.1') def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_v2compatible_request(self): req = FakeRequest() req.legacy_v2 = True self.assertEqual(self.post(body={'foo': 'bar'}, req=req), 'Validation succeeded.') detail = ("Invalid input for field/attribute foo. Value: 1. " "1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail, req=req) def test_validate_v21_request(self): req = FakeRequest() self.assertEqual(self.post(body={'foo': 1}, req=req), 'Validation succeeded.') detail = ("Invalid input for field/attribute foo. Value: bar. " "'bar' is not of type 'integer'") self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail, req=req) def test_validate_v2compatible_request_with_none_min_version(self): schema_none = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer' } } } @validation.schema(schema_none) def post(req, body): return 'Validation succeeded.' req = FakeRequest() req.legacy_v2 = True self.assertEqual('Validation succeeded.', post(body={'foo': 1}, req=req)) detail = ("Invalid input for field/attribute foo. Value: bar. 
" "'bar' is not of type 'integer'") self.check_validation_error(post, body={'foo': 'bar'}, expected_detail=detail, req=req) class RequiredDisableTestCase(APIValidationTestCase): def setUp(self): super(RequiredDisableTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_required_disable(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'abc': 1}, req=FakeRequest()), 'Validation succeeded.') class RequiredEnableTestCase(APIValidationTestCase): def setUp(self): super(RequiredEnableTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'] } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_required_enable(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') def test_validate_required_enable_fails(self): detail = "'foo' is a required property" self.check_validation_error(self.post, body={'abc': 1}, expected_detail=detail) class AdditionalPropertiesEnableTestCase(APIValidationTestCase): def setUp(self): super(AdditionalPropertiesEnableTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'], } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def test_validate_additionalProperties_enable(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': 1, 'ext': 1}, req=FakeRequest()), 'Validation succeeded.') class AdditionalPropertiesDisableTestCase(APIValidationTestCase): def setUp(self): super(AdditionalPropertiesDisableTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'], 'additionalProperties': False, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_additionalProperties_disable(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') def test_validate_additionalProperties_disable_fails(self): detail = "Additional properties are not allowed ('ext' was unexpected)" self.check_validation_error(self.post, body={'foo': 1, 'ext': 1}, expected_detail=detail) class PatternPropertiesTestCase(APIValidationTestCase): def setUp(self): super(PatternPropertiesTestCase, self).setUp() schema = { 'patternProperties': { '^[a-zA-Z0-9]{1,10}$': { 'type': 'string' }, }, 'additionalProperties': False, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def test_validate_patternProperties(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'bar'}, req=FakeRequest())) def test_validate_patternProperties_fails(self): detail = "Additional properties are not allowed ('__' was unexpected)" self.check_validation_error(self.post, body={'__': 'bar'}, expected_detail=detail) detail = "Additional properties are not allowed ('' was unexpected)" self.check_validation_error(self.post, body={'': 'bar'}, expected_detail=detail) detail = ("Additional properties are not allowed ('0123456789a' was" " unexpected)") self.check_validation_error(self.post, body={'0123456789a': 'bar'}, expected_detail=detail) detail = "expected string or buffer" self.check_validation_error(self.post, body={None: 'bar'}, expected_detail=detail) class StringTestCase(APIValidationTestCase): def setUp(self): super(StringTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_string(self): self.assertEqual(self.post(body={'foo': 'abc'}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '0'}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': ''}, req=FakeRequest()), 'Validation succeeded.') def test_validate_string_fails(self): detail = ("Invalid input for field/attribute foo. Value: 1." " 1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1.5." " 1.5 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1.5}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: True." 
" True is not of type 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) class StringLengthTestCase(APIValidationTestCase): def setUp(self): super(StringLengthTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'minLength': 1, 'maxLength': 10, }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_string_length(self): self.assertEqual(self.post(body={'foo': '0'}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '0123456789'}, req=FakeRequest()), 'Validation succeeded.') def test_validate_string_length_fails(self): detail = ("Invalid input for field/attribute foo. Value: ." " '' is too short") self.check_validation_error(self.post, body={'foo': ''}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0123456789a." " '0123456789a' is too long") self.check_validation_error(self.post, body={'foo': '0123456789a'}, expected_detail=detail) class IntegerTestCase(APIValidationTestCase): def setUp(self): super(IntegerTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_integer(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '1'}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '0123456789'}, req=FakeRequest()), 'Validation succeeded.') def test_validate_integer_fails(self): detail = ("Invalid input for field/attribute foo. Value: abc." 
" 'abc' does not match '^[0-9]+$'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'integer', 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0xffff." " '0xffff' does not match '^[0-9]+$'") self.check_validation_error(self.post, body={'foo': '0xffff'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1.0." " 1.0 is not of type 'integer', 'string'") self.check_validation_error(self.post, body={'foo': 1.0}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1.0." " '1.0' does not match '^[0-9]+$'") self.check_validation_error(self.post, body={'foo': '1.0'}, expected_detail=detail) class IntegerRangeTestCase(APIValidationTestCase): def setUp(self): super(IntegerRangeTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', 'minimum': 1, 'maximum': 10, }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_integer_range(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': 10}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '1'}, req=FakeRequest()), 'Validation succeeded.') def test_validate_integer_range_fails(self): detail = ("Invalid input for field/attribute foo. Value: 0." " 0(.0)? is less than the minimum of 1") self.check_validation_error(self.post, body={'foo': 0}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 11." " 11(.0)? 
is greater than the maximum of 10") self.check_validation_error(self.post, body={'foo': 11}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0." " 0(.0)? is less than the minimum of 1") self.check_validation_error(self.post, body={'foo': '0'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 11." " 11(.0)? is greater than the maximum of 10") self.check_validation_error(self.post, body={'foo': '11'}, expected_detail=detail) class BooleanTestCase(APIValidationTestCase): def setUp(self): super(BooleanTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.boolean, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_boolean(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': True}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': False}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'True'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'False'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '1'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '0'}, req=FakeRequest())) def test_validate_boolean_fails(self): enum_boolean = ("[True, 'True', 'TRUE', 'true', '1', 'ON', 'On'," " 'on', 'YES', 'Yes', 'yes'," " False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off'," " 'off', 'NO', 'No', 'no']") detail = ("Invalid input for field/attribute foo. Value: bar." " 'bar' is not one of %s") % enum_boolean self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 2." 
" '2' is not one of %s") % enum_boolean self.check_validation_error(self.post, body={'foo': '2'}, expected_detail=detail) class HostnameTestCase(APIValidationTestCase): def setUp(self): super(HostnameTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.hostname, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_hostname(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'localhost'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'localhost.localdomain.com'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my-host'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my_host'}, req=FakeRequest())) def test_validate_hostname_fails(self): detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1." " 1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: my$host." " 'my$host' does not match '^[a-zA-Z0-9-._]*$'") self.check_validation_error(self.post, body={'foo': 'my$host'}, expected_detail=detail) class HostnameIPaddressTestCase(APIValidationTestCase): def setUp(self): super(HostnameIPaddressTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.hostname_or_ip_address, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def test_validate_hostname_or_ip_address(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'localhost'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'localhost.localdomain.com'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my-host'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my_host'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '192.168.10.100'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '2001:db8::9abc'}, req=FakeRequest())) def test_validate_hostname_or_ip_address_fails(self): detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1." " 1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: my$host." " 'my$host' does not match '^[a-zA-Z0-9-_.:]*$'") self.check_validation_error(self.post, body={'foo': 'my$host'}, expected_detail=detail) class CellNameTestCase(APIValidationTestCase): def setUp(self): super(CellNameTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.cell_name, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def test_validate_name(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'abc'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my server'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434\u2006\ufffd'}, req=FakeRequest())) def test_validate_name_fails(self): error = ("An invalid 'name' value was provided. The name must be: " "printable characters except !, ., @. " "Can not start or end with whitespace.") should_fail = (' ', ' server', 'server ', u'a\xa0', # trailing unicode space u'\uffff', # non-printable unicode 'abc!def', 'abc.def', 'abc@def') for item in should_fail: self.check_validation_error(self.post, body={'foo': item}, expected_detail=error) # four-byte unicode, if supported by this python build try: self.check_validation_error(self.post, body={'foo': u'\U00010000'}, expected_detail=error) except ValueError: pass class CellNameLeadingTrailingSpacesTestCase(APIValidationTestCase): def setUp(self): super(CellNameLeadingTrailingSpacesTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.cell_name_leading_trailing_spaces, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def test_validate_name(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'abc'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my server'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434\u2006\ufffd'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': ' my server'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my server '}, req=FakeRequest())) def test_validate_name_fails(self): error = ("An invalid 'name' value was provided. The name must be: " "printable characters except !, ., @, " "with at least one non space character") should_fail = ( ' ', u'\uffff', # non-printable unicode 'abc!def', 'abc.def', 'abc@def') for item in should_fail: self.check_validation_error(self.post, body={'foo': item}, expected_detail=error) # four-byte unicode, if supported by this python build try: self.check_validation_error(self.post, body={'foo': u'\U00010000'}, expected_detail=error) except ValueError: pass class NameTestCase(APIValidationTestCase): def setUp(self): super(NameTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.name, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def test_validate_name(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'm1.small'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my server'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'a'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434\u2006\ufffd'}, req=FakeRequest())) def test_validate_name_fails(self): error = ("An invalid 'name' value was provided. The name must be: " "printable characters. " "Can not start or end with whitespace.") should_fail = (' ', ' server', 'server ', u'a\xa0', # trailing unicode space u'\uffff', # non-printable unicode ) for item in should_fail: self.check_validation_error(self.post, body={'foo': item}, expected_detail=error) # four-byte unicode, if supported by this python build try: self.check_validation_error(self.post, body={'foo': u'\U00010000'}, expected_detail=error) except ValueError: pass class NameWithLeadingTrailingSpacesTestCase(APIValidationTestCase): def setUp(self): super(NameWithLeadingTrailingSpacesTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.name_with_leading_trailing_spaces, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def test_validate_name(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'm1.small'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my server'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'a'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434\u2006\ufffd'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': ' abc '}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'abc abc abc'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': ' abc abc abc '}, req=FakeRequest())) # leading unicode space self.assertEqual('Validation succeeded.', self.post(body={'foo': '\xa0abc'}, req=FakeRequest())) def test_validate_name_fails(self): error = ("An invalid 'name' value was provided. The name must be: " "printable characters with at least one non space character") should_fail = ( ' ', u'\xa0', # unicode space u'\uffff', # non-printable unicode ) for item in should_fail: self.check_validation_error(self.post, body={'foo': item}, expected_detail=error) # four-byte unicode, if supported by this python build try: self.check_validation_error(self.post, body={'foo': u'\U00010000'}, expected_detail=error) except ValueError: pass class NoneTypeTestCase(APIValidationTestCase): def setUp(self): super(NoneTypeTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.none } } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def test_validate_none(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'None'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': None}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': {}}, req=FakeRequest())) def test_validate_none_fails(self): detail = ("Invalid input for field/attribute foo. Value: ." " '' is not one of ['None', None, {}]") self.check_validation_error(self.post, body={'foo': ''}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: " "{'key': 'val'}. {'key': 'val'} is not one of " "['None', None, {}]") self.check_validation_error(self.post, body={'foo': {'key': 'val'}}, expected_detail=detail) class TcpUdpPortTestCase(APIValidationTestCase): def setUp(self): super(TcpUdpPortTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.tcp_udp_port, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_tcp_udp_port(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 1024}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '1024'}, req=FakeRequest())) def test_validate_tcp_udp_port_fails(self): detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'integer', 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 65536." " 65536(.0)? 
is greater than the maximum of 65535") self.check_validation_error(self.post, body={'foo': 65536}, expected_detail=detail) class CidrFormatTestCase(APIValidationTestCase): def setUp(self): super(CidrFormatTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'cidr', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_cidr(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '192.168.10.0/24'}, req=FakeRequest() )) def test_validate_cidr_fails(self): detail = ("Invalid input for field/attribute foo." " Value: bar." " 'bar' is not a 'cidr'") self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: . '' is not a 'cidr'") self.check_validation_error(self.post, body={'foo': ''}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: 192.168.1.0. '192.168.1.0' is not a 'cidr'") self.check_validation_error(self.post, body={'foo': '192.168.1.0'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: 192.168.1.0 /24." " '192.168.1.0 /24' is not a 'cidr'") self.check_validation_error(self.post, body={'foo': '192.168.1.0 /24'}, expected_detail=detail) class DatetimeTestCase(APIValidationTestCase): def setUp(self): super(DatetimeTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'date-time', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_datetime(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '2014-01-14T01:00:00Z'}, req=FakeRequest() )) def test_validate_datetime_fails(self): detail = ("Invalid input for field/attribute foo." " Value: 2014-13-14T01:00:00Z." 
" '2014-13-14T01:00:00Z' is not a 'date-time'") self.check_validation_error(self.post, body={'foo': '2014-13-14T01:00:00Z'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: bar. 'bar' is not a 'date-time'") self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1." " '1' is not a 'date-time'") self.check_validation_error(self.post, body={'foo': '1'}, expected_detail=detail) class UuidTestCase(APIValidationTestCase): def setUp(self): super(UuidTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'uuid', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_uuid(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '70a599e0-31e7-49b7-b260-868f441e862b'}, req=FakeRequest() )) def test_validate_uuid_fails(self): detail = ("Invalid input for field/attribute foo." " Value: 70a599e031e749b7b260868f441e862." " '70a599e031e749b7b260868f441e862' is not a 'uuid'") self.check_validation_error(self.post, body={'foo': '70a599e031e749b7b260868f441e862'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1." " '1' is not a 'uuid'") self.check_validation_error(self.post, body={'foo': '1'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: abc." " 'abc' is not a 'uuid'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) class UriTestCase(APIValidationTestCase): def setUp(self): super(UriTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'uri', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def test_validate_uri(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': 'http://localhost:8774/v2/servers'}, req=FakeRequest() )) self.assertEqual('Validation succeeded.', self.post( body={'foo': 'http://[::1]:8774/v2/servers'}, req=FakeRequest() )) def test_validate_uri_fails(self): base_detail = ("Invalid input for field/attribute foo. Value: {0}. " "'{0}' is not a 'uri'") invalid_uri = 'http://localhost:8774/v2/servers##' self.check_validation_error(self.post, body={'foo': invalid_uri}, expected_detail=base_detail.format( invalid_uri)) invalid_uri = 'http://[fdf8:01]:8774/v2/servers' self.check_validation_error(self.post, body={'foo': invalid_uri}, expected_detail=base_detail.format( invalid_uri)) invalid_uri = '1' self.check_validation_error(self.post, body={'foo': invalid_uri}, expected_detail=base_detail.format( invalid_uri)) invalid_uri = 'abc' self.check_validation_error(self.post, body={'foo': invalid_uri}, expected_detail=base_detail.format( invalid_uri)) class Ipv4TestCase(APIValidationTestCase): def setUp(self): super(Ipv4TestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'ipv4', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_ipv4(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '192.168.0.100'}, req=FakeRequest() )) def test_validate_ipv4_fails(self): detail = ("Invalid input for field/attribute foo. Value: abc." " 'abc' is not a 'ipv4'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: localhost." " 'localhost' is not a 'ipv4'") self.check_validation_error(self.post, body={'foo': 'localhost'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: 2001:db8::1234:0:0:9abc." 
" '2001:db8::1234:0:0:9abc' is not a 'ipv4'") self.check_validation_error(self.post, body={'foo': '2001:db8::1234:0:0:9abc'}, expected_detail=detail) class Ipv6TestCase(APIValidationTestCase): def setUp(self): super(Ipv6TestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'ipv6', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_ipv6(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '2001:db8::1234:0:0:9abc'}, req=FakeRequest() )) def test_validate_ipv6_fails(self): detail = ("Invalid input for field/attribute foo. Value: abc." " 'abc' is not a 'ipv6'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: localhost." " 'localhost' is not a 'ipv6'") self.check_validation_error(self.post, body={'foo': 'localhost'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: 192.168.0.100. '192.168.0.100' is not a 'ipv6'") self.check_validation_error(self.post, body={'foo': '192.168.0.100'}, expected_detail=detail) class Base64TestCase(APIValidationTestCase): def setUp(self): super(APIValidationTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'base64', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_base64(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'aGVsbG8gd29ybGQ='}, req=FakeRequest())) # 'aGVsbG8gd29ybGQ=' is the base64 code of 'hello world' def test_validate_base64_fails(self): value = 'A random string' detail = ("Invalid input for field/attribute foo. " "Value: %s. 
'%s' is not a 'base64'") % (value, value) self.check_validation_error(self.post, body={'foo': value}, expected_detail=detail) nova-13.0.0/nova/tests/unit/matchers.py0000664000567000056710000004647712701407773021175 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Matcher classes to be used inside of the testtools assertThat framework.""" import pprint from lxml import etree import six from testtools import content import testtools.matchers class DictKeysMismatch(object): def __init__(self, d1only, d2only): self.d1only = d1only self.d2only = d2only def describe(self): return ('Keys in d1 and not d2: %(d1only)s.' ' Keys in d2 and not d1: %(d2only)s' % {'d1only': self.d1only, 'd2only': self.d2only}) def get_details(self): return {} class DictMismatch(object): def __init__(self, key, d1_value, d2_value): self.key = key self.d1_value = d1_value self.d2_value = d2_value def describe(self): return ("Dictionaries do not match at %(key)s." 
" d1: %(d1_value)s d2: %(d2_value)s" % {'key': self.key, 'd1_value': self.d1_value, 'd2_value': self.d2_value}) def get_details(self): return {} class DictMatches(object): def __init__(self, d1, approx_equal=False, tolerance=0.001): self.d1 = d1 self.approx_equal = approx_equal self.tolerance = tolerance def __str__(self): return 'DictMatches(%s)' % (pprint.pformat(self.d1)) # Useful assertions def match(self, d2): """Assert two dicts are equivalent. This is a 'deep' match in the sense that it handles nested dictionaries appropriately. NOTE: If you don't care (or don't know) a given value, you can specify the string DONTCARE as the value. This will cause that dict-item to be skipped. """ d1keys = set(self.d1.keys()) d2keys = set(d2.keys()) if d1keys != d2keys: d1only = sorted(d1keys - d2keys) d2only = sorted(d2keys - d1keys) return DictKeysMismatch(d1only, d2only) for key in d1keys: d1value = self.d1[key] d2value = d2[key] try: error = abs(float(d1value) - float(d2value)) within_tolerance = error <= self.tolerance except (ValueError, TypeError): # If both values aren't convertible to float, just ignore # ValueError if arg is a str, TypeError if it's something else # (like None) within_tolerance = False if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): matcher = DictMatches(d1value) did_match = matcher.match(d2value) if did_match is not None: return did_match elif 'DONTCARE' in (d1value, d2value): continue elif self.approx_equal and within_tolerance: continue elif d1value != d2value: return DictMismatch(key, d1value, d2value) class ListLengthMismatch(object): def __init__(self, len1, len2): self.len1 = len1 self.len2 = len2 def describe(self): return ('Length mismatch: len(L1)=%(len1)d != ' 'len(L2)=%(len2)d' % {'len1': self.len1, 'len2': self.len2}) def get_details(self): return {} class DictListMatches(object): def __init__(self, l1, approx_equal=False, tolerance=0.001): self.l1 = l1 self.approx_equal = approx_equal self.tolerance = tolerance def 
__str__(self): return 'DictListMatches(%s)' % (pprint.pformat(self.l1)) # Useful assertions def match(self, l2): """Assert a list of dicts are equivalent.""" l1count = len(self.l1) l2count = len(l2) if l1count != l2count: return ListLengthMismatch(l1count, l2count) for d1, d2 in zip(self.l1, l2): matcher = DictMatches(d2, approx_equal=self.approx_equal, tolerance=self.tolerance) did_match = matcher.match(d1) if did_match: return did_match class SubDictMismatch(object): def __init__(self, key=None, sub_value=None, super_value=None, keys=False): self.key = key self.sub_value = sub_value self.super_value = super_value self.keys = keys def describe(self): if self.keys: return "Keys between dictionaries did not match" else: return("Dictionaries do not match at %s. d1: %s d2: %s" % (self.key, self.super_value, self.sub_value)) def get_details(self): return {} class IsSubDictOf(object): def __init__(self, super_dict): self.super_dict = super_dict def __str__(self): return 'IsSubDictOf(%s)' % (self.super_dict) def match(self, sub_dict): """Assert a sub_dict is subset of super_dict.""" if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())): return SubDictMismatch(keys=True) for k, sub_value in sub_dict.items(): super_value = self.super_dict[k] if isinstance(sub_value, dict): matcher = IsSubDictOf(super_value) did_match = matcher.match(sub_value) if did_match is not None: return did_match elif 'DONTCARE' in (sub_value, super_value): continue else: if sub_value != super_value: return SubDictMismatch(k, sub_value, super_value) class FunctionCallMatcher(object): def __init__(self, expected_func_calls): self.expected_func_calls = expected_func_calls self.actual_func_calls = [] def call(self, *args, **kwargs): func_call = {'args': args, 'kwargs': kwargs} self.actual_func_calls.append(func_call) def match(self): dict_list_matcher = DictListMatches(self.expected_func_calls) return dict_list_matcher.match(self.actual_func_calls) class XMLMismatch(object): """Superclass 
for XML mismatch.""" def __init__(self, state): self.path = str(state) self.expected = state.expected self.actual = state.actual def describe(self): return "%(path)s: XML does not match" % {'path': self.path} def get_details(self): return { 'expected': content.text_content(self.expected), 'actual': content.text_content(self.actual), } class XMLDocInfoMismatch(XMLMismatch): """XML version or encoding doesn't match.""" def __init__(self, state, expected_doc_info, actual_doc_info): super(XMLDocInfoMismatch, self).__init__(state) self.expected_doc_info = expected_doc_info self.actual_doc_info = actual_doc_info def describe(self): return ("%(path)s: XML information mismatch(version, encoding) " "expected version %(expected_version)s, " "expected encoding %(expected_encoding)s; " "actual version %(actual_version)s, " "actual encoding %(actual_encoding)s" % {'path': self.path, 'expected_version': self.expected_doc_info['version'], 'expected_encoding': self.expected_doc_info['encoding'], 'actual_version': self.actual_doc_info['version'], 'actual_encoding': self.actual_doc_info['encoding']}) class XMLTagMismatch(XMLMismatch): """XML tags don't match.""" def __init__(self, state, idx, expected_tag, actual_tag): super(XMLTagMismatch, self).__init__(state) self.idx = idx self.expected_tag = expected_tag self.actual_tag = actual_tag def describe(self): return ("%(path)s: XML tag mismatch at index %(idx)d: " "expected tag <%(expected_tag)s>; " "actual tag <%(actual_tag)s>" % {'path': self.path, 'idx': self.idx, 'expected_tag': self.expected_tag, 'actual_tag': self.actual_tag}) class XMLAttrKeysMismatch(XMLMismatch): """XML attribute keys don't match.""" def __init__(self, state, expected_only, actual_only): super(XMLAttrKeysMismatch, self).__init__(state) self.expected_only = ', '.join(sorted(expected_only)) self.actual_only = ', '.join(sorted(actual_only)) def describe(self): return ("%(path)s: XML attributes mismatch: " "keys only in expected: %(expected_only)s; " "keys only 
in actual: %(actual_only)s" % {'path': self.path, 'expected_only': self.expected_only, 'actual_only': self.actual_only}) class XMLAttrValueMismatch(XMLMismatch): """XML attribute values don't match.""" def __init__(self, state, key, expected_value, actual_value): super(XMLAttrValueMismatch, self).__init__(state) self.key = key self.expected_value = expected_value self.actual_value = actual_value def describe(self): return ("%(path)s: XML attribute value mismatch: " "expected value of attribute %(key)s: %(expected_value)r; " "actual value: %(actual_value)r" % {'path': self.path, 'key': self.key, 'expected_value': self.expected_value, 'actual_value': self.actual_value}) class XMLTextValueMismatch(XMLMismatch): """XML text values don't match.""" def __init__(self, state, expected_text, actual_text): super(XMLTextValueMismatch, self).__init__(state) self.expected_text = expected_text self.actual_text = actual_text def describe(self): return ("%(path)s: XML text value mismatch: " "expected text value: %(expected_text)r; " "actual value: %(actual_text)r" % {'path': self.path, 'expected_text': self.expected_text, 'actual_text': self.actual_text}) class XMLUnexpectedChild(XMLMismatch): """Unexpected child present in XML.""" def __init__(self, state, tag, idx): super(XMLUnexpectedChild, self).__init__(state) self.tag = tag self.idx = idx def describe(self): return ("%(path)s: XML unexpected child element <%(tag)s> " "present at index %(idx)d" % {'path': self.path, 'tag': self.tag, 'idx': self.idx}) class XMLExpectedChild(XMLMismatch): """Expected child not present in XML.""" def __init__(self, state, tag, idx): super(XMLExpectedChild, self).__init__(state) self.tag = tag self.idx = idx def describe(self): return ("%(path)s: XML expected child element <%(tag)s> " "not present at index %(idx)d" % {'path': self.path, 'tag': self.tag, 'idx': self.idx}) class XMLMatchState(object): """Maintain some state for matching. 
Tracks the XML node path and saves the expected and actual full XML text, for use by the XMLMismatch subclasses. """ def __init__(self, expected, actual): self.path = [] self.expected = expected self.actual = actual def __enter__(self): pass def __exit__(self, exc_type, exc_value, exc_tb): self.path.pop() return False def __str__(self): return '/' + '/'.join(self.path) def node(self, tag, idx): """Adds tag and index to the path; they will be popped off when the corresponding 'with' statement exits. :param tag: The element tag :param idx: If not None, the integer index of the element within its parent. Not included in the path element if None. """ if idx is not None: self.path.append("%s[%d]" % (tag, idx)) else: self.path.append(tag) return self class XMLMatches(object): """Compare XML strings. More complete than string comparison.""" SKIP_TAGS = (etree.Comment, etree.ProcessingInstruction) def __init__(self, expected, allow_mixed_nodes=False, skip_empty_text_nodes=True, skip_values=('DONTCARE',)): self.expected_xml = expected self.expected = etree.parse(six.StringIO(expected)) self.allow_mixed_nodes = allow_mixed_nodes self.skip_empty_text_nodes = skip_empty_text_nodes self.skip_values = set(skip_values) def __str__(self): return 'XMLMatches(%r)' % self.expected_xml def match(self, actual_xml): actual = etree.parse(six.StringIO(actual_xml)) state = XMLMatchState(self.expected_xml, actual_xml) expected_doc_info = self._get_xml_docinfo(self.expected) actual_doc_info = self._get_xml_docinfo(actual) if expected_doc_info != actual_doc_info: return XMLDocInfoMismatch(state, expected_doc_info, actual_doc_info) result = self._compare_node(self.expected.getroot(), actual.getroot(), state, None) if result is False: return XMLMismatch(state) elif result is not True: return result @staticmethod def _get_xml_docinfo(xml_document): return {'version': xml_document.docinfo.xml_version, 'encoding': xml_document.docinfo.encoding} def _compare_text_nodes(self, expected, actual, 
state): expected_text = [expected.text] expected_text.extend(child.tail for child in expected) actual_text = [actual.text] actual_text.extend(child.tail for child in actual) if self.skip_empty_text_nodes: expected_text = [text for text in expected_text if text and not text.isspace()] actual_text = [text for text in actual_text if text and not text.isspace()] if self.skip_values.intersection( expected_text + actual_text): return if self.allow_mixed_nodes: # lets sort text nodes because they can be mixed expected_text = sorted(expected_text) actual_text = sorted(actual_text) if expected_text != actual_text: return XMLTextValueMismatch(state, expected_text, actual_text) def _compare_node(self, expected, actual, state, idx): """Recursively compares nodes within the XML tree.""" # Start by comparing the tags if expected.tag != actual.tag: return XMLTagMismatch(state, idx, expected.tag, actual.tag) with state.node(expected.tag, idx): # Compare the attribute keys expected_attrs = set(expected.attrib.keys()) actual_attrs = set(actual.attrib.keys()) if expected_attrs != actual_attrs: expected_only = expected_attrs - actual_attrs actual_only = actual_attrs - expected_attrs return XMLAttrKeysMismatch(state, expected_only, actual_only) # Compare the attribute values for key in expected_attrs: expected_value = expected.attrib[key] actual_value = actual.attrib[key] if self.skip_values.intersection( [expected_value, actual_value]): continue elif expected_value != actual_value: return XMLAttrValueMismatch(state, key, expected_value, actual_value) # Compare text nodes text_nodes_mismatch = self._compare_text_nodes( expected, actual, state) if text_nodes_mismatch: return text_nodes_mismatch # Compare the contents of the node matched_actual_child_idxs = set() # first_actual_child_idx - pointer to next actual child # used with allow_mixed_nodes=False ONLY # prevent to visit actual child nodes twice first_actual_child_idx = 0 for expected_child in expected: if expected_child.tag in 
self.SKIP_TAGS: continue related_actual_child_idx = None if self.allow_mixed_nodes: first_actual_child_idx = 0 for actual_child_idx in range( first_actual_child_idx, len(actual)): if actual[actual_child_idx].tag in self.SKIP_TAGS: first_actual_child_idx += 1 continue if actual_child_idx in matched_actual_child_idxs: continue # Compare the nodes result = self._compare_node(expected_child, actual[actual_child_idx], state, actual_child_idx) first_actual_child_idx += 1 if result is not True: if self.allow_mixed_nodes: continue else: return result else: # nodes match related_actual_child_idx = actual_child_idx break if related_actual_child_idx is not None: matched_actual_child_idxs.add(actual_child_idx) else: return XMLExpectedChild(state, expected_child.tag, actual_child_idx + 1) # Make sure we consumed all nodes in actual for actual_child_idx, actual_child in enumerate(actual): if (actual_child.tag not in self.SKIP_TAGS and actual_child_idx not in matched_actual_child_idxs): return XMLUnexpectedChild(state, actual_child.tag, actual_child_idx) # The nodes match return True class EncodedByUTF8(object): def match(self, obj): if isinstance(obj, six.binary_type): if hasattr(obj, "decode"): try: obj.decode("utf-8") except UnicodeDecodeError: return testtools.matchers.Mismatch( "%s is not encoded in UTF-8." % obj) else: reason = ("Type of '%(obj)s' is '%(obj_type)s', " "should be '%(correct_type)s'." % { "obj": obj, "obj_type": type(obj).__name__, "correct_type": six.binary_type.__name__ }) return testtools.matchers.Mismatch(reason) nova-13.0.0/nova/tests/unit/test_pipelib.py0000664000567000056710000000523012701407773022030 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.cloudpipe import pipelib from nova import context from nova import crypto from nova import test from nova import utils CONF = cfg.CONF class PipelibTest(test.TestCase): def setUp(self): super(PipelibTest, self).setUp() self.cloudpipe = pipelib.CloudPipe() self.project = "222" self.user = "111" self.context = context.RequestContext(self.user, self.project) def test_get_encoded_zip(self): with utils.tempdir() as tmpdir: self.flags(ca_path=tmpdir) crypto.ensure_ca_filesystem() ret = self.cloudpipe.get_encoded_zip(self.project) self.assertTrue(ret) def test_launch_vpn_instance(self): self.stubs.Set(self.cloudpipe.compute_api, "create", lambda *a, **kw: (None, "r-fakeres")) with utils.tempdir() as tmpdir: self.flags(ca_path=tmpdir, keys_path=tmpdir) crypto.ensure_ca_filesystem() self.cloudpipe.launch_vpn_instance(self.context) def test_setup_security_group(self): group_name = "%s%s" % (self.project, CONF.vpn_key_suffix) # First attempt, does not exist (thus its created) res1_group = self.cloudpipe.setup_security_group(self.context) self.assertEqual(res1_group, group_name) # Second attempt, it exists in the DB res2_group = self.cloudpipe.setup_security_group(self.context) self.assertEqual(res1_group, res2_group) def test_setup_key_pair(self): key_name = "%s%s" % (self.project, CONF.vpn_key_suffix) with utils.tempdir() as tmpdir: self.flags(keys_path=tmpdir) # First attempt, key does not exist (thus it is generated) res1_key = self.cloudpipe.setup_key_pair(self.context) self.assertEqual(res1_key, key_name) # Second attempt, it exists 
in the DB res2_key = self.cloudpipe.setup_key_pair(self.context) self.assertEqual(res2_key, res1_key) nova-13.0.0/nova/tests/unit/fake_request_spec.py0000664000567000056710000000627512701407773023047 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslo_utils import uuidutils from nova import context from nova import objects from nova.tests.unit import fake_flavor INSTANCE_NUMA_TOPOLOGY = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=512), objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=512)]) INSTANCE_NUMA_TOPOLOGY.obj_reset_changes(recursive=True) IMAGE_META = objects.ImageMeta.from_dict( {'status': 'active', 'container_format': 'bare', 'min_ram': 0, 'updated_at': '2014-12-12T11:16:36.000000', 'min_disk': '0', 'owner': '2d8b9502858c406ebee60f0849486222', 'protected': 'yes', 'properties': { 'os_type': 'Linux', 'hw_video_model': 'vga', 'hw_video_ram': '512', 'hw_qemu_guest_agent': 'yes', 'hw_scsi_model': 'virtio-scsi', }, 'size': 213581824, 'name': 'f16-x86_64-openstack-sda', 'checksum': '755122332caeb9f661d5c978adb8b45f', 'created_at': '2014-12-10T16:23:14.000000', 'disk_format': 'qcow2', 'id': 'c8b1790e-a07d-4971-b137-44f2432936cd', } ) IMAGE_META.obj_reset_changes(recursive=True) PCI_REQUESTS = objects.InstancePCIRequests( requests=[objects.InstancePCIRequest(count=1), objects.InstancePCIRequest(count=2)]) 
PCI_REQUESTS.obj_reset_changes(recursive=True) def fake_db_spec(): req_obj = fake_spec_obj() db_request_spec = { 'id': 1, 'instance_uuid': req_obj.instance_uuid, 'spec': jsonutils.dumps(req_obj.obj_to_primitive()), } return db_request_spec def fake_spec_obj(remove_id=False): ctxt = context.RequestContext('fake', 'fake') req_obj = objects.RequestSpec(ctxt) if not remove_id: req_obj.id = 42 req_obj.instance_uuid = uuidutils.generate_uuid() req_obj.image = IMAGE_META req_obj.numa_topology = INSTANCE_NUMA_TOPOLOGY req_obj.pci_requests = PCI_REQUESTS req_obj.flavor = fake_flavor.fake_flavor_obj(ctxt) req_obj.retry = objects.SchedulerRetries() req_obj.limits = objects.SchedulerLimits() req_obj.instance_group = objects.InstanceGroup() req_obj.project_id = 'fake' req_obj.num_instances = 1 req_obj.availability_zone = None req_obj.ignore_hosts = ['host2', 'host4'] req_obj.force_hosts = ['host1', 'host3'] req_obj.force_nodes = ['node1', 'node2'] req_obj.scheduler_hints = {'hint': ['over-there']} # This should never be a changed field req_obj.obj_reset_changes(['id']) return req_obj nova-13.0.0/nova/tests/unit/test_nova_manage.py0000664000567000056710000010423112701407773022660 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2011 Ilya Alekseyev # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from six.moves import StringIO import sys import fixtures import mock from oslo_utils import uuidutils from nova.cmd import manage from nova import context from nova import db from nova.db import migration from nova.db.sqlalchemy import migration as sqla_migration from nova import exception from nova import objects from nova import test from nova.tests.unit.db import fakes as db_fakes from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_network from nova.tests.unit import test_flavors class FixedIpCommandsTestCase(test.TestCase): def setUp(self): super(FixedIpCommandsTestCase, self).setUp() db_fakes.stub_out_db_network_api(self) self.commands = manage.FixedIpCommands() def test_reserve(self): self.commands.reserve('192.168.0.100') address = db.fixed_ip_get_by_address(context.get_admin_context(), '192.168.0.100') self.assertTrue(address['reserved']) def test_reserve_nonexistent_address(self): self.assertEqual(2, self.commands.reserve('55.55.55.55')) def test_unreserve(self): self.commands.unreserve('192.168.0.100') address = db.fixed_ip_get_by_address(context.get_admin_context(), '192.168.0.100') self.assertFalse(address['reserved']) def test_unreserve_nonexistent_address(self): self.assertEqual(2, self.commands.unreserve('55.55.55.55')) def test_list(self): self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands.list() self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100')) def test_list_just_one_host(self): def fake_fixed_ip_get_by_host(*args, **kwargs): return [db_fakes.fixed_ip_fields] self.useFixture(fixtures.MonkeyPatch( 'nova.db.fixed_ip_get_by_host', fake_fixed_ip_get_by_host)) self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands.list('banana') self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100')) class FloatingIpCommandsTestCase(test.NoDBTestCase): def setUp(self): super(FloatingIpCommandsTestCase, self).setUp() db_fakes.stub_out_db_network_api(self) 
self.commands = manage.FloatingIpCommands() def test_address_to_hosts(self): def assert_loop(result, expected): for ip in result: self.assertIn(str(ip), expected) address_to_hosts = self.commands.address_to_hosts # /32 and /31 self.assertRaises(exception.InvalidInput, address_to_hosts, '192.168.100.1/32') self.assertRaises(exception.InvalidInput, address_to_hosts, '192.168.100.1/31') # /30 expected = ["192.168.100.%s" % i for i in range(1, 3)] result = address_to_hosts('192.168.100.0/30') self.assertEqual(2, len(list(result))) assert_loop(result, expected) # /29 expected = ["192.168.100.%s" % i for i in range(1, 7)] result = address_to_hosts('192.168.100.0/29') self.assertEqual(6, len(list(result))) assert_loop(result, expected) # /28 expected = ["192.168.100.%s" % i for i in range(1, 15)] result = address_to_hosts('192.168.100.0/28') self.assertEqual(14, len(list(result))) assert_loop(result, expected) # /16 result = address_to_hosts('192.168.100.0/16') self.assertEqual(65534, len(list(result))) # NOTE(dripton): I don't test /13 because it makes the test take 3s. # /12 gives over a million IPs, which is ridiculous. 
self.assertRaises(exception.InvalidInput, address_to_hosts, '192.168.100.1/12') class NetworkCommandsTestCase(test.NoDBTestCase): def setUp(self): super(NetworkCommandsTestCase, self).setUp() self.commands = manage.NetworkCommands() self.net = {'id': 0, 'label': 'fake', 'injected': False, 'cidr': '192.168.0.0/24', 'cidr_v6': 'dead:beef::/64', 'multi_host': False, 'gateway_v6': 'dead:beef::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa0', 'bridge_interface': 'fake_fa0', 'gateway': '192.168.0.1', 'broadcast': '192.168.0.255', 'dns1': '8.8.8.8', 'dns2': '8.8.4.4', 'vlan': 200, 'vlan_start': 201, 'vpn_public_address': '10.0.0.2', 'vpn_public_port': '2222', 'vpn_private_address': '192.168.0.2', 'dhcp_start': '192.168.0.3', 'project_id': 'fake_project', 'host': 'fake_host', 'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'} def fake_network_get_by_cidr(context, cidr): self.assertTrue(context.to_dict()['is_admin']) self.assertEqual(cidr, self.fake_net['cidr']) return db_fakes.FakeModel(dict(test_network.fake_network, **self.fake_net)) def fake_network_get_by_uuid(context, uuid): self.assertTrue(context.to_dict()['is_admin']) self.assertEqual(uuid, self.fake_net['uuid']) return db_fakes.FakeModel(dict(test_network.fake_network, **self.fake_net)) def fake_network_update(context, network_id, values): self.assertTrue(context.to_dict()['is_admin']) self.assertEqual(network_id, self.fake_net['id']) self.assertEqual(values, self.fake_update_value) self.fake_network_get_by_cidr = fake_network_get_by_cidr self.fake_network_get_by_uuid = fake_network_get_by_uuid self.fake_network_update = fake_network_update def test_create(self): def fake_create_networks(obj, context, **kwargs): self.assertTrue(context.to_dict()['is_admin']) self.assertEqual(kwargs['label'], 'Test') self.assertEqual(kwargs['cidr'], '10.2.0.0/24') self.assertFalse(kwargs['multi_host']) self.assertEqual(kwargs['num_networks'], 1) self.assertEqual(kwargs['network_size'], 256) 
self.assertEqual(kwargs['vlan'], 200) self.assertEqual(kwargs['vlan_start'], 201) self.assertEqual(kwargs['vpn_start'], 2000) self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120') self.assertEqual(kwargs['gateway'], '10.2.0.1') self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22') self.assertEqual(kwargs['bridge'], 'br200') self.assertEqual(kwargs['bridge_interface'], 'eth0') self.assertEqual(kwargs['dns1'], '8.8.8.8') self.assertEqual(kwargs['dns2'], '8.8.4.4') self.flags(network_manager='nova.network.manager.VlanManager') from nova.network import manager as net_manager self.stubs.Set(net_manager.VlanManager, 'create_networks', fake_create_networks) self.commands.create( label='Test', cidr='10.2.0.0/24', num_networks=1, network_size=256, multi_host='F', vlan=200, vlan_start=201, vpn_start=2000, cidr_v6='fd00:2::/120', gateway='10.2.0.1', gateway_v6='fd00:2::22', bridge='br200', bridge_interface='eth0', dns1='8.8.8.8', dns2='8.8.4.4', uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa') def test_list(self): def fake_network_get_all(context): return [db_fakes.FakeModel(self.net)] self.stub_out('nova.db.network_get_all', fake_network_get_all) output = StringIO() sys.stdout = output self.commands.list() sys.stdout = sys.__stdout__ result = output.getvalue() _fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s", "%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s", "%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"]) head = _fmt % {'id': 'id', 'cidr': 'IPv4', 'cidr_v6': 'IPv6', 'dhcp_start': 'start address', 'dns1': 'DNS1', 'dns2': 'DNS2', 'vlan': 'VlanID', 'project_id': 'project', 'uuid': "uuid"} body = _fmt % {'id': self.net['id'], 'cidr': self.net['cidr'], 'cidr_v6': self.net['cidr_v6'], 'dhcp_start': self.net['dhcp_start'], 'dns1': self.net['dns1'], 'dns2': self.net['dns2'], 'vlan': self.net['vlan'], 'project_id': self.net['project_id'], 'uuid': self.net['uuid']} answer = '%s\n%s\n' % (head, body) self.assertEqual(result, answer) def test_delete(self): self.fake_net = 
self.net self.fake_net['project_id'] = None self.fake_net['host'] = None self.stub_out('nova.db.network_get_by_uuid', self.fake_network_get_by_uuid) def fake_network_delete_safe(context, network_id): self.assertTrue(context.to_dict()['is_admin']) self.assertEqual(network_id, self.fake_net['id']) self.stub_out('nova.db.network_delete_safe', fake_network_delete_safe) self.commands.delete(uuid=self.fake_net['uuid']) def test_delete_by_cidr(self): self.fake_net = self.net self.fake_net['project_id'] = None self.fake_net['host'] = None self.stub_out('nova.db.network_get_by_cidr', self.fake_network_get_by_cidr) def fake_network_delete_safe(context, network_id): self.assertTrue(context.to_dict()['is_admin']) self.assertEqual(network_id, self.fake_net['id']) self.stub_out('nova.db.network_delete_safe', fake_network_delete_safe) self.commands.delete(fixed_range=self.fake_net['cidr']) def _test_modify_base(self, update_value, project, host, dis_project=None, dis_host=None): self.fake_net = self.net self.fake_update_value = update_value self.stub_out('nova.db.network_get_by_cidr', self.fake_network_get_by_cidr) self.stub_out('nova.db.network_update', self.fake_network_update) self.commands.modify(self.fake_net['cidr'], project=project, host=host, dis_project=dis_project, dis_host=dis_host) def test_modify_associate(self): self._test_modify_base(update_value={'project_id': 'test_project', 'host': 'test_host'}, project='test_project', host='test_host') def test_modify_unchanged(self): self._test_modify_base(update_value={}, project=None, host=None) def test_modify_disassociate(self): self._test_modify_base(update_value={'project_id': None, 'host': None}, project=None, host=None, dis_project=True, dis_host=True) class NeutronV2NetworkCommandsTestCase(test.NoDBTestCase): def setUp(self): super(NeutronV2NetworkCommandsTestCase, self).setUp() self.flags(use_neutron=True) self.commands = manage.NetworkCommands() def test_create(self): self.assertEqual(2, self.commands.create()) def 
test_list(self): self.assertEqual(2, self.commands.list()) def test_delete(self): self.assertEqual(2, self.commands.delete()) def test_modify(self): self.assertEqual(2, self.commands.modify('192.168.0.1')) class ProjectCommandsTestCase(test.TestCase): def setUp(self): super(ProjectCommandsTestCase, self).setUp() self.commands = manage.ProjectCommands() def test_quota(self): output = StringIO() sys.stdout = output self.commands.quota(project_id='admin', key='instances', value='unlimited', ) sys.stdout = sys.__stdout__ result = output.getvalue() print_format = "%-36s %-10s" % ('instances', 'unlimited') self.assertIn(print_format, result) def test_quota_update_invalid_key(self): self.assertEqual(2, self.commands.quota('admin', 'volumes1', '10')) class VmCommandsTestCase(test.NoDBTestCase): def setUp(self): super(VmCommandsTestCase, self).setUp() self.commands = manage.VmCommands() self.fake_flavor = objects.Flavor(**test_flavors.DEFAULT_FLAVORS[0]) def test_list_without_host(self): output = StringIO() sys.stdout = output with mock.patch.object(objects.InstanceList, 'get_by_filters') as get: get.return_value = objects.InstanceList( objects=[fake_instance.fake_instance_obj( context.get_admin_context(), host='foo-host', flavor=self.fake_flavor, system_metadata={})]) self.commands.list() sys.stdout = sys.__stdout__ result = output.getvalue() self.assertIn('node', result) # check the header line self.assertIn('m1.tiny', result) # flavor.name self.assertIn('foo-host', result) def test_list_with_host(self): output = StringIO() sys.stdout = output with mock.patch.object(objects.InstanceList, 'get_by_host') as get: get.return_value = objects.InstanceList( objects=[fake_instance.fake_instance_obj( context.get_admin_context(), flavor=self.fake_flavor, system_metadata={})]) self.commands.list(host='fake-host') sys.stdout = sys.__stdout__ result = output.getvalue() self.assertIn('node', result) # check the header line self.assertIn('m1.tiny', result) # flavor.name 
self.assertIn('fake-host', result) class DBCommandsTestCase(test.NoDBTestCase): def setUp(self): super(DBCommandsTestCase, self).setUp() self.commands = manage.DbCommands() def test_archive_deleted_rows_negative(self): self.assertEqual(1, self.commands.archive_deleted_rows(-1)) def test_archive_deleted_rows_large_number(self): large_number = '1' * 100 self.assertEqual(1, self.commands.archive_deleted_rows(large_number)) @mock.patch.object(db, 'archive_deleted_rows', return_value=dict(instances=10, consoles=5)) def _test_archive_deleted_rows(self, mock_db_archive, verbose=False): self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands.archive_deleted_rows(20, verbose=verbose) mock_db_archive.assert_called_once_with(20) output = sys.stdout.getvalue() if verbose: expected = '''\ +-----------+-------------------------+ | Table | Number of Rows Archived | +-----------+-------------------------+ | consoles | 5 | | instances | 10 | +-----------+-------------------------+ ''' self.assertEqual(expected, output) else: self.assertEqual(0, len(output)) def test_archive_deleted_rows(self): # Tests that we don't show any table output (not verbose). self._test_archive_deleted_rows() def test_archive_deleted_rows_verbose(self): # Tests that we get table output. 
self._test_archive_deleted_rows(verbose=True) @mock.patch.object(db, 'archive_deleted_rows', return_value={}) def test_archive_deleted_rows_verbose_no_results(self, mock_db_archive): self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands.archive_deleted_rows(20, verbose=True) mock_db_archive.assert_called_once_with(20) output = sys.stdout.getvalue() self.assertIn('Nothing was archived.', output) @mock.patch.object(migration, 'db_null_instance_uuid_scan', return_value={'foo': 0}) def test_null_instance_uuid_scan_no_records_found(self, mock_scan): self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands.null_instance_uuid_scan() self.assertIn("There were no records found", sys.stdout.getvalue()) @mock.patch.object(migration, 'db_null_instance_uuid_scan', return_value={'foo': 1, 'bar': 0}) def _test_null_instance_uuid_scan(self, mock_scan, delete): self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands.null_instance_uuid_scan(delete) output = sys.stdout.getvalue() if delete: self.assertIn("Deleted 1 records from table 'foo'.", output) self.assertNotIn("Deleted 0 records from table 'bar'.", output) else: self.assertIn("1 records in the 'foo' table", output) self.assertNotIn("0 records in the 'bar' table", output) self.assertNotIn("There were no records found", output) def test_null_instance_uuid_scan_readonly(self): self._test_null_instance_uuid_scan(delete=False) def test_null_instance_uuid_scan_delete(self): self._test_null_instance_uuid_scan(delete=True) @mock.patch.object(sqla_migration, 'db_version', return_value=2) def test_version(self, sqla_migrate): self.commands.version() sqla_migrate.assert_called_once_with(database='main') @mock.patch.object(sqla_migration, 'db_sync') def test_sync(self, sqla_sync): self.commands.sync(version=4) sqla_sync.assert_called_once_with(version=4, database='main') def _fake_db_command(self, migrations=None): if migrations is None: mock_mig_1 = 
mock.MagicMock(__name__="mock_mig_1") mock_mig_2 = mock.MagicMock(__name__="mock_mig_2") mock_mig_1.return_value = (5, 4) mock_mig_2.return_value = (6, 6) migrations = (mock_mig_1, mock_mig_2) class _CommandSub(manage.DbCommands): online_migrations = migrations return _CommandSub @mock.patch('nova.context.get_admin_context') def test_online_migrations(self, mock_get_context): ctxt = mock_get_context.return_value command_cls = self._fake_db_command() command = command_cls() command.online_data_migrations(10) command_cls.online_migrations[0].assert_called_once_with(ctxt, 10) command_cls.online_migrations[1].assert_called_once_with(ctxt, 6) @mock.patch('nova.context.get_admin_context') def test_online_migrations_no_max_count(self, mock_get_context): total = [120] batches = [50, 40, 30, 0] runs = [] def fake_migration(context, count): self.assertEqual(mock_get_context.return_value, context) runs.append(count) count = batches.pop(0) total[0] -= count return total[0], count command_cls = self._fake_db_command((fake_migration,)) command = command_cls() command.online_data_migrations(None) self.assertEqual([], batches) self.assertEqual(0, total[0]) self.assertEqual([50, 50, 50, 50], runs) def test_online_migrations_error(self): fake_migration = mock.MagicMock() fake_migration.side_effect = Exception command_cls = self._fake_db_command((fake_migration,)) command = command_cls() command.online_data_migrations(None) class ApiDbCommandsTestCase(test.NoDBTestCase): def setUp(self): super(ApiDbCommandsTestCase, self).setUp() self.commands = manage.ApiDbCommands() @mock.patch.object(sqla_migration, 'db_version', return_value=2) def test_version(self, sqla_migrate): self.commands.version() sqla_migrate.assert_called_once_with(database='api') @mock.patch.object(sqla_migration, 'db_sync') def test_sync(self, sqla_sync): self.commands.sync(version=4) sqla_sync.assert_called_once_with(version=4, database='api') class ServiceCommandsTestCase(test.TestCase): def setUp(self): 
super(ServiceCommandsTestCase, self).setUp() self.commands = manage.ServiceCommands() def test_service_enable_invalid_params(self): self.assertEqual(2, self.commands.enable('nohost', 'noservice')) def test_service_disable_invalid_params(self): self.assertEqual(2, self.commands.disable('nohost', 'noservice')) class CellCommandsTestCase(test.NoDBTestCase): def setUp(self): super(CellCommandsTestCase, self).setUp() self.commands = manage.CellCommands() def test_create_transport_hosts_multiple(self): """Test the _create_transport_hosts method when broker_hosts is set. """ brokers = "127.0.0.1:5672,127.0.0.2:5671" thosts = self.commands._create_transport_hosts( 'guest', 'devstack', broker_hosts=brokers) self.assertEqual(2, len(thosts)) self.assertEqual('127.0.0.1', thosts[0].hostname) self.assertEqual(5672, thosts[0].port) self.assertEqual('127.0.0.2', thosts[1].hostname) self.assertEqual(5671, thosts[1].port) def test_create_transport_hosts_single(self): """Test the _create_transport_hosts method when hostname is passed.""" thosts = self.commands._create_transport_hosts('guest', 'devstack', hostname='127.0.0.1', port=80) self.assertEqual(1, len(thosts)) self.assertEqual('127.0.0.1', thosts[0].hostname) self.assertEqual(80, thosts[0].port) def test_create_transport_hosts_single_broker(self): """Test the _create_transport_hosts method for single broker_hosts.""" thosts = self.commands._create_transport_hosts( 'guest', 'devstack', broker_hosts='127.0.0.1:5672') self.assertEqual(1, len(thosts)) self.assertEqual('127.0.0.1', thosts[0].hostname) self.assertEqual(5672, thosts[0].port) def test_create_transport_hosts_both(self): """Test the _create_transport_hosts method when both broker_hosts and hostname/port are passed. 
""" thosts = self.commands._create_transport_hosts( 'guest', 'devstack', broker_hosts='127.0.0.1:5672', hostname='127.0.0.2', port=80) self.assertEqual(1, len(thosts)) self.assertEqual('127.0.0.1', thosts[0].hostname) self.assertEqual(5672, thosts[0].port) def test_create_transport_hosts_wrong_val(self): """Test the _create_transport_hosts method when broker_hosts is wrongly sepcified """ self.assertRaises(ValueError, self.commands._create_transport_hosts, 'guest', 'devstack', broker_hosts='127.0.0.1:5672,127.0.0.1') def test_create_transport_hosts_wrong_port_val(self): """Test the _create_transport_hosts method when port in broker_hosts is wrongly sepcified """ self.assertRaises(ValueError, self.commands._create_transport_hosts, 'guest', 'devstack', broker_hosts='127.0.0.1:') def test_create_transport_hosts_wrong_port_arg(self): """Test the _create_transport_hosts method when port argument is wrongly sepcified """ self.assertRaises(ValueError, self.commands._create_transport_hosts, 'guest', 'devstack', hostname='127.0.0.1', port='ab') @mock.patch.object(context, 'get_admin_context') @mock.patch.object(db, 'cell_create') def test_create_broker_hosts(self, mock_db_cell_create, mock_ctxt): """Test the create function when broker_hosts is passed """ cell_tp_url = "fake://guest:devstack@127.0.0.1:5432" cell_tp_url += ",guest:devstack@127.0.0.2:9999/" ctxt = mock.sentinel mock_ctxt.return_value = mock.sentinel self.commands.create("test", broker_hosts='127.0.0.1:5432,127.0.0.2:9999', woffset=0, wscale=0, username="guest", password="devstack") exp_values = {'name': "test", 'is_parent': False, 'transport_url': cell_tp_url, 'weight_offset': 0.0, 'weight_scale': 0.0} mock_db_cell_create.assert_called_once_with(ctxt, exp_values) @mock.patch.object(context, 'get_admin_context') @mock.patch.object(db, 'cell_create') def test_create_broker_hosts_with_url_decoding_fix(self, mock_db_cell_create, mock_ctxt): """Test the create function when broker_hosts is passed """ cell_tp_url = 
"fake://the=user:the=password@127.0.0.1:5432/" ctxt = mock.sentinel mock_ctxt.return_value = mock.sentinel self.commands.create("test", broker_hosts='127.0.0.1:5432', woffset=0, wscale=0, username="the=user", password="the=password") exp_values = {'name': "test", 'is_parent': False, 'transport_url': cell_tp_url, 'weight_offset': 0.0, 'weight_scale': 0.0} mock_db_cell_create.assert_called_once_with(ctxt, exp_values) @mock.patch.object(context, 'get_admin_context') @mock.patch.object(db, 'cell_create') def test_create_hostname(self, mock_db_cell_create, mock_ctxt): """Test the create function when hostname and port is passed """ cell_tp_url = "fake://guest:devstack@127.0.0.1:9999/" ctxt = mock.sentinel mock_ctxt.return_value = mock.sentinel self.commands.create("test", hostname='127.0.0.1', port="9999", woffset=0, wscale=0, username="guest", password="devstack") exp_values = {'name': "test", 'is_parent': False, 'transport_url': cell_tp_url, 'weight_offset': 0.0, 'weight_scale': 0.0} mock_db_cell_create.assert_called_once_with(ctxt, exp_values) class CellV2CommandsTestCase(test.TestCase): def setUp(self): super(CellV2CommandsTestCase, self).setUp() self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands = manage.CellV2Commands() def test_map_cell_and_hosts(self): # Create some fake compute nodes and check if they get host mappings ctxt = context.RequestContext() values = { 'vcpus': 4, 'memory_mb': 4096, 'local_gb': 1024, 'vcpus_used': 2, 'memory_mb_used': 2048, 'local_gb_used': 512, 'hypervisor_type': 'Hyper-Dan-VM-ware', 'hypervisor_version': 1001, 'cpu_info': 'Schmintel i786', } for i in range(3): host = 'host%s' % i compute_node = objects.ComputeNode(ctxt, host=host, **values) compute_node.create() cell_transport_url = "fake://guest:devstack@127.0.0.1:9999/" self.commands.map_cell_and_hosts(cell_transport_url, name='ssd', verbose=True) cell_mapping_uuid = sys.stdout.getvalue().strip() # Verify the cell mapping cell_mapping = 
objects.CellMapping.get_by_uuid(ctxt, cell_mapping_uuid) self.assertEqual('ssd', cell_mapping.name) self.assertEqual(cell_transport_url, cell_mapping.transport_url) # Verify the host mappings for i in range(3): host = 'host%s' % i host_mapping = objects.HostMapping.get_by_host(ctxt, host) self.assertEqual(cell_mapping.uuid, host_mapping.cell_mapping.uuid) def test_map_cell_and_hosts_duplicate(self): # Create a cell mapping and hosts and check that nothing new is created ctxt = context.RequestContext() cell_mapping_uuid = uuidutils.generate_uuid() cell_mapping = objects.CellMapping( ctxt, uuid=cell_mapping_uuid, name='fake', transport_url='fake://', database_connection='fake://') cell_mapping.create() # Create compute nodes that will map to the cell values = { 'vcpus': 4, 'memory_mb': 4096, 'local_gb': 1024, 'vcpus_used': 2, 'memory_mb_used': 2048, 'local_gb_used': 512, 'hypervisor_type': 'Hyper-Dan-VM-ware', 'hypervisor_version': 1001, 'cpu_info': 'Schmintel i786', } for i in range(3): host = 'host%s' % i compute_node = objects.ComputeNode(ctxt, host=host, **values) compute_node.create() host_mapping = objects.HostMapping( ctxt, host=host, cell_mapping=cell_mapping) host_mapping.create() cell_transport_url = "fake://guest:devstack@127.0.0.1:9999/" retval = self.commands.map_cell_and_hosts(cell_transport_url, name='ssd', verbose=True) self.assertEqual(0, retval) output = sys.stdout.getvalue().strip() expected = '' for i in range(3): expected += ('Host host%s is already mapped to cell %s\n' % (i, cell_mapping_uuid)) expected += 'All hosts are already mapped to cell(s), exiting.' 
self.assertEqual(expected, output) def test_map_cell_and_hosts_partial_update(self): # Create a cell mapping and partial hosts and check that # missing HostMappings are created ctxt = context.RequestContext() cell_mapping_uuid = uuidutils.generate_uuid() cell_mapping = objects.CellMapping( ctxt, uuid=cell_mapping_uuid, name='fake', transport_url='fake://', database_connection='fake://') cell_mapping.create() # Create compute nodes that will map to the cell values = { 'vcpus': 4, 'memory_mb': 4096, 'local_gb': 1024, 'vcpus_used': 2, 'memory_mb_used': 2048, 'local_gb_used': 512, 'hypervisor_type': 'Hyper-Dan-VM-ware', 'hypervisor_version': 1001, 'cpu_info': 'Schmintel i786', } for i in range(3): host = 'host%s' % i compute_node = objects.ComputeNode(ctxt, host=host, **values) compute_node.create() # Only create 2 existing HostMappings out of 3 for i in range(2): host = 'host%s' % i host_mapping = objects.HostMapping( ctxt, host=host, cell_mapping=cell_mapping) host_mapping.create() cell_transport_url = "fake://guest:devstack@127.0.0.1:9999/" self.commands.map_cell_and_hosts(cell_transport_url, name='ssd', verbose=True) # Verify the HostMapping for the last host was created host_mapping = objects.HostMapping.get_by_host(ctxt, 'host2') self.assertEqual(cell_mapping.uuid, host_mapping.cell_mapping.uuid) # Verify the output output = sys.stdout.getvalue().strip() expected = '' for i in range(2): expected += ('Host host%s is already mapped to cell %s\n' % (i, cell_mapping_uuid)) # The expected CellMapping UUID for the last host should be the same expected += cell_mapping.uuid self.assertEqual(expected, output) def test_map_cell_and_hosts_no_hosts_found(self): cell_transport_url = "fake://guest:devstack@127.0.0.1:9999/" retval = self.commands.map_cell_and_hosts(cell_transport_url, name='ssd', verbose=True) self.assertEqual(0, retval) output = sys.stdout.getvalue().strip() expected = 'No hosts found to map to cell, exiting.' 
self.assertEqual(expected, output) nova-13.0.0/nova/tests/unit/fake_server_actions.py0000664000567000056710000001072212701407773023363 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from nova import db FAKE_UUID = 'b48316c5-71e8-45e4-9884-6c78055b9b13' FAKE_REQUEST_ID1 = 'req-3293a3f1-b44c-4609-b8d2-d81b105636b8' FAKE_REQUEST_ID2 = 'req-25517360-b757-47d3-be45-0e8d2a01b36a' FAKE_ACTION_ID1 = 123 FAKE_ACTION_ID2 = 456 FAKE_ACTIONS = { FAKE_UUID: { FAKE_REQUEST_ID1: {'id': FAKE_ACTION_ID1, 'action': 'reboot', 'instance_uuid': FAKE_UUID, 'request_id': FAKE_REQUEST_ID1, 'project_id': '147', 'user_id': '789', 'start_time': datetime.datetime( 2012, 12, 5, 0, 0, 0, 0), 'finish_time': None, 'message': '', 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, }, FAKE_REQUEST_ID2: {'id': FAKE_ACTION_ID2, 'action': 'resize', 'instance_uuid': FAKE_UUID, 'request_id': FAKE_REQUEST_ID2, 'user_id': '789', 'project_id': '842', 'start_time': datetime.datetime( 2012, 12, 5, 1, 0, 0, 0), 'finish_time': None, 'message': '', 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, } } } FAKE_EVENTS = { FAKE_ACTION_ID1: [{'id': 1, 'action_id': FAKE_ACTION_ID1, 'event': 'schedule', 'start_time': datetime.datetime( 2012, 12, 5, 1, 0, 2, 0), 'finish_time': datetime.datetime( 2012, 12, 5, 1, 2, 0, 0), 'result': 'Success', 'traceback': '', 'created_at': None, 'updated_at': 
None, 'deleted_at': None, 'deleted': False, }, {'id': 2, 'action_id': FAKE_ACTION_ID1, 'event': 'compute_create', 'start_time': datetime.datetime( 2012, 12, 5, 1, 3, 0, 0), 'finish_time': datetime.datetime( 2012, 12, 5, 1, 4, 0, 0), 'result': 'Success', 'traceback': '', 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, } ], FAKE_ACTION_ID2: [{'id': 3, 'action_id': FAKE_ACTION_ID2, 'event': 'schedule', 'start_time': datetime.datetime( 2012, 12, 5, 3, 0, 0, 0), 'finish_time': datetime.datetime( 2012, 12, 5, 3, 2, 0, 0), 'result': 'Error', 'traceback': '', 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, } ] } def fake_action_event_start(*args): return FAKE_EVENTS[FAKE_ACTION_ID1][0] def fake_action_event_finish(*args): return FAKE_EVENTS[FAKE_ACTION_ID1][0] def stub_out_action_events(stubs): stubs.Set(db, 'action_event_start', fake_action_event_start) stubs.Set(db, 'action_event_finish', fake_action_event_finish) nova-13.0.0/nova/tests/unit/fake_flavor.py0000664000567000056710000000325012701407773021624 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova import objects from nova.objects import fields def fake_db_flavor(**updates): db_flavor = { 'id': 1, 'name': 'fake_flavor', 'memory_mb': 1024, 'vcpus': 1, 'root_gb': 100, 'ephemeral_gb': 0, 'flavorid': 'abc', 'swap': 0, 'disabled': False, 'is_public': True, 'extra_specs': {}, 'projects': [] } for name, field in objects.Flavor.fields.items(): if name in db_flavor: continue if field.nullable: db_flavor[name] = None elif field.default != fields.UnspecifiedDefault: db_flavor[name] = field.default else: raise Exception('fake_db_flavor needs help with %s' % name) if updates: db_flavor.update(updates) return db_flavor def fake_flavor_obj(context, **updates): expected_attrs = updates.pop('expected_attrs', None) return objects.Flavor._from_db_object(context, objects.Flavor(), fake_db_flavor(**updates), expected_attrs=expected_attrs) nova-13.0.0/nova/tests/unit/image_fixtures.py0000664000567000056710000000572712701407773022373 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime # nova.image.glance._translate_from_glance() returns datetime # objects, not strings. NOW_DATE = datetime.datetime(2010, 10, 11, 10, 30, 22) def get_image_fixtures(): """Returns a set of image fixture dicts for use in unit tests. Returns a set of dicts representing images/snapshots of varying statuses that would be returned from a call to `glanceclient.client.Client.images.list`. 
The IDs of the images returned start at 123 and go to 131, with the following brief summary of image attributes: | ID Type Status Notes | ---------------------------------------------------------- | 123 Public image active | 124 Snapshot queued | 125 Snapshot saving | 126 Snapshot active | 127 Snapshot killed | 128 Snapshot deleted | 129 Snapshot pending_delete | 130 Public image active Has no name """ image_id = 123 fixtures = [] def add_fixture(**kwargs): kwargs.update(created_at=NOW_DATE, updated_at=NOW_DATE) fixtures.append(kwargs) # Public image add_fixture(id=str(image_id), name='public image', is_public=True, status='active', properties={'key1': 'value1'}, min_ram="128", min_disk="10", size='25165824') image_id += 1 # Snapshot for User 1 uuid = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74' snapshot_properties = {'instance_uuid': uuid, 'user_id': 'fake'} for status in ('queued', 'saving', 'active', 'killed', 'deleted', 'pending_delete'): deleted = False if status != 'deleted' else True deleted_at = NOW_DATE if deleted else None add_fixture(id=str(image_id), name='%s snapshot' % status, is_public=False, status=status, properties=snapshot_properties, size='25165824', deleted=deleted, deleted_at=deleted_at) image_id += 1 # Image without a name add_fixture(id=str(image_id), is_public=True, status='active', properties={}) # Image for permission tests image_id += 1 add_fixture(id=str(image_id), is_public=True, status='active', properties={}, owner='authorized_fake') return fixtures nova-13.0.0/nova/tests/unit/test_policy.py0000664000567000056710000007070612701407773021715 0ustar jenkinsjenkins00000000000000# Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test of Policy Engine For Nova.""" import os.path from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils import requests_mock from nova import context from nova import exception from nova import policy from nova import test from nova.tests.unit import fake_policy from nova.tests.unit import policy_fixture from nova import utils class PolicyFileTestCase(test.NoDBTestCase): def setUp(self): super(PolicyFileTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.target = {} def test_modified_policy_reloads(self): with utils.tempdir() as tmpdir: tmpfilename = os.path.join(tmpdir, 'policy') self.flags(policy_file=tmpfilename, group='oslo_policy') # NOTE(uni): context construction invokes policy check to determin # is_admin or not. As a side-effect, policy reset is needed here # to flush existing policy cache. policy.reset() action = "example:test" with open(tmpfilename, "w") as policyfile: policyfile.write('{"example:test": ""}') policy.enforce(self.context, action, self.target) with open(tmpfilename, "w") as policyfile: policyfile.write('{"example:test": "!"}') policy._ENFORCER.load_rules(True) self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) class PolicyTestCase(test.NoDBTestCase): def setUp(self): super(PolicyTestCase, self).setUp() rules = { "true": '@', "example:allowed": '@', "example:denied": "!", "example:get_http": "http://www.example.com", "example:my_file": "role:compute_admin or " "project_id:%(project_id)s", "example:early_and_fail": "! 
and @", "example:early_or_success": "@ or !", "example:lowercase_admin": "role:admin or role:sysadmin", "example:uppercase_admin": "role:ADMIN or role:sysadmin", } policy.reset() policy.init() policy.set_rules(oslo_policy.Rules.from_dict(rules)) self.context = context.RequestContext('fake', 'fake', roles=['member']) self.target = {} def test_enforce_nonexistent_action_throws(self): action = "example:noexist" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_bad_action_throws(self): action = "example:denied" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_bad_action_noraise(self): action = "example:denied" result = policy.enforce(self.context, action, self.target, False) self.assertFalse(result) def test_enforce_good_action(self): action = "example:allowed" result = policy.enforce(self.context, action, self.target) self.assertTrue(result) @requests_mock.mock() def test_enforce_http_true(self, req_mock): req_mock.post('http://www.example.com/', text='True') action = "example:get_http" target = {} result = policy.enforce(self.context, action, target) self.assertTrue(result) @requests_mock.mock() def test_enforce_http_false(self, req_mock): req_mock.post('http://www.example.com/', text='False') action = "example:get_http" target = {} self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, target) def test_templatized_enforcement(self): target_mine = {'project_id': 'fake'} target_not_mine = {'project_id': 'another'} action = "example:my_file" policy.enforce(self.context, action, target_mine) self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, target_not_mine) def test_early_AND_enforcement(self): action = "example:early_and_fail" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_early_OR_enforcement(self): action 
= "example:early_or_success" policy.enforce(self.context, action, self.target) def test_ignore_case_role_check(self): lowercase_action = "example:lowercase_admin" uppercase_action = "example:uppercase_admin" # NOTE(dprince) we mix case in the Admin role here to ensure # case is ignored admin_context = context.RequestContext('admin', 'fake', roles=['AdMiN']) policy.enforce(admin_context, lowercase_action, self.target) policy.enforce(admin_context, uppercase_action, self.target) class DefaultPolicyTestCase(test.NoDBTestCase): def setUp(self): super(DefaultPolicyTestCase, self).setUp() self.rules = { "default": '', "example:exist": "!", } self._set_rules('default') self.context = context.RequestContext('fake', 'fake') def _set_rules(self, default_rule): policy.reset() rules = oslo_policy.Rules.from_dict(self.rules) policy.init(rules=rules, default_rule=default_rule, use_conf=False) def test_policy_called(self): self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, "example:exist", {}) def test_not_found_policy_calls_default(self): policy.enforce(self.context, "example:noexist", {}) def test_default_not_found(self): self._set_rules("default_noexist") self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, "example:noexist", {}) class IsAdminCheckTestCase(test.NoDBTestCase): def setUp(self): super(IsAdminCheckTestCase, self).setUp() policy.init() def test_init_true(self): check = policy.IsAdminCheck('is_admin', 'True') self.assertEqual(check.kind, 'is_admin') self.assertEqual(check.match, 'True') self.assertTrue(check.expected) def test_init_false(self): check = policy.IsAdminCheck('is_admin', 'nottrue') self.assertEqual(check.kind, 'is_admin') self.assertEqual(check.match, 'False') self.assertFalse(check.expected) def test_call_true(self): check = policy.IsAdminCheck('is_admin', 'True') self.assertEqual(check('target', dict(is_admin=True), policy._ENFORCER), True) self.assertEqual(check('target', dict(is_admin=False), 
policy._ENFORCER), False) def test_call_false(self): check = policy.IsAdminCheck('is_admin', 'False') self.assertEqual(check('target', dict(is_admin=True), policy._ENFORCER), False) self.assertEqual(check('target', dict(is_admin=False), policy._ENFORCER), True) class AdminRolePolicyTestCase(test.NoDBTestCase): def setUp(self): super(AdminRolePolicyTestCase, self).setUp() self.policy = self.useFixture(policy_fixture.RoleBasedPolicyFixture()) self.context = context.RequestContext('fake', 'fake', roles=['member']) self.actions = policy.get_rules().keys() self.target = {} def test_enforce_admin_actions_with_nonadmin_context_throws(self): """Check if non-admin context passed to admin actions throws Policy not authorized exception """ for action in self.actions: self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) class RealRolePolicyTestCase(test.NoDBTestCase): def setUp(self): super(RealRolePolicyTestCase, self).setUp() self.policy = self.useFixture(policy_fixture.RealPolicyFixture()) self.non_admin_context = context.RequestContext('fake', 'fake', roles=['member']) self.admin_context = context.RequestContext('fake', 'fake', True, roles=['member']) self.target = {} self.fake_policy = jsonutils.loads(fake_policy.policy_data) self.admin_only_rules = ( "cells_scheduler_filter:TargetCellFilter", "compute:unlock_override", "compute:get_all_tenants", "compute:create:forced_host", "compute_extension:accounts", "compute_extension:admin_actions", "compute_extension:admin_actions:resetNetwork", "compute_extension:admin_actions:injectNetworkInfo", "compute_extension:admin_actions:migrateLive", "compute_extension:admin_actions:resetState", "compute_extension:admin_actions:migrate", "compute_extension:aggregates", "compute_extension:agents", "compute_extension:baremetal_nodes", "compute_extension:cells", "compute_extension:cells:create", "compute_extension:cells:delete", "compute_extension:cells:update", 
"compute_extension:cells:sync_instances", "compute_extension:cloudpipe", "compute_extension:cloudpipe_update", "compute_extension:evacuate", "compute_extension:extended_server_attributes", "compute_extension:fixed_ips", "compute_extension:flavor_access:addTenantAccess", "compute_extension:flavor_access:removeTenantAccess", "compute_extension:flavorextraspecs:create", "compute_extension:flavorextraspecs:update", "compute_extension:flavorextraspecs:delete", "compute_extension:flavormanage", "compute_extension:floating_ips_bulk", "compute_extension:fping:all_tenants", "compute_extension:hosts", "compute_extension:hypervisors", "compute_extension:instance_actions:events", "compute_extension:instance_usage_audit_log", "compute_extension:networks", "compute_extension:networks_associate", "compute_extension:quotas:update", "compute_extension:quotas:delete", "compute_extension:security_group_default_rules", "compute_extension:server_diagnostics", "compute_extension:services", "compute_extension:shelveOffload", "compute_extension:simple_tenant_usage:list", "compute_extension:users", "compute_extension:availability_zone:detail", "compute_extension:used_limits_for_admin", "compute_extension:migrations:index", "compute_extension:os-assisted-volume-snapshots:create", "compute_extension:os-assisted-volume-snapshots:delete", "compute_extension:console_auth_tokens", "compute_extension:os-server-external-events:create", "os_compute_api:servers:create:forced_host", "os_compute_api:servers:detail:get_all_tenants", "os_compute_api:servers:index:get_all_tenants", "os_compute_api:servers:show:host_status", "os_compute_api:servers:migrations:force_complete", "os_compute_api:servers:migrations:delete", "network:attach_external_network", "os_compute_api:os-admin-actions", "os_compute_api:os-admin-actions:reset_network", "os_compute_api:os-admin-actions:inject_network_info", "os_compute_api:os-admin-actions:reset_state", "os_compute_api:os-aggregates:index", 
"os_compute_api:os-aggregates:create", "os_compute_api:os-aggregates:show", "os_compute_api:os-aggregates:update", "os_compute_api:os-aggregates:delete", "os_compute_api:os-aggregates:add_host", "os_compute_api:os-aggregates:remove_host", "os_compute_api:os-aggregates:set_metadata", "os_compute_api:os-agents", "os_compute_api:os-baremetal-nodes", "os_compute_api:os-cells", "os_compute_api:os-cells:create", "os_compute_api:os-cells:delete", "os_compute_api:os-cells:update", "os_compute_api:os-cells:sync_instances", "os_compute_api:os-cloudpipe", "os_compute_api:os-evacuate", "os_compute_api:os-extended-server-attributes", "os_compute_api:os-fixed-ips", "os_compute_api:os-flavor-access:remove_tenant_access", "os_compute_api:os-flavor-access:add_tenant_access", "os_compute_api:os-flavor-extra-specs:create", "os_compute_api:os-flavor-extra-specs:update", "os_compute_api:os-flavor-extra-specs:delete", "os_compute_api:os-flavor-manage", "os_compute_api:os-floating-ips-bulk", "os_compute_api:os-floating-ip-dns:domain:delete", "os_compute_api:os-floating-ip-dns:domain:update", "os_compute_api:os-fping:all_tenants", "os_compute_api:os-hosts", "os_compute_api:os-hypervisors", "os_compute_api:os-instance-actions:events", "os_compute_api:os-instance-usage-audit-log", "os_compute_api:os-lock-server:unlock:unlock_override", "os_compute_api:os-migrate-server:migrate", "os_compute_api:os-migrate-server:migrate_live", "os_compute_api:os-networks", "os_compute_api:os-networks-associate", "os_compute_api:os-pci:index", "os_compute_api:os-pci:detail", "os_compute_api:os-pci:show", "os_compute_api:os-quota-sets:update", "os_compute_api:os-quota-sets:delete", "os_compute_api:os-quota-sets:detail", "os_compute_api:os-security-group-default-rules", "os_compute_api:os-server-diagnostics", "os_compute_api:os-services", "os_compute_api:os-shelve:shelve_offload", "os_compute_api:os-simple-tenant-usage:list", "os_compute_api:os-availability-zone:detail", "os_compute_api:os-used-limits", 
"os_compute_api:os-migrations:index", "os_compute_api:os-assisted-volume-snapshots:create", "os_compute_api:os-assisted-volume-snapshots:delete", "os_compute_api:os-console-auth-tokens", "os_compute_api:os-quota-class-sets:update", "os_compute_api:os-server-external-events:create", "os_compute_api:servers:migrations:index", "os_compute_api:servers:migrations:show", ) self.admin_or_owner_rules = ( "default", "compute:start", "compute:stop", "compute:delete", "compute:soft_delete", "compute:force_delete", "compute:lock", "compute:unlock", "compute_extension:admin_actions:pause", "compute_extension:admin_actions:unpause", "compute_extension:admin_actions:suspend", "compute_extension:admin_actions:resume", "compute_extension:admin_actions:lock", "compute_extension:admin_actions:unlock", "compute_extension:admin_actions:createBackup", "compute_extension:simple_tenant_usage:show", "os_compute_api:servers:start", "os_compute_api:servers:stop", "os_compute_api:servers:trigger_crash_dump", "os_compute_api:os-create-backup", "os_compute_api:ips:index", "os_compute_api:ips:show", "os_compute_api:os-keypairs:create", "os_compute_api:os-keypairs:delete", "os_compute_api:os-keypairs:index", "os_compute_api:os-keypairs:show", "os_compute_api:os-lock-server:lock", "os_compute_api:os-lock-server:unlock", "os_compute_api:os-pause-server:pause", "os_compute_api:os-pause-server:unpause", "os_compute_api:os-quota-sets:show", "os_compute_api:server-metadata:index", "os_compute_api:server-metadata:show", "os_compute_api:server-metadata:delete", "os_compute_api:server-metadata:create", "os_compute_api:server-metadata:update", "os_compute_api:server-metadata:update_all", "os_compute_api:os-simple-tenant-usage:show", "os_compute_api:os-suspend-server:suspend", "os_compute_api:os-suspend-server:resume", "os_compute_api:os-tenant-networks", "compute:create", "compute:create:attach_network", "compute:create:attach_volume", "compute:get_all_instance_metadata", 
"compute:get_all_instance_system_metadata", "compute:get_console_output", "compute:get_diagnostics", "compute:delete_instance_metadata", "compute:get", "compute:get_all", "compute:shelve", "compute:shelve_offload", "compute:snapshot_volume_backed", "compute:unshelve", "compute:resize", "compute:confirm_resize", "compute:revert_resize", "compute:rebuild", "compute:reboot", "compute:volume_snapshot_create", "compute:volume_snapshot_delete", "compute:add_fixed_ip", "compute:attach_interface", "compute:detach_interface", "compute:attach_volume", "compute:detach_volume", "compute:backup", "compute:get_instance_diagnostics", "compute:get_instance_metadata", "compute:get_mks_console", "compute:get_rdp_console", "compute:get_serial_console", "compute:get_spice_console", "compute:get_vnc_console", "compute:inject_network_info", "compute:pause", "compute:remove_fixed_ip", "compute:rescue", "compute:reset_network", "compute:restore", "compute:resume", "compute:security_groups:add_to_instance", "compute:security_groups:remove_from_instance", "compute:set_admin_password", "compute:snapshot", "compute:suspend", "compute:swap_volume", "compute:unpause", "compute:unrescue", "compute:update", "compute:update_instance_metadata", "compute_extension:config_drive", "compute_extension:os-tenant-networks", "network:get_vif_by_mac_address", "os_compute_api:extensions", "os_compute_api:os-config-drive", "os_compute_api:servers:confirm_resize", "os_compute_api:servers:create", "os_compute_api:servers:create:attach_network", "os_compute_api:servers:create:attach_volume", "os_compute_api:servers:create_image", "os_compute_api:servers:delete", "os_compute_api:servers:detail", "os_compute_api:servers:index", "os_compute_api:servers:reboot", "os_compute_api:servers:rebuild", "os_compute_api:servers:resize", "os_compute_api:servers:revert_resize", "os_compute_api:servers:show", "os_compute_api:servers:update", "compute_extension:attach_interfaces", "compute_extension:certificates", 
"compute_extension:console_output", "compute_extension:consoles", "compute_extension:createserverext", "compute_extension:deferred_delete", "compute_extension:disk_config", "compute_extension:extended_status", "compute_extension:extended_availability_zone", "compute_extension:extended_ips", "compute_extension:extended_ips_mac", "compute_extension:extended_vif_net", "compute_extension:extended_volumes", "compute_extension:flavor_access", "compute_extension:flavor_disabled", "compute_extension:flavor_rxtx", "compute_extension:flavor_swap", "compute_extension:flavorextradata", "compute_extension:flavorextraspecs:index", "compute_extension:flavorextraspecs:show", "compute_extension:floating_ip_dns", "compute_extension:floating_ip_pools", "compute_extension:floating_ips", "compute_extension:fping", "compute_extension:image_size", "compute_extension:instance_actions", "compute_extension:keypairs", "compute_extension:keypairs:index", "compute_extension:keypairs:show", "compute_extension:keypairs:create", "compute_extension:keypairs:delete", "compute_extension:multinic", "compute_extension:networks:view", "compute_extension:quotas:show", "compute_extension:quota_classes", "compute_extension:rescue", "compute_extension:security_groups", "compute_extension:server_groups", "compute_extension:server_password", "compute_extension:server_usage", "compute_extension:shelve", "compute_extension:unshelve", "compute_extension:virtual_interfaces", "compute_extension:virtual_storage_arrays", "compute_extension:volumes", "compute_extension:volume_attachments:index", "compute_extension:volume_attachments:show", "compute_extension:volume_attachments:create", "compute_extension:volume_attachments:update", "compute_extension:volume_attachments:delete", "compute_extension:volumetypes", "compute_extension:availability_zone:list", "network:get_all", "network:get", "network:create", "network:delete", "network:associate", "network:disassociate", "network:get_vifs_by_instance", 
"network:allocate_for_instance", "network:deallocate_for_instance", "network:validate_networks", "network:get_instance_uuids_by_ip_filter", "network:get_instance_id_by_floating_address", "network:setup_networks_on_host", "network:get_backdoor_port", "network:get_floating_ip", "network:get_floating_ip_pools", "network:get_floating_ip_by_address", "network:get_floating_ips_by_project", "network:get_floating_ips_by_fixed_address", "network:allocate_floating_ip", "network:associate_floating_ip", "network:disassociate_floating_ip", "network:release_floating_ip", "network:migrate_instance_start", "network:migrate_instance_finish", "network:get_fixed_ip", "network:get_fixed_ip_by_address", "network:add_fixed_ip_to_instance", "network:remove_fixed_ip_from_instance", "network:add_network_to_project", "network:get_instance_nw_info", "network:get_dns_domains", "network:add_dns_entry", "network:modify_dns_entry", "network:delete_dns_entry", "network:get_dns_entries_by_address", "network:get_dns_entries_by_name", "network:create_private_dns_domain", "network:create_public_dns_domain", "network:delete_dns_domain", "os_compute_api:servers:create_image:allow_volume_backed", "os_compute_api:os-access-ips", "os_compute_api:os-admin-password", "os_compute_api:os-attach-interfaces", "os_compute_api:os-certificates:create", "os_compute_api:os-certificates:show", "os_compute_api:os-consoles:create", "os_compute_api:os-consoles:delete", "os_compute_api:os-consoles:index", "os_compute_api:os-consoles:show", "os_compute_api:os-console-output", "os_compute_api:os-remote-consoles", "os_compute_api:os-deferred-delete", "os_compute_api:os-disk-config", "os_compute_api:os-extended-status", "os_compute_api:os-extended-availability-zone", "os_compute_api:os-extended-volumes", "os_compute_api:os-flavor-access", "os_compute_api:os-flavor-rxtx", "os_compute_api:flavors", "os_compute_api:os-flavor-extra-specs:index", "os_compute_api:os-flavor-extra-specs:show", "os_compute_api:os-floating-ip-dns", 
"os_compute_api:os-floating-ip-pools", "os_compute_api:os-floating-ips", "os_compute_api:os-fping", "os_compute_api:image-size", "os_compute_api:os-instance-actions", "os_compute_api:os-keypairs", "os_compute_api:limits", "os_compute_api:os-multinic", "os_compute_api:os-networks:view", "os_compute_api:os-pci:pci_servers", "os_compute_api:os-rescue", "os_compute_api:os-security-groups", "os_compute_api:os-server-password", "os_compute_api:os-server-usage", "os_compute_api:os-server-groups", "os_compute_api:os-shelve:shelve", "os_compute_api:os-shelve:unshelve", "os_compute_api:os-virtual-interfaces", "os_compute_api:os-volumes", "os_compute_api:os-volumes-attachments:index", "os_compute_api:os-volumes-attachments:show", "os_compute_api:os-volumes-attachments:create", "os_compute_api:os-volumes-attachments:update", "os_compute_api:os-volumes-attachments:delete", "os_compute_api:os-availability-zone:list", ) self.non_admin_only_rules = ( "compute_extension:hide_server_addresses", "os_compute_api:os-hide-server-addresses") self.allow_all_rules = ( "os_compute_api:os-quota-sets:defaults", "os_compute_api:extensions:discoverable", "os_compute_api:os-access-ips:discoverable", "os_compute_api:os-admin-actions:discoverable", "os_compute_api:os-admin-password:discoverable", "os_compute_api:os-aggregates:discoverable", "os_compute_api:os-agents:discoverable", "os_compute_api:os-attach-interfaces:discoverable", "os_compute_api:os-baremetal-nodes:discoverable", "os_compute_api:os-block-device-mapping-v1:discoverable", "os_compute_api:os-cells:discoverable", "os_compute_api:os-certificates:discoverable", "os_compute_api:os-cloudpipe:discoverable", "os_compute_api:os-consoles:discoverable", "os_compute_api:os-console-output:discoverable", "os_compute_api:os-remote-consoles:discoverable", "os_compute_api:os-create-backup:discoverable", "os_compute_api:os-deferred-delete:discoverable", "os_compute_api:os-disk-config:discoverable", "os_compute_api:os-evacuate:discoverable", 
"os_compute_api:os-extended-server-attributes:discoverable", "os_compute_api:os-extended-status:discoverable", "os_compute_api:os-extended-availability-zone:discoverable", "os_compute_api:extension_info:discoverable", "os_compute_api:os-extended-volumes:discoverable", "os_compute_api:os-fixed-ips:discoverable", "os_compute_api:os-flavor-access:discoverable", "os_compute_api:os-flavor-rxtx:discoverable", "os_compute_api:flavors:discoverable", "os_compute_api:os-flavor-extra-specs:discoverable", "os_compute_api:os-flavor-manage:discoverable", "os_compute_api:os-floating-ip-dns:discoverable", "os_compute_api:os-floating-ip-pools:discoverable", "os_compute_api:os-floating-ips:discoverable", "os_compute_api:os-floating-ips-bulk:discoverable", "os_compute_api:os-fping:discoverable", "os_compute_api:os-hide-server-addresses:discoverable", "os_compute_api:os-hosts:discoverable", "os_compute_api:os-hypervisors:discoverable", "os_compute_api:images:discoverable", "os_compute_api:image-size:discoverable", "os_compute_api:os-instance-actions:discoverable", "os_compute_api:os-instance-usage-audit-log:discoverable", "os_compute_api:ips:discoverable", "os_compute_api:os-keypairs:discoverable", "os_compute_api:limits:discoverable", "os_compute_api:os-lock-server:discoverable", "os_compute_api:os-migrate-server:discoverable", "os_compute_api:os-multinic:discoverable", "os_compute_api:os-networks:discoverable", "os_compute_api:os-networks-associate:discoverable", "os_compute_api:os-pause-server:discoverable", "os_compute_api:os-pci:discoverable", "os_compute_api:os-personality:discoverable", "os_compute_api:os-preserve-ephemeral-rebuild:discoverable", "os_compute_api:os-quota-sets:discoverable", "os_compute_api:os-quota-class-sets:discoverable", "os_compute_api:os-rescue:discoverable", "os_compute_api:os-scheduler-hints:discoverable", "os_compute_api:os-security-group-default-rules:discoverable", "os_compute_api:os-security-groups:discoverable", 
"os_compute_api:os-server-diagnostics:discoverable", "os_compute_api:os-server-password:discoverable", "os_compute_api:os-server-usage:discoverable", "os_compute_api:os-server-groups:discoverable", "os_compute_api:os-services:discoverable", "os_compute_api:server-metadata:discoverable", "os_compute_api:servers:discoverable", "os_compute_api:os-shelve:shelve:discoverable", "os_compute_api:os-simple-tenant-usage:discoverable", "os_compute_api:os-suspend-server:discoverable", "os_compute_api:os-tenant-networks:discoverable", "os_compute_api:os-user-data:discoverable", "os_compute_api:os-virtual-interfaces:discoverable", "os_compute_api:os-volumes:discoverable", "os_compute_api:os-volumes-attachments:discoverable", "os_compute_api:os-availability-zone:discoverable", "os_compute_api:os-used-limits:discoverable", "os_compute_api:os-migrations:discoverable", "os_compute_api:os-assisted-volume-snapshots:discoverable", ) def test_all_rules_in_sample_file(self): special_rules = ["context_is_admin", "admin_or_owner", "default"] for (name, rule) in self.fake_policy.items(): if name in special_rules: continue self.assertIn(name, policy.get_rules()) def test_admin_only_rules(self): for rule in self.admin_only_rules: self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.non_admin_context, rule, self.target) policy.enforce(self.admin_context, rule, self.target) def test_non_admin_only_rules(self): for rule in self.non_admin_only_rules: self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.admin_context, rule, self.target) policy.enforce(self.non_admin_context, rule, self.target) def test_admin_or_owner_rules(self): for rule in self.admin_or_owner_rules: self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.non_admin_context, rule, self.target) policy.enforce(self.non_admin_context, rule, {'project_id': 'fake', 'user_id': 'fake'}) def test_no_empty_rules(self): rules = policy.get_rules() for rule in rules: self.assertNotEqual('', 
str(rule), '%s should not be empty, use "@" instead if the policy ' 'should allow everything' % rule) def test_allow_all_rules(self): for rule in self.allow_all_rules: policy.enforce(self.non_admin_context, rule, self.target) def test_rule_missing(self): rules = policy.get_rules() # eliqiao os_compute_api:os-quota-class-sets:show requires # admin=True or quota_class match, this rule won't belong to # admin_only, non_admin, admin_or_user, empty_rule special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin', 'os_compute_api:os-quota-class-sets:show') result = set(rules.keys()) - set(self.admin_only_rules + self.admin_or_owner_rules + self.non_admin_only_rules + self.allow_all_rules + special_rules) self.assertEqual(set([]), result) nova-13.0.0/nova/tests/unit/cert/0000775000567000056710000000000012701410205017710 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/cert/__init__.py0000664000567000056710000000000012701407773022027 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/cert/test_rpcapi.py0000664000567000056710000000555612701407773022632 0ustar jenkinsjenkins00000000000000# Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit Tests for nova.cert.rpcapi """ import mock from oslo_config import cfg from nova.cert import rpcapi as cert_rpcapi from nova import context from nova import test CONF = cfg.CONF class CertRpcAPITestCase(test.NoDBTestCase): def _test_cert_api(self, method, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = cert_rpcapi.CertAPI() self.assertIsNotNone(rpcapi.client) self.assertEqual(CONF.cert_topic, rpcapi.client.target.topic) orig_prepare = rpcapi.client.prepare with test.nested( mock.patch.object(rpcapi.client, 'call'), mock.patch.object(rpcapi.client, 'prepare'), mock.patch.object(rpcapi.client, 'can_send_version'), ) as ( rpc_mock, prepare_mock, csv_mock ): prepare_mock.return_value = rpcapi.client rpc_mock.return_value = 'foo' csv_mock.side_effect = ( lambda v: orig_prepare().can_send_version()) retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(rpc_mock.return_value, retval) prepare_mock.assert_called_once_with() rpc_mock.assert_called_once_with(ctxt, method, **kwargs) def test_revoke_certs_by_user(self): self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id') def test_revoke_certs_by_project(self): self._test_cert_api('revoke_certs_by_project', project_id='fake_project_id') def test_revoke_certs_by_user_and_project(self): self._test_cert_api('revoke_certs_by_user_and_project', user_id='fake_user_id', project_id='fake_project_id') def test_generate_x509_cert(self): self._test_cert_api('generate_x509_cert', user_id='fake_user_id', project_id='fake_project_id') def test_fetch_ca(self): self._test_cert_api('fetch_ca', project_id='fake_project_id') def test_fetch_crl(self): self._test_cert_api('fetch_crl', project_id='fake_project_id') def test_decrypt_text(self): self._test_cert_api('decrypt_text', project_id='fake_project_id', text='blah') nova-13.0.0/nova/tests/unit/test_signature_utils.py0000664000567000056710000003726612701407773023643 0ustar jenkinsjenkins00000000000000# Copyright (c) The Johns 
# Copyright (c) The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import datetime

import mock

from castellan.common.exception import KeyManagerError
import cryptography.exceptions as crypto_exceptions
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import dsa
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
from oslo_utils import timeutils

from nova import exception
from nova import signature_utils
from nova import test

TEST_RSA_PRIVATE_KEY = rsa.generate_private_key(public_exponent=3,
                                                key_size=1024,
                                                backend=default_backend())

# secp521r1 is assumed to be available on all supported platforms
TEST_ECC_PRIVATE_KEY = ec.generate_private_key(ec.SECP521R1(),
                                               default_backend())

TEST_DSA_PRIVATE_KEY = dsa.generate_private_key(key_size=3072,
                                                backend=default_backend())

# Certificate reference shared by the verifier tests below.
_SIG_CERT_UUID = 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693'


class FakeKeyManager(object):
    """Minimal castellan key-manager double serving two canned certs."""

    def __init__(self):
        self.certs = {
            'invalid_format_cert': FakeCastellanCertificate('A' * 256,
                                                            'BLAH'),
            'valid_format_cert': FakeCastellanCertificate('A' * 256,
                                                          'X.509'),
        }

    def get(self, context, cert_uuid):
        """Return the canned cert for cert_uuid or raise KeyManagerError."""
        try:
            return self.certs[cert_uuid]
        except KeyError:
            raise KeyManagerError("No matching certificate found.")


class FakeCastellanCertificate(object):
    """Stand-in for a castellan certificate with a fixed payload/format."""

    def __init__(self, data, cert_format):
        self.data = data
        self.cert_format = cert_format

    @property
    def format(self):
        return self.cert_format

    def get_encoded(self):
        return self.data


class FakeCryptoCertificate(object):
    """Stand-in for a cryptography x509 cert with a settable validity span."""

    def __init__(self, pub_key=TEST_RSA_PRIVATE_KEY.public_key(),
                 not_valid_before=(timeutils.utcnow() -
                                   datetime.timedelta(hours=1)),
                 not_valid_after=(timeutils.utcnow() +
                                  datetime.timedelta(hours=2))):
        self.pub_key = pub_key
        self.cert_not_valid_before = not_valid_before
        self.cert_not_valid_after = not_valid_after

    def public_key(self):
        return self.pub_key

    @property
    def not_valid_before(self):
        return self.cert_not_valid_before

    @property
    def not_valid_after(self):
        return self.cert_not_valid_after


class BadPublicKey(object):
    """Public key double whose verifier() yields None to trip error paths."""

    def verifier(self, signature, padding, hash_method):
        return None


class TestSignatureUtils(test.NoDBTestCase):
    """Test methods of signature_utils"""

    @mock.patch('nova.signature_utils.get_public_key')
    def test_verify_signature_PSS(self, mock_get_pub_key):
        payload = b'224626ae19824466f2a7f39ab7b80f7f'
        mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key()
        # Sign and verify the payload under every supported hash method.
        for hash_name, hash_alg in signature_utils.HASH_METHODS.items():
            pss = padding.PSS(mgf=padding.MGF1(hash_alg),
                              salt_length=padding.PSS.MAX_LENGTH)
            sig_ctx = TEST_RSA_PRIVATE_KEY.signer(pss, hash_alg)
            sig_ctx.update(payload)
            sig_b64 = base64.b64encode(sig_ctx.finalize())
            verifier = signature_utils.get_verifier(None, _SIG_CERT_UUID,
                                                    hash_name, sig_b64,
                                                    signature_utils.RSA_PSS)
            verifier.update(payload)
            verifier.verify()

    @mock.patch('nova.signature_utils.get_public_key')
    def test_verify_signature_ECC(self, mock_get_pub_key):
        payload = b'224626ae19824466f2a7f39ab7b80f7f'
        # Exercise every ECC curve; skip any the backend cannot service.
        for curve in signature_utils.ECC_CURVES:
            key_type_name = 'ECC_' + curve.name.upper()
            try:
                signature_utils.SignatureKeyType.lookup(key_type_name)
            except exception.SignatureVerificationError:
                import warnings
                warnings.warn("ECC curve '%s' not supported" % curve.name)
                continue
            # Fresh key per curve; the mocked lookup hands back its
            # public half to the verifier.
            private_key = ec.generate_private_key(curve, default_backend())
            mock_get_pub_key.return_value = private_key.public_key()
            for hash_name, hash_alg in signature_utils.HASH_METHODS.items():
                sig_ctx = private_key.signer(ec.ECDSA(hash_alg))
                sig_ctx.update(payload)
                sig_b64 = base64.b64encode(sig_ctx.finalize())
                verifier = signature_utils.get_verifier(None, _SIG_CERT_UUID,
                                                        hash_name, sig_b64,
                                                        key_type_name)
                verifier.update(payload)
                verifier.verify()

    @mock.patch('nova.signature_utils.get_public_key')
    def test_verify_signature_DSA(self, mock_get_pub_key):
        payload = b'224626ae19824466f2a7f39ab7b80f7f'
        mock_get_pub_key.return_value = TEST_DSA_PRIVATE_KEY.public_key()
        for hash_name, hash_alg in signature_utils.HASH_METHODS.items():
            sig_ctx = TEST_DSA_PRIVATE_KEY.signer(hash_alg)
            sig_ctx.update(payload)
            sig_b64 = base64.b64encode(sig_ctx.finalize())
            verifier = signature_utils.get_verifier(None, _SIG_CERT_UUID,
                                                    hash_name, sig_b64,
                                                    signature_utils.DSA)
            verifier.update(payload)
            verifier.verify()

    @mock.patch('nova.signature_utils.get_public_key')
    def test_verify_signature_bad_signature(self, mock_get_pub_key):
        payload = b'224626ae19824466f2a7f39ab7b80f7f'
        mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key()
        # 'BLAH' is not a valid signature for the payload.
        verifier = signature_utils.get_verifier(None, _SIG_CERT_UUID,
                                                'SHA-256', 'BLAH',
                                                signature_utils.RSA_PSS)
        verifier.update(payload)
        self.assertRaises(crypto_exceptions.InvalidSignature,
                          verifier.verify)

    def test_get_verifier_invalid_image_props(self):
        # A None cert reference is treated as a missing image property.
        self.assertRaisesRegex(exception.SignatureVerificationError,
                               'Required image properties for signature'
                               ' verification do not exist. Cannot verify'
                               ' signature. Missing property: .*',
                               signature_utils.get_verifier,
                               None, None, 'SHA-256', 'BLAH',
                               signature_utils.RSA_PSS)

    @mock.patch('nova.signature_utils.get_public_key')
    def test_verify_signature_bad_sig_key_type(self, mock_get_pub_key):
        mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key()
        self.assertRaisesRegex(exception.SignatureVerificationError,
                               'Invalid signature key type: .*',
                               signature_utils.get_verifier,
                               None, _SIG_CERT_UUID, 'SHA-256',
                               'BLAH', 'BLAH')

    @mock.patch('nova.signature_utils.get_public_key')
    def test_get_verifier_none(self, mock_get_pub_key):
        # BadPublicKey.verifier() returns None, which must surface as a
        # SignatureVerificationError.
        mock_get_pub_key.return_value = BadPublicKey()
        self.assertRaisesRegex(exception.SignatureVerificationError,
                               'Error occurred while creating'
                               ' the verifier',
                               signature_utils.get_verifier,
                               None, _SIG_CERT_UUID, 'SHA-256',
                               'BLAH', signature_utils.RSA_PSS)

    def test_get_signature(self):
        raw_sig = b'A' * 256
        encoded = base64.b64encode(raw_sig)
        self.assertEqual(raw_sig, signature_utils.get_signature(encoded))

    def test_get_signature_fail(self):
        self.assertRaisesRegex(exception.SignatureVerificationError,
                               'The signature data was not properly'
                               ' encoded using base64',
                               signature_utils.get_signature, '///')

    def test_get_hash_method(self):
        # Each name must map back to an instance of the same hash class.
        for hash_name, hash_inst in signature_utils.HASH_METHODS.items():
            resolved = signature_utils.get_hash_method(hash_name)
            self.assertIsInstance(hash_inst, resolved.__class__)

    def test_get_hash_method_fail(self):
        self.assertRaisesRegex(exception.SignatureVerificationError,
                               'Invalid signature hash method: .*',
                               signature_utils.get_hash_method, 'SHA-2')

    def test_signature_key_type_lookup(self):
        for sig_format in (signature_utils.RSA_PSS, signature_utils.DSA):
            key_type = signature_utils.SignatureKeyType.lookup(sig_format)
            self.assertIsInstance(key_type,
                                  signature_utils.SignatureKeyType)
            self.assertEqual(sig_format, key_type.name)

    def test_signature_key_type_lookup_fail(self):
        self.assertRaisesRegex(exception.SignatureVerificationError,
                               'Invalid signature key type: .*',
                               signature_utils.SignatureKeyType.lookup,
                               'RSB-PSS')

    @mock.patch('nova.signature_utils.get_certificate')
    def test_get_public_key_rsa(self, mock_get_cert):
        fake_cert = FakeCryptoCertificate()
        mock_get_cert.return_value = fake_cert
        key_type = signature_utils.SignatureKeyType.lookup(
            signature_utils.RSA_PSS)
        self.assertEqual(fake_cert.public_key(),
                         signature_utils.get_public_key(None, None,
                                                        key_type))

    @mock.patch('nova.signature_utils.get_certificate')
    def test_get_public_key_ecc(self, mock_get_cert):
        fake_cert = FakeCryptoCertificate(TEST_ECC_PRIVATE_KEY.public_key())
        mock_get_cert.return_value = fake_cert
        key_type = signature_utils.SignatureKeyType.lookup('ECC_SECP521R1')
        self.assertEqual(fake_cert.public_key(),
                         signature_utils.get_public_key(None, None,
                                                        key_type))

    @mock.patch('nova.signature_utils.get_certificate')
    def test_get_public_key_dsa(self, mock_get_cert):
        fake_cert = FakeCryptoCertificate(TEST_DSA_PRIVATE_KEY.public_key())
        mock_get_cert.return_value = fake_cert
        key_type = signature_utils.SignatureKeyType.lookup(
            signature_utils.DSA)
        self.assertEqual(fake_cert.public_key(),
                         signature_utils.get_public_key(None, None,
                                                        key_type))

    @mock.patch('nova.signature_utils.get_certificate')
    def test_get_public_key_invalid_key(self, mock_get_certificate):
        # A raw string is not an acceptable public key object.
        mock_get_certificate.return_value = FakeCryptoCertificate('A' * 256)
        key_type = signature_utils.SignatureKeyType.lookup(
            signature_utils.RSA_PSS)
        self.assertRaisesRegex(exception.SignatureVerificationError,
                               'Invalid public key type for '
                               'signature key type: .*',
                               signature_utils.get_public_key,
                               None, None, key_type)

    @mock.patch('cryptography.x509.load_der_x509_certificate')
    @mock.patch('castellan.key_manager.API', return_value=FakeKeyManager())
    def test_get_certificate(self, mock_key_manager_API, mock_load_cert):
        x509_cert = FakeCryptoCertificate()
        mock_load_cert.return_value = x509_cert
        self.assertEqual(x509_cert,
                         signature_utils.get_certificate(
                             None, 'valid_format_cert'))

    @mock.patch('cryptography.x509.load_der_x509_certificate')
    @mock.patch('castellan.key_manager.API', return_value=FakeKeyManager())
    def test_get_expired_certificate(self, mock_key_manager_API,
                                     mock_load_cert):
        mock_load_cert.return_value = FakeCryptoCertificate(
            not_valid_after=timeutils.utcnow() -
            datetime.timedelta(hours=1))
        self.assertRaisesRegex(exception.SignatureVerificationError,
                               'Certificate is not valid after: .*',
                               signature_utils.get_certificate, None,
                               'valid_format_cert')

    @mock.patch('cryptography.x509.load_der_x509_certificate')
    @mock.patch('castellan.key_manager.API', return_value=FakeKeyManager())
    def test_get_not_yet_valid_certificate(self, mock_key_manager_API,
                                           mock_load_cert):
        mock_load_cert.return_value = FakeCryptoCertificate(
            not_valid_before=timeutils.utcnow() +
            datetime.timedelta(hours=1))
        self.assertRaisesRegex(exception.SignatureVerificationError,
                               'Certificate is not valid before: .*',
                               signature_utils.get_certificate, None,
                               'valid_format_cert')

    @mock.patch('castellan.key_manager.API', return_value=FakeKeyManager())
    def test_get_certificate_key_manager_fail(self, mock_key_manager_API):
        bad_cert_uuid = 'fea14bc2-d75f-4ba5-bccc-b5c924ad0695'
        self.assertRaisesRegex(exception.SignatureVerificationError,
                               'Unable to retrieve certificate with ID: .*',
                               signature_utils.get_certificate, None,
                               bad_cert_uuid)

    @mock.patch('castellan.key_manager.API', return_value=FakeKeyManager())
    def test_get_certificate_invalid_format(self, mock_API):
        self.assertRaisesRegex(exception.SignatureVerificationError,
                               'Invalid certificate format: .*',
                               signature_utils.get_certificate, None,
                               'invalid_format_cert')


# -- nova/tests/unit/fake_utils.py ----------------------------------------
"""This modules stubs out functions in nova.utils."""

from nova import utils


def stub_out_utils_spawn_n(stubs):
    """Stubs out spawn_n with a blocking version.

    This aids testing async processes by blocking until they're done.
    """

    def no_spawn(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            # NOTE(danms): This is supposed to simulate spawning
            # of a thread, which would run separate from the parent,
            # and die silently on error. If we don't catch and discard
            # any exceptions here, we're not honoring the usual
            # behavior.
            pass

    stubs.Set(utils, 'spawn_n', no_spawn)
# See the
# License for the specific language governing permissions and limitations
# under the License.

import errno
import socket
import tempfile

import fixtures

from nova import db
from nova import test
from nova.tests.unit import utils as test_utils


class TestUtilsTestCase(test.TestCase):
    """Sanity checks for the helpers in nova.tests.unit.utils."""

    def test_get_test_admin_context(self):
        # get_test_admin_context's return value behaves like admin context.
        admin_ctxt = test_utils.get_test_admin_context()
        # TODO(soren): This should verify the full interface context
        # objects expose.
        self.assertTrue(admin_ctxt.is_admin)

    def test_get_test_instance(self):
        # get_test_instance's return value looks like an instance_ref.
        instance_ref = test_utils.get_test_instance()
        admin_ctxt = test_utils.get_test_admin_context()
        db.instance_get(admin_ctxt, instance_ref['id'])

    def _test_get_test_network_info(self):
        """Does the return value match a real network_info structure."""
        # The challenge here is to define what exactly such a structure
        # must look like.
        pass

    def test_ipv6_supported(self):
        # Whatever the platform, the helper must answer with a bool.
        self.assertIn(test_utils.is_ipv6_supported(), (False, True))

        def raise_ioerror(path):
            raise IOError

        def socket_unsupported(x, y):
            err = socket.error()
            err.errno = errno.EAFNOSUPPORT
            raise err

        def socket_supported(x, y):
            return tempfile.TemporaryFile()

        # An address-family error from socket() means "no IPv6".
        with fixtures.MonkeyPatch('socket.socket', socket_unsupported):
            self.assertFalse(test_utils.is_ipv6_supported())

        # A successful socket() call on a non-Linux platform is enough.
        with fixtures.MonkeyPatch('socket.socket', socket_supported):
            with fixtures.MonkeyPatch('sys.platform', 'windows'):
                self.assertTrue(test_utils.is_ipv6_supported())
        # On Linux a failing open() makes the check report "no IPv6";
        # presumably this models the /proc probe — confirm in the helper.
        with fixtures.MonkeyPatch('sys.platform', 'linux2'):
            with fixtures.MonkeyPatch('six.moves.builtins.open',
                                      raise_ioerror):
                self.assertFalse(test_utils.is_ipv6_supported())
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Crypto module. """ import os import uuid from cryptography.hazmat import backends from cryptography.hazmat.primitives import serialization import mock from oslo_concurrency import processutils import paramiko import six from nova import crypto from nova import exception from nova import test from nova import utils class X509Test(test.NoDBTestCase): @mock.patch('nova.db.certificate_create') def test_can_generate_x509(self, mock_create): with utils.tempdir() as tmpdir: self.flags(ca_path=tmpdir) crypto.ensure_ca_filesystem() _key, cert_str = crypto.generate_x509_cert('fake', 'fake') project_cert = crypto.fetch_ca(project_id='fake') signed_cert_file = os.path.join(tmpdir, "signed") with open(signed_cert_file, 'w') as keyfile: keyfile.write(cert_str) project_cert_file = os.path.join(tmpdir, "project") with open(project_cert_file, 'w') as keyfile: keyfile.write(project_cert) enc, err = utils.execute('openssl', 'verify', '-CAfile', project_cert_file, '-verbose', signed_cert_file) self.assertFalse(err) def test_encrypt_decrypt_x509(self): with utils.tempdir() as tmpdir: self.flags(ca_path=tmpdir) project_id = "fake" crypto.ensure_ca_filesystem() cert = crypto.fetch_ca(project_id) public_key = os.path.join(tmpdir, "public.pem") with open(public_key, 'w') as keyfile: keyfile.write(cert) text = "some @#!%^* test text" process_input = text.encode("ascii") if six.PY3 else text enc, _err = utils.execute('openssl', 'rsautl', 
'-certin', '-encrypt', '-inkey', '%s' % public_key, process_input=process_input, binary=True) dec = crypto.decrypt_text(project_id, enc) self.assertIsInstance(dec, bytes) if six.PY3: dec = dec.decode('ascii') self.assertEqual(text, dec) @mock.patch.object(utils, 'execute', side_effect=processutils.ProcessExecutionError) def test_ensure_ca_filesystem_chdir(self, *args, **kargs): with utils.tempdir() as tmpdir: self.flags(ca_path=tmpdir) start = os.getcwd() self.assertRaises(processutils.ProcessExecutionError, crypto.ensure_ca_filesystem) self.assertEqual(start, os.getcwd()) class RevokeCertsTest(test.NoDBTestCase): @mock.patch('nova.crypto.revoke_cert') def test_revoke_certs_by_user_and_project(self, mock_revoke): user_id = 'test_user' project_id = 2 file_name = 'test_file' def mock_certificate_get_all_by_user_and_project(context, user_id, project_id): return [{"user_id": user_id, "project_id": project_id, "file_name": file_name}] self.stub_out('nova.db.certificate_get_all_by_user_and_project', mock_certificate_get_all_by_user_and_project) crypto.revoke_certs_by_user_and_project(user_id, project_id) mock_revoke.assert_called_once_with(project_id, file_name) @mock.patch('nova.crypto.revoke_cert') def test_revoke_certs_by_user(self, mock_revoke): user_id = 'test_user' project_id = 2 file_name = 'test_file' def mock_certificate_get_all_by_user(context, user_id): return [{"user_id": user_id, "project_id": project_id, "file_name": file_name}] self.stub_out('nova.db.certificate_get_all_by_user', mock_certificate_get_all_by_user) crypto.revoke_certs_by_user(user_id) mock_revoke.assert_called_once_with(project_id, mock.ANY) @mock.patch('nova.crypto.revoke_cert') def test_revoke_certs_by_project(self, mock_revoke): user_id = 'test_user' project_id = 2 file_name = 'test_file' def mock_certificate_get_all_by_project(context, project_id): return [{"user_id": user_id, "project_id": project_id, "file_name": file_name}] self.stub_out('nova.db.certificate_get_all_by_project', 
mock_certificate_get_all_by_project) crypto.revoke_certs_by_project(project_id) mock_revoke.assert_called_once_with(project_id, mock.ANY) @mock.patch.object(utils, 'execute', side_effect=processutils.ProcessExecutionError) @mock.patch.object(os, 'chdir', return_value=None) def test_revoke_cert_process_execution_error(self, *args, **kargs): self.assertRaises(exception.RevokeCertFailure, crypto.revoke_cert, 2, 'test_file') def test_revoke_cert_project_not_found_chdir_fails(self, *args, **kargs): self.flags(use_project_ca=True) self.assertRaises(exception.ProjectNotFound, crypto.revoke_cert, str(uuid.uuid4()), 'test_file') class CertExceptionTests(test.NoDBTestCase): def test_fetch_ca_file_not_found(self): with utils.tempdir() as tmpdir: self.flags(ca_path=tmpdir) self.flags(use_project_ca=True) self.assertRaises(exception.CryptoCAFileNotFound, crypto.fetch_ca, project_id='fake') def test_fetch_crl_file_not_found(self): with utils.tempdir() as tmpdir: self.flags(ca_path=tmpdir) self.flags(use_project_ca=True) self.assertRaises(exception.CryptoCRLFileNotFound, crypto.fetch_crl, project_id='fake') class EncryptionTests(test.NoDBTestCase): pubkey = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDArtgrfBu/g2o28o+H2ng/crv" "zgES91i/NNPPFTOutXelrJ9QiPTPTm+B8yspLsXifmbsmXztNOlBQgQXs6usxb4" "fnJKNUZ84Vkp5esbqK/L7eyRqwPvqo7btKBMoAMVX/kUyojMpxb7Ssh6M6Y8cpi" "goi+MSDPD7+5yRJ9z4mH9h7MCY6Ejv8KTcNYmVHvRhsFUcVhWcIISlNWUGiG7rf" "oki060F5myQN3AXcL8gHG5/Qb1RVkQFUKZ5geQ39/wSyYA1Q65QTba/5G2QNbl2" "0eAIBTyKZhN6g88ak+yARa6BLLDkrlP7L4WctHQMLsuXHohQsUO9AcOlVMARgrg" "uF test@test") prikey = """-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAwK7YK3wbv4NqNvKPh9p4P3K784BEvdYvzTTzxUzrrV3payfU Ij0z05vgfMrKS7F4n5m7Jl87TTpQUIEF7OrrMW+H5ySjVGfOFZKeXrG6ivy+3ska sD76qO27SgTKADFV/5FMqIzKcW+0rIejOmPHKYoKIvjEgzw+/uckSfc+Jh/YezAm OhI7/Ck3DWJlR70YbBVHFYVnCCEpTVlBohu636JItOtBeZskDdwF3C/IBxuf0G9U VZEBVCmeYHkN/f8EsmANUOuUE22v+RtkDW5dtHgCAU8imYTeoPPGpPsgEWugSyw5 
K5T+y+FnLR0DC7Llx6IULFDvQHDpVTAEYK4LhQIDAQABAoIBAF9ibrrgHnBpItx+ qVUMbriiGK8LUXxUmqdQTljeolDZi6KzPc2RVKWtpazBSvG7skX3+XCediHd+0JP DNri1HlNiA6B0aUIGjoNsf6YpwsE4YwyK9cR5k5YGX4j7se3pKX2jOdngxQyw1Mh dkmCeWZz4l67nbSFz32qeQlwrsB56THJjgHB7elDoGCXTX/9VJyjFlCbfxVCsIng inrNgT0uMSYMNpAjTNOjguJt/DtXpwzei5eVpsERe0TRRVH23ycS0fuq/ancYwI/ MDr9KSB8r+OVGeVGj3popCxECxYLBxhqS1dAQyJjhQXKwajJdHFzidjXO09hLBBz FiutpYUCgYEA6OFikTrPlCMGMJjSj+R9woDAOPfvCDbVZWfNo8iupiECvei88W28 RYFnvUQRjSC0pHe//mfUSmiEaE+SjkNCdnNR+vsq9q+htfrADm84jl1mfeWatg/g zuGz2hAcZnux3kQMI7ufOwZNNpM2bf5B4yKamvG8tZRRxSkkAL1NV48CgYEA08/Z Ty9g9XPKoLnUWStDh1zwG+c0q14l2giegxzaUAG5DOgOXbXcw0VQ++uOWD5ARELG g9wZcbBsXxJrRpUqx+GAlv2Y1bkgiPQS1JIyhsWEUtwfAC/G+uZhCX53aI3Pbsjh QmkPCSp5DuOuW2PybMaw+wVe+CaI/gwAWMYDAasCgYEA4Fzkvc7PVoU33XIeywr0 LoQkrb4QyPUrOvt7H6SkvuFm5thn0KJMlRpLfAksb69m2l2U1+HooZd4mZawN+eN DNmlzgxWJDypq83dYwq8jkxmBj1DhMxfZnIE+L403nelseIVYAfPLOqxUTcbZXVk vRQFp+nmSXqQHUe5rAy1ivkCgYEAqLu7cclchCxqDv/6mc5NTVhMLu5QlvO5U6fq HqitgW7d69oxF5X499YQXZ+ZFdMBf19ypTiBTIAu1M3nh6LtIa4SsjXzus5vjKpj FdQhTBus/hU83Pkymk1MoDOPDEtsI+UDDdSDldmv9pyKGWPVi7H86vusXCLWnwsQ e6fCXWECgYEAqgpGvva5kJ1ISgNwnJbwiNw0sOT9BMOsdNZBElf0kJIIy6FMPvap 6S1ziw+XWfdQ83VIUOCL5DrwmcYzLIogS0agmnx/monfDx0Nl9+OZRxy6+AI9vkK 86A1+DXdo+IgX3grFK1l1gPhAZPRWJZ+anrEkyR4iLq6ZoPZ3BQn97U= -----END RSA PRIVATE KEY-----""" text = "Some text! 
%$*" def _ssh_decrypt_text(self, ssh_private_key, text): with utils.tempdir() as tmpdir: sshkey = os.path.abspath(os.path.join(tmpdir, 'ssh.key')) with open(sshkey, 'w') as f: f.write(ssh_private_key) try: dec, _err = utils.execute('openssl', 'rsautl', '-decrypt', '-inkey', sshkey, process_input=text, binary=True) return dec except processutils.ProcessExecutionError as exc: raise exception.DecryptionFailure(reason=exc.stderr) def test_ssh_encrypt_decrypt_text(self): self._test_ssh_encrypt_decrypt_text(self.pubkey) key_with_spaces_in_comment = self.pubkey.replace('test@test', 'Generated by Nova') self._test_ssh_encrypt_decrypt_text(key_with_spaces_in_comment) def _test_ssh_encrypt_decrypt_text(self, key): enc = crypto.ssh_encrypt_text(self.pubkey, self.text) self.assertIsInstance(enc, bytes) # Comparison between bytes and str raises a TypeError # when using python3 -bb if six.PY2: self.assertNotEqual(enc, self.text) result = self._ssh_decrypt_text(self.prikey, enc) self.assertIsInstance(result, bytes) if six.PY3: result = result.decode('utf-8') self.assertEqual(result, self.text) def test_ssh_encrypt_failure(self): self.assertRaises(exception.EncryptionFailure, crypto.ssh_encrypt_text, '', self.text) class KeyPairTest(test.NoDBTestCase): rsa_prv = ( "-----BEGIN RSA PRIVATE KEY-----\n" "MIIEowIBAAKCAQEA5G44D6lEgMj6cRwCPydsMl1VRN2B9DVyV5lmwssGeJClywZM\n" "WcKlSZBaWPbwbt20/r74eMGZPlqtEi9Ro+EHj4/n5+3A2Mh11h0PGSt53PSPfWwo\n" "ZhEg9hQ1w1ZxfBMCx7eG2YdGFQocMgR0zQasJGjjt8hruCnWRB3pNH9DhEwKhgET\n" "H0/CFzxSh0eZWs/O4GSf4upwmRG/1Yu90vnVZq3AanwvvW5UBk6g4uWb6FTES867\n" "kAy4b5EcH6WR3lLE09omuG/NqtH+qkgIdQconDkmkuK3xf5go6GSwEod0erM1G1v\n" "e+C4w/MD98KZ4Zlon9hy7oE2rcqHXf58gZtOTQIDAQABAoIBAQCnkeM2Oemyv7xY\n" "dT+ArJ7GY4lFt2i5iOuUL0ge5Wid0R6OTNR9lDhEOszMLno6GhHIPrdvfjW4dDQ5\n" "/tRY757oRZzNmq+5V3R52V9WC3qeCBmq3EjWdwJDAphd72/YoOmNMKiPsphKntwI\n" "JRS5wodNPlSuYSwEMUypM3f7ttAEn5CASgYgribBDapm7EqkVa2AqSvpFzNvN3/e\n" "Sc36/XlxJin7AkKVOnRksuVOOj504VUQfXgVWZkfTeZqAROgA1FSnjUAffcubJmq\n" 
"pDL/JSgOqN4S+sJkkTrb19MuM9M/IdXteloynF+GUKZx6FdVQQc8xCiXgeupeeSD\n" "fNMAP7DRAoGBAP0JRFm3fCAavBREKVOyZm20DpeR6zMrVP7ht0SykkT/bw/kiRG+\n" "FH1tNioj9uyixt5SiKhH3ZVAunjsKvrwET8i3uz1M2Gk+ovWdLXurBogYNNWafjQ\n" "hRhFHpyExoZYRsn58bvYvjFXTO6JxuNS2b59DGBRkQ5mpsOhxarfbZnXAoGBAOcb\n" "K+qoPDeDicnQZ8+ygYYHxY3fy1nvm1F19jBiWd26bAUOHeZNPPKGvTSlrGWJgEyA\n" "FjZIlHJOY2s0dhukiytOiXzdA5iqK1NvlF+QTUI4tCeNMVejWC+n6sKR9ADZkX8D\n" "NOHaLkDzc/ukus59aKyjxP53I6SV6y6m5NeyvDx7AoGAaUji1MXA8wbMvU4DOB0h\n" "+4GRFMYVbEwaaJd4jzASJn12M9GuquBBXFMF15DxXFL6lmUXEZYdf83YCRqTY6hi\n" "NLgIs+XuxDFGQssv8sdletWAFE9/dpUk3A1eiFfC1wGCKuZCDBxKPvOJQjO3uryt\n" "d1JGxQkLZ0eVGg+E1O10iC8CgYB4w2QRfNPqllu8D6EPkVHJfeonltgmKOTajm+V\n" "HO+kw7OKeLP7EkVU3j+kcSZC8LUQRKZWu1qG2Jtu+7zz+OmYObPygXNNpS56rQW1\n" "Yixc/FB3knpEN2DvlilAfxAoGYjD/CL4GhCtdAoZZx0Opc262OEpr4v6hzSb7i4K\n" "4KUoXQKBgHfbiaSilxx9guUqvSaexpHmtiUwx05a05fD6tu8Cofl6AM9wGpw3xOT\n" "tfo4ehvS13tTz2RDE2xKuetMmkya7UgifcxUmBzqkOlgr0oOi2rp+eDKXnzUUqsH\n" "V7E96Dj36K8q2+gZIXcNqjN7PzfkF8pA0G+E1veTi8j5dnvIsy1x\n" "-----END RSA PRIVATE KEY-----\n" ) rsa_pub = ( "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDkbjgPqUSAyPpxHAI/J2wyXVVE" "3YH0NXJXmWbCywZ4kKXLBkxZwqVJkFpY9vBu3bT+vvh4wZk+Wq0SL1Gj4QePj+fn" "7cDYyHXWHQ8ZK3nc9I99bChmESD2FDXDVnF8EwLHt4bZh0YVChwyBHTNBqwkaOO3" "yGu4KdZEHek0f0OETAqGARMfT8IXPFKHR5laz87gZJ/i6nCZEb/Vi73S+dVmrcBq" "fC+9blQGTqDi5ZvoVMRLzruQDLhvkRwfpZHeUsTT2ia4b82q0f6qSAh1ByicOSaS" "4rfF/mCjoZLASh3R6szUbW974LjD8wP3wpnhmWif2HLugTatyodd/nyBm05N Gen" "erated-by-Nova" ) rsa_fp = "e7:66:a1:2c:4f:90:6e:11:19:da:ac:c2:69:e1:ad:89" dss_pub = ( "ssh-dss AAAAB3NzaC1kc3MAAACBAKWFW2++pDxJWObkADbSXw8KfZ4VupkRKEXF" "SPN2kV0v+FgdnBEcrEJPExaOTMhmxIuc82ktTv76wHSEpbbsLuI7IDbB6KJJwHs2" "y356yB28Q9rin7X0VMYKkPxvAcbIUSrEbQtyPMihlOaaQ2dGSsEQGQSpjm3f3RU6" "OWux0w/NAAAAFQCgzWF2zxQmi/Obd11z9Im6gY02gwAAAIAHCDLjipVwMLXIqNKO" "MktiPex+ewRQxBi80dzZ3mJzARqzLPYI9hJFUU0LiMtLuypV/djpUWN0cQpmgTQf" "TfuZx9ipC6Mtiz66NQqjkQuoihzdk+9KlOTo03UsX5uBGwuZ09Dnf1VTF8ZsW5Hg" 
"HyOk6qD71QBajkcFJAKOT3rFfgAAAIAy8trIzqEps9/n37Nli1TvNPLbFQAXl1LN" "wUFmFDwBCGTLl8puVZv7VSu1FG8ko+mzqNebqcN4RMC26NxJqe+RRubn5KtmLoIa" "7tRe74hvQ1HTLLuGxugwa4CewNbwzzEDEs8U79WDhGKzDkJR4nLPVimj5WLAWV70" "RNnRX7zj5w== Generated-by-Nova" ) dss_fp = "b9:dc:ac:57:df:2a:2b:cf:65:a8:c3:4e:9d:4a:82:3c" ecdsa_pub = ( "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAy" "NTYAAABBBG1r4wzPTIjSo78POCq+u/czb8gYK0KvqlmCvcRPrnDWxgLw7y6BX51t" "uYREz7iLRCP7BwUt8R+ZWzFZDeOLIWU= Generated-by-Nova" ) ecdsa_pub_with_spaces = ( "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAy" "NTYAAABBBG1r4wzPTIjSo78POCq+u/czb8gYK0KvqlmCvcRPrnDWxgLw7y6BX51t" "uYREz7iLRCP7BwUt8R+ZWzFZDeOLIWU= Generated by Nova" ) ecdsa_fp = "16:6a:c9:ec:80:4d:17:3e:d5:3b:6f:c0:d7:15:04:40" def test_generate_fingerprint(self): fingerprint = crypto.generate_fingerprint(self.rsa_pub) self.assertEqual(self.rsa_fp, fingerprint) fingerprint = crypto.generate_fingerprint(self.dss_pub) self.assertEqual(self.dss_fp, fingerprint) fingerprint = crypto.generate_fingerprint(self.ecdsa_pub) self.assertEqual(self.ecdsa_fp, fingerprint) fingerprint = crypto.generate_fingerprint(self.ecdsa_pub_with_spaces) self.assertEqual(self.ecdsa_fp, fingerprint) def test_generate_key_pair_2048_bits(self): (private_key, public_key, fingerprint) = crypto.generate_key_pair() pub_bytes = public_key.encode('utf-8') pkey = serialization.load_ssh_public_key( pub_bytes, backends.default_backend()) self.assertEqual(2048, pkey.key_size) def test_generate_key_pair_1024_bits(self): bits = 1024 (private_key, public_key, fingerprint) = crypto.generate_key_pair(bits) pub_bytes = public_key.encode('utf-8') pkey = serialization.load_ssh_public_key( pub_bytes, backends.default_backend()) self.assertEqual(bits, pkey.key_size) def test_generate_key_pair_mocked_private_key(self): keyin = six.StringIO() keyin.write(self.rsa_prv) keyin.seek(0) key = paramiko.RSAKey.from_private_key(keyin) with mock.patch.object(crypto, 'generate_key') as 
mock_generate: mock_generate.return_value = key (private_key, public_key, fingerprint) = crypto.generate_key_pair() self.assertEqual(self.rsa_pub, public_key) self.assertEqual(self.rsa_fp, fingerprint) nova-13.0.0/nova/tests/unit/policy_fixture.py0000664000567000056710000001002312701407773022406 0ustar jenkinsjenkins00000000000000# Copyright 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import fixtures from oslo_config import cfg from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils import six from nova import paths import nova.policy from nova.tests.unit import fake_policy CONF = cfg.CONF class RealPolicyFixture(fixtures.Fixture): """Load the live policy for tests. A base policy fixture that starts with the assumption that you'd like to load and enforce the shipped default policy in tests. Provides interfaces to tinker with both the contents and location of the policy file before loading to allow overrides. To do this implement ``_prepare_policy`` in the subclass, and adjust the ``policy_file`` accordingly. 
""" def _prepare_policy(self): """Allow changing of the policy before we get started""" pass def setUp(self): super(RealPolicyFixture, self).setUp() # policy_file can be overridden by subclasses self.policy_file = paths.state_path_def('etc/nova/policy.json') self._prepare_policy() CONF.set_override('policy_file', self.policy_file, group='oslo_policy') nova.policy.reset() nova.policy.init() self.addCleanup(nova.policy.reset) def set_rules(self, rules): policy = nova.policy._ENFORCER policy.set_rules(oslo_policy.Rules.from_dict(rules)) class PolicyFixture(RealPolicyFixture): """Load a fake policy from nova.tests.unit.fake_policy This overrides the policy with a completely fake and synthetic policy file. NOTE(sdague): the use of this is deprecated, and we should unwind the tests so that they can function with the real policy. This is mostly legacy because our default test instances and default test contexts don't match up. It appears that in many cases fake_policy was just modified to whatever makes tests pass, which makes it dangerous to be used in tree. Long term a NullPolicy fixture might be better in those cases. """ def _prepare_policy(self): self.policy_dir = self.useFixture(fixtures.TempDir()) self.policy_file = os.path.join(self.policy_dir.path, 'policy.json') with open(self.policy_file, 'w') as f: f.write(fake_policy.policy_data) CONF.set_override('policy_dirs', [], group='oslo_policy') class RoleBasedPolicyFixture(RealPolicyFixture): """Load a modified policy which allows all actions only be a single roll. This fixture can be used for testing role based permissions as it provides a version of the policy which stomps over all previous declaration and makes every action only available to a single role. NOTE(sdague): we could probably do this simpler by only loading a single default rule. 
""" def __init__(self, role="admin", *args, **kwargs): super(RoleBasedPolicyFixture, self).__init__(*args, **kwargs) self.role = role def _prepare_policy(self): policy = jsonutils.load(open(CONF.oslo_policy.policy_file)) # Convert all actions to require specified role for action, rule in six.iteritems(policy): policy[action] = 'role:%s' % self.role self.policy_dir = self.useFixture(fixtures.TempDir()) self.policy_file = os.path.join(self.policy_dir.path, 'policy.json') with open(self.policy_file, 'w') as f: jsonutils.dump(policy, f) nova-13.0.0/nova/tests/unit/fake_network.py0000664000567000056710000004056412701407773022035 0ustar jenkinsjenkins00000000000000# Copyright 2011 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import range

from nova.compute import api as compute_api
from nova.compute import manager as compute_manager
import nova.context
from nova import db
from nova import exception
from nova.network import manager as network_manager
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
from nova.objects import network as network_obj
from nova.objects import virtual_interface as vif_obj
from nova.tests.unit.objects import test_fixed_ip
from nova.tests.unit.objects import test_instance_info_cache
from nova.tests.unit.objects import test_pci_device
from nova.tests import uuidsentinel as uuids


# Host name stamped onto every fake network / fixed IP in this module.
HOST = "testhost"


CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')


class FakeModel(dict):
    """Represent a model from the db."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): positional args are accepted but silently
        # ignored; only keyword args become dict entries.
        self.update(kwargs)


class FakeNetworkManager(network_manager.NetworkManager):
    """This NetworkManager doesn't call the base class so we can bypass all
    inherited service cruft and just perform unit tests.
""" class FakeDB(object): vifs = [{'id': 0, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'instance_uuid': uuids.instance_1, 'network_id': 1, 'uuid': uuids.vifs_1, 'address': 'DC:AD:BE:FF:EF:01'}, {'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'instance_uuid': uuids.instance_2, 'network_id': 21, 'uuid': uuids.vifs_2, 'address': 'DC:AD:BE:FF:EF:02'}, {'id': 2, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'instance_uuid': uuids.instance_1, 'network_id': 31, 'uuid': uuids.vifs_3, 'address': 'DC:AD:BE:FF:EF:03'}] floating_ips = [dict(address='172.16.1.1', fixed_ip_id=100), dict(address='172.16.1.2', fixed_ip_id=200), dict(address='173.16.1.2', fixed_ip_id=210)] fixed_ips = [dict(test_fixed_ip.fake_fixed_ip, id=100, address='172.16.0.1', virtual_interface_id=0), dict(test_fixed_ip.fake_fixed_ip, id=200, address='172.16.0.2', virtual_interface_id=1), dict(test_fixed_ip.fake_fixed_ip, id=210, address='173.16.0.2', virtual_interface_id=2)] def fixed_ip_get_by_instance(self, context, instance_uuid): return [dict(address='10.0.0.0'), dict(address='10.0.0.1'), dict(address='10.0.0.2')] def network_get_by_cidr(self, context, cidr): raise exception.NetworkNotFoundForCidr(cidr=cidr) def network_create_safe(self, context, net): fakenet = dict(net) fakenet['id'] = 999 return fakenet def network_get(self, context, network_id, project_only="allow_none"): return {'cidr_v6': '2001:db8:69:%x::/64' % network_id} def network_get_by_uuid(self, context, network_uuid): raise exception.NetworkNotFoundForUUID(uuid=network_uuid) def network_get_all(self, context): raise exception.NoNetworksFound() def network_get_all_by_uuids(self, context, project_only="allow_none"): raise exception.NoNetworksFound() def network_disassociate(self, context, network_id): return True def virtual_interface_get_all(self, context): return self.vifs def fixed_ips_by_virtual_interface(self, context, vif_id): return [ip for 
ip in self.fixed_ips if ip['virtual_interface_id'] == vif_id] def fixed_ip_disassociate(self, context, address): return True def __init__(self, stubs=None): self.db = self.FakeDB() if stubs: stubs.Set(vif_obj, 'db', self.db) self.deallocate_called = None self.deallocate_fixed_ip_calls = [] self.network_rpcapi = network_rpcapi.NetworkAPI() # TODO(matelakat) method signature should align with the faked one's def deallocate_fixed_ip(self, context, address=None, host=None, instance=None): self.deallocate_fixed_ip_calls.append((context, address, host)) # TODO(matelakat) use the deallocate_fixed_ip_calls instead self.deallocate_called = address def _create_fixed_ips(self, context, network_id, fixed_cidr=None, extra_reserved=None, bottom_reserved=0, top_reserved=0): pass def get_instance_nw_info(context, instance_id, rxtx_factor, host, instance_uuid=None, **kwargs): pass def fake_network(network_id, ipv6=None): if ipv6 is None: ipv6 = CONF.use_ipv6 fake_network = {'id': network_id, 'uuid': getattr(uuids, 'network%i' % network_id), 'label': 'test%d' % network_id, 'injected': False, 'multi_host': False, 'cidr': '192.168.%d.0/24' % network_id, 'cidr_v6': None, 'netmask': '255.255.255.0', 'netmask_v6': None, 'bridge': 'fake_br%d' % network_id, 'bridge_interface': 'fake_eth%d' % network_id, 'gateway': '192.168.%d.1' % network_id, 'gateway_v6': None, 'broadcast': '192.168.%d.255' % network_id, 'dns1': '192.168.%d.3' % network_id, 'dns2': '192.168.%d.4' % network_id, 'dns3': '192.168.%d.3' % network_id, 'vlan': None, 'host': None, 'project_id': uuids.project, 'vpn_public_address': '192.168.%d.2' % network_id, 'vpn_public_port': None, 'vpn_private_address': None, 'dhcp_start': None, 'rxtx_base': network_id * 10, 'priority': None, 'deleted': False, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'mtu': None, 'dhcp_server': '192.168.%d.1' % network_id, 'enable_dhcp': True, 'share_address': False} if ipv6: fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id 
        fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id
        fake_network['netmask_v6'] = '64'
    if CONF.flat_injected:
        fake_network['injected'] = True

    return fake_network


def fake_network_obj(context, network_id=1, ipv6=None):
    # Object-layer wrapper around fake_network().
    return network_obj.Network._from_db_object(
        context, network_obj.Network(), fake_network(network_id, ipv6))


def fake_vif(x):
    # Fake virtual-interface row; x seeds the id, MAC address, uuid
    # sentinel and network id.
    return{'id': x,
           'created_at': None,
           'updated_at': None,
           'deleted_at': None,
           'deleted': 0,
           'address': 'DE:AD:BE:EF:00:%02x' % x,
           'uuid': getattr(uuids, 'vif%i' % x),
           'network_id': x,
           'instance_uuid': uuids.vifs_1}


def floating_ip_ids():
    # Generator of sequential floating-ip ids 1..99.
    for i in range(1, 100):
        yield i


def fixed_ip_ids():
    # Generator of sequential fixed-ip ids 1..99.
    for i in range(1, 100):
        yield i


# Module-level id generators shared by the helpers below;
# fake_get_instance_nw_info() resets them before each run.
floating_ip_id = floating_ip_ids()
fixed_ip_id = fixed_ip_ids()


def next_fixed_ip(network_id, num_floating_ips=0):
    # Build the next fake fixed-ip row for network_id, attaching
    # num_floating_ips fake floating ips to it.
    next_id = next(fixed_ip_id)
    f_ips = [FakeModel(**next_floating_ip(next_id))
             for i in range(num_floating_ips)]
    return {'id': next_id,
            'network_id': network_id,
            'address': '192.168.%d.%03d' % (network_id, (next_id + 99)),
            'instance_uuid': uuids.fixed_ip,
            'allocated': False,
            'reserved': False,
            'created_at': None,
            'updated_at': None,
            'deleted_at': None,
            'leased': True,
            'host': HOST,
            'deleted': 0,
            'network': fake_network(network_id),
            'virtual_interface': fake_vif(network_id),
            # and since network_id and vif_id happen to be equivalent
            'virtual_interface_id': network_id,
            'floating_ips': f_ips}


# NOTE(review): the fixed_ip_id parameter shadows the module-level
# generator of the same name; only the floating_ip_id generator is
# consumed here.
def next_floating_ip(fixed_ip_id):
    next_id = next(floating_ip_id)
    return {'id': next_id,
            'address': '10.10.10.%03d' % (next_id + 99),
            'fixed_ip_id': fixed_ip_id,
            'project_id': None,
            'auto_assigned': False}


def ipv4_like(ip, match_string):
    # Wildcard comparison of dotted-quad strings, e.g.
    # ipv4_like('192.168.1.5', '192.168.*.*') -> True.
    ip = ip.split('.')
    match_octets = match_string.split('.')

    for i, octet in enumerate(match_octets):
        if octet == '*':
            continue
        if octet != ip[i]:
            return False
    return True


def fake_get_instance_nw_info(test, num_networks=1, ips_per_vif=2,
                              floating_ips_per_fixed_ip=0):
    # test is an instance of nova.test.TestCase
    # ips_per_vif is the number of ips each vif will have
    # num_floating_ips is number of float ips for each fixed ip
    network = network_manager.FlatManager(host=HOST)
    network.db = db

    # reset the fixed and floating ip generators
    global floating_ip_id, fixed_ip_id, fixed_ips
    floating_ip_id = floating_ip_ids()
    fixed_ip_id = fixed_ip_ids()
    fixed_ips = []

    def fixed_ips_fake(*args, **kwargs):
        # Produce ips_per_vif fixed ips for each of the num_networks
        # networks and publish them via the module-global fixed_ips.
        global fixed_ips
        ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
               for i in range(1, num_networks + 1)
               for j in range(ips_per_vif)]
        fixed_ips = ips
        return ips

    def update_cache_fake(*args, **kwargs):
        fake_info_cache = {
            'created_at': None,
            'updated_at': None,
            'deleted_at': None,
            'deleted': False,
            'instance_uuid': uuids.vifs_1,
            'network_info': '[]',
            }
        return fake_info_cache

    test.stub_out('nova.db.fixed_ip_get_by_instance', fixed_ips_fake)
    test.stub_out('nova.db.instance_info_cache_update', update_cache_fake)

    class FakeContext(nova.context.RequestContext):
        # NOTE(review): is_admin is defined as a method here, where the
        # real RequestContext exposes it as an attribute -- confirm
        # callers treat it accordingly.
        def is_admin(self):
            return True

    nw_model = network.get_instance_nw_info(
        FakeContext('fakeuser', 'fake_project'),
        0, 3, None)
    return nw_model


def stub_out_nw_api_get_instance_nw_info(test, func=None,
                                         num_networks=1,
                                         ips_per_vif=1,
                                         floating_ips_per_fixed_ip=0):

    def get_instance_nw_info(self, context, instance, conductor_api=None):
        return fake_get_instance_nw_info(test,
                                         num_networks=num_networks,
                                         ips_per_vif=ips_per_vif,
                                         floating_ips_per_fixed_ip=
                                         floating_ips_per_fixed_ip)

    if func is None:
        func = get_instance_nw_info
    test.stub_out('nova.network.api.API.get_instance_nw_info', func)


def stub_out_network_cleanup(test):
    test.stub_out('nova.network.api.API.deallocate_for_instance',
                  lambda *args, **kwargs: None)


# Originals of the ComputeManager methods replaced by
# set_stub_network_methods(); restored by unset_stub_network_methods().
_real_functions = {}


def set_stub_network_methods(test):
    # Replace ComputeManager's network allocate/deallocate with no-op
    # fakes, remembering the real methods on first use.
    global _real_functions
    cm = compute_manager.ComputeManager
    if not _real_functions:
        _real_functions = {
            '_allocate_network': cm._allocate_network,
            '_deallocate_network': cm._deallocate_network}

    def fake_networkinfo(*args, **kwargs):
        return network_model.NetworkInfo()

    def fake_async_networkinfo(*args, **kwargs):
        return
        network_model.NetworkInfoAsyncWrapper(fake_networkinfo)

    test.stub_out('nova.compute.manager.ComputeManager._allocate_network',
                  fake_async_networkinfo)
    test.stub_out('nova.compute.manager.ComputeManager._deallocate_network',
                  lambda *args, **kwargs: None)


def unset_stub_network_methods(test):
    # Restore the real ComputeManager methods captured by
    # set_stub_network_methods().
    global _real_functions
    if _real_functions:
        for name in _real_functions:
            test.stub_out('nova.compute.manager.ComputeManager.' + name,
                          _real_functions[name])


def stub_compute_with_ips(stubs):
    # Wrap compute API get/get_all/create so any instance they return
    # carries a fake cached network-info blob, and stub the PCI device
    # lookup those wrappers depend on.
    orig_get = compute_api.API.get
    orig_get_all = compute_api.API.get_all
    orig_create = compute_api.API.create

    def fake_get(*args, **kwargs):
        return _get_instances_with_cached_ips(orig_get, *args, **kwargs)

    def fake_get_all(*args, **kwargs):
        return _get_instances_with_cached_ips(orig_get_all, *args, **kwargs)

    def fake_create(*args, **kwargs):
        return _create_instances_with_cached_ips(orig_create,
                                                 *args, **kwargs)

    def fake_pci_device_get_by_addr(context, node_id, dev_addr):
        return test_pci_device.fake_db_dev

    stubs.Set(db, 'pci_device_get_by_addr', fake_pci_device_get_by_addr)
    stubs.Set(compute_api.API, 'get', fake_get)
    stubs.Set(compute_api.API, 'get_all', fake_get_all)
    stubs.Set(compute_api.API, 'create', fake_create)


def _get_fake_cache():
    # Build a serialized network-info cache with a single vif on one
    # private IPv4 subnet; an IPv6 subnet is appended when use_ipv6 is
    # set.
    def _ip(ip, fixed=True, floats=None):
        ip_dict = {'address': ip, 'type': 'fixed'}
        if not fixed:
            ip_dict['type'] = 'floating'
        if fixed and floats:
            ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
        return ip_dict

    info = [{'address': 'aa:bb:cc:dd:ee:ff',
             'id': 1,
             'network': {'bridge': 'br0',
                         'id': 1,
                         'label': 'private',
                         'subnets': [{'cidr': '192.168.0.0/24',
                                      'ips': [_ip('192.168.0.3')]}]}}]
    if CONF.use_ipv6:
        ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
        info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
                                              'ips': [_ip(ipv6_addr)]})
    return jsonutils.dumps(info)


def _get_instances_with_cached_ips(orig_func, *args, **kwargs):
    """Kludge the cache into instance(s) without having to create DB
    entries
    """
    instances = orig_func(*args, **kwargs)
    context = args[0]
    fake_device = objects.PciDevice.get_by_dev_addr(context, 1, 'a')

    def _info_cache_for(instance):
        # Dict-based cache for DB-style instances; wrapped into an
        # InstanceInfoCache object when the instance is a NovaObject.
        info_cache = dict(test_instance_info_cache.fake_info_cache,
                          network_info=_get_fake_cache(),
                          instance_uuid=instance['uuid'])
        if isinstance(instance, obj_base.NovaObject):
            _info_cache = objects.InstanceInfoCache(context)
            objects.InstanceInfoCache._from_db_object(context,
                                                      _info_cache,
                                                      info_cache)
            info_cache = _info_cache
        instance['info_cache'] = info_cache

    if isinstance(instances, (list, obj_base.ObjectListBase)):
        for instance in instances:
            _info_cache_for(instance)
            fake_device.claim(instance)
            fake_device.allocate(instance)
    else:
        _info_cache_for(instances)
        fake_device.claim(instances)
        fake_device.allocate(instances)
    return instances


def _create_instances_with_cached_ips(orig_func, *args, **kwargs):
    """Kludge the above kludge so that the database doesn't get out
    of sync with the actual instance.
    """
    instances, reservation_id = orig_func(*args, **kwargs)
    fake_cache = _get_fake_cache()
    for instance in instances:
        instance['info_cache']['network_info'] = fake_cache
        db.instance_info_cache_update(args[1], instance['uuid'],
                                      {'network_info': fake_cache})
    return (instances, reservation_id)
nova-13.0.0/nova/tests/unit/test_quota.py0000664000567000056710000033245412701407773021540 0ustar  jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_db.sqlalchemy import enginefacade from oslo_utils import timeutils from six.moves import range from nova import compute from nova.compute import flavors import nova.conf from nova import context from nova import db from nova.db.sqlalchemy import api as sqa_api from nova.db.sqlalchemy import models as sqa_models from nova import exception from nova import quota from nova import test import nova.tests.unit.image.fake CONF = nova.conf.CONF class QuotaIntegrationTestCase(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(QuotaIntegrationTestCase, self).setUp() self.flags(compute_driver='nova.virt.fake.FakeDriver', quota_instances=2, quota_cores=4, quota_floating_ips=1, network_manager='nova.network.manager.FlatDHCPManager') # Apparently needed by the RPC tests... self.network = self.start_service('network') self.user_id = 'admin' self.project_id = 'admin' self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True) nova.tests.unit.image.fake.stub_out_image_service(self) self.compute_api = compute.API() def tearDown(self): super(QuotaIntegrationTestCase, self).tearDown() nova.tests.unit.image.fake.FakeImageService_reset() def _create_instance(self, cores=2): """Create a test instance.""" inst = {} inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175' inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user_id inst['project_id'] = self.project_id inst['instance_type_id'] = '3' # m1.large inst['vcpus'] = cores return db.instance_create(self.context, inst) def test_too_many_instances(self): instance_uuids = [] for i in range(CONF.quota_instances): instance = self._create_instance() instance_uuids.append(instance['uuid']) inst_type = flavors.get_flavor_by_name('m1.small') image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175' try: self.compute_api.create(self.context, min_count=1, max_count=1, 
instance_type=inst_type, image_href=image_uuid) except exception.QuotaError as e: expected_kwargs = {'code': 413, 'req': '1, 1', 'used': '4, 2', 'allowed': '4, 2', 'overs': 'cores, instances'} self.assertEqual(expected_kwargs, e.kwargs) else: self.fail('Expected QuotaError exception') for instance_uuid in instance_uuids: db.instance_destroy(self.context, instance_uuid) def test_too_many_cores(self): instance = self._create_instance(cores=4) inst_type = flavors.get_flavor_by_name('m1.small') image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175' try: self.compute_api.create(self.context, min_count=1, max_count=1, instance_type=inst_type, image_href=image_uuid) except exception.QuotaError as e: expected_kwargs = {'code': 413, 'req': '1', 'used': '4', 'allowed': '4', 'overs': 'cores'} self.assertEqual(expected_kwargs, e.kwargs) else: self.fail('Expected QuotaError exception') db.instance_destroy(self.context, instance['uuid']) def test_many_cores_with_unlimited_quota(self): # Setting cores quota to unlimited: self.flags(quota_cores=-1) instance = self._create_instance(cores=4) db.instance_destroy(self.context, instance['uuid']) def test_too_many_addresses(self): address = '192.168.0.100' db.floating_ip_create(context.get_admin_context(), {'address': address, 'pool': 'nova', 'project_id': self.project_id}) self.assertRaises(exception.QuotaError, self.network.allocate_floating_ip, self.context, self.project_id) db.floating_ip_destroy(context.get_admin_context(), address) def test_auto_assigned(self): address = '192.168.0.100' db.floating_ip_create(context.get_admin_context(), {'address': address, 'pool': 'nova', 'project_id': self.project_id}) # auto allocated addresses should not be counted self.assertRaises(exception.NoMoreFloatingIps, self.network.allocate_floating_ip, self.context, self.project_id, True) db.floating_ip_destroy(context.get_admin_context(), address) def test_too_many_metadata_items(self): metadata = {} for i in range(CONF.quota_metadata_items + 1): 
metadata['key%s' % i] = 'value%s' % i inst_type = flavors.get_flavor_by_name('m1.small') image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175' self.assertRaises(exception.QuotaError, self.compute_api.create, self.context, min_count=1, max_count=1, instance_type=inst_type, image_href=image_uuid, metadata=metadata) def _create_with_injected_files(self, files): api = self.compute_api inst_type = flavors.get_flavor_by_name('m1.small') image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175' api.create(self.context, min_count=1, max_count=1, instance_type=inst_type, image_href=image_uuid, injected_files=files) def test_no_injected_files(self): api = self.compute_api inst_type = flavors.get_flavor_by_name('m1.small') image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175' api.create(self.context, instance_type=inst_type, image_href=image_uuid) def test_max_injected_files(self): files = [] for i in range(CONF.quota_injected_files): files.append(('/my/path%d' % i, 'config = test\n')) self._create_with_injected_files(files) # no QuotaError def test_too_many_injected_files(self): files = [] for i in range(CONF.quota_injected_files + 1): files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i)) self.assertRaises(exception.QuotaError, self._create_with_injected_files, files) def test_max_injected_file_content_bytes(self): max = CONF.quota_injected_file_content_bytes content = ''.join(['a' for i in range(max)]) files = [('/test/path', content)] self._create_with_injected_files(files) # no QuotaError def test_too_many_injected_file_content_bytes(self): max = CONF.quota_injected_file_content_bytes content = ''.join(['a' for i in range(max + 1)]) files = [('/test/path', content)] self.assertRaises(exception.QuotaError, self._create_with_injected_files, files) def test_max_injected_file_path_bytes(self): max = CONF.quota_injected_file_path_length path = ''.join(['a' for i in range(max)]) files = [(path, 'config = quotatest')] self._create_with_injected_files(files) # no QuotaError def 
test_too_many_injected_file_path_bytes(self): max = CONF.quota_injected_file_path_length path = ''.join(['a' for i in range(max + 1)]) files = [(path, 'config = quotatest')] self.assertRaises(exception.QuotaError, self._create_with_injected_files, files) def test_reservation_expire(self): self.useFixture(test.TimeOverride()) def assertInstancesReserved(reserved): result = quota.QUOTAS.get_project_quotas(self.context, self.context.project_id) self.assertEqual(result['instances']['reserved'], reserved) quota.QUOTAS.reserve(self.context, expire=60, instances=2) assertInstancesReserved(2) timeutils.advance_time_seconds(80) quota.QUOTAS.expire(self.context) assertInstancesReserved(0) @enginefacade.transaction_context_provider class FakeContext(object): def __init__(self, project_id, quota_class): self.is_admin = False self.user_id = 'fake_user' self.project_id = project_id self.quota_class = quota_class self.read_deleted = 'no' def elevated(self): elevated = self.__class__(self.project_id, self.quota_class) elevated.is_admin = True return elevated class FakeDriver(object): def __init__(self, by_project=None, by_user=None, by_class=None, reservations=None): self.called = [] self.by_project = by_project or {} self.by_user = by_user or {} self.by_class = by_class or {} self.reservations = reservations or [] def get_by_project_and_user(self, context, project_id, user_id, resource): self.called.append(('get_by_project_and_user', context, project_id, user_id, resource)) try: return self.by_user[user_id][resource] except KeyError: raise exception.ProjectUserQuotaNotFound(project_id=project_id, user_id=user_id) def get_by_project(self, context, project_id, resource): self.called.append(('get_by_project', context, project_id, resource)) try: return self.by_project[project_id][resource] except KeyError: raise exception.ProjectQuotaNotFound(project_id=project_id) def get_by_class(self, context, quota_class, resource): self.called.append(('get_by_class', context, quota_class, 
resource)) try: return self.by_class[quota_class][resource] except KeyError: raise exception.QuotaClassNotFound(class_name=quota_class) def get_defaults(self, context, resources): self.called.append(('get_defaults', context, resources)) return resources def get_class_quotas(self, context, resources, quota_class, defaults=True): self.called.append(('get_class_quotas', context, resources, quota_class, defaults)) return resources def get_user_quotas(self, context, resources, project_id, user_id, quota_class=None, defaults=True, usages=True): self.called.append(('get_user_quotas', context, resources, project_id, user_id, quota_class, defaults, usages)) return resources def get_project_quotas(self, context, resources, project_id, quota_class=None, defaults=True, usages=True, remains=False): self.called.append(('get_project_quotas', context, resources, project_id, quota_class, defaults, usages, remains)) return resources def limit_check(self, context, resources, values, project_id=None, user_id=None): self.called.append(('limit_check', context, resources, values, project_id, user_id)) def reserve(self, context, resources, deltas, expire=None, project_id=None, user_id=None): self.called.append(('reserve', context, resources, deltas, expire, project_id, user_id)) return self.reservations def commit(self, context, reservations, project_id=None, user_id=None): self.called.append(('commit', context, reservations, project_id, user_id)) def rollback(self, context, reservations, project_id=None, user_id=None): self.called.append(('rollback', context, reservations, project_id, user_id)) def usage_reset(self, context, resources): self.called.append(('usage_reset', context, resources)) def destroy_all_by_project_and_user(self, context, project_id, user_id): self.called.append(('destroy_all_by_project_and_user', context, project_id, user_id)) def destroy_all_by_project(self, context, project_id): self.called.append(('destroy_all_by_project', context, project_id)) def expire(self, 
context): self.called.append(('expire', context)) class BaseResourceTestCase(test.TestCase): def test_no_flag(self): resource = quota.BaseResource('test_resource') self.assertEqual(resource.name, 'test_resource') self.assertIsNone(resource.flag) self.assertEqual(resource.default, -1) def test_with_flag(self): # We know this flag exists, so use it... self.flags(quota_instances=10) resource = quota.BaseResource('test_resource', 'quota_instances') self.assertEqual(resource.name, 'test_resource') self.assertEqual(resource.flag, 'quota_instances') self.assertEqual(resource.default, 10) def test_with_flag_no_quota(self): self.flags(quota_instances=-1) resource = quota.BaseResource('test_resource', 'quota_instances') self.assertEqual(resource.name, 'test_resource') self.assertEqual(resource.flag, 'quota_instances') self.assertEqual(resource.default, -1) def test_quota_no_project_no_class(self): self.flags(quota_instances=10) resource = quota.BaseResource('test_resource', 'quota_instances') driver = FakeDriver() context = FakeContext(None, None) quota_value = resource.quota(driver, context) self.assertEqual(quota_value, 10) def test_quota_with_project_no_class(self): self.flags(quota_instances=10) resource = quota.BaseResource('test_resource', 'quota_instances') driver = FakeDriver(by_project=dict( test_project=dict(test_resource=15), )) context = FakeContext('test_project', None) quota_value = resource.quota(driver, context) self.assertEqual(quota_value, 15) def test_quota_no_project_with_class(self): self.flags(quota_instances=10) resource = quota.BaseResource('test_resource', 'quota_instances') driver = FakeDriver(by_class=dict( test_class=dict(test_resource=20), )) context = FakeContext(None, 'test_class') quota_value = resource.quota(driver, context) self.assertEqual(quota_value, 20) def test_quota_with_project_with_class(self): self.flags(quota_instances=10) resource = quota.BaseResource('test_resource', 'quota_instances') driver = FakeDriver(by_project=dict( 
test_project=dict(test_resource=15), ), by_class=dict( test_class=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context) self.assertEqual(quota_value, 15) def test_quota_override_project_with_class(self): self.flags(quota_instances=10) resource = quota.BaseResource('test_resource', 'quota_instances') driver = FakeDriver(by_project=dict( test_project=dict(test_resource=15), override_project=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context, project_id='override_project') self.assertEqual(quota_value, 20) def test_quota_with_project_override_class(self): self.flags(quota_instances=10) resource = quota.BaseResource('test_resource', 'quota_instances') driver = FakeDriver(by_class=dict( test_class=dict(test_resource=15), override_class=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context, quota_class='override_class') self.assertEqual(quota_value, 20) def test_valid_method_call_check_invalid_input(self): resources = {'dummy': 1} self.assertRaises(exception.InvalidQuotaMethodUsage, quota._valid_method_call_check_resources, resources, 'limit') def test_valid_method_call_check_invalid_method(self): resources = {'key_pairs': 1} self.assertRaises(exception.InvalidQuotaMethodUsage, quota._valid_method_call_check_resources, resources, 'dummy') def test_valid_method_call_check_multiple(self): resources = {'key_pairs': 1, 'dummy': 2} self.assertRaises(exception.InvalidQuotaMethodUsage, quota._valid_method_call_check_resources, resources, 'check') resources = {'key_pairs': 1, 'instances': 2, 'dummy': 3} self.assertRaises(exception.InvalidQuotaMethodUsage, quota._valid_method_call_check_resources, resources, 'check') def test_valid_method_call_check_wrong_method_reserve(self): resources = {'key_pairs': 1} self.assertRaises(exception.InvalidQuotaMethodUsage, 
quota._valid_method_call_check_resources, resources, 'reserve') def test_valid_method_call_check_wrong_method_check(self): resources = {'fixed_ips': 1} self.assertRaises(exception.InvalidQuotaMethodUsage, quota._valid_method_call_check_resources, resources, 'check') class QuotaEngineTestCase(test.TestCase): def test_init(self): quota_obj = quota.QuotaEngine() self.assertEqual(quota_obj._resources, {}) self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver) def test_init_override_string(self): quota_obj = quota.QuotaEngine( quota_driver_class='nova.tests.unit.test_quota.FakeDriver') self.assertEqual(quota_obj._resources, {}) self.assertIsInstance(quota_obj._driver, FakeDriver) def test_init_override_obj(self): quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver) self.assertEqual(quota_obj._resources, {}) self.assertEqual(quota_obj._driver, FakeDriver) def test_register_resource(self): quota_obj = quota.QuotaEngine() resource = quota.AbsoluteResource('test_resource') quota_obj.register_resource(resource) self.assertEqual(quota_obj._resources, dict(test_resource=resource)) def test_register_resources(self): quota_obj = quota.QuotaEngine() resources = [ quota.AbsoluteResource('test_resource1'), quota.AbsoluteResource('test_resource2'), quota.AbsoluteResource('test_resource3'), ] quota_obj.register_resources(resources) self.assertEqual(quota_obj._resources, dict( test_resource1=resources[0], test_resource2=resources[1], test_resource3=resources[2], )) def test_get_by_project_and_user(self): context = FakeContext('test_project', 'test_class') driver = FakeDriver(by_user=dict( fake_user=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_project_and_user(context, 'test_project', 'fake_user', 'test_resource') self.assertEqual(driver.called, [ ('get_by_project_and_user', context, 'test_project', 'fake_user', 'test_resource'), ]) self.assertEqual(result, 42) def test_get_by_project(self): context = 
FakeContext('test_project', 'test_class') driver = FakeDriver(by_project=dict( test_project=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_project(context, 'test_project', 'test_resource') self.assertEqual(driver.called, [ ('get_by_project', context, 'test_project', 'test_resource'), ]) self.assertEqual(result, 42) def test_get_by_class(self): context = FakeContext('test_project', 'test_class') driver = FakeDriver(by_class=dict( test_class=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_class(context, 'test_class', 'test_resource') self.assertEqual(driver.called, [ ('get_by_class', context, 'test_class', 'test_resource'), ]) self.assertEqual(result, 42) def _make_quota_obj(self, driver): quota_obj = quota.QuotaEngine(quota_driver_class=driver) resources = [ quota.AbsoluteResource('test_resource4'), quota.AbsoluteResource('test_resource3'), quota.AbsoluteResource('test_resource2'), quota.AbsoluteResource('test_resource1'), ] quota_obj.register_resources(resources) return quota_obj def test_get_defaults(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result = quota_obj.get_defaults(context) self.assertEqual(driver.called, [ ('get_defaults', context, quota_obj._resources), ]) self.assertEqual(result, quota_obj._resources) def test_get_class_quotas(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_class_quotas(context, 'test_class') result2 = quota_obj.get_class_quotas(context, 'test_class', False) self.assertEqual(driver.called, [ ('get_class_quotas', context, quota_obj._resources, 'test_class', True), ('get_class_quotas', context, quota_obj._resources, 'test_class', False), ]) self.assertEqual(result1, quota_obj._resources) self.assertEqual(result2, quota_obj._resources) def test_get_user_quotas(self): context = 
FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_user_quotas(context, 'test_project', 'fake_user') result2 = quota_obj.get_user_quotas(context, 'test_project', 'fake_user', quota_class='test_class', defaults=False, usages=False) self.assertEqual(driver.called, [ ('get_user_quotas', context, quota_obj._resources, 'test_project', 'fake_user', None, True, True), ('get_user_quotas', context, quota_obj._resources, 'test_project', 'fake_user', 'test_class', False, False), ]) self.assertEqual(result1, quota_obj._resources) self.assertEqual(result2, quota_obj._resources) def test_get_project_quotas(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_project_quotas(context, 'test_project') result2 = quota_obj.get_project_quotas(context, 'test_project', quota_class='test_class', defaults=False, usages=False) self.assertEqual(driver.called, [ ('get_project_quotas', context, quota_obj._resources, 'test_project', None, True, True, False), ('get_project_quotas', context, quota_obj._resources, 'test_project', 'test_class', False, False, False), ]) self.assertEqual(result1, quota_obj._resources) self.assertEqual(result2, quota_obj._resources) def test_count_no_resource(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) self.assertRaises(exception.QuotaResourceUnknown, quota_obj.count, context, 'test_resource5', True, foo='bar') def test_count_wrong_resource(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) self.assertRaises(exception.QuotaResourceUnknown, quota_obj.count, context, 'test_resource1', True, foo='bar') def test_count(self): def fake_count(context, *args, **kwargs): self.assertEqual(args, (True,)) self.assertEqual(kwargs, dict(foo='bar')) return 5 context = FakeContext(None, None) driver = FakeDriver() quota_obj = 
self._make_quota_obj(driver) quota_obj.register_resource(quota.CountableResource('test_resource5', fake_count)) result = quota_obj.count(context, 'test_resource5', True, foo='bar') self.assertEqual(result, 5) def test_limit_check(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.limit_check(context, test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1) self.assertEqual(driver.called, [ ('limit_check', context, quota_obj._resources, dict( test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1, ), None, None), ]) def test_reserve(self): context = FakeContext(None, None) driver = FakeDriver(reservations=[ 'resv-01', 'resv-02', 'resv-03', 'resv-04', ]) quota_obj = self._make_quota_obj(driver) result1 = quota_obj.reserve(context, test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1) result2 = quota_obj.reserve(context, expire=3600, test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4) result3 = quota_obj.reserve(context, project_id='fake_project', test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4) self.assertEqual(driver.called, [ ('reserve', context, quota_obj._resources, dict( test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1, ), None, None, None), ('reserve', context, quota_obj._resources, dict( test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4, ), 3600, None, None), ('reserve', context, quota_obj._resources, dict( test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4, ), None, 'fake_project', None), ]) self.assertEqual(result1, [ 'resv-01', 'resv-02', 'resv-03', 'resv-04', ]) self.assertEqual(result2, [ 'resv-01', 'resv-02', 'resv-03', 'resv-04', ]) self.assertEqual(result3, [ 'resv-01', 'resv-02', 'resv-03', 'resv-04', ]) def test_commit(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) 
quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03']) self.assertEqual(driver.called, [ ('commit', context, ['resv-01', 'resv-02', 'resv-03'], None, None), ]) def test_rollback(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03']) self.assertEqual(driver.called, [ ('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None, None), ]) def test_usage_reset(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.usage_reset(context, ['res1', 'res2', 'res3']) self.assertEqual(driver.called, [ ('usage_reset', context, ['res1', 'res2', 'res3']), ]) def test_destroy_all_by_project_and_user(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.destroy_all_by_project_and_user(context, 'test_project', 'fake_user') self.assertEqual(driver.called, [ ('destroy_all_by_project_and_user', context, 'test_project', 'fake_user'), ]) def test_destroy_all_by_project(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.destroy_all_by_project(context, 'test_project') self.assertEqual(driver.called, [ ('destroy_all_by_project', context, 'test_project'), ]) def test_expire(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.expire(context) self.assertEqual(driver.called, [ ('expire', context), ]) def test_resources(self): quota_obj = self._make_quota_obj(None) self.assertEqual(quota_obj.resources, ['test_resource1', 'test_resource2', 'test_resource3', 'test_resource4']) class DbQuotaDriverTestCase(test.TestCase): def setUp(self): super(DbQuotaDriverTestCase, self).setUp() self.flags(quota_instances=10, quota_cores=20, quota_ram=50 * 1024, quota_floating_ips=10, quota_fixed_ips=10, quota_metadata_items=128, quota_injected_files=5, 
quota_injected_file_content_bytes=10 * 1024, quota_injected_file_path_length=255, quota_security_groups=10, quota_security_group_rules=20, quota_server_groups=10, quota_server_group_members=10, reservation_expire=86400, until_refresh=0, max_age=0, ) self.driver = quota.DbQuotaDriver() self.calls = [] self.useFixture(test.TimeOverride()) def test_get_defaults(self): # Use our pre-defined resources self._stub_quota_class_get_default() result = self.driver.get_defaults(None, quota.QUOTAS._resources) self.assertEqual(result, dict( instances=5, cores=20, ram=25 * 1024, floating_ips=10, fixed_ips=10, metadata_items=64, injected_files=5, injected_file_content_bytes=5 * 1024, injected_file_path_bytes=255, security_groups=10, security_group_rules=20, key_pairs=100, server_groups=10, server_group_members=10, )) def _stub_quota_class_get_default(self): # Stub out quota_class_get_default def fake_qcgd(context): self.calls.append('quota_class_get_default') return dict( instances=5, ram=25 * 1024, metadata_items=64, injected_file_content_bytes=5 * 1024, ) self.stub_out('nova.db.quota_class_get_default', fake_qcgd) def _stub_quota_class_get_all_by_name(self): # Stub out quota_class_get_all_by_name def fake_qcgabn(context, quota_class): self.calls.append('quota_class_get_all_by_name') self.assertEqual(quota_class, 'test_class') return dict( instances=5, ram=25 * 1024, metadata_items=64, injected_file_content_bytes=5 * 1024, ) self.stub_out('nova.db.quota_class_get_all_by_name', fake_qcgabn) def test_get_class_quotas(self): self._stub_quota_class_get_all_by_name() result = self.driver.get_class_quotas(None, quota.QUOTAS._resources, 'test_class') self.assertEqual(self.calls, ['quota_class_get_all_by_name']) self.assertEqual(result, dict( instances=5, cores=20, ram=25 * 1024, floating_ips=10, fixed_ips=10, metadata_items=64, injected_files=5, injected_file_content_bytes=5 * 1024, injected_file_path_bytes=255, security_groups=10, security_group_rules=20, key_pairs=100, 
server_groups=10, server_group_members=10, )) def test_get_class_quotas_no_defaults(self): self._stub_quota_class_get_all_by_name() result = self.driver.get_class_quotas(None, quota.QUOTAS._resources, 'test_class', False) self.assertEqual(self.calls, ['quota_class_get_all_by_name']) self.assertEqual(result, dict( instances=5, ram=25 * 1024, metadata_items=64, injected_file_content_bytes=5 * 1024, )) def _stub_get_by_project_and_user(self): def fake_qgabpau(context, project_id, user_id): self.calls.append('quota_get_all_by_project_and_user') self.assertEqual(project_id, 'test_project') self.assertEqual(user_id, 'fake_user') return dict( cores=10, injected_files=2, injected_file_path_bytes=127, ) def fake_qgabp(context, project_id): self.calls.append('quota_get_all_by_project') self.assertEqual(project_id, 'test_project') return { 'cores': 10, 'injected_files': 2, 'injected_file_path_bytes': 127, } def fake_qugabpau(context, project_id, user_id): self.calls.append('quota_usage_get_all_by_project_and_user') self.assertEqual(project_id, 'test_project') self.assertEqual(user_id, 'fake_user') return dict( instances=dict(in_use=2, reserved=2), cores=dict(in_use=4, reserved=4), ram=dict(in_use=10 * 1024, reserved=0), floating_ips=dict(in_use=2, reserved=0), metadata_items=dict(in_use=0, reserved=0), injected_files=dict(in_use=0, reserved=0), injected_file_content_bytes=dict(in_use=0, reserved=0), injected_file_path_bytes=dict(in_use=0, reserved=0), ) self.stub_out('nova.db.quota_get_all_by_project_and_user', fake_qgabpau) self.stub_out('nova.db.quota_get_all_by_project', fake_qgabp) self.stub_out('nova.db.quota_usage_get_all_by_project_and_user', fake_qugabpau) self._stub_quota_class_get_all_by_name() def test_get_user_quotas(self): self.maxDiff = None self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', 'fake_user') self.assertEqual(self.calls, [ 
'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_usage_get_all_by_project_and_user', 'quota_class_get_all_by_name', ]) self.assertEqual(result, dict( instances=dict( limit=5, in_use=2, reserved=2, ), cores=dict( limit=10, in_use=4, reserved=4, ), ram=dict( limit=25 * 1024, in_use=10 * 1024, reserved=0, ), floating_ips=dict( limit=10, in_use=2, reserved=0, ), fixed_ips=dict( limit=10, in_use=0, reserved=0, ), metadata_items=dict( limit=64, in_use=0, reserved=0, ), injected_files=dict( limit=2, in_use=0, reserved=0, ), injected_file_content_bytes=dict( limit=5 * 1024, in_use=0, reserved=0, ), injected_file_path_bytes=dict( limit=127, in_use=0, reserved=0, ), security_groups=dict( limit=10, in_use=0, reserved=0, ), security_group_rules=dict( limit=20, in_use=0, reserved=0, ), key_pairs=dict( limit=100, in_use=0, reserved=0, ), server_groups=dict( limit=10, in_use=0, reserved=0, ), server_group_members=dict( limit=10, in_use=0, reserved=0, ), )) def _stub_get_by_project_and_user_specific(self): def fake_quota_get(context, project_id, resource, user_id=None): self.calls.append('quota_get') self.assertEqual(project_id, 'test_project') self.assertEqual(user_id, 'fake_user') self.assertEqual(resource, 'test_resource') return dict( test_resource=dict(in_use=20, reserved=10), ) self.stub_out('nova.db.quota_get', fake_quota_get) def test_get_by_project_and_user(self): self._stub_get_by_project_and_user_specific() result = self.driver.get_by_project_and_user( FakeContext('test_project', 'test_class'), 'test_project', 'fake_user', 'test_resource') self.assertEqual(self.calls, ['quota_get']) self.assertEqual(result, dict( test_resource=dict(in_use=20, reserved=10), )) def _stub_get_by_project(self): def fake_qgabp(context, project_id): self.calls.append('quota_get_all_by_project') self.assertEqual(project_id, 'test_project') return dict( cores=10, injected_files=2, injected_file_path_bytes=127, ) def fake_qugabp(context, project_id): 
self.calls.append('quota_usage_get_all_by_project') self.assertEqual(project_id, 'test_project') return dict( instances=dict(in_use=2, reserved=2), cores=dict(in_use=4, reserved=4), ram=dict(in_use=10 * 1024, reserved=0), floating_ips=dict(in_use=2, reserved=0), metadata_items=dict(in_use=0, reserved=0), injected_files=dict(in_use=0, reserved=0), injected_file_content_bytes=dict(in_use=0, reserved=0), injected_file_path_bytes=dict(in_use=0, reserved=0), ) def fake_quota_get_all(context, project_id): self.calls.append('quota_get_all') self.assertEqual(project_id, 'test_project') return [sqa_models.ProjectUserQuota(resource='instances', hard_limit=5), sqa_models.ProjectUserQuota(resource='cores', hard_limit=2)] self.stub_out('nova.db.quota_get_all_by_project', fake_qgabp) self.stub_out('nova.db.quota_usage_get_all_by_project', fake_qugabp) self.stub_out('nova.db.quota_get_all', fake_quota_get_all) self._stub_quota_class_get_all_by_name() self._stub_quota_class_get_default() def test_get_project_quotas(self): self.maxDiff = None self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project') self.assertEqual(self.calls, [ 'quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', 'quota_class_get_default', ]) self.assertEqual(result, dict( instances=dict( limit=5, in_use=2, reserved=2, ), cores=dict( limit=10, in_use=4, reserved=4, ), ram=dict( limit=25 * 1024, in_use=10 * 1024, reserved=0, ), floating_ips=dict( limit=10, in_use=2, reserved=0, ), fixed_ips=dict( limit=10, in_use=0, reserved=0, ), metadata_items=dict( limit=64, in_use=0, reserved=0, ), injected_files=dict( limit=2, in_use=0, reserved=0, ), injected_file_content_bytes=dict( limit=5 * 1024, in_use=0, reserved=0, ), injected_file_path_bytes=dict( limit=127, in_use=0, reserved=0, ), security_groups=dict( limit=10, in_use=0, reserved=0, ), security_group_rules=dict( limit=20, in_use=0, 
reserved=0, ), key_pairs=dict( limit=100, in_use=0, reserved=0, ), server_groups=dict( limit=10, in_use=0, reserved=0, ), server_group_members=dict( limit=10, in_use=0, reserved=0, ), )) def test_get_project_quotas_with_remains(self): self.maxDiff = None self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', remains=True) self.assertEqual(self.calls, [ 'quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', 'quota_class_get_default', 'quota_get_all', ]) self.assertEqual(result, dict( instances=dict( limit=5, in_use=2, reserved=2, remains=0, ), cores=dict( limit=10, in_use=4, reserved=4, remains=8, ), ram=dict( limit=25 * 1024, in_use=10 * 1024, reserved=0, remains=25 * 1024, ), floating_ips=dict( limit=10, in_use=2, reserved=0, remains=10, ), fixed_ips=dict( limit=10, in_use=0, reserved=0, remains=10, ), metadata_items=dict( limit=64, in_use=0, reserved=0, remains=64, ), injected_files=dict( limit=2, in_use=0, reserved=0, remains=2, ), injected_file_content_bytes=dict( limit=5 * 1024, in_use=0, reserved=0, remains=5 * 1024, ), injected_file_path_bytes=dict( limit=127, in_use=0, reserved=0, remains=127, ), security_groups=dict( limit=10, in_use=0, reserved=0, remains=10, ), security_group_rules=dict( limit=20, in_use=0, reserved=0, remains=20, ), key_pairs=dict( limit=100, in_use=0, reserved=0, remains=100, ), server_groups=dict( limit=10, in_use=0, reserved=0, remains=10, ), server_group_members=dict( limit=10, in_use=0, reserved=0, remains=10, ), )) def test_get_user_quotas_alt_context_no_class(self): self.maxDiff = None self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('test_project', None), quota.QUOTAS._resources, 'test_project', 'fake_user') self.assertEqual(self.calls, [ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_usage_get_all_by_project_and_user', ]) 
self.assertEqual(result, dict( instances=dict( limit=10, in_use=2, reserved=2, ), cores=dict( limit=10, in_use=4, reserved=4, ), ram=dict( limit=50 * 1024, in_use=10 * 1024, reserved=0, ), floating_ips=dict( limit=10, in_use=2, reserved=0, ), fixed_ips=dict( limit=10, in_use=0, reserved=0, ), metadata_items=dict( limit=128, in_use=0, reserved=0, ), injected_files=dict( limit=2, in_use=0, reserved=0, ), injected_file_content_bytes=dict( limit=10 * 1024, in_use=0, reserved=0, ), injected_file_path_bytes=dict( limit=127, in_use=0, reserved=0, ), security_groups=dict( limit=10, in_use=0, reserved=0, ), security_group_rules=dict( limit=20, in_use=0, reserved=0, ), key_pairs=dict( limit=100, in_use=0, reserved=0, ), server_groups=dict( limit=10, in_use=0, reserved=0, ), server_group_members=dict( limit=10, in_use=0, reserved=0, ), )) def test_get_project_quotas_alt_context_no_class(self): self.maxDiff = None self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('other_project', 'other_class'), quota.QUOTAS._resources, 'test_project') self.assertEqual(self.calls, [ 'quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_default', ]) self.assertEqual(result, dict( instances=dict( limit=5, in_use=2, reserved=2, ), cores=dict( limit=10, in_use=4, reserved=4, ), ram=dict( limit=25 * 1024, in_use=10 * 1024, reserved=0, ), floating_ips=dict( limit=10, in_use=2, reserved=0, ), fixed_ips=dict( limit=10, in_use=0, reserved=0, ), metadata_items=dict( limit=64, in_use=0, reserved=0, ), injected_files=dict( limit=2, in_use=0, reserved=0, ), injected_file_content_bytes=dict( limit=5 * 1024, in_use=0, reserved=0, ), injected_file_path_bytes=dict( limit=127, in_use=0, reserved=0, ), security_groups=dict( limit=10, in_use=0, reserved=0, ), security_group_rules=dict( limit=20, in_use=0, reserved=0, ), key_pairs=dict( limit=100, in_use=0, reserved=0, ), server_groups=dict( limit=10, in_use=0, reserved=0, ), server_group_members=dict( 
limit=10, in_use=0, reserved=0, ), )) def test_get_user_quotas_alt_context_with_class(self): self.maxDiff = None self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', 'fake_user', quota_class='test_class') self.assertEqual(self.calls, [ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_usage_get_all_by_project_and_user', 'quota_class_get_all_by_name', ]) self.assertEqual(result, dict( instances=dict( limit=5, in_use=2, reserved=2, ), cores=dict( limit=10, in_use=4, reserved=4, ), ram=dict( limit=25 * 1024, in_use=10 * 1024, reserved=0, ), floating_ips=dict( limit=10, in_use=2, reserved=0, ), fixed_ips=dict( limit=10, in_use=0, reserved=0, ), metadata_items=dict( limit=64, in_use=0, reserved=0, ), injected_files=dict( limit=2, in_use=0, reserved=0, ), injected_file_content_bytes=dict( limit=5 * 1024, in_use=0, reserved=0, ), injected_file_path_bytes=dict( limit=127, in_use=0, reserved=0, ), security_groups=dict( limit=10, in_use=0, reserved=0, ), security_group_rules=dict( limit=20, in_use=0, reserved=0, ), key_pairs=dict( limit=100, in_use=0, reserved=0, ), server_groups=dict( limit=10, in_use=0, reserved=0, ), server_group_members=dict( limit=10, in_use=0, reserved=0, ), )) def test_get_project_quotas_alt_context_with_class(self): self.maxDiff = None self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('other_project', 'other_class'), quota.QUOTAS._resources, 'test_project', quota_class='test_class') self.assertEqual(self.calls, [ 'quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', 'quota_class_get_default', ]) self.assertEqual(result, dict( instances=dict( limit=5, in_use=2, reserved=2, ), cores=dict( limit=10, in_use=4, reserved=4, ), ram=dict( limit=25 * 1024, in_use=10 * 1024, reserved=0, ), floating_ips=dict( limit=10, in_use=2, reserved=0, ), fixed_ips=dict( 
limit=10, in_use=0, reserved=0, ), metadata_items=dict( limit=64, in_use=0, reserved=0, ), injected_files=dict( limit=2, in_use=0, reserved=0, ), injected_file_content_bytes=dict( limit=5 * 1024, in_use=0, reserved=0, ), injected_file_path_bytes=dict( limit=127, in_use=0, reserved=0, ), security_groups=dict( limit=10, in_use=0, reserved=0, ), security_group_rules=dict( limit=20, in_use=0, reserved=0, ), key_pairs=dict( limit=100, in_use=0, reserved=0, ), server_groups=dict( limit=10, in_use=0, reserved=0, ), server_group_members=dict( limit=10, in_use=0, reserved=0, ), )) def test_get_user_quotas_no_defaults(self): self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', 'fake_user', defaults=False) self.assertEqual(self.calls, [ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_usage_get_all_by_project_and_user', 'quota_class_get_all_by_name', ]) self.assertEqual(result, dict( cores=dict( limit=10, in_use=4, reserved=4, ), injected_files=dict( limit=2, in_use=0, reserved=0, ), injected_file_path_bytes=dict( limit=127, in_use=0, reserved=0, ), )) def test_get_project_quotas_no_defaults(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', defaults=False) self.assertEqual(self.calls, [ 'quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', 'quota_class_get_default', ]) self.assertEqual(result, dict( cores=dict( limit=10, in_use=4, reserved=4, ), injected_files=dict( limit=2, in_use=0, reserved=0, ), injected_file_path_bytes=dict( limit=127, in_use=0, reserved=0, ), )) def test_get_user_quotas_no_usages(self): self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', 'fake_user', usages=False) 
self.assertEqual(self.calls, [ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_class_get_all_by_name', ]) self.assertEqual(result, dict( instances=dict( limit=5, ), cores=dict( limit=10, ), ram=dict( limit=25 * 1024, ), floating_ips=dict( limit=10, ), fixed_ips=dict( limit=10, ), metadata_items=dict( limit=64, ), injected_files=dict( limit=2, ), injected_file_content_bytes=dict( limit=5 * 1024, ), injected_file_path_bytes=dict( limit=127, ), security_groups=dict( limit=10, ), security_group_rules=dict( limit=20, ), key_pairs=dict( limit=100, ), server_groups=dict( limit=10, ), server_group_members=dict( limit=10, ), )) def test_get_project_quotas_no_usages(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', usages=False) self.assertEqual(self.calls, [ 'quota_get_all_by_project', 'quota_class_get_all_by_name', 'quota_class_get_default', ]) self.assertEqual(result, dict( instances=dict( limit=5, ), cores=dict( limit=10, ), ram=dict( limit=25 * 1024, ), floating_ips=dict( limit=10, ), fixed_ips=dict( limit=10, ), metadata_items=dict( limit=64, ), injected_files=dict( limit=2, ), injected_file_content_bytes=dict( limit=5 * 1024, ), injected_file_path_bytes=dict( limit=127, ), security_groups=dict( limit=10, ), security_group_rules=dict( limit=20, ), key_pairs=dict( limit=100, ), server_groups=dict( limit=10, ), server_group_members=dict( limit=10, ), )) def _stub_get_settable_quotas(self): def fake_quota_get_all_by_project(context, project_id): self.calls.append('quota_get_all_by_project') return {'floating_ips': 20} def fake_get_project_quotas(dbdrv, context, resources, project_id, quota_class=None, defaults=True, usages=True, remains=False, project_quotas=None): self.calls.append('get_project_quotas') result = {} for k, v in resources.items(): limit = v.default reserved = 0 if k == 'instances': remains = v.default - 5 in_use = 1 elif k 
== 'cores': remains = -1 in_use = 5 limit = -1 elif k == 'floating_ips': remains = 20 in_use = 0 limit = 20 else: remains = v.default in_use = 0 result[k] = {'limit': limit, 'in_use': in_use, 'reserved': reserved, 'remains': remains} return result def fake_process_quotas_in_get_user_quotas(dbdrv, context, resources, project_id, quotas, quota_class=None, defaults=True, usages=None, remains=False): self.calls.append('_process_quotas') result = {} for k, v in resources.items(): reserved = 0 if k == 'instances': in_use = 1 elif k == 'cores': in_use = 5 reserved = 10 else: in_use = 0 result[k] = {'limit': v.default, 'in_use': in_use, 'reserved': reserved} return result def fake_qgabpau(context, project_id, user_id): self.calls.append('quota_get_all_by_project_and_user') return {'instances': 2, 'cores': -1} self.stub_out('nova.db.quota_get_all_by_project', fake_quota_get_all_by_project) self.stub_out('nova.quota.DbQuotaDriver.get_project_quotas', fake_get_project_quotas) self.stub_out('nova.quota.DbQuotaDriver._process_quotas', fake_process_quotas_in_get_user_quotas) self.stub_out('nova.db.quota_get_all_by_project_and_user', fake_qgabpau) def test_get_settable_quotas_with_user(self): self._stub_get_settable_quotas() result = self.driver.get_settable_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', user_id='test_user') self.assertEqual(self.calls, [ 'quota_get_all_by_project', 'get_project_quotas', 'quota_get_all_by_project_and_user', '_process_quotas', ]) self.assertEqual(result, { 'instances': { 'minimum': 1, 'maximum': 7, }, 'cores': { 'minimum': 15, 'maximum': -1, }, 'ram': { 'minimum': 0, 'maximum': 50 * 1024, }, 'floating_ips': { 'minimum': 0, 'maximum': 20, }, 'fixed_ips': { 'minimum': 0, 'maximum': 10, }, 'metadata_items': { 'minimum': 0, 'maximum': 128, }, 'injected_files': { 'minimum': 0, 'maximum': 5, }, 'injected_file_content_bytes': { 'minimum': 0, 'maximum': 10 * 1024, }, 'injected_file_path_bytes': { 'minimum': 0, 
'maximum': 255, }, 'security_groups': { 'minimum': 0, 'maximum': 10, }, 'security_group_rules': { 'minimum': 0, 'maximum': 20, }, 'key_pairs': { 'minimum': 0, 'maximum': 100, }, 'server_groups': { 'minimum': 0, 'maximum': 10, }, 'server_group_members': { 'minimum': 0, 'maximum': 10, }, }) def test_get_settable_quotas_without_user(self): self._stub_get_settable_quotas() result = self.driver.get_settable_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project') self.assertEqual(self.calls, [ 'quota_get_all_by_project', 'get_project_quotas', ]) self.assertEqual(result, { 'instances': { 'minimum': 5, 'maximum': -1, }, 'cores': { 'minimum': 5, 'maximum': -1, }, 'ram': { 'minimum': 0, 'maximum': -1, }, 'floating_ips': { 'minimum': 0, 'maximum': -1, }, 'fixed_ips': { 'minimum': 0, 'maximum': -1, }, 'metadata_items': { 'minimum': 0, 'maximum': -1, }, 'injected_files': { 'minimum': 0, 'maximum': -1, }, 'injected_file_content_bytes': { 'minimum': 0, 'maximum': -1, }, 'injected_file_path_bytes': { 'minimum': 0, 'maximum': -1, }, 'security_groups': { 'minimum': 0, 'maximum': -1, }, 'security_group_rules': { 'minimum': 0, 'maximum': -1, }, 'key_pairs': { 'minimum': 0, 'maximum': -1, }, 'server_groups': { 'minimum': 0, 'maximum': -1, }, 'server_group_members': { 'minimum': 0, 'maximum': -1, }, }) def test_get_settable_quotas_by_user_with_unlimited_value(self): self._stub_get_settable_quotas() result = self.driver.get_settable_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', user_id='test_user') self.assertEqual(self.calls, [ 'quota_get_all_by_project', 'get_project_quotas', 'quota_get_all_by_project_and_user', '_process_quotas', ]) self.assertEqual(result, { 'instances': { 'minimum': 1, 'maximum': 7, }, 'cores': { 'minimum': 15, 'maximum': -1, }, 'ram': { 'minimum': 0, 'maximum': 50 * 1024, }, 'floating_ips': { 'minimum': 0, 'maximum': 20, }, 'fixed_ips': { 'minimum': 0, 'maximum': 10, }, 
'metadata_items': { 'minimum': 0, 'maximum': 128, }, 'injected_files': { 'minimum': 0, 'maximum': 5, }, 'injected_file_content_bytes': { 'minimum': 0, 'maximum': 10 * 1024, }, 'injected_file_path_bytes': { 'minimum': 0, 'maximum': 255, }, 'security_groups': { 'minimum': 0, 'maximum': 10, }, 'security_group_rules': { 'minimum': 0, 'maximum': 20, }, 'key_pairs': { 'minimum': 0, 'maximum': 100, }, 'server_groups': { 'minimum': 0, 'maximum': 10, }, 'server_group_members': { 'minimum': 0, 'maximum': 10, }, }) def _stub_get_project_quotas(self): def fake_get_project_quotas(dbdrv, context, resources, project_id, quota_class=None, defaults=True, usages=True, remains=False, project_quotas=None): self.calls.append('get_project_quotas') return {k: dict(limit=v.default) for k, v in resources.items()} self.stub_out('nova.quota.DbQuotaDriver.get_project_quotas', fake_get_project_quotas) def test_get_quotas_has_sync_unknown(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS._resources, ['unknown'], True) self.assertEqual(self.calls, []) def test_get_quotas_no_sync_unknown(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS._resources, ['unknown'], False) self.assertEqual(self.calls, []) def test_get_quotas_has_sync_no_sync_resource(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS._resources, ['metadata_items'], True) self.assertEqual(self.calls, []) def test_get_quotas_no_sync_has_sync_resource(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS._resources, ['instances'], False) self.assertEqual(self.calls, []) def test_get_quotas_has_sync(self): self._stub_get_project_quotas() result = self.driver._get_quotas(FakeContext('test_project', 'test_class'), 
quota.QUOTAS._resources, ['instances', 'cores', 'ram', 'floating_ips', 'security_groups', 'server_groups'], True, project_id='test_project') self.assertEqual(self.calls, ['get_project_quotas']) self.assertEqual(result, dict( instances=10, cores=20, ram=50 * 1024, floating_ips=10, security_groups=10, server_groups=10, )) def test_get_quotas_no_sync(self): self._stub_get_project_quotas() result = self.driver._get_quotas(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, ['metadata_items', 'injected_files', 'injected_file_content_bytes', 'injected_file_path_bytes', 'security_group_rules', 'server_group_members'], False, project_id='test_project') self.assertEqual(self.calls, ['get_project_quotas']) self.assertEqual(result, dict( metadata_items=128, injected_files=5, injected_file_content_bytes=10 * 1024, injected_file_path_bytes=255, security_group_rules=20, server_group_members=10, )) def test_limit_check_under(self): self._stub_get_project_quotas() self.assertRaises(exception.InvalidQuotaValue, self.driver.limit_check, FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(metadata_items=-1)) def test_limit_check_over(self): self._stub_get_project_quotas() self.assertRaises(exception.OverQuota, self.driver.limit_check, FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(metadata_items=129)) def test_limit_check_project_overs(self): self._stub_get_project_quotas() self.assertRaises(exception.OverQuota, self.driver.limit_check, FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(injected_file_content_bytes=10241, injected_file_path_bytes=256)) def test_limit_check_unlimited(self): self.flags(quota_metadata_items=-1) self._stub_get_project_quotas() self.driver.limit_check(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(metadata_items=32767)) def test_limit_check(self): self._stub_get_project_quotas() self.driver.limit_check(FakeContext('test_project', 'test_class'), 
quota.QUOTAS._resources, dict(metadata_items=128)) def _stub_quota_reserve(self): def fake_quota_reserve(context, resources, quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None): self.calls.append(('quota_reserve', expire, until_refresh, max_age)) return ['resv-1', 'resv-2', 'resv-3'] self.stub_out('nova.db.quota_reserve', fake_quota_reserve) def test_reserve_bad_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() self.assertRaises(exception.InvalidReservationExpiration, self.driver.reserve, FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(instances=2), expire='invalid') self.assertEqual(self.calls, []) def test_reserve_default_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(instances=2)) expire = timeutils.utcnow() + datetime.timedelta(seconds=86400) self.assertEqual(self.calls, [ 'get_project_quotas', ('quota_reserve', expire, 0, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_int_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(instances=2), expire=3600) expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) self.assertEqual(self.calls, [ 'get_project_quotas', ('quota_reserve', expire, 0, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_timedelta_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() expire_delta = datetime.timedelta(seconds=60) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(instances=2), expire=expire_delta) expire = timeutils.utcnow() + expire_delta self.assertEqual(self.calls, [ 'get_project_quotas', ('quota_reserve', expire, 0, 0), ]) self.assertEqual(result, 
['resv-1', 'resv-2', 'resv-3']) def test_reserve_datetime_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(instances=2), expire=expire) self.assertEqual(self.calls, [ 'get_project_quotas', ('quota_reserve', expire, 0, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_until_refresh(self): self._stub_get_project_quotas() self._stub_quota_reserve() self.flags(until_refresh=500) expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(instances=2), expire=expire) self.assertEqual(self.calls, [ 'get_project_quotas', ('quota_reserve', expire, 500, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_max_age(self): self._stub_get_project_quotas() self._stub_quota_reserve() self.flags(max_age=86400) expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(instances=2), expire=expire) self.assertEqual(self.calls, [ 'get_project_quotas', ('quota_reserve', expire, 0, 86400), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_usage_reset(self): calls = [] def fake_quota_usage_update(context, project_id, user_id, resource, **kwargs): calls.append(('quota_usage_update', context, project_id, user_id, resource, kwargs)) if resource == 'nonexist': raise exception.QuotaUsageNotFound(project_id=project_id) self.stub_out('nova.db.quota_usage_update', fake_quota_usage_update) ctx = FakeContext('test_project', 'test_class') resources = ['res1', 'res2', 'nonexist', 'res4'] self.driver.usage_reset(ctx, resources) # Make sure we had some calls self.assertEqual(len(calls), len(resources)) # Extract the elevated context 
that was used and do some # sanity checks elevated = calls[0][1] self.assertEqual(elevated.project_id, ctx.project_id) self.assertEqual(elevated.quota_class, ctx.quota_class) self.assertTrue(elevated.is_admin) # Now check that all the expected calls were made exemplar = [('quota_usage_update', elevated, 'test_project', 'fake_user', res, dict(in_use=-1)) for res in resources] self.assertEqual(calls, exemplar) class FakeSession(object): def begin(self): return self def add(self, instance): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_traceback): return False class FakeUsage(sqa_models.QuotaUsage): def save(self, *args, **kwargs): pass class QuotaReserveSqlAlchemyTestCase(test.TestCase): # nova.db.sqlalchemy.api.quota_reserve is so complex it needs its # own test case, and since it's a quota manipulator, this is the # best place to put it... def setUp(self): super(QuotaReserveSqlAlchemyTestCase, self).setUp() self.sync_called = set() self.quotas = dict( instances=5, cores=10, ram=10 * 1024, fixed_ips=5, ) self.deltas = dict( instances=2, cores=4, ram=2 * 1024, fixed_ips=2, ) def make_sync(res_name): def sync(context, project_id, user_id): self.sync_called.add(res_name) if res_name in self.usages: if self.usages[res_name].in_use < 0: return {res_name: 2} else: return {res_name: self.usages[res_name].in_use - 1} return {res_name: 0} return sync self.resources = {} _existing_quota_sync_func_dict = dict(sqa_api.QUOTA_SYNC_FUNCTIONS) def restore_sync_functions(): sqa_api.QUOTA_SYNC_FUNCTIONS.clear() sqa_api.QUOTA_SYNC_FUNCTIONS.update(_existing_quota_sync_func_dict) self.addCleanup(restore_sync_functions) for res_name in ('instances', 'cores', 'ram', 'fixed_ips'): method_name = '_sync_%s' % res_name sqa_api.QUOTA_SYNC_FUNCTIONS[method_name] = make_sync(res_name) res = quota.ReservableResource(res_name, '_sync_%s' % res_name) self.resources[res_name] = res self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) self.usages = 
{} self.usages_created = {} self.reservations_created = {} self.usages_list = [ dict(resource='instances', project_id='test_project', user_id='fake_user', in_use=2, reserved=2, until_refresh=None), dict(resource='cores', project_id='test_project', user_id='fake_user', in_use=2, reserved=4, until_refresh=None), dict(resource='ram', project_id='test_project', user_id='fake_user', in_use=2, reserved=2 * 1024, until_refresh=None), dict(resource='fixed_ips', project_id='test_project', user_id=None, in_use=2, reserved=2, until_refresh=None), ] def fake_get_project_user_quota_usages(context, project_id, user_id): return self.usages.copy(), self.usages.copy() def fake_quota_usage_create(project_id, user_id, resource, in_use, reserved, until_refresh, session): quota_usage_ref = self._make_quota_usage( project_id, user_id, resource, in_use, reserved, until_refresh, timeutils.utcnow(), timeutils.utcnow()) self.usages_created[resource] = quota_usage_ref return quota_usage_ref def fake_reservation_create(uuid, usage_id, project_id, user_id, resource, delta, expire, session): reservation_ref = self._make_reservation( uuid, usage_id, project_id, user_id, resource, delta, expire, timeutils.utcnow(), timeutils.utcnow()) self.reservations_created[resource] = reservation_ref return reservation_ref self.stub_out('nova.db.sqlalchemy.api._get_project_user_quota_usages', fake_get_project_user_quota_usages) self.stub_out('nova.db.sqlalchemy.api._quota_usage_create', fake_quota_usage_create) self.stub_out('nova.db.sqlalchemy.api._reservation_create', fake_reservation_create) self.useFixture(test.TimeOverride()) def _make_quota_usage(self, project_id, user_id, resource, in_use, reserved, until_refresh, created_at, updated_at): quota_usage_ref = FakeUsage() quota_usage_ref.id = len(self.usages) + len(self.usages_created) quota_usage_ref.project_id = project_id quota_usage_ref.user_id = user_id quota_usage_ref.resource = resource quota_usage_ref.in_use = in_use quota_usage_ref.reserved = 
reserved quota_usage_ref.until_refresh = until_refresh quota_usage_ref.created_at = created_at quota_usage_ref.updated_at = updated_at quota_usage_ref.deleted_at = None quota_usage_ref.deleted = False return quota_usage_ref def init_usage(self, project_id, user_id, resource, in_use, reserved=0, until_refresh=None, created_at=None, updated_at=None): if created_at is None: created_at = timeutils.utcnow() if updated_at is None: updated_at = timeutils.utcnow() if resource == 'fixed_ips': user_id = None quota_usage_ref = self._make_quota_usage(project_id, user_id, resource, in_use, reserved, until_refresh, created_at, updated_at) self.usages[resource] = quota_usage_ref def compare_usage(self, usage_dict, expected): for usage in expected: resource = usage['resource'] for key, value in usage.items(): actual = getattr(usage_dict[resource], key) self.assertEqual(actual, value, "%s != %s on usage for resource %s" % (actual, value, resource)) def _make_reservation(self, uuid, usage_id, project_id, user_id, resource, delta, expire, created_at, updated_at): reservation_ref = sqa_models.Reservation() reservation_ref.id = len(self.reservations_created) reservation_ref.uuid = uuid reservation_ref.usage_id = usage_id reservation_ref.project_id = project_id reservation_ref.user_id = user_id reservation_ref.resource = resource reservation_ref.delta = delta reservation_ref.expire = expire reservation_ref.created_at = created_at reservation_ref.updated_at = updated_at reservation_ref.deleted_at = None reservation_ref.deleted = False return reservation_ref def compare_reservation(self, reservations, expected): reservations = set(reservations) for resv in expected: resource = resv['resource'] resv_obj = self.reservations_created[resource] self.assertIn(resv_obj.uuid, reservations) reservations.discard(resv_obj.uuid) for key, value in resv.items(): actual = getattr(resv_obj, key) self.assertEqual(actual, value, "%s != %s on reservation for resource %s" % (actual, value, resource)) 
self.assertEqual(len(reservations), 0) def _update_reservations_list(self, usage_id_change=False, delta_change=False): reservations_list = [ dict(resource='instances', project_id='test_project', delta=2), dict(resource='cores', project_id='test_project', delta=4), dict(resource='ram', delta=2 * 1024), dict(resource='fixed_ips', project_id='test_project', delta=2), ] if usage_id_change: reservations_list[0]["usage_id"] = self.usages_created['instances'] reservations_list[1]["usage_id"] = self.usages_created['cores'] reservations_list[2]["usage_id"] = self.usages_created['ram'] reservations_list[3]["usage_id"] = self.usages_created['fixed_ips'] else: reservations_list[0]["usage_id"] = self.usages['instances'] reservations_list[1]["usage_id"] = self.usages['cores'] reservations_list[2]["usage_id"] = self.usages['ram'] reservations_list[3]["usage_id"] = self.usages['fixed_ips'] if delta_change: reservations_list[0]["delta"] = -2 reservations_list[1]["delta"] = -4 reservations_list[2]["delta"] = -2 * 1024 reservations_list[3]["delta"] = -2 return reservations_list def _init_usages(self, *in_use, **kwargs): for i, option in enumerate(('instances', 'cores', 'ram', 'fixed_ips')): self.init_usage('test_project', 'fake_user', option, in_use[i], **kwargs) return FakeContext('test_project', 'test_class') def test_quota_reserve_create_usages(self): context = FakeContext('test_project', 'test_class') result = sqa_api.quota_reserve(context, self.resources, self.quotas, self.quotas, self.deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram', 'fixed_ips'])) self.usages_list[0]["in_use"] = 0 self.usages_list[1]["in_use"] = 0 self.usages_list[2]["in_use"] = 0 self.usages_list[3]["in_use"] = 0 self.compare_usage(self.usages_created, self.usages_list) reservations_list = self._update_reservations_list(True) self.compare_reservation(result, reservations_list) def test_quota_reserve_negative_in_use(self): context = self._init_usages(-1, -1, -1, 
-1, until_refresh=1) result = sqa_api.quota_reserve(context, self.resources, self.quotas, self.quotas, self.deltas, self.expire, 5, 0) self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram', 'fixed_ips'])) self.usages_list[0]["until_refresh"] = 5 self.usages_list[1]["until_refresh"] = 5 self.usages_list[2]["until_refresh"] = 5 self.usages_list[3]["until_refresh"] = 5 self.compare_usage(self.usages, self.usages_list) self.assertEqual(self.usages_created, {}) self.compare_reservation(result, self._update_reservations_list()) def test_quota_reserve_until_refresh(self): context = self._init_usages(3, 3, 3, 3, until_refresh=1) result = sqa_api.quota_reserve(context, self.resources, self.quotas, self.quotas, self.deltas, self.expire, 5, 0) self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram', 'fixed_ips'])) self.usages_list[0]["until_refresh"] = 5 self.usages_list[1]["until_refresh"] = 5 self.usages_list[2]["until_refresh"] = 5 self.usages_list[3]["until_refresh"] = 5 self.compare_usage(self.usages, self.usages_list) self.assertEqual(self.usages_created, {}) self.compare_reservation(result, self._update_reservations_list()) def test_quota_reserve_max_age(self): max_age = 3600 record_created = (timeutils.utcnow() - datetime.timedelta(seconds=max_age)) context = self._init_usages(3, 3, 3, 3, created_at=record_created, updated_at=record_created) result = sqa_api.quota_reserve(context, self.resources, self.quotas, self.quotas, self.deltas, self.expire, 0, max_age) self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram', 'fixed_ips'])) self.compare_usage(self.usages, self.usages_list) self.assertEqual(self.usages_created, {}) self.compare_reservation(result, self._update_reservations_list()) def test_quota_reserve_no_refresh(self): context = self._init_usages(3, 3, 3, 3) result = sqa_api.quota_reserve(context, self.resources, self.quotas, self.quotas, self.deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set([])) 
self.usages_list[0]["in_use"] = 3 self.usages_list[1]["in_use"] = 3 self.usages_list[2]["in_use"] = 3 self.usages_list[3]["in_use"] = 3 self.compare_usage(self.usages, self.usages_list) self.assertEqual(self.usages_created, {}) self.compare_reservation(result, self._update_reservations_list()) def test_quota_reserve_unders(self): context = self._init_usages(1, 3, 1 * 1024, 1) self.deltas["instances"] = -2 self.deltas["cores"] = -4 self.deltas["ram"] = -2 * 1024 self.deltas["fixed_ips"] = -2 result = sqa_api.quota_reserve(context, self.resources, self.quotas, self.quotas, self.deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set([])) self.usages_list[0]["in_use"] = 1 self.usages_list[0]["reserved"] = 0 self.usages_list[1]["in_use"] = 3 self.usages_list[1]["reserved"] = 0 self.usages_list[2]["in_use"] = 1 * 1024 self.usages_list[2]["reserved"] = 0 self.usages_list[3]["in_use"] = 1 self.usages_list[3]["reserved"] = 0 self.compare_usage(self.usages, self.usages_list) self.assertEqual(self.usages_created, {}) reservations_list = self._update_reservations_list(False, True) self.compare_reservation(result, reservations_list) def test_quota_reserve_overs(self): context = self._init_usages(4, 8, 10 * 1024, 4) try: sqa_api.quota_reserve(context, self.resources, self.quotas, self.quotas, self.deltas, self.expire, 0, 0) except exception.OverQuota as e: expected_kwargs = {'code': 500, 'usages': {'instances': {'reserved': 0, 'in_use': 4}, 'ram': {'reserved': 0, 'in_use': 10240}, 'fixed_ips': {'reserved': 0, 'in_use': 4}, 'cores': {'reserved': 0, 'in_use': 8}}, 'overs': ['cores', 'fixed_ips', 'instances', 'ram'], 'quotas': {'cores': 10, 'ram': 10240, 'fixed_ips': 5, 'instances': 5}} self.assertEqual(e.kwargs, expected_kwargs) else: self.fail('Expected OverQuota failure') self.assertEqual(self.sync_called, set([])) self.usages_list[0]["in_use"] = 4 self.usages_list[0]["reserved"] = 0 self.usages_list[1]["in_use"] = 8 self.usages_list[1]["reserved"] = 0 
self.usages_list[2]["in_use"] = 10 * 1024 self.usages_list[2]["reserved"] = 0 self.usages_list[3]["in_use"] = 4 self.usages_list[3]["reserved"] = 0 self.compare_usage(self.usages, self.usages_list) self.assertEqual(self.usages_created, {}) self.assertEqual(self.reservations_created, {}) def test_quota_reserve_cores_unlimited(self): # Requesting 8 cores, quota_cores set to unlimited: self.flags(quota_cores=-1) self._init_usages(1, 8, 1 * 1024, 1) self.assertEqual(self.sync_called, set([])) self.usages_list[0]["in_use"] = 1 self.usages_list[0]["reserved"] = 0 self.usages_list[1]["in_use"] = 8 self.usages_list[1]["reserved"] = 0 self.usages_list[2]["in_use"] = 1 * 1024 self.usages_list[2]["reserved"] = 0 self.usages_list[3]["in_use"] = 1 self.usages_list[3]["reserved"] = 0 self.compare_usage(self.usages, self.usages_list) self.assertEqual(self.usages_created, {}) self.assertEqual(self.reservations_created, {}) def test_quota_reserve_ram_unlimited(self): # Requesting 10*1024 ram, quota_ram set to unlimited: self.flags(quota_ram=-1) self._init_usages(1, 1, 10 * 1024, 1) self.assertEqual(self.sync_called, set([])) self.usages_list[0]["in_use"] = 1 self.usages_list[0]["reserved"] = 0 self.usages_list[1]["in_use"] = 1 self.usages_list[1]["reserved"] = 0 self.usages_list[2]["in_use"] = 10 * 1024 self.usages_list[2]["reserved"] = 0 self.usages_list[3]["in_use"] = 1 self.usages_list[3]["reserved"] = 0 self.compare_usage(self.usages, self.usages_list) self.assertEqual(self.usages_created, {}) self.assertEqual(self.reservations_created, {}) def test_quota_reserve_reduction(self): context = self._init_usages(10, 20, 20 * 1024, 10) self.deltas["instances"] = -2 self.deltas["cores"] = -4 self.deltas["ram"] = -2 * 1024 self.deltas["fixed_ips"] = -2 result = sqa_api.quota_reserve(context, self.resources, self.quotas, self.quotas, self.deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set([])) self.usages_list[0]["in_use"] = 10 self.usages_list[0]["reserved"] = 0 
self.usages_list[1]["in_use"] = 20 self.usages_list[1]["reserved"] = 0 self.usages_list[2]["in_use"] = 20 * 1024 self.usages_list[2]["reserved"] = 0 self.usages_list[3]["in_use"] = 10 self.usages_list[3]["reserved"] = 0 self.compare_usage(self.usages, self.usages_list) self.assertEqual(self.usages_created, {}) reservations_list = self._update_reservations_list(False, True) self.compare_reservation(result, reservations_list) class NoopQuotaDriverTestCase(test.TestCase): def setUp(self): super(NoopQuotaDriverTestCase, self).setUp() self.flags(quota_instances=10, quota_cores=20, quota_ram=50 * 1024, quota_floating_ips=10, quota_metadata_items=128, quota_injected_files=5, quota_injected_file_content_bytes=10 * 1024, quota_injected_file_path_length=255, quota_security_groups=10, quota_security_group_rules=20, reservation_expire=86400, until_refresh=0, max_age=0, ) self.expected_with_usages = {} self.expected_without_usages = {} self.expected_without_dict = {} self.expected_settable_quotas = {} for r in quota.QUOTAS._resources: self.expected_with_usages[r] = dict(limit=-1, in_use=-1, reserved=-1) self.expected_without_usages[r] = dict(limit=-1) self.expected_without_dict[r] = -1 self.expected_settable_quotas[r] = dict(minimum=0, maximum=-1) self.driver = quota.NoopQuotaDriver() def test_get_defaults(self): # Use our pre-defined resources result = self.driver.get_defaults(None, quota.QUOTAS._resources) self.assertEqual(self.expected_without_dict, result) def test_get_class_quotas(self): result = self.driver.get_class_quotas(None, quota.QUOTAS._resources, 'test_class') self.assertEqual(self.expected_without_dict, result) def test_get_class_quotas_no_defaults(self): result = self.driver.get_class_quotas(None, quota.QUOTAS._resources, 'test_class', False) self.assertEqual(self.expected_without_dict, result) def test_get_project_quotas(self): result = self.driver.get_project_quotas(None, quota.QUOTAS._resources, 'test_project') self.assertEqual(self.expected_with_usages, 
result) def test_get_user_quotas(self): result = self.driver.get_user_quotas(None, quota.QUOTAS._resources, 'test_project', 'fake_user') self.assertEqual(self.expected_with_usages, result) def test_get_project_quotas_no_defaults(self): result = self.driver.get_project_quotas(None, quota.QUOTAS._resources, 'test_project', defaults=False) self.assertEqual(self.expected_with_usages, result) def test_get_user_quotas_no_defaults(self): result = self.driver.get_user_quotas(None, quota.QUOTAS._resources, 'test_project', 'fake_user', defaults=False) self.assertEqual(self.expected_with_usages, result) def test_get_project_quotas_no_usages(self): result = self.driver.get_project_quotas(None, quota.QUOTAS._resources, 'test_project', usages=False) self.assertEqual(self.expected_without_usages, result) def test_get_user_quotas_no_usages(self): result = self.driver.get_user_quotas(None, quota.QUOTAS._resources, 'test_project', 'fake_user', usages=False) self.assertEqual(self.expected_without_usages, result) def test_get_settable_quotas_with_user(self): result = self.driver.get_settable_quotas(None, quota.QUOTAS._resources, 'test_project', 'fake_user') self.assertEqual(self.expected_settable_quotas, result) def test_get_settable_quotas_without_user(self): result = self.driver.get_settable_quotas(None, quota.QUOTAS._resources, 'test_project') self.assertEqual(self.expected_settable_quotas, result) nova-13.0.0/nova/tests/unit/test_rpc.py0000664000567000056710000003142712701407773021177 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy import fixtures import mock import oslo_messaging as messaging from oslo_serialization import jsonutils import testtools from nova import context from nova import rpc from nova import test # Make a class that resets all of the global variables in nova.rpc class RPCResetFixture(fixtures.Fixture): def _setUp(self): self.trans = copy.copy(rpc.TRANSPORT) self.noti_trans = copy.copy(rpc.NOTIFICATION_TRANSPORT) self.noti = copy.copy(rpc.NOTIFIER) self.all_mods = copy.copy(rpc.ALLOWED_EXMODS) self.ext_mods = copy.copy(rpc.EXTRA_EXMODS) self.addCleanup(self._reset_everything) def _reset_everything(self): rpc.TRANSPORT = self.trans rpc.NOTIFICATION_TRANSPORT = self.noti_trans rpc.NOTIFIER = self.noti rpc.ALLOWED_EXMODS = self.all_mods rpc.EXTRA_EXMODS = self.ext_mods # We can't import nova.test.TestCase because that sets up an RPCFixture # that pretty much nullifies all of this testing class TestRPC(testtools.TestCase): def setUp(self): super(TestRPC, self).setUp() self.useFixture(RPCResetFixture()) @mock.patch.object(rpc, 'get_allowed_exmods') @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_transport') @mock.patch.object(messaging, 'get_notification_transport') @mock.patch.object(messaging, 'Notifier') def test_init_unversioned(self, mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods): # The expected call to get the legacy notifier will require no new # kwargs, and we expect the new notifier will need the noop driver expected = [{}, {'driver': 'noop'}] self._test_init(mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods, 'unversioned', expected) @mock.patch.object(rpc, 'get_allowed_exmods') @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_transport') @mock.patch.object(messaging, 'get_notification_transport') @mock.patch.object(messaging, 'Notifier') def 
test_init_both(self, mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods): expected = [{}, {'topic': 'versioned_notifications'}] self._test_init(mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods, 'both', expected) @mock.patch.object(rpc, 'get_allowed_exmods') @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_transport') @mock.patch.object(messaging, 'get_notification_transport') @mock.patch.object(messaging, 'Notifier') def test_init_versioned(self, mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods): expected = [{'driver': 'noop'}, {'topic': 'versioned_notifications'}] self._test_init(mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods, 'versioned', expected) def test_cleanup_transport_null(self): rpc.NOTIFICATION_TRANSPORT = mock.Mock() rpc.LEGACY_NOTIFIER = mock.Mock() rpc.NOTIFIER = mock.Mock() self.assertRaises(AssertionError, rpc.cleanup) def test_cleanup_notification_transport_null(self): rpc.TRANSPORT = mock.Mock() rpc.NOTIFIER = mock.Mock() self.assertRaises(AssertionError, rpc.cleanup) def test_cleanup_legacy_notifier_null(self): rpc.TRANSPORT = mock.Mock() rpc.NOTIFICATION_TRANSPORT = mock.Mock() rpc.NOTIFIER = mock.Mock() def test_cleanup_notifier_null(self): rpc.TRANSPORT = mock.Mock() rpc.LEGACY_NOTIFIER = mock.Mock() rpc.NOTIFICATION_TRANSPORT = mock.Mock() self.assertRaises(AssertionError, rpc.cleanup) def test_cleanup(self): rpc.LEGACY_NOTIFIER = mock.Mock() rpc.NOTIFIER = mock.Mock() rpc.NOTIFICATION_TRANSPORT = mock.Mock() rpc.TRANSPORT = mock.Mock() trans_cleanup = mock.Mock() not_trans_cleanup = mock.Mock() rpc.TRANSPORT.cleanup = trans_cleanup rpc.NOTIFICATION_TRANSPORT.cleanup = not_trans_cleanup rpc.cleanup() trans_cleanup.assert_called_once_with() not_trans_cleanup.assert_called_once_with() self.assertIsNone(rpc.TRANSPORT) self.assertIsNone(rpc.NOTIFICATION_TRANSPORT) self.assertIsNone(rpc.LEGACY_NOTIFIER) self.assertIsNone(rpc.NOTIFIER) 
@mock.patch.object(messaging, 'set_transport_defaults') def test_set_defaults(self, mock_set): control_exchange = mock.Mock() rpc.set_defaults(control_exchange) mock_set.assert_called_once_with(control_exchange) def test_add_extra_exmods(self): rpc.EXTRA_EXMODS = [] rpc.add_extra_exmods('foo', 'bar') self.assertEqual(['foo', 'bar'], rpc.EXTRA_EXMODS) def test_clear_extra_exmods(self): rpc.EXTRA_EXMODS = ['foo', 'bar'] rpc.clear_extra_exmods() self.assertEqual(0, len(rpc.EXTRA_EXMODS)) def test_get_allowed_exmods(self): rpc.ALLOWED_EXMODS = ['foo'] rpc.EXTRA_EXMODS = ['bar'] exmods = rpc.get_allowed_exmods() self.assertEqual(['foo', 'bar'], exmods) @mock.patch.object(messaging, 'TransportURL') def test_get_transport_url(self, mock_url): conf = mock.Mock() rpc.CONF = conf mock_url.parse.return_value = 'foo' url = rpc.get_transport_url(url_str='bar') self.assertEqual('foo', url) mock_url.parse.assert_called_once_with(conf, 'bar', rpc.TRANSPORT_ALIASES) @mock.patch.object(messaging, 'TransportURL') def test_get_transport_url_null(self, mock_url): conf = mock.Mock() rpc.CONF = conf mock_url.parse.return_value = 'foo' url = rpc.get_transport_url() self.assertEqual('foo', url) mock_url.parse.assert_called_once_with(conf, None, rpc.TRANSPORT_ALIASES) @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'RPCClient') def test_get_client(self, mock_client, mock_ser): rpc.TRANSPORT = mock.Mock() tgt = mock.Mock() ser = mock.Mock() mock_client.return_value = 'client' mock_ser.return_value = ser client = rpc.get_client(tgt, version_cap='1.0', serializer='foo') mock_ser.assert_called_once_with('foo') mock_client.assert_called_once_with(rpc.TRANSPORT, tgt, version_cap='1.0', serializer=ser) self.assertEqual('client', client) @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_rpc_server') def test_get_server(self, mock_get, mock_ser): rpc.TRANSPORT = mock.Mock() ser = mock.Mock() tgt = mock.Mock() ends = mock.Mock() 
mock_ser.return_value = ser mock_get.return_value = 'server' server = rpc.get_server(tgt, ends, serializer='foo') mock_ser.assert_called_once_with('foo') mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends, executor='eventlet', serializer=ser) self.assertEqual('server', server) def test_get_notifier(self): rpc.LEGACY_NOTIFIER = mock.Mock() mock_prep = mock.Mock() mock_prep.return_value = 'notifier' rpc.LEGACY_NOTIFIER.prepare = mock_prep notifier = rpc.get_notifier('service', publisher_id='foo') mock_prep.assert_called_once_with(publisher_id='foo') self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier) self.assertEqual('notifier', notifier.notifier) def test_get_notifier_null_publisher(self): rpc.LEGACY_NOTIFIER = mock.Mock() mock_prep = mock.Mock() mock_prep.return_value = 'notifier' rpc.LEGACY_NOTIFIER.prepare = mock_prep notifier = rpc.get_notifier('service', host='bar') mock_prep.assert_called_once_with(publisher_id='service.bar') self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier) self.assertEqual('notifier', notifier.notifier) def test_get_versioned_notifier(self): rpc.NOTIFIER = mock.Mock() mock_prep = mock.Mock() mock_prep.return_value = 'notifier' rpc.NOTIFIER.prepare = mock_prep notifier = rpc.get_versioned_notifier('service.foo') mock_prep.assert_called_once_with(publisher_id='service.foo') self.assertEqual('notifier', notifier) def _test_init(self, mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods, notif_format, expected_driver_topic_kwargs): legacy_notifier = mock.Mock() notifier = mock.Mock() notif_transport = mock.Mock() transport = mock.Mock() serializer = mock.Mock() conf = mock.Mock() conf.notification_format = notif_format mock_exmods.return_value = ['foo'] mock_trans.return_value = transport mock_noti_trans.return_value = notif_transport mock_ser.return_value = serializer mock_notif.side_effect = [legacy_notifier, notifier] rpc.init(conf) mock_exmods.assert_called_once_with() 
mock_trans.assert_called_once_with(conf, allowed_remote_exmods=['foo'], aliases=rpc.TRANSPORT_ALIASES) self.assertIsNotNone(rpc.TRANSPORT) self.assertIsNotNone(rpc.LEGACY_NOTIFIER) self.assertIsNotNone(rpc.NOTIFIER) self.assertEqual(legacy_notifier, rpc.LEGACY_NOTIFIER) self.assertEqual(notifier, rpc.NOTIFIER) expected_calls = [] for kwargs in expected_driver_topic_kwargs: expected_kwargs = {'serializer': serializer} expected_kwargs.update(kwargs) expected_calls.append(((notif_transport,), expected_kwargs)) self.assertEqual(expected_calls, mock_notif.call_args_list, "The calls to messaging.Notifier() did not create " "the legacy and versioned notifiers properly.") class TestJsonPayloadSerializer(test.NoDBTestCase): def test_serialize_entity(self): with mock.patch.object(jsonutils, 'to_primitive') as mock_prim: rpc.JsonPayloadSerializer.serialize_entity('context', 'entity') mock_prim.assert_called_once_with('entity', convert_instances=True) class TestRequestContextSerializer(test.NoDBTestCase): def setUp(self): super(TestRequestContextSerializer, self).setUp() self.mock_base = mock.Mock() self.ser = rpc.RequestContextSerializer(self.mock_base) self.ser_null = rpc.RequestContextSerializer(None) def test_serialize_entity(self): self.mock_base.serialize_entity.return_value = 'foo' ser_ent = self.ser.serialize_entity('context', 'entity') self.mock_base.serialize_entity.assert_called_once_with('context', 'entity') self.assertEqual('foo', ser_ent) def test_serialize_entity_null_base(self): ser_ent = self.ser_null.serialize_entity('context', 'entity') self.assertEqual('entity', ser_ent) def test_deserialize_entity(self): self.mock_base.deserialize_entity.return_value = 'foo' deser_ent = self.ser.deserialize_entity('context', 'entity') self.mock_base.deserialize_entity.assert_called_once_with('context', 'entity') self.assertEqual('foo', deser_ent) def test_deserialize_entity_null_base(self): deser_ent = self.ser_null.deserialize_entity('context', 'entity') 
self.assertEqual('entity', deser_ent) def test_serialize_context(self): context = mock.Mock() self.ser.serialize_context(context) context.to_dict.assert_called_once_with() @mock.patch.object(context, 'RequestContext') def test_deserialize_context(self, mock_req): self.ser.deserialize_context('context') mock_req.from_dict.assert_called_once_with('context') nova-13.0.0/nova/tests/unit/fake_hosts.py0000664000567000056710000000255712701407773021504 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Provides some fake hosts to test host and service related functions """ from nova.tests.unit.objects import test_service HOST_LIST = [ {"host_name": "host_c1", "service": "compute", "zone": "nova"}, {"host_name": "host_c2", "service": "compute", "zone": "nova"}] OS_API_HOST_LIST = {"hosts": HOST_LIST} HOST_LIST_NOVA_ZONE = [ {"host_name": "host_c1", "service": "compute", "zone": "nova"}, {"host_name": "host_c2", "service": "compute", "zone": "nova"}] service_base = test_service.fake_service SERVICES_LIST = [ dict(service_base, host='host_c1', topic='compute', binary='nova-compute'), dict(service_base, host='host_c2', topic='compute', binary='nova-compute')] nova-13.0.0/nova/tests/unit/fake_notifier.py0000664000567000056710000000723112701407773022155 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import functools import oslo_messaging as messaging from oslo_serialization import jsonutils from nova import rpc NOTIFICATIONS = [] VERSIONED_NOTIFICATIONS = [] def reset(): del NOTIFICATIONS[:] del VERSIONED_NOTIFICATIONS[:] FakeMessage = collections.namedtuple('Message', ['publisher_id', 'priority', 'event_type', 'payload', 'context']) class FakeNotifier(object): def __init__(self, transport, publisher_id, serializer=None): self.transport = transport self.publisher_id = publisher_id self._serializer = serializer or messaging.serializer.NoOpSerializer() for priority in ['debug', 'info', 'warn', 'error', 'critical']: setattr(self, priority, functools.partial(self._notify, priority.upper())) def prepare(self, publisher_id=None): if publisher_id is None: publisher_id = self.publisher_id return self.__class__(self.transport, publisher_id, serializer=self._serializer) def _notify(self, priority, ctxt, event_type, payload): payload = self._serializer.serialize_entity(ctxt, payload) # NOTE(sileht): simulate the kombu serializer # this permit to raise an exception if something have not # been serialized correctly jsonutils.to_primitive(payload) # NOTE(melwitt): Try to serialize the context, as the rpc would. # An exception will be raised if something is wrong # with the context. 
self._serializer.serialize_context(ctxt) msg = FakeMessage(self.publisher_id, priority, event_type, payload, ctxt) NOTIFICATIONS.append(msg) class FakeVersionedNotifier(FakeNotifier): def _notify(self, priority, ctxt, event_type, payload): payload = self._serializer.serialize_entity(ctxt, payload) VERSIONED_NOTIFICATIONS.append({'publisher_id': self.publisher_id, 'priority': priority, 'event_type': event_type, 'payload': payload}) def stub_notifier(stubs): stubs.Set(messaging, 'Notifier', FakeNotifier) if rpc.LEGACY_NOTIFIER and rpc.NOTIFIER: stubs.Set(rpc, 'LEGACY_NOTIFIER', FakeNotifier(rpc.LEGACY_NOTIFIER.transport, rpc.LEGACY_NOTIFIER.publisher_id, serializer=getattr(rpc.LEGACY_NOTIFIER, '_serializer', None))) stubs.Set(rpc, 'NOTIFIER', FakeVersionedNotifier(rpc.NOTIFIER.transport, rpc.NOTIFIER.publisher_id, serializer=getattr(rpc.NOTIFIER, '_serializer', None))) nova-13.0.0/nova/tests/unit/volume/0000775000567000056710000000000012701410205020262 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/volume/__init__.py0000664000567000056710000000000012701407773022401 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/volume/encryptors/0000775000567000056710000000000012701410205022472 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/volume/encryptors/__init__.py0000664000567000056710000000000012701407773024611 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/volume/encryptors/test_luks.py0000664000567000056710000001615612701407773025112 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_concurrency import processutils

from nova.tests.unit.volume.encryptors import test_cryptsetup
from nova.volume.encryptors import luks


class LuksEncryptorTestCase(test_cryptsetup.CryptsetupEncryptorTestCase):
    """Tests for the LUKS volume encryptor.

    Inherits the fixture (self.encryptor, self.dev_path, self.dev_name,
    self.symlink_path) from CryptsetupEncryptorTestCase; only the encryptor
    class under test differs.

    NOTE: the mock.call sequences below assert the exact order of the
    ``cryptsetup`` invocations, which is part of the tested contract.
    """

    def _create(self, connection_info):
        # Hook used by the base class setUp to build the encryptor under test.
        return luks.LuksEncryptor(connection_info)

    @mock.patch('nova.utils.execute')
    def test_is_luks(self, mock_execute):
        # is_luks() probes the device with "cryptsetup isLuks --verbose".
        luks.is_luks(self.dev_path)

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'isLuks', '--verbose',
                      self.dev_path, run_as_root=True,
                      check_exit_code=True),
        ], any_order=False)
        self.assertEqual(1, mock_execute.call_count)

    @mock.patch('nova.volume.encryptors.luks.LOG')
    @mock.patch('nova.utils.execute')
    def test_is_luks_with_error(self, mock_execute, mock_log):
        # A failing isLuks probe must be swallowed and logged, not raised.
        error_msg = "Device %s is not a valid LUKS device." % self.dev_path
        mock_execute.side_effect = \
            processutils.ProcessExecutionError(exit_code=1,
                                               stderr=error_msg)

        luks.is_luks(self.dev_path)

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'isLuks', '--verbose',
                      self.dev_path, run_as_root=True,
                      check_exit_code=True),
        ])
        self.assertEqual(1, mock_execute.call_count)
        self.assertEqual(1, mock_log.warning.call_count)  # warning logged

    @mock.patch('nova.utils.execute')
    def test__format_volume(self, mock_execute):
        # Formatting feeds the passphrase via stdin (--key-file=-) and
        # retries up to 3 times.
        self.encryptor._format_volume("passphrase")

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', '--batch-mode', 'luksFormat',
                      '--key-file=-', self.dev_path,
                      process_input='passphrase',
                      run_as_root=True, check_exit_code=True, attempts=3),
        ])
        self.assertEqual(1, mock_execute.call_count)

    @mock.patch('nova.utils.execute')
    def test__open_volume(self, mock_execute):
        self.encryptor._open_volume("passphrase")

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-',
                      self.dev_path, self.dev_name,
                      process_input='passphrase',
                      run_as_root=True, check_exit_code=True),
        ])
        self.assertEqual(1, mock_execute.call_count)

    @mock.patch('nova.utils.execute')
    def test_attach_volume(self, mock_execute):
        # Happy path: luksOpen succeeds, then a symlink to the mapper
        # device is created.
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = \
            test_cryptsetup.fake__get_key(None)

        self.encryptor.attach_volume(None)

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-',
                      self.dev_path, self.dev_name,
                      process_input='0' * 32,
                      run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                      '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                      run_as_root=True, check_exit_code=True),
        ])
        self.assertEqual(2, mock_execute.call_count)

    @mock.patch('nova.utils.execute')
    def test_attach_volume_not_formatted(self, mock_execute):
        # If luksOpen fails AND the device is not LUKS (isLuks also fails),
        # the encryptor formats the device and retries: 5 calls total, in
        # exactly this order.
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = \
            test_cryptsetup.fake__get_key(None)

        mock_execute.side_effect = [
            processutils.ProcessExecutionError(exit_code=1),  # luksOpen
            processutils.ProcessExecutionError(exit_code=1),  # isLuks
            mock.DEFAULT,  # luksFormat
            mock.DEFAULT,  # luksOpen
            mock.DEFAULT,  # ln
        ]

        self.encryptor.attach_volume(None)

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-',
                      self.dev_path, self.dev_name,
                      process_input='0' * 32,
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'isLuks', '--verbose',
                      self.dev_path, run_as_root=True,
                      check_exit_code=True),
            mock.call('cryptsetup', '--batch-mode', 'luksFormat',
                      '--key-file=-', self.dev_path,
                      process_input='0' * 32,
                      run_as_root=True, check_exit_code=True, attempts=3),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-',
                      self.dev_path, self.dev_name,
                      process_input='0' * 32,
                      run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                      '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                      run_as_root=True, check_exit_code=True),
        ], any_order=False)
        self.assertEqual(5, mock_execute.call_count)

    @mock.patch('nova.utils.execute')
    def test_attach_volume_fail(self, mock_execute):
        # luksOpen fails but the device IS already LUKS (isLuks succeeds):
        # the original failure must propagate, with no format attempt.
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = \
            test_cryptsetup.fake__get_key(None)

        mock_execute.side_effect = [
            processutils.ProcessExecutionError(exit_code=1),  # luksOpen
            mock.DEFAULT,  # isLuks
        ]

        self.assertRaises(processutils.ProcessExecutionError,
                          self.encryptor.attach_volume, None)

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-',
                      self.dev_path, self.dev_name,
                      process_input='0' * 32,
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'isLuks', '--verbose',
                      self.dev_path, run_as_root=True,
                      check_exit_code=True),
        ], any_order=False)
        self.assertEqual(2, mock_execute.call_count)

    @mock.patch('nova.utils.execute')
    def test__close_volume(self, mock_execute):
        # detach_volume() delegates to _close_volume(): luksClose with
        # retries.
        self.encryptor.detach_volume()

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                      attempts=3, run_as_root=True, check_exit_code=True),
        ])
        self.assertEqual(1, mock_execute.call_count)

    @mock.patch('nova.utils.execute')
    def test_detach_volume(self, mock_execute):
        self.encryptor.detach_volume()

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                      attempts=3, run_as_root=True, check_exit_code=True),
        ])
        self.assertEqual(1, mock_execute.call_count)

# == archive member: nova/tests/unit/volume/encryptors/test_base.py ==
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock

from nova import keymgr
from nova import test
from nova.tests.unit.keymgr import fake
from nova.volume import encryptors
from nova.volume.encryptors import cryptsetup
from nova.volume.encryptors import luks
from nova.volume.encryptors import nop


class VolumeEncryptorTestCase(test.NoDBTestCase):
    """Base test case shared by the volume-encryptor test classes.

    Subclasses override :meth:`_create` to return the encryptor
    implementation under test; this base class itself only exercises the
    ``encryptors.get_volume_encryptor`` factory.
    """

    def _create(self, device_path):
        # Overridden by subclasses to build a concrete encryptor.
        pass

    def setUp(self):
        super(VolumeEncryptorTestCase, self).setUp()
        self.stubs.Set(keymgr, 'API', fake.fake_api)

        self.connection_info = {
            "data": {
                "device_path": "/dev/disk/by-path/"
                "ip-192.0.2.0:3260-iscsi-iqn.2010-10.org.openstack"
                ":volume-fake_uuid-lun-1",
            },
        }
        self.encryptor = self._create(self.connection_info)

    def test_get_encryptors(self):
        """Each known provider name maps to its encryptor class."""
        encryption = {'control_location': 'front-end',
                      'provider': 'LuksEncryptor'}
        encryptor = encryptors.get_volume_encryptor(self.connection_info,
                                                    **encryption)
        self.assertIsInstance(encryptor,
                              luks.LuksEncryptor,
                              "encryptor is not an instance of LuksEncryptor")

        encryption = {'control_location': 'front-end',
                      'provider': 'CryptsetupEncryptor'}
        encryptor = encryptors.get_volume_encryptor(self.connection_info,
                                                    **encryption)
        self.assertIsInstance(encryptor,
                              cryptsetup.CryptsetupEncryptor,
                              "encryptor is not an instance of"
                              "CryptsetupEncryptor")

        encryption = {'control_location': 'front-end',
                      'provider': 'NoOpEncryptor'}
        encryptor = encryptors.get_volume_encryptor(self.connection_info,
                                                    **encryption)
        self.assertIsInstance(encryptor,
                              nop.NoOpEncryptor,
                              "encryptor is not an instance of NoOpEncryptor")

    def test_get_error_encryptos(self):
        """An unknown provider raises ValueError from the factory."""
        # NOTE(review): method name typo ("encryptos") is kept intact so the
        # test id stays stable for existing selection regexes.
        encryption = {'control_location': 'front-end',
                      'provider': 'ErrorEncryptor'}
        self.assertRaises(ValueError,
                          encryptors.get_volume_encryptor,
                          self.connection_info,
                          **encryption)

    @mock.patch('nova.volume.encryptors.LOG')
    def test_error_log(self, log):
        """An unknown provider is logged before the error propagates."""
        encryption = {'control_location': 'front-end',
                      'provider': 'TestEncryptor'}
        provider = 'TestEncryptor'
        try:
            encryptors.get_volume_encryptor(self.connection_info,
                                            **encryption)
        except Exception as e:
            log.error.assert_called_once_with("Error instantiating "
                                              "%(provider)s: "
                                              "%(exception)s",
                                              {'provider': provider,
                                               'exception': e})
        else:
            # BUG FIX: the test previously passed silently when no exception
            # was raised, making it vacuous.  Fail explicitly instead.
            self.fail('get_volume_encryptor did not raise for an unknown '
                      'provider')

# == archive member: nova/tests/unit/volume/encryptors/test_nop.py ==
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova.tests.unit.volume.encryptors import test_base
from nova.volume.encryptors import nop


class NoOpEncryptorTestCase(test_base.VolumeEncryptorTestCase):
    """Smoke tests for the no-op encryptor: the calls must simply not raise."""

    def _create(self, connection_info):
        return nop.NoOpEncryptor(connection_info)

    def test_attach_volume(self):
        self.encryptor.attach_volume(None)

    def test_detach_volume(self):
        self.encryptor.detach_volume()

# == archive member: nova/tests/unit/volume/encryptors/test_cryptsetup.py ==
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the
# License for the specific language governing permissions and limitations
# under the License.

import array
import binascii

import mock
import six

from nova import exception
from nova.keymgr import key
from nova.tests.unit.volume.encryptors import test_base
from nova.volume.encryptors import cryptsetup


def fake__get_key(context):
    """Return a deterministic all-zero 256-bit AES key for the tests.

    BUG FIX: the original used ``('0' * 64).decode('hex')``, which exists
    only on Python 2 even though this module otherwise targets py2/py3
    (it imports six).  ``binascii.unhexlify`` produces the identical bytes
    on Python 2 and also works on Python 3.
    """
    raw = array.array('B', binascii.unhexlify('0' * 64)).tolist()

    symmetric_key = key.SymmetricKey('AES', raw)
    return symmetric_key


class CryptsetupEncryptorTestCase(test_base.VolumeEncryptorTestCase):
    """Tests for the plain-cryptsetup volume encryptor.

    Also serves as the base class for LuksEncryptorTestCase, providing the
    dev_path/dev_name/symlink_path fixture.
    """

    def _create(self, connection_info):
        return cryptsetup.CryptsetupEncryptor(connection_info)

    def setUp(self):
        super(CryptsetupEncryptorTestCase, self).setUp()

        self.dev_path = self.connection_info['data']['device_path']
        # Mapper name is the last path component of the device path.
        self.dev_name = self.dev_path.split('/')[-1]

        self.symlink_path = self.dev_path

    @mock.patch('nova.utils.execute')
    def test__open_volume(self, mock_execute):
        # The passphrase is fed via stdin (--key-file=-).
        self.encryptor._open_volume("passphrase")

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'create', '--key-file=-', self.dev_name,
                      self.dev_path, process_input='passphrase',
                      run_as_root=True, check_exit_code=True),
        ])
        self.assertEqual(1, mock_execute.call_count)

    @mock.patch('nova.utils.execute')
    def test_attach_volume(self, mock_execute):
        # attach opens the mapping, then symlinks the mapper device over
        # the original path.
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = fake__get_key(None)

        self.encryptor.attach_volume(None)

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'create', '--key-file=-', self.dev_name,
                      self.dev_path, process_input='0' * 32,
                      run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                      '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                      run_as_root=True, check_exit_code=True),
        ])
        self.assertEqual(2, mock_execute.call_count)

    @mock.patch('nova.utils.execute')
    def test__close_volume(self, mock_execute):
        # Exit code 4 ("device not active") is tolerated on removal.
        self.encryptor.detach_volume()

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'remove', self.dev_name,
                      run_as_root=True, check_exit_code=[0, 4]),
        ])
        self.assertEqual(1, mock_execute.call_count)

    @mock.patch('nova.utils.execute')
    def test_detach_volume(self, mock_execute):
        self.encryptor.detach_volume()

        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'remove', self.dev_name,
                      run_as_root=True, check_exit_code=[0, 4]),
        ])
        self.assertEqual(1, mock_execute.call_count)

    def test_init_volume_encryption_not_supported(self):
        # Tests that creating a CryptsetupEncryptor fails if there is no
        # device_path key.
        type = 'unencryptable'
        data = dict(volume_id='a194699b-aa07-4433-a945-a5d23802043e')
        connection_info = dict(driver_volume_type=type, data=data)
        exc = self.assertRaises(exception.VolumeEncryptionNotSupported,
                                cryptsetup.CryptsetupEncryptor,
                                connection_info)
        self.assertIn(type, six.text_type(exc))

# == archive member: nova/tests/unit/volume/test_cinder.py ==
# Copyright 2013 Mirantis, Inc.
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import exceptions as cinder_exception
from keystoneclient import exceptions as keystone_exception
import mock

from nova import context
from nova import exception
from nova import test
from nova.tests.unit.fake_instance import fake_instance_obj
from nova.tests import uuidsentinel as uuids
from nova.volume import cinder


class FakeCinderClient(object):
    """Minimal stand-in for the cinderclient client object.

    Only the attributes exercised by the tests are provided; any other
    attribute lookup on Volumes returns None via __getattr__.
    """

    class Volumes(object):
        def get(self, volume_id):
            return {'id': volume_id}

        def list(self, detailed, search_opts=None):
            if search_opts is not None and 'id' in search_opts:
                return [{'id': search_opts['id']}]
            else:
                return [{'id': 'id1'}, {'id': 'id2'}]

        def create(self, *args, **kwargs):
            return {'id': 'created_id'}

        def __getattr__(self, item):
            return None

    def __init__(self):
        self.volumes = self.Volumes()
        # Snapshot calls are routed through the same fake collection.
        self.volume_snapshots = self.volumes


class CinderApiTestCase(test.NoDBTestCase):
    """Tests for nova.volume.cinder.API.

    NOTE: most tests use mox record/replay, so the statement order before
    self.mox.ReplayAll() IS the expected call order -- do not reorder.
    """

    def setUp(self):
        super(CinderApiTestCase, self).setUp()

        self.api = cinder.API()
        self.cinderclient = FakeCinderClient()
        self.ctx = context.get_admin_context()
        self.mox.StubOutWithMock(cinder, 'cinderclient')
        self.mox.StubOutWithMock(cinder, '_untranslate_volume_summary_view')
        self.mox.StubOutWithMock(cinder,
                                 '_untranslate_snapshot_summary_view')

    def test_get(self):
        volume_id = 'volume_id1'

        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        cinder._untranslate_volume_summary_view(self.ctx,
                                                {'id': 'volume_id1'})
        self.mox.ReplayAll()

        self.api.get(self.ctx, volume_id)

    def test_get_failed(self):
        # Each client error is translated to the matching nova exception.
        volume_id = 'volume_id'

        cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound(''))
        cinder.cinderclient(self.ctx).AndRaise(
            cinder_exception.BadRequest(''))
        cinder.cinderclient(self.ctx).AndRaise(
            cinder_exception.ConnectionError(''))
        self.mox.ReplayAll()

        self.assertRaises(exception.VolumeNotFound,
                          self.api.get, self.ctx, volume_id)
        self.assertRaises(exception.InvalidInput,
                          self.api.get, self.ctx, volume_id)
        self.assertRaises(exception.CinderConnectionFailed,
                          self.api.get, self.ctx, volume_id)

    def test_create(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        cinder._untranslate_volume_summary_view(self.ctx,
                                                {'id': 'created_id'})
        self.mox.ReplayAll()

        self.api.create(self.ctx, 1, '', '')

    @mock.patch('nova.volume.cinder.cinderclient')
    def test_create_failed(self, mock_cinderclient):
        mock_cinderclient.return_value.volumes.create.side_effect = (
            cinder_exception.BadRequest(''))

        self.assertRaises(exception.InvalidInput,
                          self.api.create, self.ctx, 1, '', '')

    @mock.patch('nova.volume.cinder.cinderclient')
    def test_create_over_quota_failed(self, mock_cinderclient):
        mock_cinderclient.return_value.volumes.create.side_effect = (
            cinder_exception.OverLimit(413))
        self.assertRaises(exception.OverQuota, self.api.create, self.ctx,
                          1, '', '')
        mock_cinderclient.return_value.volumes.create.assert_called_once_with(
            1, user_id=None, imageRef=None, availability_zone=None,
            volume_type=None, description='', snapshot_id=None, name='',
            project_id=None, metadata=None)

    def test_get_all(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        cinder._untranslate_volume_summary_view(
            self.ctx, {'id': 'id1'}).AndReturn('id1')
        cinder._untranslate_volume_summary_view(
            self.ctx, {'id': 'id2'}).AndReturn('id2')
        self.mox.ReplayAll()

        self.assertEqual(['id1', 'id2'], self.api.get_all(self.ctx))

    def test_get_all_with_search(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        cinder._untranslate_volume_summary_view(
            self.ctx, {'id': 'id1'}).AndReturn('id1')
        self.mox.ReplayAll()

        self.assertEqual(['id1'],
                         self.api.get_all(self.ctx,
                                          search_opts={'id': 'id1'}))

    def test_check_attach_volume_status_error(self):
        volume = {'id': 'fake', 'status': 'error'}
        self.assertRaises(exception.InvalidVolume,
                          self.api.check_attach, self.ctx, volume)

    def test_check_attach_volume_already_attached(self):
        volume = {'id': 'fake', 'status': 'available'}
        volume['attach_status'] = "attached"
        self.assertRaises(exception.InvalidVolume,
                          self.api.check_attach, self.ctx, volume)

    def test_check_attach_availability_zone_differs(self):
        # With cross_az_attach disabled, attaching is only allowed when the
        # instance and volume AZs match; the instance AZ is resolved via
        # az.get_instance_availability_zone whether or not a host is set.
        volume = {'id': 'fake', 'status': 'available'}
        volume['attach_status'] = "detached"
        instance = {'id': 'fake',
                    'availability_zone': 'zone1', 'host': 'fakehost'}

        with mock.patch.object(cinder.az, 'get_instance_availability_zone',
                               side_effect=lambda context,
                               instance: 'zone1') as mock_get_instance_az:

            cinder.CONF.set_override('cross_az_attach', False,
                                     group='cinder')
            volume['availability_zone'] = 'zone1'
            self.assertIsNone(self.api.check_attach(self.ctx,
                                                    volume, instance))
            mock_get_instance_az.assert_called_once_with(self.ctx, instance)
            mock_get_instance_az.reset_mock()
            volume['availability_zone'] = 'zone2'
            self.assertRaises(exception.InvalidVolume,
                              self.api.check_attach,
                              self.ctx, volume, instance)
            mock_get_instance_az.assert_called_once_with(self.ctx, instance)
            mock_get_instance_az.reset_mock()
            del instance['host']
            volume['availability_zone'] = 'zone1'
            self.assertIsNone(self.api.check_attach(
                self.ctx, volume, instance))
            mock_get_instance_az.assert_called_once_with(self.ctx, instance)
            mock_get_instance_az.reset_mock()
            volume['availability_zone'] = 'zone2'
            self.assertRaises(exception.InvalidVolume,
                              self.api.check_attach, self.ctx,
                              volume, instance)
            mock_get_instance_az.assert_called_once_with(self.ctx, instance)
            cinder.CONF.reset()

    def test_check_attach(self):
        volume = {'status': 'available'}
        volume['attach_status'] = "detached"
        volume['availability_zone'] = 'zone1'
        volume['multiattach'] = False
        instance = {'availability_zone': 'zone1', 'host': 'fakehost'}
        cinder.CONF.set_override('cross_az_attach', False, group='cinder')

        with mock.patch.object(cinder.az, 'get_instance_availability_zone',
                               side_effect=lambda context,
                               instance: 'zone1'):
            self.assertIsNone(self.api.check_attach(
                self.ctx, volume, instance))
        cinder.CONF.reset()

    def test_check_detach(self):
        volume = {'id': 'fake', 'status': 'in-use',
                  'attach_status': 'attached',
                  'attachments': {uuids.instance: {
                                    'attachment_id': uuids.attachment}}
                  }
        self.assertIsNone(self.api.check_detach(self.ctx, volume))

        instance = fake_instance_obj(self.ctx)
        instance.uuid = uuids.instance
        self.assertIsNone(self.api.check_detach(self.ctx, volume, instance))
        # A different instance uuid is not among the attachments.
        instance.uuid = uuids.instance2
        self.assertRaises(exception.VolumeUnattached,
                          self.api.check_detach, self.ctx, volume, instance)
        volume['attachments'] = {}
        self.assertRaises(exception.VolumeUnattached,
                          self.api.check_detach, self.ctx, volume, instance)
        volume['status'] = 'available'
        self.assertRaises(exception.InvalidVolume,
                          self.api.check_detach, self.ctx, volume)
        volume['attach_status'] = 'detached'
        self.assertRaises(exception.InvalidVolume,
                          self.api.check_detach, self.ctx, volume)

    def test_reserve_volume(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        self.mox.StubOutWithMock(self.cinderclient.volumes, 'reserve',
                                 use_mock_anything=True)
        self.cinderclient.volumes.reserve('id1')
        self.mox.ReplayAll()

        self.api.reserve_volume(self.ctx, 'id1')

    def test_unreserve_volume(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        self.mox.StubOutWithMock(self.cinderclient.volumes, 'unreserve',
                                 use_mock_anything=True)
        self.cinderclient.volumes.unreserve('id1')
        self.mox.ReplayAll()

        self.api.unreserve_volume(self.ctx, 'id1')

    def test_begin_detaching(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        self.mox.StubOutWithMock(self.cinderclient.volumes,
                                 'begin_detaching',
                                 use_mock_anything=True)
        self.cinderclient.volumes.begin_detaching('id1')
        self.mox.ReplayAll()

        self.api.begin_detaching(self.ctx, 'id1')

    def test_roll_detaching(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        self.mox.StubOutWithMock(self.cinderclient.volumes,
                                 'roll_detaching',
                                 use_mock_anything=True)
        self.cinderclient.volumes.roll_detaching('id1')
        self.mox.ReplayAll()

        self.api.roll_detaching(self.ctx, 'id1')

    @mock.patch('nova.volume.cinder.cinderclient')
    def test_attach(self, mock_cinderclient):
        # Default attach mode is read/write.
        mock_volumes = mock.MagicMock()
        mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)

        self.api.attach(self.ctx, 'id1', 'uuid', 'point')

        mock_cinderclient.assert_called_once_with(self.ctx)
        mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point',
                                                    mode='rw')

    @mock.patch('nova.volume.cinder.cinderclient')
    def test_attach_with_mode(self, mock_cinderclient):
        mock_volumes = mock.MagicMock()
        mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)

        self.api.attach(self.ctx, 'id1', 'uuid', 'point', mode='ro')

        mock_cinderclient.assert_called_once_with(self.ctx)
        mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point',
                                                    mode='ro')

    def test_detach(self):
        # detach looks up the volume to find the attachment id for the
        # given instance uuid before calling the client.
        self.mox.StubOutWithMock(self.api,
                                 'get',
                                 use_mock_anything=True)
        self.api.get(self.ctx, 'id1').\
            AndReturn({'id': 'id1', 'status': 'in-use',
                       'multiattach': True,
                       'attach_status': 'attached',
                       'attachments': {'fake_uuid':
                                       {'attachment_id': 'fakeid'}}
                       })
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        self.mox.StubOutWithMock(self.cinderclient.volumes, 'detach',
                                 use_mock_anything=True)
        self.cinderclient.volumes.detach('id1', 'fakeid')
        self.mox.ReplayAll()

        self.api.detach(self.ctx, 'id1', instance_uuid='fake_uuid')

    @mock.patch('nova.volume.cinder.cinderclient')
    def test_initialize_connection(self, mock_cinderclient):
        connection_info = {'foo': 'bar'}
        mock_cinderclient.return_value.volumes. \
            initialize_connection.return_value = connection_info

        volume_id = 'fake_vid'
        connector = {'host': 'fakehost1'}
        actual = self.api.initialize_connection(self.ctx, volume_id,
                                                connector)

        # The connector is folded into the returned connection info.
        expected = connection_info
        expected['connector'] = connector
        self.assertEqual(expected, actual)

        mock_cinderclient.return_value.volumes. \
            initialize_connection.assert_called_once_with(volume_id,
                                                          connector)

    @mock.patch('nova.volume.cinder.cinderclient')
    def test_initialize_connection_rollback(self, mock_cinderclient):
        # A cinder ClientException triggers terminate_connection cleanup.
        mock_cinderclient.return_value.volumes.\
            initialize_connection.side_effect = (
                cinder_exception.ClientException(500, "500"))

        connector = {'host': 'host1'}
        ex = self.assertRaises(cinder_exception.ClientException,
                               self.api.initialize_connection,
                               self.ctx,
                               'id1',
                               connector)
        self.assertEqual(500, ex.code)
        mock_cinderclient.return_value.volumes.\
            terminate_connection.assert_called_once_with('id1', connector)

    @mock.patch('nova.volume.cinder.cinderclient')
    def test_initialize_connection_no_rollback(self, mock_cinderclient):
        # Non-cinder exceptions must NOT trigger the rollback path.
        mock_cinderclient.return_value.volumes.\
            initialize_connection.side_effect = test.TestingException

        connector = {'host': 'host1'}
        self.assertRaises(test.TestingException,
                          self.api.initialize_connection,
                          self.ctx,
                          'id1',
                          connector)
        self.assertFalse(mock_cinderclient.return_value.volumes.
                         terminate_connection.called)

    def test_terminate_connection(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        self.mox.StubOutWithMock(self.cinderclient.volumes,
                                 'terminate_connection',
                                 use_mock_anything=True)
        self.cinderclient.volumes.terminate_connection('id1', 'connector')
        self.mox.ReplayAll()

        self.api.terminate_connection(self.ctx, 'id1', 'connector')

    def test_delete(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        self.mox.StubOutWithMock(self.cinderclient.volumes, 'delete',
                                 use_mock_anything=True)
        self.cinderclient.volumes.delete('id1')
        self.mox.ReplayAll()

        self.api.delete(self.ctx, 'id1')

    def test_update(self):
        self.assertRaises(NotImplementedError,
                          self.api.update, self.ctx, '', '')

    def test_get_snapshot(self):
        snapshot_id = 'snapshot_id'
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        cinder._untranslate_snapshot_summary_view(self.ctx,
                                                  {'id': snapshot_id})
        self.mox.ReplayAll()

        self.api.get_snapshot(self.ctx, snapshot_id)

    def test_get_snapshot_failed(self):
        snapshot_id = 'snapshot_id'
        cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound(''))
        cinder.cinderclient(self.ctx).AndRaise(
            cinder_exception.ConnectionError(''))
        self.mox.ReplayAll()

        self.assertRaises(exception.SnapshotNotFound,
                          self.api.get_snapshot, self.ctx, snapshot_id)
        self.assertRaises(exception.CinderConnectionFailed,
                          self.api.get_snapshot, self.ctx, snapshot_id)

    def test_get_all_snapshots(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        cinder._untranslate_snapshot_summary_view(
            self.ctx, {'id': 'id1'}).AndReturn('id1')
        cinder._untranslate_snapshot_summary_view(
            self.ctx, {'id': 'id2'}).AndReturn('id2')
        self.mox.ReplayAll()

        self.assertEqual(['id1', 'id2'],
                         self.api.get_all_snapshots(self.ctx))

    def test_create_snapshot(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        cinder._untranslate_snapshot_summary_view(self.ctx,
                                                  {'id': 'created_id'})
        self.mox.ReplayAll()

        self.api.create_snapshot(self.ctx, {'id': 'id1'}, '', '')

    def test_create_force(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        cinder._untranslate_snapshot_summary_view(self.ctx,
                                                  {'id': 'created_id'})
        self.mox.ReplayAll()

        self.api.create_snapshot_force(self.ctx, {'id': 'id1'}, '', '')

    def test_delete_snapshot(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        self.mox.StubOutWithMock(self.cinderclient.volume_snapshots,
                                 'delete',
                                 use_mock_anything=True)
        self.cinderclient.volume_snapshots.delete('id1')
        self.mox.ReplayAll()

        self.api.delete_snapshot(self.ctx, 'id1')

    def test_update_snapshot_status(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        self.mox.StubOutWithMock(self.cinderclient.volume_snapshots,
                                 'update_snapshot_status',
                                 use_mock_anything=True)
        self.cinderclient.volume_snapshots.update_snapshot_status(
            'id1', {'status': 'error', 'progress': '90%'})
        self.mox.ReplayAll()
        self.api.update_snapshot_status(self.ctx, 'id1', 'error')

    def test_get_volume_encryption_metadata(self):
        cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
        self.mox.StubOutWithMock(self.cinderclient.volumes,
                                 'get_encryption_metadata',
                                 use_mock_anything=True)
        self.cinderclient.volumes.\
            get_encryption_metadata({'encryption_key_id': 'fake_key'})
        self.mox.ReplayAll()

        self.api.get_volume_encryption_metadata(self.ctx,
                                                {'encryption_key_id':
                                                 'fake_key'})

    def test_translate_cinder_exception_no_error(self):
        # The decorator must be transparent when the wrapped call succeeds.
        my_func = mock.Mock()
        my_func.__name__ = 'my_func'
        my_func.return_value = 'foo'

        res = cinder.translate_cinder_exception(my_func)('fizzbuzz',
                                                         'bar', 'baz')

        self.assertEqual('foo', res)
        my_func.assert_called_once_with('fizzbuzz', 'bar', 'baz')

    def test_translate_cinder_exception_cinder_connection_error(self):
        self._do_translate_cinder_exception_test(
            cinder_exception.ConnectionError,
            exception.CinderConnectionFailed)

    def test_translate_cinder_exception_keystone_connection_error(self):
        self._do_translate_cinder_exception_test(
            keystone_exception.ConnectionError,
            exception.CinderConnectionFailed)

    def test_translate_cinder_exception_cinder_bad_request(self):
        self._do_translate_cinder_exception_test(
            cinder_exception.BadRequest(''),
            exception.InvalidInput)

    def test_translate_cinder_exception_keystone_bad_request(self):
        self._do_translate_cinder_exception_test(
            keystone_exception.BadRequest,
            exception.InvalidInput)

    def test_translate_cinder_exception_cinder_forbidden(self):
        self._do_translate_cinder_exception_test(
            cinder_exception.Forbidden(''),
            exception.Forbidden)

    def test_translate_cinder_exception_keystone_forbidden(self):
        self._do_translate_cinder_exception_test(
            keystone_exception.Forbidden,
            exception.Forbidden)

    def _do_translate_cinder_exception_test(self, raised_exc, expected_exc):
        # Helper: a client exception raised by the wrapped function must be
        # translated into the corresponding nova exception.
        my_func = mock.Mock()
        my_func.__name__ = 'my_func'
        my_func.side_effect = raised_exc

        self.assertRaises(expected_exc,
                          cinder.translate_cinder_exception(my_func),
                          'foo', 'bar', 'baz')
# == archive member: nova/tests/unit/utils.py ==
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import errno
import platform
import socket
import sys

from oslo_config import cfg
from six.moves import range

from nova.compute import flavors
import nova.context
import nova.db
from nova import exception
from nova.image import glance
from nova.network import minidns
from nova.network import model as network_model
from nova import objects
import nova.utils

CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')


def get_test_admin_context():
    """Return a fresh admin RequestContext for tests."""
    return nova.context.get_admin_context()


def get_test_image_object(context, instance_ref):
    """Build an ImageMeta object for the instance's image_ref via glance."""
    if not context:
        context = get_test_admin_context()

    image_ref = instance_ref['image_ref']
    image_service, image_id = glance.get_remote_image_service(context,
                                                              image_ref)
    return objects.ImageMeta.from_dict(
        image_service.show(context, image_id))


def get_test_flavor(context=None, options=None):
    """Create (or fetch the already-created) 'kinda.big' test flavor.

    ``options`` overrides individual flavor fields before creation.
    """
    options = options or {}
    if not context:
        context = get_test_admin_context()

    test_flavor = {'name': 'kinda.big',
                   'flavorid': 'someid',
                   'memory_mb': 2048,
                   'vcpus': 4,
                   'root_gb': 40,
                   'ephemeral_gb': 80,
                   'swap': 1024}
    test_flavor.update(options)

    try:
        flavor_ref = nova.db.flavor_create(context, test_flavor)
    except (exception.FlavorExists, exception.FlavorIdExists):
        # The flavor already exists from a previous call; reuse it.
        flavor_ref = nova.db.flavor_get_by_name(context, 'kinda.big')
    return flavor_ref


def get_test_instance(context=None, flavor=None, obj=False):
    """Create a test instance in the DB.

    Returns an objects.Instance when ``obj`` is True, otherwise the raw DB
    record from instance_create().
    """
    if not context:
        context = get_test_admin_context()

    if not flavor:
        flavor = get_test_flavor(context)

    test_instance = {'memory_kb': '2048000',
                     'basepath': '/some/path',
                     'bridge_name': 'br100',
                     'vcpus': 4,
                     'root_gb': 40,
                     'bridge': 'br101',
                     'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
                     'instance_type_id': flavor['id'],
                     'system_metadata': {},
                     'extra_specs': {},
                     'user_id': context.user_id,
                     'project_id': context.project_id,
                     }

    if obj:
        instance = objects.Instance(context, **test_instance)
        instance.flavor = objects.Flavor.get_by_id(context, flavor['id'])
        instance.create()
    else:
        # Legacy path: flavor info is stored in system_metadata.
        flavors.save_flavor_info(test_instance['system_metadata'], flavor,
                                 '')
        instance = nova.db.instance_create(context, test_instance)
    return instance


def get_test_network_info(count=1):
    """Return a NetworkInfo with ``count`` identical fake bridge VIFs.

    An IPv6 subnet is attached only when CONF.use_ipv6 is set.
    """
    ipv6 = CONF.use_ipv6
    fake = 'fake'
    fake_ip = '0.0.0.0'
    fake_vlan = 100
    fake_bridge_interface = 'eth0'

    def current():
        # Build one fake VIF (with its network and subnets) per call.
        subnet_4 = network_model.Subnet(cidr=fake_ip,
                                        dns=[network_model.IP(fake_ip),
                                             network_model.IP(fake_ip)],
                                        gateway=network_model.IP(fake_ip),
                                        ips=[network_model.IP(fake_ip),
                                             network_model.IP(fake_ip)],
                                        routes=None,
                                        dhcp_server=fake_ip)
        subnet_6 = network_model.Subnet(cidr=fake_ip,
                                        gateway=network_model.IP(fake_ip),
                                        ips=[network_model.IP(fake_ip),
                                             network_model.IP(fake_ip),
                                             network_model.IP(fake_ip)],
                                        routes=None,
                                        version=6)
        subnets = [subnet_4]
        if ipv6:
            subnets.append(subnet_6)
        network = network_model.Network(id=None,
                                        bridge=fake,
                                        label=None,
                                        subnets=subnets,
                                        vlan=fake_vlan,
                                        bridge_interface=
                                        fake_bridge_interface,
                                        injected=False)
        vif = network_model.VIF(id='vif-xxx-yyy-zzz',
                                address=fake,
                                network=network,
                                type=network_model.VIF_TYPE_BRIDGE,
                                devname=None,
                                ovs_interfaceid=None)
        return vif

    return network_model.NetworkInfo([current() for x in range(0, count)])


def is_osx():
    """True when running on macOS (non-empty mac_ver)."""
    return platform.mac_ver()[0] != ''


def is_linux():
    """True when running on Linux."""
    return platform.system() == 'Linux'


def coreutils_readlink_available():
    """True if a coreutils 'readlink' supporting -nm is on PATH."""
    _out, err = nova.utils.trycmd('readlink', '-nm', '/')
    return err == ''


test_dns_managers = []


def dns_manager():
    """Create a MiniDNS manager and remember it for later cleanup."""
    global test_dns_managers
    manager = minidns.MiniDNS()
    test_dns_managers.append(manager)
    return manager


def cleanup_dns_managers():
    """Delete the DNS files of all managers created via dns_manager()."""
    global test_dns_managers
    for manager in test_dns_managers:
        manager.delete_dns_file()
    test_dns_managers = []


def killer_xml_body():
    # NOTE(review): the XML prolog and the <!ENTITY a/b/c ...> declarations
    # of this "billion laughs" payload appear to have been lost in this copy
    # of the file -- only the "%(d)s" placeholder is interpolated below and
    # the 'a'/'b'/'c' keys are unreferenced. Confirm the template against
    # the upstream source before relying on this helper.
    return ((""" ]> %(d)s """) % {
        'a': 'A' * 10,
        'b': '&a;' * 10,
        'c': '&b;' * 10,
        'd': '&c;' * 9999,
    }).strip()


def is_ipv6_supported():
    """Best-effort check that this host can actually use IPv6.

    Combines socket.has_ipv6, an AF_INET6 socket creation probe, and (on
    Linux) a check that /proc/net/if_inet6 lists at least one interface.
    """
    has_ipv6_support = socket.has_ipv6
    try:
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        s.close()
    except socket.error as e:
        if e.errno == errno.EAFNOSUPPORT:
            has_ipv6_support = False
        else:
            # Unexpected errno: let the caller see it.
            raise

    # check if there is at least one interface with ipv6
    if has_ipv6_support and sys.platform.startswith('linux'):
        try:
            with open('/proc/net/if_inet6') as f:
                if not f.read():
                    has_ipv6_support = False
        except IOError:
            has_ipv6_support = False

    return has_ipv6_support


def get_api_version(request):
    """Extract a single-digit API version from a request path like '/v2/...'.

    Returns None implicitly when the third character is not a digit.
    """
    if request.path[2:3].isdigit():
        return int(request.path[2:3])

# == archive member: nova/tests/unit/__init__.py ==
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
:mod:`nova.tests.unit` -- Nova Unittests
=====================================================

.. automodule:: nova.tests.unit
   :platform: Unix
"""

import eventlet

from nova import objects

eventlet.monkey_patch(os=False)

# NOTE(alaski): Make sure this is done after eventlet monkey patching
# otherwise the threading.local() store used in oslo_messaging will be
# initialized to threadlocal storage rather than greenthread local.  This
# will cause context sets and deletes in that storage to clobber each other.
# NOTE(comstud): Make sure we have all of the objects loaded. We do this
# at module import time, because we may be using mock decorators in our
# tests that run at import time.
objects.register_all()

# == archive member: nova/tests/unit/conductor/test_conductor.py ==
# (continues beyond this chunk)
# Copyright 2012 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service.""" import copy import uuid import mock from mox3 import mox import oslo_messaging as messaging from oslo_utils import timeutils import six from nova.compute import flavors from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import vm_states from nova import conductor from nova.conductor import api as conductor_api from nova.conductor import manager as conductor_manager from nova.conductor import rpcapi as conductor_rpcapi from nova.conductor.tasks import live_migrate from nova.conductor.tasks import migrate from nova import context from nova import db from nova import exception as exc from nova.image import api as image_api from nova import objects from nova.objects import base as obj_base from nova.objects import fields from nova import rpc from nova.scheduler import client as scheduler_client from nova.scheduler import utils as scheduler_utils from nova import test from nova.tests import fixtures from nova.tests.unit import cast_as_call from nova.tests.unit.compute import test_compute from nova.tests.unit import fake_instance from nova.tests.unit import fake_notifier from nova.tests.unit import fake_request_spec from nova.tests.unit import fake_server_actions from nova.tests.unit import fake_utils from nova import utils class FakeContext(context.RequestContext): def elevated(self): """Return a consistent elevated context so we can detect it.""" if not hasattr(self, '_elevated'): self._elevated = super(FakeContext, self).elevated() return self._elevated class _BaseTestCase(object): def setUp(self): super(_BaseTestCase, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.context = FakeContext(self.user_id, self.project_id) fake_notifier.stub_notifier(self.stubs) self.addCleanup(fake_notifier.reset) def fake_deserialize_context(serializer, ctxt_dict): self.assertEqual(self.context.user_id, ctxt_dict['user_id']) self.assertEqual(self.context.project_id, 
ctxt_dict['project_id']) return self.context self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context', fake_deserialize_context) fake_utils.stub_out_utils_spawn_n(self.stubs) class ConductorTestCase(_BaseTestCase, test.TestCase): """Conductor Manager Tests.""" def setUp(self): super(ConductorTestCase, self).setUp() self.conductor = conductor_manager.ConductorManager() self.conductor_manager = self.conductor def _test_object_action(self, is_classmethod, raise_exception): class TestObject(obj_base.NovaObject): def foo(self, raise_exception=False): if raise_exception: raise Exception('test') else: return 'test' @classmethod def bar(cls, context, raise_exception=False): if raise_exception: raise Exception('test') else: return 'test' obj_base.NovaObjectRegistry.register(TestObject) obj = TestObject() # NOTE(danms): After a trip over RPC, any tuple will be a list, # so use a list here to make sure we can handle it fake_args = [] if is_classmethod: versions = {'TestObject': '1.0'} result = self.conductor.object_class_action_versions( self.context, TestObject.obj_name(), 'bar', versions, fake_args, {'raise_exception': raise_exception}) else: updates, result = self.conductor.object_action( self.context, obj, 'foo', fake_args, {'raise_exception': raise_exception}) self.assertEqual('test', result) def test_object_action(self): self._test_object_action(False, False) def test_object_action_on_raise(self): self.assertRaises(messaging.ExpectedException, self._test_object_action, False, True) def test_object_class_action(self): self._test_object_action(True, False) def test_object_class_action_on_raise(self): self.assertRaises(messaging.ExpectedException, self._test_object_action, True, True) def test_object_action_copies_object(self): class TestObject(obj_base.NovaObject): fields = {'dict': fields.DictOfStringsField()} def touch_dict(self): self.dict['foo'] = 'bar' self.obj_reset_changes() obj_base.NovaObjectRegistry.register(TestObject) obj = TestObject() obj.dict = {} 
obj.obj_reset_changes() updates, result = self.conductor.object_action( self.context, obj, 'touch_dict', tuple(), {}) # NOTE(danms): If conductor did not properly copy the object, then # the new and reference copies of the nested dict object will be # the same, and thus 'dict' will not be reported as changed self.assertIn('dict', updates) self.assertEqual({'foo': 'bar'}, updates['dict']) def test_object_class_action_versions(self): @obj_base.NovaObjectRegistry.register class TestObject(obj_base.NovaObject): VERSION = '1.10' @classmethod def foo(cls, context): return cls() versions = { 'TestObject': '1.2', 'OtherObj': '1.0', } with mock.patch.object(self.conductor_manager, '_object_dispatch') as m: m.return_value = TestObject() m.return_value.obj_to_primitive = mock.MagicMock() self.conductor.object_class_action_versions( self.context, TestObject.obj_name(), 'foo', versions, tuple(), {}) m.return_value.obj_to_primitive.assert_called_once_with( target_version='1.2', version_manifest=versions) def test_reset(self): with mock.patch.object(objects.Service, 'clear_min_version_cache' ) as mock_clear_cache: self.conductor.reset() mock_clear_cache.assert_called_once_with() def test_provider_fw_rule_get_all(self): result = self.conductor.provider_fw_rule_get_all(self.context) self.assertEqual([], result) class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase): """Conductor RPC API Tests.""" def setUp(self): super(ConductorRPCAPITestCase, self).setUp() self.conductor_service = self.start_service( 'conductor', manager='nova.conductor.manager.ConductorManager') self.conductor_manager = self.conductor_service.manager self.conductor = conductor_rpcapi.ConductorAPI() class ConductorAPITestCase(_BaseTestCase, test.TestCase): """Conductor API Tests.""" def setUp(self): super(ConductorAPITestCase, self).setUp() self.conductor_service = self.start_service( 'conductor', manager='nova.conductor.manager.ConductorManager') self.conductor = conductor_api.API() self.conductor_manager = 
self.conductor_service.manager def test_wait_until_ready(self): timeouts = [] calls = dict(count=0) def fake_ping(context, message, timeout): timeouts.append(timeout) calls['count'] += 1 if calls['count'] < 15: raise messaging.MessagingTimeout("fake") self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping) self.conductor.wait_until_ready(self.context) self.assertEqual(timeouts.count(10), 10) self.assertIn(None, timeouts) @mock.patch('oslo_versionedobjects.base.obj_tree_get_versions') def test_object_backport_redirect(self, mock_ovo): mock_ovo.return_value = mock.sentinel.obj_versions mock_objinst = mock.Mock() with mock.patch.object(self.conductor, 'object_backport_versions') as mock_call: self.conductor.object_backport(mock.sentinel.ctxt, mock_objinst, mock.sentinel.target_version) mock_call.assert_called_once_with(mock.sentinel.ctxt, mock_objinst, mock.sentinel.obj_versions) class ConductorLocalAPITestCase(ConductorAPITestCase): """Conductor LocalAPI Tests.""" def setUp(self): super(ConductorLocalAPITestCase, self).setUp() self.conductor = conductor_api.LocalAPI() self.conductor_manager = self.conductor._manager._target def test_wait_until_ready(self): # Override test in ConductorAPITestCase pass class ConductorImportTest(test.NoDBTestCase): def test_import_conductor_local(self): self.flags(use_local=True, group='conductor') self.assertIsInstance(conductor.API(), conductor_api.LocalAPI) self.assertIsInstance(conductor.ComputeTaskAPI(), conductor_api.LocalComputeTaskAPI) def test_import_conductor_rpc(self): self.flags(use_local=False, group='conductor') self.assertIsInstance(conductor.API(), conductor_api.API) self.assertIsInstance(conductor.ComputeTaskAPI(), conductor_api.ComputeTaskAPI) def test_import_conductor_override_to_local(self): self.flags(use_local=False, group='conductor') self.assertIsInstance(conductor.API(use_local=True), conductor_api.LocalAPI) self.assertIsInstance(conductor.ComputeTaskAPI(use_local=True), conductor_api.LocalComputeTaskAPI) 
class _BaseTaskTestCase(object): def setUp(self): super(_BaseTaskTestCase, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.context = FakeContext(self.user_id, self.project_id) fake_server_actions.stub_out_action_events(self.stubs) def fake_deserialize_context(serializer, ctxt_dict): self.assertEqual(self.context.user_id, ctxt_dict['user_id']) self.assertEqual(self.context.project_id, ctxt_dict['project_id']) return self.context self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context', fake_deserialize_context) self.useFixture(fixtures.SpawnIsSynchronousFixture()) def _prepare_rebuild_args(self, update_args=None): # Args that don't get passed in to the method but do get passed to RPC migration = update_args and update_args.pop('migration', None) node = update_args and update_args.pop('node', None) limits = update_args and update_args.pop('limits', None) rebuild_args = {'new_pass': 'admin_password', 'injected_files': 'files_to_inject', 'image_ref': 'image_ref', 'orig_image_ref': 'orig_image_ref', 'orig_sys_metadata': 'orig_sys_meta', 'bdms': {}, 'recreate': False, 'on_shared_storage': False, 'preserve_ephemeral': False, 'host': 'compute-host', 'request_spec': None} if update_args: rebuild_args.update(update_args) compute_rebuild_args = copy.deepcopy(rebuild_args) compute_rebuild_args['migration'] = migration compute_rebuild_args['node'] = node compute_rebuild_args['limits'] = limits # Args that are passed in to the method but don't get passed to RPC compute_rebuild_args.pop('request_spec') return rebuild_args, compute_rebuild_args @mock.patch('nova.objects.Migration') def test_live_migrate(self, migobj): inst = fake_instance.fake_db_instance() inst_obj = objects.Instance._from_db_object( self.context, objects.Instance(), inst, []) migration = migobj() self.mox.StubOutWithMock(live_migrate.LiveMigrationTask, 'execute') task = self.conductor_manager._build_live_migrate_task( self.context, inst_obj, 'destination', 'block_migration', 
'disk_over_commit', migration) task.execute() self.mox.ReplayAll() if isinstance(self.conductor, (conductor_api.ComputeTaskAPI, conductor_api.LocalComputeTaskAPI)): # The API method is actually 'live_migrate_instance'. It gets # converted into 'migrate_server' when doing RPC. self.conductor.live_migrate_instance(self.context, inst_obj, 'destination', 'block_migration', 'disk_over_commit') else: self.conductor.migrate_server(self.context, inst_obj, {'host': 'destination'}, True, False, None, 'block_migration', 'disk_over_commit') self.assertEqual('accepted', migration.status) self.assertEqual('destination', migration.dest_compute) self.assertEqual(inst_obj.host, migration.source_compute) def _test_cold_migrate(self, clean_shutdown=True): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec') self.mox.StubOutWithMock(migrate.MigrationTask, 'execute') inst = fake_instance.fake_db_instance(image_ref='image_ref') inst_obj = objects.Instance._from_db_object( self.context, objects.Instance(), inst, []) inst_obj.system_metadata = {'image_hw_disk_bus': 'scsi'} flavor = flavors.get_default_flavor() flavor.extra_specs = {'extra_specs': 'fake'} filter_properties = {'limits': {}, 'retry': {'num_attempts': 1, 'hosts': [['host1', None]]}} request_spec = {'instance_type': obj_base.obj_to_primitive(flavor), 'instance_properties': {}} utils.get_image_from_system_metadata( inst_obj.system_metadata).AndReturn('image') scheduler_utils.build_request_spec( self.context, 'image', [mox.IsA(objects.Instance)], instance_type=mox.IsA(objects.Flavor)).AndReturn(request_spec) task = self.conductor_manager._build_cold_migrate_task( self.context, inst_obj, flavor, filter_properties, request_spec, [], clean_shutdown=clean_shutdown) task.execute() self.mox.ReplayAll() scheduler_hint = {'filter_properties': {}} if isinstance(self.conductor, (conductor_api.ComputeTaskAPI, conductor_api.LocalComputeTaskAPI)): # The API method 
is actually 'resize_instance'. It gets # converted into 'migrate_server' when doing RPC. self.conductor.resize_instance( self.context, inst_obj, {}, scheduler_hint, flavor, [], clean_shutdown) else: self.conductor.migrate_server( self.context, inst_obj, scheduler_hint, False, False, flavor, None, None, [], clean_shutdown) def test_cold_migrate(self): self._test_cold_migrate() def test_cold_migrate_forced_shutdown(self): self._test_cold_migrate(clean_shutdown=False) @mock.patch('nova.objects.Instance.refresh') def test_build_instances(self, mock_refresh): instance_type = flavors.get_default_flavor() instances = [objects.Instance(context=self.context, id=i, uuid=uuid.uuid4(), flavor=instance_type) for i in range(2)] instance_type_p = obj_base.obj_to_primitive(instance_type) instance_properties = obj_base.obj_to_primitive(instances[0]) instance_properties['system_metadata'] = flavors.save_flavor_info( {}, instance_type) self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances') self.mox.StubOutWithMock(db, 'block_device_mapping_get_all_by_instance') self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi, 'build_and_run_instance') spec = {'image': {'fake_data': 'should_pass_silently'}, 'instance_properties': instance_properties, 'instance_type': instance_type_p, 'num_instances': 2} filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}} self.conductor_manager._schedule_instances(self.context, spec, filter_properties).AndReturn( [{'host': 'host1', 'nodename': 'node1', 'limits': []}, {'host': 'host2', 'nodename': 'node2', 'limits': []}]) db.block_device_mapping_get_all_by_instance(self.context, instances[0].uuid).AndReturn([]) self.conductor_manager.compute_rpcapi.build_and_run_instance( self.context, instance=mox.IgnoreArg(), host='host1', image={'fake_data': 'should_pass_silently'}, request_spec={ 'image': {'fake_data': 'should_pass_silently'}, 'instance_properties': instance_properties, 'instance_type': instance_type_p, 'num_instances': 
2}, filter_properties={'retry': {'num_attempts': 1, 'hosts': [['host1', 'node1']]}, 'limits': []}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping=mox.IgnoreArg(), node='node1', limits=[]) db.block_device_mapping_get_all_by_instance(self.context, instances[1].uuid).AndReturn([]) self.conductor_manager.compute_rpcapi.build_and_run_instance( self.context, instance=mox.IgnoreArg(), host='host2', image={'fake_data': 'should_pass_silently'}, request_spec={ 'image': {'fake_data': 'should_pass_silently'}, 'instance_properties': instance_properties, 'instance_type': instance_type_p, 'num_instances': 2}, filter_properties={'limits': [], 'retry': {'num_attempts': 1, 'hosts': [['host2', 'node2']]}}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping=mox.IgnoreArg(), node='node2', limits=[]) self.mox.ReplayAll() # build_instances() is a cast, we need to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.conductor.build_instances(self.context, instances=instances, image={'fake_data': 'should_pass_silently'}, filter_properties={}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping='block_device_mapping', legacy_bdm=False) @mock.patch.object(scheduler_utils, 'build_request_spec') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations') @mock.patch.object(conductor_manager.ComputeTaskManager, '_cleanup_allocated_networks') def test_build_instances_scheduler_failure( self, cleanup_mock, sd_mock, state_mock, sig_mock, bs_mock): instances = [fake_instance.fake_instance_obj(self.context) for i in range(2)] image = {'fake-data': 
'should_pass_silently'} spec = {'fake': 'specs', 'instance_properties': instances[0]} exception = exc.NoValidHost(reason='fake-reason') bs_mock.return_value = spec sd_mock.side_effect = exception updates = {'vm_state': vm_states.ERROR, 'task_state': None} # build_instances() is a cast, we need to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.conductor.build_instances( self.context, instances=instances, image=image, filter_properties={}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping='block_device_mapping', legacy_bdm=False) set_state_calls = [] cleanup_network_calls = [] for instance in instances: set_state_calls.append(mock.call( self.context, instance.uuid, 'compute_task', 'build_instances', updates, exception, spec)) cleanup_network_calls.append(mock.call( self.context, mock.ANY, None)) state_mock.assert_has_calls(set_state_calls) cleanup_mock.assert_has_calls(cleanup_network_calls) def test_build_instances_retry_exceeded(self): instances = [fake_instance.fake_instance_obj(self.context)] image = {'fake-data': 'should_pass_silently'} filter_properties = {'retry': {'num_attempts': 10, 'hosts': []}} updates = {'vm_state': vm_states.ERROR, 'task_state': None} @mock.patch.object(conductor_manager.ComputeTaskManager, '_cleanup_allocated_networks') @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify') @mock.patch.object(scheduler_utils, 'populate_retry') def _test(populate_retry, set_vm_state_and_notify, cleanup_mock): # build_instances() is a cast, we need to wait for it to # complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) populate_retry.side_effect = exc.MaxRetriesExceeded( reason="Too many try") self.conductor.build_instances( self.context, instances=instances, image=image, filter_properties=filter_properties, admin_password='admin_password', injected_files='injected_files', requested_networks=None, 
security_groups='security_groups', block_device_mapping='block_device_mapping', legacy_bdm=False) populate_retry.assert_called_once_with( filter_properties, instances[0].uuid) set_vm_state_and_notify.assert_called_once_with( self.context, instances[0].uuid, 'compute_task', 'build_instances', updates, mock.ANY, {}) cleanup_mock.assert_called_once_with(self.context, mock.ANY, None) _test() @mock.patch.object(scheduler_utils, 'build_request_spec') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(conductor_manager.ComputeTaskManager, '_set_vm_state_and_notify') @mock.patch.object(conductor_manager.ComputeTaskManager, '_cleanup_allocated_networks') def test_build_instances_scheduler_group_failure( self, cleanup_mock, state_mock, sig_mock, bs_mock): instances = [fake_instance.fake_instance_obj(self.context) for i in range(2)] image = {'fake-data': 'should_pass_silently'} spec = {'fake': 'specs', 'instance_properties': instances[0]} bs_mock.return_value = spec exception = exc.UnsupportedPolicyException(reason='fake-reason') sig_mock.side_effect = exception updates = {'vm_state': vm_states.ERROR, 'task_state': None} # build_instances() is a cast, we need to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.conductor.build_instances( context=self.context, instances=instances, image=image, filter_properties={}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping='block_device_mapping', legacy_bdm=False) set_state_calls = [] cleanup_network_calls = [] for instance in instances: set_state_calls.append(mock.call( self.context, instance.uuid, 'build_instances', updates, exception, spec)) cleanup_network_calls.append(mock.call( self.context, mock.ANY, None)) state_mock.assert_has_calls(set_state_calls) cleanup_mock.assert_has_calls(cleanup_network_calls) def test_unshelve_instance_on_host(self): instance = 
self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED instance.task_state = task_states.UNSHELVING instance.save() system_metadata = instance.system_metadata self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi, 'start_instance') self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi, 'unshelve_instance') self.conductor_manager.compute_rpcapi.start_instance(self.context, instance) self.mox.ReplayAll() system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_image_id'] = 'fake_image_id' system_metadata['shelved_host'] = 'fake-mini' self.conductor_manager.unshelve_instance(self.context, instance) def test_unshelve_offload_instance_on_host_with_request_spec(self): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.task_state = task_states.UNSHELVING instance.save() system_metadata = instance.system_metadata system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_image_id'] = 'fake_image_id' system_metadata['shelved_host'] = 'fake-mini' fake_spec = fake_request_spec.fake_spec_obj() # FIXME(sbauza): Modify the fake RequestSpec object to either add a # non-empty SchedulerRetries object or nullify the field fake_spec.retry = None # FIXME(sbauza): Modify the fake RequestSpec object to either add a # non-empty SchedulerLimits object or nullify the field fake_spec.limits = None # FIXME(sbauza): Modify the fake RequestSpec object to either add a # non-empty InstanceGroup object or nullify the field fake_spec.instance_group = None filter_properties = fake_spec.to_legacy_filter_properties_dict() request_spec = fake_spec.to_legacy_request_spec_dict() host = {'host': 'host1', 'nodename': 'node1', 'limits': []} # unshelve_instance() is a cast, we need to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) @mock.patch.object(self.conductor_manager.compute_rpcapi, 'unshelve_instance') @mock.patch.object(scheduler_utils, 
'populate_filter_properties') @mock.patch.object(scheduler_utils, 'populate_retry') @mock.patch.object(self.conductor_manager, '_schedule_instances') @mock.patch.object(objects.RequestSpec, 'to_legacy_request_spec_dict') @mock.patch.object(objects.RequestSpec, 'to_legacy_filter_properties_dict') @mock.patch.object(objects.RequestSpec, 'reset_forced_destinations') def do_test(reset_forced_destinations, to_filtprops, to_reqspec, sched_instances, populate_retry, populate_filter_properties, unshelve_instance): to_filtprops.return_value = filter_properties to_reqspec.return_value = request_spec sched_instances.return_value = [host] self.conductor.unshelve_instance(self.context, instance, fake_spec) reset_forced_destinations.assert_called_once_with() sched_instances.assert_called_once_with(self.context, request_spec, filter_properties) # NOTE(sbauza): Since the instance is dehydrated when passing thru # the RPC API, we can only assert mock.ANY for it unshelve_instance.assert_called_once_with( self.context, mock.ANY, host['host'], image=mock.ANY, filter_properties=filter_properties, node=host['nodename'] ) do_test() def test_unshelve_offloaded_instance_glance_image_not_found(self): shelved_image_id = "image_not_found" instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.task_state = task_states.UNSHELVING instance.save() system_metadata = instance.system_metadata self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get') e = exc.ImageNotFound(image_id=shelved_image_id) self.conductor_manager.image_api.get( self.context, shelved_image_id, show_deleted=False).AndRaise(e) self.mox.ReplayAll() system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_host'] = 'fake-mini' system_metadata['shelved_image_id'] = shelved_image_id self.assertRaises( exc.UnshelveException, self.conductor_manager.unshelve_instance, self.context, instance) self.assertEqual(instance.vm_state, vm_states.ERROR) def 
test_unshelve_offloaded_instance_image_id_is_none(self): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.task_state = task_states.UNSHELVING # 'shelved_image_id' is None for volumebacked instance instance.system_metadata['shelved_image_id'] = None with test.nested( mock.patch.object(self.conductor_manager, '_schedule_instances'), mock.patch.object(self.conductor_manager.compute_rpcapi, 'unshelve_instance'), ) as (schedule_mock, unshelve_mock): schedule_mock.return_value = [{'host': 'fake_host', 'nodename': 'fake_node', 'limits': {}}] self.conductor_manager.unshelve_instance(self.context, instance) self.assertEqual(1, unshelve_mock.call_count) def test_unshelve_instance_schedule_and_rebuild(self): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.save() filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}} system_metadata = instance.system_metadata self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get') self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec') self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances') self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi, 'unshelve_instance') self.conductor_manager.image_api.get(self.context, 'fake_image_id', show_deleted=False).AndReturn('fake_image') scheduler_utils.build_request_spec(self.context, 'fake_image', mox.IgnoreArg()).AndReturn('req_spec') self.conductor_manager._schedule_instances(self.context, 'req_spec', filter_properties).AndReturn( [{'host': 'fake_host', 'nodename': 'fake_node', 'limits': {}}]) self.conductor_manager.compute_rpcapi.unshelve_instance(self.context, instance, 'fake_host', image='fake_image', filter_properties={'limits': {}, 'retry': {'num_attempts': 1, 'hosts': [['fake_host', 'fake_node']]}}, node='fake_node') self.mox.ReplayAll() system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_image_id'] = 'fake_image_id' 
system_metadata['shelved_host'] = 'fake-mini' self.conductor_manager.unshelve_instance(self.context, instance) def test_unshelve_instance_schedule_and_rebuild_novalid_host(self): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.save() system_metadata = instance.system_metadata def fake_schedule_instances(context, image, filter_properties, *instances): raise exc.NoValidHost(reason='') with test.nested( mock.patch.object(self.conductor_manager.image_api, 'get', return_value='fake_image'), mock.patch.object(self.conductor_manager, '_schedule_instances', fake_schedule_instances) ) as (_get_image, _schedule_instances): system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_image_id'] = 'fake_image_id' system_metadata['shelved_host'] = 'fake-mini' self.conductor_manager.unshelve_instance(self.context, instance) _get_image.assert_has_calls([mock.call(self.context, system_metadata['shelved_image_id'], show_deleted=False)]) self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state) @mock.patch.object(conductor_manager.ComputeTaskManager, '_schedule_instances', side_effect=messaging.MessagingTimeout()) @mock.patch.object(image_api.API, 'get', return_value='fake_image') def test_unshelve_instance_schedule_and_rebuild_messaging_exception( self, mock_get_image, mock_schedule_instances): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.task_state = task_states.UNSHELVING instance.save() system_metadata = instance.system_metadata system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_image_id'] = 'fake_image_id' system_metadata['shelved_host'] = 'fake-mini' self.assertRaises(messaging.MessagingTimeout, self.conductor_manager.unshelve_instance, self.context, instance) mock_get_image.assert_has_calls([mock.call(self.context, system_metadata['shelved_image_id'], show_deleted=False)]) self.assertEqual(vm_states.SHELVED_OFFLOADED, 
instance.vm_state) self.assertIsNone(instance.task_state) def test_unshelve_instance_schedule_and_rebuild_volume_backed(self): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.save() filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}} system_metadata = instance.system_metadata self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec') self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances') self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi, 'unshelve_instance') scheduler_utils.build_request_spec(self.context, None, mox.IgnoreArg()).AndReturn('req_spec') self.conductor_manager._schedule_instances(self.context, 'req_spec', filter_properties).AndReturn( [{'host': 'fake_host', 'nodename': 'fake_node', 'limits': {}}]) self.conductor_manager.compute_rpcapi.unshelve_instance(self.context, instance, 'fake_host', image=None, filter_properties={'limits': {}, 'retry': {'num_attempts': 1, 'hosts': [['fake_host', 'fake_node']]}}, node='fake_node') self.mox.ReplayAll() system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_host'] = 'fake-mini' self.conductor_manager.unshelve_instance(self.context, instance) def test_rebuild_instance(self): inst_obj = self._create_fake_instance_obj() rebuild_args, compute_args = self._prepare_rebuild_args( {'host': inst_obj.host}) with test.nested( mock.patch.object(self.conductor_manager.compute_rpcapi, 'rebuild_instance'), mock.patch.object(self.conductor_manager.scheduler_client, 'select_destinations') ) as (rebuild_mock, select_dest_mock): self.conductor_manager.rebuild_instance(context=self.context, instance=inst_obj, **rebuild_args) self.assertFalse(select_dest_mock.called) rebuild_mock.assert_called_once_with(self.context, instance=inst_obj, **compute_args) def test_rebuild_instance_with_scheduler(self): inst_obj = self._create_fake_instance_obj() inst_obj.host = 'noselect' expected_host = 'thebesthost' expected_node = 
'thebestnode' expected_limits = 'fake-limits' rebuild_args, compute_args = self._prepare_rebuild_args( {'host': None, 'node': expected_node, 'limits': expected_limits}) request_spec = {} filter_properties = {'ignore_hosts': [(inst_obj.host)]} fake_spec = objects.RequestSpec() with test.nested( mock.patch.object(self.conductor_manager.compute_rpcapi, 'rebuild_instance'), mock.patch.object(scheduler_utils, 'setup_instance_group', return_value=False), mock.patch.object(objects.RequestSpec, 'from_primitives', return_value=fake_spec), mock.patch.object(self.conductor_manager.scheduler_client, 'select_destinations', return_value=[{'host': expected_host, 'nodename': expected_node, 'limits': expected_limits}]), mock.patch('nova.scheduler.utils.build_request_spec', return_value=request_spec) ) as (rebuild_mock, sig_mock, fp_mock, select_dest_mock, bs_mock): self.conductor_manager.rebuild_instance(context=self.context, instance=inst_obj, **rebuild_args) fp_mock.assert_called_once_with(self.context, request_spec, filter_properties) select_dest_mock.assert_called_once_with(self.context, fake_spec) compute_args['host'] = expected_host rebuild_mock.assert_called_once_with(self.context, instance=inst_obj, **compute_args) self.assertEqual('compute.instance.rebuild.scheduled', fake_notifier.NOTIFICATIONS[0].event_type) def test_rebuild_instance_with_scheduler_no_host(self): inst_obj = self._create_fake_instance_obj() inst_obj.host = 'noselect' rebuild_args, _ = self._prepare_rebuild_args({'host': None}) request_spec = {} filter_properties = {'ignore_hosts': [(inst_obj.host)]} fake_spec = objects.RequestSpec() with test.nested( mock.patch.object(self.conductor_manager.compute_rpcapi, 'rebuild_instance'), mock.patch.object(scheduler_utils, 'setup_instance_group', return_value=False), mock.patch.object(objects.RequestSpec, 'from_primitives', return_value=fake_spec), mock.patch.object(self.conductor_manager.scheduler_client, 'select_destinations', 
side_effect=exc.NoValidHost(reason='')), mock.patch('nova.scheduler.utils.build_request_spec', return_value=request_spec) ) as (rebuild_mock, sig_mock, fp_mock, select_dest_mock, bs_mock): self.assertRaises(exc.NoValidHost, self.conductor_manager.rebuild_instance, context=self.context, instance=inst_obj, **rebuild_args) fp_mock.assert_called_once_with(self.context, request_spec, filter_properties) select_dest_mock.assert_called_once_with(self.context, fake_spec) self.assertFalse(rebuild_mock.called) @mock.patch.object(conductor_manager.compute_rpcapi.ComputeAPI, 'rebuild_instance') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(conductor_manager.scheduler_client.SchedulerClient, 'select_destinations') @mock.patch('nova.scheduler.utils.build_request_spec') @mock.patch.object(conductor_manager.ComputeTaskManager, '_set_vm_state_and_notify') def test_rebuild_instance_with_scheduler_group_failure(self, state_mock, bs_mock, select_dest_mock, sig_mock, rebuild_mock): inst_obj = self._create_fake_instance_obj() rebuild_args, _ = self._prepare_rebuild_args({'host': None}) request_spec = {} bs_mock.return_value = request_spec exception = exc.UnsupportedPolicyException(reason='') sig_mock.side_effect = exception # build_instances() is a cast, we need to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.assertRaises(exc.UnsupportedPolicyException, self.conductor.rebuild_instance, self.context, inst_obj, **rebuild_args) updates = {'vm_state': vm_states.ACTIVE, 'task_state': None} state_mock.assert_called_once_with(self.context, inst_obj.uuid, 'rebuild_server', updates, exception, request_spec) self.assertFalse(select_dest_mock.called) self.assertFalse(rebuild_mock.called) def test_rebuild_instance_evacuate_migration_record(self): inst_obj = self._create_fake_instance_obj() migration = objects.Migration(context=self.context, source_compute=inst_obj.host, source_node=inst_obj.node, instance_uuid=inst_obj.uuid, 
status='accepted', migration_type='evacuation') rebuild_args, compute_args = self._prepare_rebuild_args( {'host': inst_obj.host, 'migration': migration}) with test.nested( mock.patch.object(self.conductor_manager.compute_rpcapi, 'rebuild_instance'), mock.patch.object(self.conductor_manager.scheduler_client, 'select_destinations'), mock.patch.object(objects.Migration, 'get_by_instance_and_status', return_value=migration) ) as (rebuild_mock, select_dest_mock, get_migration_mock): self.conductor_manager.rebuild_instance(context=self.context, instance=inst_obj, **rebuild_args) self.assertFalse(select_dest_mock.called) rebuild_mock.assert_called_once_with(self.context, instance=inst_obj, **compute_args) def test_rebuild_instance_with_request_spec(self): inst_obj = self._create_fake_instance_obj() inst_obj.host = 'noselect' expected_host = 'thebesthost' expected_node = 'thebestnode' expected_limits = 'fake-limits' request_spec = {} filter_properties = {'ignore_hosts': [(inst_obj.host)]} fake_spec = objects.RequestSpec(ignore_hosts=[]) augmented_spec = objects.RequestSpec(ignore_hosts=[inst_obj.host]) rebuild_args, compute_args = self._prepare_rebuild_args( {'host': None, 'node': expected_node, 'limits': expected_limits, 'request_spec': fake_spec}) with test.nested( mock.patch.object(self.conductor_manager.compute_rpcapi, 'rebuild_instance'), mock.patch.object(scheduler_utils, 'setup_instance_group', return_value=False), mock.patch.object(objects.RequestSpec, 'from_primitives', return_value=augmented_spec), mock.patch.object(self.conductor_manager.scheduler_client, 'select_destinations', return_value=[{'host': expected_host, 'nodename': expected_node, 'limits': expected_limits}]), mock.patch.object(fake_spec, 'reset_forced_destinations'), mock.patch.object(fake_spec, 'to_legacy_request_spec_dict', return_value=request_spec), mock.patch.object(fake_spec, 'to_legacy_filter_properties_dict', return_value=filter_properties), ) as (rebuild_mock, sig_mock, fp_mock, 
select_dest_mock, reset_fd, to_reqspec, to_filtprops): self.conductor_manager.rebuild_instance(context=self.context, instance=inst_obj, **rebuild_args) reset_fd.assert_called_once_with() to_reqspec.assert_called_once_with() to_filtprops.assert_called_once_with() fp_mock.assert_called_once_with(self.context, request_spec, filter_properties) select_dest_mock.assert_called_once_with(self.context, augmented_spec) compute_args['host'] = expected_host rebuild_mock.assert_called_once_with(self.context, instance=inst_obj, **compute_args) self.assertEqual('compute.instance.rebuild.scheduled', fake_notifier.NOTIFICATIONS[0].event_type) class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase): """ComputeTaskManager Tests.""" def setUp(self): super(ConductorTaskTestCase, self).setUp() self.conductor = conductor_manager.ComputeTaskManager() self.conductor_manager = self.conductor def test_reset(self): with mock.patch('nova.compute.rpcapi.ComputeAPI') as mock_rpc: old_rpcapi = self.conductor_manager.compute_rpcapi self.conductor_manager.reset() mock_rpc.assert_called_once_with() self.assertNotEqual(old_rpcapi, self.conductor_manager.compute_rpcapi) def test_migrate_server_fails_with_rebuild(self): self.assertRaises(NotImplementedError, self.conductor.migrate_server, self.context, None, None, True, True, None, None, None) def test_migrate_server_fails_with_flavor(self): flavor = flavors.get_flavor_by_name('m1.tiny') self.assertRaises(NotImplementedError, self.conductor.migrate_server, self.context, None, None, True, False, flavor, None, None) def _build_request_spec(self, instance): return { 'instance_properties': { 'uuid': instance['uuid'], }, } @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify') @mock.patch.object(live_migrate.LiveMigrationTask, 'execute') def _test_migrate_server_deals_with_expected_exceptions(self, ex, mock_execute, mock_set): instance = fake_instance.fake_db_instance(uuid='uuid', vm_state=vm_states.ACTIVE) inst_obj = 
objects.Instance._from_db_object( self.context, objects.Instance(), instance, []) mock_execute.side_effect = ex self.conductor = utils.ExceptionHelper(self.conductor) self.assertRaises(type(ex), self.conductor.migrate_server, self.context, inst_obj, {'host': 'destination'}, True, False, None, 'block_migration', 'disk_over_commit') mock_set.assert_called_once_with(self.context, inst_obj.uuid, 'compute_task', 'migrate_server', {'vm_state': vm_states.ACTIVE, 'task_state': None, 'expected_task_state': task_states.MIGRATING}, ex, self._build_request_spec(inst_obj)) def test_migrate_server_deals_with_invalidcpuinfo_exception(self): instance = fake_instance.fake_db_instance(uuid='uuid', vm_state=vm_states.ACTIVE) inst_obj = objects.Instance._from_db_object( self.context, objects.Instance(), instance, []) self.mox.StubOutWithMock(live_migrate.LiveMigrationTask, 'execute') self.mox.StubOutWithMock(scheduler_utils, 'set_vm_state_and_notify') ex = exc.InvalidCPUInfo(reason="invalid cpu info.") task = self.conductor._build_live_migrate_task( self.context, inst_obj, 'destination', 'block_migration', 'disk_over_commit', mox.IsA(objects.Migration)) task.execute().AndRaise(ex) scheduler_utils.set_vm_state_and_notify(self.context, inst_obj.uuid, 'compute_task', 'migrate_server', {'vm_state': vm_states.ACTIVE, 'task_state': None, 'expected_task_state': task_states.MIGRATING}, ex, self._build_request_spec(inst_obj)) self.mox.ReplayAll() self.conductor = utils.ExceptionHelper(self.conductor) self.assertRaises(exc.InvalidCPUInfo, self.conductor.migrate_server, self.context, inst_obj, {'host': 'destination'}, True, False, None, 'block_migration', 'disk_over_commit') def test_migrate_server_deals_with_expected_exception(self): exs = [exc.InstanceInvalidState(instance_uuid="fake", attr='', state='', method=''), exc.DestinationHypervisorTooOld(), exc.HypervisorUnavailable(host='dummy'), exc.LiveMigrationWithOldNovaNotSafe(server='dummy'), exc.LiveMigrationWithOldNovaNotSupported(), 
exc.MigrationPreCheckError(reason='dummy'), exc.InvalidSharedStorage(path='dummy', reason='dummy'), exc.NoValidHost(reason='dummy'), exc.ComputeServiceUnavailable(host='dummy'), exc.InvalidHypervisorType(), exc.InvalidCPUInfo(reason='dummy'), exc.UnableToMigrateToSelf(instance_id='dummy', host='dummy'), exc.InvalidLocalStorage(path='dummy', reason='dummy'), exc.MigrationSchedulerRPCError(reason='dummy')] for ex in exs: self._test_migrate_server_deals_with_expected_exceptions(ex) @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify') @mock.patch.object(live_migrate.LiveMigrationTask, 'execute') def test_migrate_server_deals_with_unexpected_exceptions(self, mock_live_migrate, mock_set_state): expected_ex = IOError('fake error') mock_live_migrate.side_effect = expected_ex instance = fake_instance.fake_db_instance() inst_obj = objects.Instance._from_db_object( self.context, objects.Instance(), instance, []) ex = self.assertRaises(exc.MigrationError, self.conductor.migrate_server, self.context, inst_obj, {'host': 'destination'}, True, False, None, 'block_migration', 'disk_over_commit') request_spec = {'instance_properties': { 'uuid': instance['uuid'], }, } mock_set_state.assert_called_once_with(self.context, instance['uuid'], 'compute_task', 'migrate_server', dict(vm_state=vm_states.ERROR, task_state=inst_obj.task_state, expected_task_state=task_states.MIGRATING,), expected_ex, request_spec) self.assertEqual(ex.kwargs['reason'], six.text_type(expected_ex)) def test_set_vm_state_and_notify(self): self.mox.StubOutWithMock(scheduler_utils, 'set_vm_state_and_notify') scheduler_utils.set_vm_state_and_notify( self.context, 1, 'compute_task', 'method', 'updates', 'ex', 'request_spec') self.mox.ReplayAll() self.conductor._set_vm_state_and_notify( self.context, 1, 'method', 'updates', 'ex', 'request_spec') @mock.patch.object(scheduler_utils, 'build_request_spec') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(objects.RequestSpec, 
'from_primitives') @mock.patch.object(utils, 'get_image_from_system_metadata') @mock.patch.object(objects.Quotas, 'from_reservations') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations') @mock.patch.object(conductor_manager.ComputeTaskManager, '_set_vm_state_and_notify') @mock.patch.object(migrate.MigrationTask, 'rollback') def test_cold_migrate_no_valid_host_back_in_active_state( self, rollback_mock, notify_mock, select_dest_mock, quotas_mock, metadata_mock, spec_fp_mock, sig_mock, brs_mock): flavor = flavors.get_flavor_by_name('m1.tiny') inst_obj = objects.Instance( image_ref='fake-image_ref', instance_type_id=flavor['id'], vm_state=vm_states.ACTIVE, system_metadata={}, uuid='fake', user_id='fake') request_spec = dict(instance_type=dict(extra_specs=dict()), instance_properties=dict()) filter_props = dict(context=None) resvs = 'fake-resvs' image = 'fake-image' fake_spec = objects.RequestSpec() metadata_mock.return_value = image brs_mock.return_value = request_spec spec_fp_mock.return_value = fake_spec exc_info = exc.NoValidHost(reason="") select_dest_mock.side_effect = exc_info updates = {'vm_state': vm_states.ACTIVE, 'task_state': None} self.assertRaises(exc.NoValidHost, self.conductor._cold_migrate, self.context, inst_obj, flavor, filter_props, [resvs], clean_shutdown=True) metadata_mock.assert_called_with({}) brs_mock.assert_called_once_with(self.context, image, [inst_obj], instance_type=flavor) quotas_mock.assert_called_once_with(self.context, [resvs], instance=inst_obj) sig_mock.assert_called_once_with(self.context, request_spec, filter_props) notify_mock.assert_called_once_with(self.context, inst_obj.uuid, 'migrate_server', updates, exc_info, request_spec) rollback_mock.assert_called_once_with() @mock.patch.object(scheduler_utils, 'build_request_spec') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(objects.RequestSpec, 'from_primitives') @mock.patch.object(utils, 'get_image_from_system_metadata') 
@mock.patch.object(objects.Quotas, 'from_reservations') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations') @mock.patch.object(conductor_manager.ComputeTaskManager, '_set_vm_state_and_notify') @mock.patch.object(migrate.MigrationTask, 'rollback') def test_cold_migrate_no_valid_host_back_in_stopped_state( self, rollback_mock, notify_mock, select_dest_mock, quotas_mock, metadata_mock, spec_fp_mock, sig_mock, brs_mock): flavor = flavors.get_flavor_by_name('m1.tiny') inst_obj = objects.Instance( image_ref='fake-image_ref', vm_state=vm_states.STOPPED, instance_type_id=flavor['id'], system_metadata={}, uuid='fake', user_id='fake') image = 'fake-image' request_spec = dict(instance_type=dict(extra_specs=dict()), instance_properties=dict(), image=image) filter_props = dict(context=None) resvs = 'fake-resvs' fake_spec = objects.RequestSpec() metadata_mock.return_value = image brs_mock.return_value = request_spec spec_fp_mock.return_value = fake_spec exc_info = exc.NoValidHost(reason="") select_dest_mock.side_effect = exc_info updates = {'vm_state': vm_states.STOPPED, 'task_state': None} self.assertRaises(exc.NoValidHost, self.conductor._cold_migrate, self.context, inst_obj, flavor, filter_props, [resvs], clean_shutdown=True) metadata_mock.assert_called_with({}) brs_mock.assert_called_once_with(self.context, image, [inst_obj], instance_type=flavor) quotas_mock.assert_called_once_with(self.context, [resvs], instance=inst_obj) sig_mock.assert_called_once_with(self.context, request_spec, filter_props) notify_mock.assert_called_once_with(self.context, inst_obj.uuid, 'migrate_server', updates, exc_info, request_spec) rollback_mock.assert_called_once_with() def test_cold_migrate_no_valid_host_error_msg(self): flavor = flavors.get_flavor_by_name('m1.tiny') inst_obj = objects.Instance( image_ref='fake-image_ref', vm_state=vm_states.STOPPED, instance_type_id=flavor['id'], system_metadata={}, uuid='fake', user_id='fake') request_spec = 
dict(instance_type=dict(extra_specs=dict()), instance_properties=dict()) filter_props = dict(context=None) resvs = 'fake-resvs' image = 'fake-image' with test.nested( mock.patch.object(utils, 'get_image_from_system_metadata', return_value=image), mock.patch.object(scheduler_utils, 'build_request_spec', return_value=request_spec), mock.patch.object(self.conductor, '_set_vm_state_and_notify'), mock.patch.object(migrate.MigrationTask, 'execute', side_effect=exc.NoValidHost(reason="")), mock.patch.object(migrate.MigrationTask, 'rollback') ) as (image_mock, brs_mock, set_vm_mock, task_execute_mock, task_rollback_mock): nvh = self.assertRaises(exc.NoValidHost, self.conductor._cold_migrate, self.context, inst_obj, flavor, filter_props, [resvs], clean_shutdown=True) self.assertIn('cold migrate', nvh.message) @mock.patch.object(utils, 'get_image_from_system_metadata') @mock.patch('nova.scheduler.utils.build_request_spec') @mock.patch.object(migrate.MigrationTask, 'execute') @mock.patch.object(migrate.MigrationTask, 'rollback') @mock.patch.object(conductor_manager.ComputeTaskManager, '_set_vm_state_and_notify') def test_cold_migrate_no_valid_host_in_group(self, set_vm_mock, task_rollback_mock, task_exec_mock, brs_mock, image_mock): flavor = flavors.get_flavor_by_name('m1.tiny') inst_obj = objects.Instance( image_ref='fake-image_ref', vm_state=vm_states.STOPPED, instance_type_id=flavor['id'], system_metadata={}, uuid='fake', user_id='fake') request_spec = dict(instance_type=dict(extra_specs=dict()), instance_properties=dict()) filter_props = dict(context=None) resvs = 'fake-resvs' image = 'fake-image' exception = exc.UnsupportedPolicyException(reason='') image_mock.return_value = image brs_mock.return_value = request_spec task_exec_mock.side_effect = exception self.assertRaises(exc.UnsupportedPolicyException, self.conductor._cold_migrate, self.context, inst_obj, flavor, filter_props, [resvs], clean_shutdown=True) updates = {'vm_state': vm_states.STOPPED, 'task_state': None} 
set_vm_mock.assert_called_once_with(self.context, inst_obj.uuid, 'migrate_server', updates, exception, request_spec) @mock.patch.object(scheduler_utils, 'build_request_spec') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(objects.RequestSpec, 'from_primitives') @mock.patch.object(utils, 'get_image_from_system_metadata') @mock.patch.object(objects.Quotas, 'from_reservations') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations') @mock.patch.object(conductor_manager.ComputeTaskManager, '_set_vm_state_and_notify') @mock.patch.object(migrate.MigrationTask, 'rollback') @mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize') def test_cold_migrate_exception_host_in_error_state_and_raise( self, prep_resize_mock, rollback_mock, notify_mock, select_dest_mock, quotas_mock, metadata_mock, spec_fp_mock, sig_mock, brs_mock): flavor = flavors.get_flavor_by_name('m1.tiny') inst_obj = objects.Instance( image_ref='fake-image_ref', vm_state=vm_states.STOPPED, instance_type_id=flavor['id'], system_metadata={}, uuid='fake', user_id='fake') image = 'fake-image' request_spec = dict(instance_type=dict(), instance_properties=dict(), image=image) filter_props = dict(context=None) resvs = 'fake-resvs' fake_spec = objects.RequestSpec() hosts = [dict(host='host1', nodename=None, limits={})] metadata_mock.return_value = image brs_mock.return_value = request_spec spec_fp_mock.return_value = fake_spec exc_info = test.TestingException('something happened') select_dest_mock.return_value = hosts updates = {'vm_state': vm_states.STOPPED, 'task_state': None} prep_resize_mock.side_effect = exc_info self.assertRaises(test.TestingException, self.conductor._cold_migrate, self.context, inst_obj, flavor, filter_props, [resvs], clean_shutdown=True) metadata_mock.assert_called_with({}) brs_mock.assert_called_once_with(self.context, image, [inst_obj], instance_type=flavor) quotas_mock.assert_called_once_with(self.context, [resvs], instance=inst_obj) 
sig_mock.assert_called_once_with(self.context, request_spec, filter_props) select_dest_mock.assert_called_once_with( self.context, fake_spec) prep_resize_mock.assert_called_once_with( self.context, image, inst_obj, flavor, hosts[0]['host'], [resvs], request_spec=request_spec, filter_properties=filter_props, node=hosts[0]['nodename'], clean_shutdown=True) notify_mock.assert_called_once_with(self.context, inst_obj.uuid, 'migrate_server', updates, exc_info, request_spec) rollback_mock.assert_called_once_with() def test_resize_no_valid_host_error_msg(self): flavor = flavors.get_flavor_by_name('m1.tiny') flavor_new = flavors.get_flavor_by_name('m1.small') inst_obj = objects.Instance( image_ref='fake-image_ref', vm_state=vm_states.STOPPED, instance_type_id=flavor['id'], system_metadata={}, uuid='fake', user_id='fake') request_spec = dict(instance_type=dict(extra_specs=dict()), instance_properties=dict()) filter_props = dict(context=None) resvs = 'fake-resvs' image = 'fake-image' with test.nested( mock.patch.object(utils, 'get_image_from_system_metadata', return_value=image), mock.patch.object(scheduler_utils, 'build_request_spec', return_value=request_spec), mock.patch.object(self.conductor, '_set_vm_state_and_notify'), mock.patch.object(migrate.MigrationTask, 'execute', side_effect=exc.NoValidHost(reason="")), mock.patch.object(migrate.MigrationTask, 'rollback') ) as (image_mock, brs_mock, vm_st_mock, task_execute_mock, task_rb_mock): nvh = self.assertRaises(exc.NoValidHost, self.conductor._cold_migrate, self.context, inst_obj, flavor_new, filter_props, [resvs], clean_shutdown=True) self.assertIn('resize', nvh.message) def test_build_instances_instance_not_found(self): instances = [fake_instance.fake_instance_obj(self.context) for i in range(2)] self.mox.StubOutWithMock(instances[0], 'refresh') self.mox.StubOutWithMock(instances[1], 'refresh') image = {'fake-data': 'should_pass_silently'} spec = {'fake': 'specs', 'instance_properties': instances[0]} 
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec') self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances') self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi, 'build_and_run_instance') scheduler_utils.build_request_spec(self.context, image, mox.IgnoreArg()).AndReturn(spec) filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}} self.conductor_manager._schedule_instances(self.context, spec, filter_properties).AndReturn( [{'host': 'host1', 'nodename': 'node1', 'limits': []}, {'host': 'host2', 'nodename': 'node2', 'limits': []}]) instances[0].refresh().AndRaise( exc.InstanceNotFound(instance_id=instances[0].uuid)) instances[1].refresh() self.conductor_manager.compute_rpcapi.build_and_run_instance( self.context, instance=instances[1], host='host2', image={'fake-data': 'should_pass_silently'}, request_spec=spec, filter_properties={'limits': [], 'retry': {'num_attempts': 1, 'hosts': [['host2', 'node2']]}}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping=mox.IsA(objects.BlockDeviceMappingList), node='node2', limits=[]) self.mox.ReplayAll() # build_instances() is a cast, we need to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.conductor.build_instances(self.context, instances=instances, image=image, filter_properties={}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping='block_device_mapping', legacy_bdm=False) @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(scheduler_utils, 'build_request_spec') def test_build_instances_info_cache_not_found(self, build_request_spec, setup_instance_group): instances = [fake_instance.fake_instance_obj(self.context) for i in range(2)] image = {'fake-data': 'should_pass_silently'} destinations = [{'host': 'host1', 'nodename': 'node1', 
'limits': []}, {'host': 'host2', 'nodename': 'node2', 'limits': []}] spec = {'fake': 'specs', 'instance_properties': instances[0]} build_request_spec.return_value = spec with test.nested( mock.patch.object(instances[0], 'refresh', side_effect=exc.InstanceInfoCacheNotFound( instance_uuid=instances[0].uuid)), mock.patch.object(instances[1], 'refresh'), mock.patch.object(objects.RequestSpec, 'from_primitives'), mock.patch.object(self.conductor_manager.scheduler_client, 'select_destinations', return_value=destinations), mock.patch.object(self.conductor_manager.compute_rpcapi, 'build_and_run_instance') ) as (inst1_refresh, inst2_refresh, from_primitives, select_destinations, build_and_run_instance): # build_instances() is a cast, we need to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.conductor.build_instances(self.context, instances=instances, image=image, filter_properties={}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping='block_device_mapping', legacy_bdm=False) # NOTE(sbauza): Due to populate_retry() later in the code, # filter_properties is dynamically modified setup_instance_group.assert_called_once_with( self.context, spec, {'retry': {'num_attempts': 1, 'hosts': []}}) build_and_run_instance.assert_called_once_with(self.context, instance=instances[1], host='host2', image={'fake-data': 'should_pass_silently'}, request_spec=spec, filter_properties={'limits': [], 'retry': {'num_attempts': 1, 'hosts': [['host2', 'node2']]}}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping=mock.ANY, node='node2', limits=[]) class ConductorTaskRPCAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase): """Conductor compute_task RPC namespace Tests.""" def setUp(self): super(ConductorTaskRPCAPITestCase, self).setUp() self.conductor_service = 
self.start_service( 'conductor', manager='nova.conductor.manager.ConductorManager') self.conductor = conductor_rpcapi.ComputeTaskAPI() service_manager = self.conductor_service.manager self.conductor_manager = service_manager.compute_task_mgr class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase): """Compute task API Tests.""" def setUp(self): super(ConductorTaskAPITestCase, self).setUp() self.conductor_service = self.start_service( 'conductor', manager='nova.conductor.manager.ConductorManager') self.conductor = conductor_api.ComputeTaskAPI() service_manager = self.conductor_service.manager self.conductor_manager = service_manager.compute_task_mgr class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase): """Conductor LocalComputeTaskAPI Tests.""" def setUp(self): super(ConductorLocalComputeTaskAPITestCase, self).setUp() self.conductor = conductor_api.LocalComputeTaskAPI() self.conductor_manager = self.conductor._manager._target nova-13.0.0/nova/tests/unit/conductor/__init__.py0000664000567000056710000000000012701407773023072 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/conductor/tasks/0000775000567000056710000000000012701410205022100 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/conductor/tasks/__init__.py0000664000567000056710000000000012701407773024217 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/conductor/tasks/test_live_migrate.py0000664000567000056710000005637312701410011026171 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock import oslo_messaging as messaging from nova.compute import power_state from nova.compute import rpcapi as compute_rpcapi from nova.compute import vm_states from nova.conductor.tasks import live_migrate from nova import exception from nova import objects from nova.scheduler import client as scheduler_client from nova.scheduler import utils as scheduler_utils from nova import servicegroup from nova import test from nova.tests.unit import fake_instance from nova import utils class LiveMigrationTaskTestCase(test.NoDBTestCase): def setUp(self): super(LiveMigrationTaskTestCase, self).setUp() self.context = "context" self.instance_host = "host" self.instance_uuid = "uuid" self.instance_image = "image_ref" db_instance = fake_instance.fake_db_instance( host=self.instance_host, uuid=self.instance_uuid, power_state=power_state.RUNNING, vm_state = vm_states.ACTIVE, memory_mb=512, image_ref=self.instance_image) self.instance = objects.Instance._from_db_object( self.context, objects.Instance(), db_instance) self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'} self.destination = "destination" self.block_migration = "bm" self.disk_over_commit = "doc" self.migration = objects.Migration() self.fake_spec = objects.RequestSpec() self._generate_task() def _generate_task(self): self.task = live_migrate.LiveMigrationTask(self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(), servicegroup.API(), scheduler_client.SchedulerClient(), self.fake_spec) def test_execute_with_destination(self): self.mox.StubOutWithMock(self.task, '_check_host_is_up') self.mox.StubOutWithMock(self.task, '_check_requested_destination') self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration') self.task._check_host_is_up(self.instance_host) self.task._check_requested_destination() 
self.task.compute_rpcapi.live_migration(self.context, host=self.instance_host, instance=self.instance, dest=self.destination, block_migration=self.block_migration, migration=self.migration, migrate_data=None).AndReturn("bob") self.mox.ReplayAll() self.assertEqual("bob", self.task.execute()) def test_execute_without_destination(self): self.destination = None self._generate_task() self.assertIsNone(self.task.destination) self.mox.StubOutWithMock(self.task, '_check_host_is_up') self.mox.StubOutWithMock(self.task, '_find_destination') self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration') self.task._check_host_is_up(self.instance_host) self.task._find_destination().AndReturn("found_host") self.task.compute_rpcapi.live_migration(self.context, host=self.instance_host, instance=self.instance, dest="found_host", block_migration=self.block_migration, migration=self.migration, migrate_data=None).AndReturn("bob") self.mox.ReplayAll() with mock.patch.object(self.migration, 'save') as mock_save: self.assertEqual("bob", self.task.execute()) self.assertTrue(mock_save.called) self.assertEqual('found_host', self.migration.dest_compute) def test_check_instance_is_active_passes_when_paused(self): self.task.instance['power_state'] = power_state.PAUSED self.task._check_instance_is_active() def test_check_instance_is_active_fails_when_shutdown(self): self.task.instance['power_state'] = power_state.SHUTDOWN self.assertRaises(exception.InstanceInvalidState, self.task._check_instance_is_active) def test_check_instance_host_is_up(self): self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up') objects.Service.get_by_compute_host(self.context, "host").AndReturn("service") self.task.servicegroup_api.service_is_up("service").AndReturn(True) self.mox.ReplayAll() self.task._check_host_is_up("host") def test_check_instance_host_is_up_fails_if_not_up(self): self.mox.StubOutWithMock(objects.Service, 
'get_by_compute_host') self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up') objects.Service.get_by_compute_host(self.context, "host").AndReturn("service") self.task.servicegroup_api.service_is_up("service").AndReturn(False) self.mox.ReplayAll() self.assertRaises(exception.ComputeServiceUnavailable, self.task._check_host_is_up, "host") def test_check_instance_host_is_up_fails_if_not_found(self): self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') objects.Service.get_by_compute_host( self.context, "host").AndRaise(exception.NotFound) self.mox.ReplayAll() self.assertRaises(exception.ComputeServiceUnavailable, self.task._check_host_is_up, "host") def test_check_requested_destination(self): self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') self.mox.StubOutWithMock(self.task, '_get_compute_info') self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up') self.mox.StubOutWithMock(self.task.compute_rpcapi, 'check_can_live_migrate_destination') objects.Service.get_by_compute_host( self.context, self.destination).AndReturn("service") self.task.servicegroup_api.service_is_up("service").AndReturn(True) hypervisor_details = objects.ComputeNode( hypervisor_type="a", hypervisor_version=6.1, free_ram_mb=513, memory_mb=512, ram_allocation_ratio=1.0, ) self.task._get_compute_info(self.destination)\ .AndReturn(hypervisor_details) self.task._get_compute_info(self.instance_host)\ .AndReturn(hypervisor_details) self.task._get_compute_info(self.destination)\ .AndReturn(hypervisor_details) self.task.compute_rpcapi.check_can_live_migrate_destination( self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit).AndReturn( "migrate_data") self.mox.ReplayAll() self.task._check_requested_destination() self.assertEqual("migrate_data", self.task.migrate_data) def test_check_requested_destination_fails_with_same_dest(self): self.task.destination = "same" self.task.source = "same" 
self.assertRaises(exception.UnableToMigrateToSelf, self.task._check_requested_destination) def test_check_requested_destination_fails_when_destination_is_up(self): self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') objects.Service.get_by_compute_host( self.context, self.destination).AndRaise(exception.NotFound) self.mox.ReplayAll() self.assertRaises(exception.ComputeServiceUnavailable, self.task._check_requested_destination) def test_check_requested_destination_fails_with_not_enough_memory(self): self.mox.StubOutWithMock(self.task, '_check_host_is_up') self.mox.StubOutWithMock(objects.ComputeNode, 'get_first_node_by_host_for_old_compat') self.task._check_host_is_up(self.destination) objects.ComputeNode.get_first_node_by_host_for_old_compat(self.context, self.destination).AndReturn( objects.ComputeNode(free_ram_mb=513, memory_mb=1024, ram_allocation_ratio=0.9, )) self.mox.ReplayAll() # free_ram is bigger than instance.ram (512) but the allocation ratio # reduces the total available RAM to 410MB (1024 * 0.9 - (1024 - 513)) self.assertRaises(exception.MigrationPreCheckError, self.task._check_requested_destination) def test_check_requested_destination_fails_with_hypervisor_diff(self): self.mox.StubOutWithMock(self.task, '_check_host_is_up') self.mox.StubOutWithMock(self.task, '_check_destination_has_enough_memory') self.mox.StubOutWithMock(self.task, '_get_compute_info') self.task._check_host_is_up(self.destination) self.task._check_destination_has_enough_memory() self.task._get_compute_info(self.instance_host).AndReturn({ "hypervisor_type": "b" }) self.task._get_compute_info(self.destination).AndReturn({ "hypervisor_type": "a" }) self.mox.ReplayAll() self.assertRaises(exception.InvalidHypervisorType, self.task._check_requested_destination) def test_check_requested_destination_fails_with_hypervisor_too_old(self): self.mox.StubOutWithMock(self.task, '_check_host_is_up') self.mox.StubOutWithMock(self.task, '_check_destination_has_enough_memory') 
self.mox.StubOutWithMock(self.task, '_get_compute_info') self.task._check_host_is_up(self.destination) self.task._check_destination_has_enough_memory() self.task._get_compute_info(self.instance_host).AndReturn({ "hypervisor_type": "a", "hypervisor_version": 7 }) self.task._get_compute_info(self.destination).AndReturn({ "hypervisor_type": "a", "hypervisor_version": 6 }) self.mox.ReplayAll() self.assertRaises(exception.DestinationHypervisorTooOld, self.task._check_requested_destination) def test_find_destination_works(self): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(objects.RequestSpec, 'reset_forced_destinations') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.fake_spec.reset_forced_destinations() self.task.scheduler_client.select_destinations( self.context, self.fake_spec).AndReturn( [{'host': 'host1'}]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1") self.mox.ReplayAll() self.assertEqual("host1", self.task._find_destination()) def test_find_destination_works_with_no_request_spec(self): task = live_migrate.LiveMigrationTask( self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(), servicegroup.API(), scheduler_client.SchedulerClient(), request_spec=None) another_spec = objects.RequestSpec() self.instance.flavor = objects.Flavor() self.instance.numa_topology = None 
self.instance.pci_requests = None @mock.patch.object(task, '_call_livem_checks_on_host') @mock.patch.object(task, '_check_compatible_with_source_hypervisor') @mock.patch.object(task.scheduler_client, 'select_destinations') @mock.patch.object(objects.RequestSpec, 'from_components') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(utils, 'get_image_from_system_metadata') def do_test(get_image, setup_ig, from_components, select_dest, check_compat, call_livem_checks): get_image.return_value = "image" from_components.return_value = another_spec select_dest.return_value = [{'host': 'host1'}] self.assertEqual("host1", task._find_destination()) get_image.assert_called_once_with(self.instance.system_metadata) fake_props = {'instance_properties': {'uuid': self.instance_uuid}} setup_ig.assert_called_once_with( self.context, fake_props, {'ignore_hosts': [self.instance_host]} ) select_dest.assert_called_once_with(self.context, another_spec) check_compat.assert_called_once_with("host1") call_livem_checks.assert_called_once_with("host1") do_test() def test_find_destination_no_image_works(self): self.instance['image_ref'] = '' self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host1'}]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1") self.mox.ReplayAll() self.assertEqual("host1", self.task._find_destination()) def _test_find_destination_retry_hypervisor_raises(self, error): 
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host1'}]) self.task._check_compatible_with_source_hypervisor("host1")\ .AndRaise(error) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host2'}]) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() self.assertEqual("host2", self.task._find_destination()) def test_find_destination_retry_with_old_hypervisor(self): self._test_find_destination_retry_hypervisor_raises( exception.DestinationHypervisorTooOld) def test_find_destination_retry_with_invalid_hypervisor_type(self): self._test_find_destination_retry_hypervisor_raises( exception.InvalidHypervisorType) def test_find_destination_retry_with_invalid_livem_checks(self): self.flags(migrate_max_retries=1) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': 
self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host1'}]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1")\ .AndRaise(exception.Invalid) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host2'}]) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() self.assertEqual("host2", self.task._find_destination()) def test_find_destination_retry_with_failed_migration_pre_checks(self): self.flags(migrate_max_retries=1) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host1'}]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1")\ .AndRaise(exception.MigrationPreCheckError("reason")) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host2'}]) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() self.assertEqual("host2", self.task._find_destination()) def test_find_destination_retry_exceeds_max(self): 
self.flags(migrate_max_retries=0) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host1'}]) self.task._check_compatible_with_source_hypervisor("host1")\ .AndRaise(exception.DestinationHypervisorTooOld) self.mox.ReplayAll() with mock.patch.object(self.task.migration, 'save') as save_mock: self.assertRaises(exception.MaxRetriesExceeded, self.task._find_destination) self.assertEqual('failed', self.task.migration.status) save_mock.assert_called_once_with() def test_find_destination_when_runs_out_of_hosts(self): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndRaise( exception.NoValidHost(reason="")) self.mox.ReplayAll() self.assertRaises(exception.NoValidHost, self.task._find_destination) @mock.patch("nova.utils.get_image_from_system_metadata") @mock.patch("nova.scheduler.utils.build_request_spec") @mock.patch("nova.scheduler.utils.setup_instance_group") 
@mock.patch("nova.objects.RequestSpec.from_primitives") def test_find_destination_with_remoteError(self, m_from_primitives, m_setup_instance_group, m_build_request_spec, m_get_image_from_system_metadata): m_get_image_from_system_metadata.return_value = {'properties': {}} m_build_request_spec.return_value = {} fake_spec = objects.RequestSpec() m_from_primitives.return_value = fake_spec with mock.patch.object(self.task.scheduler_client, 'select_destinations') as m_select_destinations: error = messaging.RemoteError() m_select_destinations.side_effect = error self.assertRaises(exception.MigrationSchedulerRPCError, self.task._find_destination) def test_call_livem_checks_on_host(self): with mock.patch.object(self.task.compute_rpcapi, 'check_can_live_migrate_destination', side_effect=messaging.MessagingTimeout): self.assertRaises(exception.MigrationPreCheckError, self.task._call_livem_checks_on_host, {}) nova-13.0.0/nova/tests/unit/conductor/tasks/test_base.py0000664000567000056710000000310412701407773024441 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova.conductor.tasks import base from nova import test class FakeTask(base.TaskBase): def __init__(self, context, instance, fail=False): super(FakeTask, self).__init__(context, instance) self.fail = fail def _execute(self): if self.fail: raise Exception else: pass class TaskBaseTestCase(test.NoDBTestCase): def setUp(self): super(TaskBaseTestCase, self).setUp() self.task = FakeTask(mock.MagicMock(), mock.MagicMock()) @mock.patch.object(FakeTask, 'rollback') def test_wrapper_exception(self, fake_rollback): self.task.fail = True try: self.task.execute() except Exception: pass fake_rollback.assert_called_once_with() @mock.patch.object(FakeTask, 'rollback') def test_wrapper_no_exception(self, fake_rollback): try: self.task.execute() except Exception: pass self.assertFalse(fake_rollback.called) nova-13.0.0/nova/tests/unit/conductor/tasks/test_migrate.py0000664000567000056710000001033012701407773025156 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova.compute import rpcapi as compute_rpcapi from nova.conductor.tasks import migrate from nova import objects from nova.objects import base as obj_base from nova.scheduler import client as scheduler_client from nova.scheduler import utils as scheduler_utils from nova import test from nova.tests.unit.conductor.test_conductor import FakeContext from nova.tests.unit import fake_flavor from nova.tests.unit import fake_instance class MigrationTaskTestCase(test.NoDBTestCase): def setUp(self): super(MigrationTaskTestCase, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.context = FakeContext(self.user_id, self.project_id) inst = fake_instance.fake_db_instance(image_ref='image_ref') self.instance = objects.Instance._from_db_object( self.context, objects.Instance(), inst, []) self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'} self.flavor = fake_flavor.fake_flavor_obj(self.context) self.flavor.extra_specs = {'extra_specs': 'fake'} self.request_spec = {'instance_type': obj_base.obj_to_primitive(self.flavor), 'instance_properties': {}, 'image': 'image'} self.hosts = [dict(host='host1', nodename=None, limits={})] self.filter_properties = {'limits': {}, 'retry': {'num_attempts': 1, 'hosts': [['host1', None]]}} self.reservations = [] self.clean_shutdown = True def _generate_task(self): return migrate.MigrationTask(self.context, self.instance, self.flavor, self.filter_properties, self.request_spec, self.reservations, self.clean_shutdown, compute_rpcapi.ComputeAPI(), scheduler_client.SchedulerClient()) @mock.patch.object(scheduler_utils, 'build_request_spec') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(objects.RequestSpec, 'from_primitives') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations') @mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize') @mock.patch.object(objects.Quotas, 'from_reservations') def test_execute(self, quotas_mock, prep_resize_mock, sel_dest_mock, 
spec_fp_mock, sig_mock, brs_mock): brs_mock.return_value = self.request_spec fake_spec = objects.RequestSpec() spec_fp_mock.return_value = fake_spec sel_dest_mock.return_value = self.hosts task = self._generate_task() task.execute() quotas_mock.assert_called_once_with(self.context, self.reservations, instance=self.instance) sig_mock.assert_called_once_with(self.context, self.request_spec, self.filter_properties) task.scheduler_client.select_destinations.assert_called_once_with( self.context, fake_spec) prep_resize_mock.assert_called_once_with( self.context, 'image', self.instance, self.flavor, self.hosts[0]['host'], self.reservations, request_spec=self.request_spec, filter_properties=self.filter_properties, node=self.hosts[0]['nodename'], clean_shutdown=self.clean_shutdown) self.assertFalse(quotas_mock.return_value.rollback.called) def test_rollback(self): task = self._generate_task() task.quotas = mock.MagicMock() task.rollback() task.quotas.rollback.assert_called_once_with() nova-13.0.0/nova/tests/unit/test_cache.py0000664000567000056710000001212212701407773021445 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import cache_utils from nova import test class TestOsloCache(test.NoDBTestCase): def test_get_default_cache_region(self): region = cache_utils._get_default_cache_region(expiration_time=60) self.assertEqual(60, region.expiration_time) self.assertIsNotNone(region) def test_get_default_cache_region_default_expiration_time(self): region = cache_utils._get_default_cache_region(expiration_time=0) # default oslo.cache expiration_time value 600 was taken self.assertEqual(600, region.expiration_time) self.assertIsNotNone(region) @mock.patch('dogpile.cache.region.CacheRegion.configure') def test_get_client(self, mock_cacheregion): self.assertIsNotNone( cache_utils.get_client(expiration_time=60)) self.flags(memcached_servers=['localhost:11211']) self.assertIsNotNone( cache_utils.get_client(expiration_time=60)) self.flags(memcached_servers=None) self.flags(group='cache', enabled=True) self.assertIsNotNone( cache_utils.get_client(expiration_time=60)) self.flags(memcached_servers=None) self.flags(group='cache', enabled=False) client = cache_utils.get_client(expiration_time=60) self.assertIsNotNone(client.region) mock_cacheregion.assert_has_calls( [mock.call('oslo_cache.dict', arguments={'expiration_time': 60}, expiration_time=60), mock.call('dogpile.cache.memcached', arguments={'url': ['localhost:11211']}, expiration_time=60), mock.call('dogpile.cache.null', _config_argument_dict=mock.ANY, _config_prefix='cache.oslo.arguments.', expiration_time=60, wrap=None), mock.call('oslo_cache.dict', arguments={'expiration_time': 60}, expiration_time=60)], ) @mock.patch('dogpile.cache.region.CacheRegion.configure') def test_get_custom_cache_region(self, mock_cacheregion): self.assertRaises(RuntimeError, cache_utils._get_custom_cache_region) self.assertIsNotNone( cache_utils._get_custom_cache_region( backend='oslo_cache.dict')) self.assertIsNotNone( cache_utils._get_custom_cache_region( backend='dogpile.cache.memcached', url=['localhost:11211'])) 
mock_cacheregion.assert_has_calls( [mock.call('oslo_cache.dict', arguments={'expiration_time': 604800}, expiration_time=604800), mock.call('dogpile.cache.memcached', arguments={'url': ['localhost:11211']}, expiration_time=604800)] ) @mock.patch('dogpile.cache.region.CacheRegion.configure') def test_get_memcached_client(self, mock_cacheregion): self.flags(memcached_servers=None) self.flags(group='cache', enabled=False) self.assertRaises( RuntimeError, cache_utils.get_memcached_client, expiration_time=60) self.flags(memcached_servers=['localhost:11211']) self.assertIsNotNone( cache_utils.get_memcached_client(expiration_time=60)) self.flags(memcached_servers=['localhost:11211']) self.assertIsNotNone( cache_utils.get_memcached_client(expiration_time=60)) self.flags(memcached_servers=None) self.flags(group='cache', enabled=True) self.flags(group='cache', memcache_servers=['localhost:11211']) self.assertIsNotNone( cache_utils.get_memcached_client(expiration_time=60)) mock_cacheregion.assert_has_calls( [mock.call('dogpile.cache.memcached', arguments={'url': ['localhost:11211']}, expiration_time=60), mock.call('dogpile.cache.memcached', arguments={'url': ['localhost:11211']}, expiration_time=60), mock.call('dogpile.cache.null', _config_argument_dict=mock.ANY, _config_prefix='cache.oslo.arguments.', expiration_time=60, wrap=None)] ) nova-13.0.0/nova/tests/unit/test_configdrive2.py0000664000567000056710000001116212701407773022766 0ustar jenkinsjenkins00000000000000# Copyright 2012 Michael Still and Canonical Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile import mock from mox3 import mox from oslo_config import cfg from oslo_utils import fileutils from nova import context from nova import test from nova.tests.unit import fake_instance from nova import utils from nova.virt import configdrive CONF = cfg.CONF class FakeInstanceMD(object): def metadata_for_config_drive(self): yield ('this/is/a/path/hello', 'This is some content') class ConfigDriveTestCase(test.NoDBTestCase): def test_create_configdrive_iso(self): CONF.set_override('config_drive_format', 'iso9660') imagefile = None try: self.mox.StubOutWithMock(utils, 'execute') utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots', '-allow-lowercase', '-allow-multidot', '-l', '-publisher', mox.IgnoreArg(), '-quiet', '-J', '-r', '-V', 'config-2', mox.IgnoreArg(), attempts=1, run_as_root=False).AndReturn(None) self.mox.ReplayAll() with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c: (fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_') os.close(fd) c.make_drive(imagefile) finally: if imagefile: fileutils.delete_if_exists(imagefile) def test_create_configdrive_vfat(self): CONF.set_override('config_drive_format', 'vfat') imagefile = None try: self.mox.StubOutWithMock(utils, 'mkfs') self.mox.StubOutWithMock(utils, 'execute') self.mox.StubOutWithMock(utils, 'trycmd') utils.mkfs('vfat', mox.IgnoreArg(), label='config-2').AndReturn(None) utils.trycmd('mount', '-o', mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), run_as_root=True).AndReturn((None, None)) utils.execute('umount', mox.IgnoreArg(), run_as_root=True).AndReturn(None) self.mox.ReplayAll() with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c: (fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_') os.close(fd) c.make_drive(imagefile) # NOTE(mikal): we can't check for a VFAT output here because the # filesystem creation stuff has been mocked out because it # requires root 
permissions finally: if imagefile: fileutils.delete_if_exists(imagefile) def test_config_drive_required_by_image_property(self): inst = fake_instance.fake_instance_obj(context.get_admin_context()) inst.config_drive = '' inst.system_metadata = { utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'mandatory'} self.assertTrue(configdrive.required_by(inst)) inst.system_metadata = { utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'optional'} self.assertFalse(configdrive.required_by(inst)) @mock.patch.object(configdrive, 'required_by', return_value=False) def test_config_drive_update_instance_required_by_false(self, mock_required): inst = fake_instance.fake_instance_obj(context.get_admin_context()) inst.config_drive = '' configdrive.update_instance(inst) self.assertEqual('', inst.config_drive) inst.config_drive = True configdrive.update_instance(inst) self.assertTrue(inst.config_drive) @mock.patch.object(configdrive, 'required_by', return_value=True) def test_config_drive_update_instance(self, mock_required): inst = fake_instance.fake_instance_obj(context.get_admin_context()) inst.config_drive = '' configdrive.update_instance(inst) self.assertTrue(inst.config_drive) inst.config_drive = True configdrive.update_instance(inst) self.assertTrue(inst.config_drive) nova-13.0.0/nova/tests/unit/test_availability_zones.py0000664000567000056710000002730512701407773024303 0ustar jenkinsjenkins00000000000000# Copyright 2013 Netease Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Tests for availability zones """ import mock import six from nova import availability_zones as az import nova.conf from nova import context from nova import db from nova import objects from nova import test CONF = nova.conf.CONF class AvailabilityZoneTestCases(test.TestCase): """Test case for aggregate based availability zone.""" def setUp(self): super(AvailabilityZoneTestCases, self).setUp() self.host = 'me' self.availability_zone = 'nova-test' self.default_az = CONF.default_availability_zone self.default_in_az = CONF.internal_service_availability_zone self.context = context.get_admin_context() self.agg = self._create_az('az_agg', self.availability_zone) def tearDown(self): db.aggregate_delete(self.context, self.agg['id']) super(AvailabilityZoneTestCases, self).tearDown() def _create_az(self, agg_name, az_name): agg_meta = {'name': agg_name} agg = db.aggregate_create(self.context, agg_meta) metadata = {'availability_zone': az_name} db.aggregate_metadata_add(self.context, agg['id'], metadata) return agg def _update_az(self, aggregate, az_name): metadata = {'availability_zone': az_name} db.aggregate_update(self.context, aggregate['id'], metadata) def _create_service_with_topic(self, topic, host, disabled=False): values = { 'binary': 'bin', 'host': host, 'topic': topic, 'disabled': disabled, } return db.service_create(self.context, values) def _destroy_service(self, service): return db.service_destroy(self.context, service['id']) def _add_to_aggregate(self, service, aggregate): return db.aggregate_host_add(self.context, aggregate['id'], service['host']) def _delete_from_aggregate(self, service, aggregate): return db.aggregate_host_delete(self.context, aggregate['id'], service['host']) def test_rest_availability_zone_reset_cache(self): az._get_cache().add('cache', 'fake_value') az.reset_cache() self.assertIsNone(az._get_cache().get('cache')) def 
test_update_host_availability_zone_cache(self): """Test availability zone cache could be update.""" service = self._create_service_with_topic('compute', self.host) # Create a new aggregate with an AZ and add the host to the AZ az_name = 'az1' cache_key = az._make_cache_key(self.host) agg_az1 = self._create_az('agg-az1', az_name) self._add_to_aggregate(service, agg_az1) az.update_host_availability_zone_cache(self.context, self.host) self.assertEqual('az1', az._get_cache().get(cache_key)) az.update_host_availability_zone_cache(self.context, self.host, 'az2') self.assertEqual('az2', az._get_cache().get(cache_key)) def test_set_availability_zone_compute_service(self): """Test for compute service get right availability zone.""" service = self._create_service_with_topic('compute', self.host) services = db.service_get_all(self.context) # The service is not add into aggregate, so confirm it is default # availability zone. new_service = az.set_availability_zones(self.context, services)[0] self.assertEqual(self.default_az, new_service['availability_zone']) # The service is added into aggregate, confirm return the aggregate # availability zone. 
self._add_to_aggregate(service, self.agg) new_service = az.set_availability_zones(self.context, services)[0] self.assertEqual(self.availability_zone, new_service['availability_zone']) self._destroy_service(service) def test_set_availability_zone_unicode_key(self): """Test set availability zone cache key is unicode.""" service = self._create_service_with_topic('network', self.host) services = db.service_get_all(self.context) az.set_availability_zones(self.context, services) self.assertIsInstance(services[0]['host'], six.text_type) cached_key = az._make_cache_key(services[0]['host']) self.assertIsInstance(cached_key, str) self._destroy_service(service) def test_set_availability_zone_not_compute_service(self): """Test not compute service get right availability zone.""" service = self._create_service_with_topic('network', self.host) services = db.service_get_all(self.context) new_service = az.set_availability_zones(self.context, services)[0] self.assertEqual(self.default_in_az, new_service['availability_zone']) self._destroy_service(service) def test_get_host_availability_zone(self): """Test get right availability zone by given host.""" self.assertEqual(self.default_az, az.get_host_availability_zone(self.context, self.host)) service = self._create_service_with_topic('compute', self.host) self._add_to_aggregate(service, self.agg) self.assertEqual(self.availability_zone, az.get_host_availability_zone(self.context, self.host)) def test_update_host_availability_zone(self): """Test availability zone could be update by given host.""" service = self._create_service_with_topic('compute', self.host) # Create a new aggregate with an AZ and add the host to the AZ az_name = 'az1' agg_az1 = self._create_az('agg-az1', az_name) self._add_to_aggregate(service, agg_az1) self.assertEqual(az_name, az.get_host_availability_zone(self.context, self.host)) # Update AZ new_az_name = 'az2' self._update_az(agg_az1, new_az_name) self.assertEqual(new_az_name, 
az.get_host_availability_zone(self.context, self.host)) def test_delete_host_availability_zone(self): """Test availability zone could be deleted successfully.""" service = self._create_service_with_topic('compute', self.host) # Create a new aggregate with an AZ and add the host to the AZ az_name = 'az1' agg_az1 = self._create_az('agg-az1', az_name) self._add_to_aggregate(service, agg_az1) self.assertEqual(az_name, az.get_host_availability_zone(self.context, self.host)) # Delete the AZ via deleting the aggregate self._delete_from_aggregate(service, agg_az1) self.assertEqual(self.default_az, az.get_host_availability_zone(self.context, self.host)) def test_get_availability_zones(self): """Test get_availability_zones.""" # When the param get_only_available of get_availability_zones is set # to default False, it returns two lists, zones with at least one # enabled services, and zones with no enabled services, # when get_only_available is set to True, only return a list of zones # with at least one enabled services. 
# Use the following test data: # # zone host enabled # nova-test host1 Yes # nova-test host2 No # nova-test2 host3 Yes # nova-test3 host4 No # host5 No agg2 = self._create_az('agg-az2', 'nova-test2') agg3 = self._create_az('agg-az3', 'nova-test3') service1 = self._create_service_with_topic('compute', 'host1', disabled=False) service2 = self._create_service_with_topic('compute', 'host2', disabled=True) service3 = self._create_service_with_topic('compute', 'host3', disabled=False) service4 = self._create_service_with_topic('compute', 'host4', disabled=True) self._create_service_with_topic('compute', 'host5', disabled=True) self._add_to_aggregate(service1, self.agg) self._add_to_aggregate(service2, self.agg) self._add_to_aggregate(service3, agg2) self._add_to_aggregate(service4, agg3) zones, not_zones = az.get_availability_zones(self.context) self.assertEqual(['nova-test', 'nova-test2'], zones) self.assertEqual(['nova-test3', 'nova'], not_zones) zones = az.get_availability_zones(self.context, True) self.assertEqual(['nova-test', 'nova-test2'], zones) zones, not_zones = az.get_availability_zones(self.context, with_hosts=True) self.assertJsonEqual(zones, [(u'nova-test2', set([u'host3'])), (u'nova-test', set([u'host1']))]) self.assertJsonEqual(not_zones, [(u'nova-test3', set([u'host4'])), (u'nova', set([u'host5']))]) def test_get_instance_availability_zone_default_value(self): """Test get right availability zone by given an instance.""" fake_inst = objects.Instance(host=self.host, availability_zone=None) self.assertEqual(self.default_az, az.get_instance_availability_zone(self.context, fake_inst)) def test_get_instance_availability_zone_from_aggregate(self): """Test get availability zone from aggregate by given an instance.""" host = 'host170' service = self._create_service_with_topic('compute', host) self._add_to_aggregate(service, self.agg) fake_inst = objects.Instance(host=host, availability_zone=self.availability_zone) self.assertEqual(self.availability_zone, 
az.get_instance_availability_zone(self.context, fake_inst)) @mock.patch.object(az._get_cache(), 'get') def test_get_instance_availability_zone_cache_differs(self, cache_get): host = 'host170' service = self._create_service_with_topic('compute', host) self._add_to_aggregate(service, self.agg) cache_get.return_value = self.default_az fake_inst = objects.Instance(host=host, availability_zone=self.availability_zone) self.assertEqual( self.availability_zone, az.get_instance_availability_zone(self.context, fake_inst)) def test_get_instance_availability_zone_no_host(self): """Test get availability zone from instance if host not set.""" fake_inst = objects.Instance(host=None, availability_zone='inst-az') result = az.get_instance_availability_zone(self.context, fake_inst) self.assertEqual('inst-az', result) def test_get_instance_availability_zone_no_host_no_az(self): """Test get availability zone if neither host nor az is set.""" fake_inst = objects.Instance(host=None, availability_zone=None) result = az.get_instance_availability_zone(self.context, fake_inst) self.assertIsNone(result) nova-13.0.0/nova/tests/unit/fake_loadables/0000775000567000056710000000000012701410205021667 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/fake_loadables/fake_loadable1.py0000664000567000056710000000237012701407773025075 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Fake Loadable subclasses module #1 """ from nova.tests.unit import fake_loadables class FakeLoadableSubClass1(fake_loadables.FakeLoadable): pass class FakeLoadableSubClass2(fake_loadables.FakeLoadable): pass class _FakeLoadableSubClass3(fake_loadables.FakeLoadable): """Classes beginning with '_' will be ignored.""" pass class FakeLoadableSubClass4(object): """Not a correct subclass.""" def return_valid_classes(): return [FakeLoadableSubClass1, FakeLoadableSubClass2] def return_invalid_classes(): return [FakeLoadableSubClass1, _FakeLoadableSubClass3, FakeLoadableSubClass4] nova-13.0.0/nova/tests/unit/fake_loadables/fake_loadable2.py0000664000567000056710000000215012701407773025072 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fake Loadable subclasses module #2 """ from nova.tests.unit import fake_loadables class FakeLoadableSubClass5(fake_loadables.FakeLoadable): pass class FakeLoadableSubClass6(fake_loadables.FakeLoadable): pass class _FakeLoadableSubClass7(fake_loadables.FakeLoadable): """Classes beginning with '_' will be ignored.""" pass class FakeLoadableSubClass8(BaseException): """Not a correct subclass.""" def return_valid_class(): return [FakeLoadableSubClass6] nova-13.0.0/nova/tests/unit/fake_loadables/__init__.py0000664000567000056710000000154312701407773024023 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For Loadable class handling. """ from nova import loadables class FakeLoadable(object): pass class FakeLoader(loadables.BaseLoader): def __init__(self): super(FakeLoader, self).__init__(FakeLoadable) nova-13.0.0/nova/tests/unit/console/0000775000567000056710000000000012701410205020415 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/console/test_type.py0000664000567000056710000000400012701407773023021 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.console import type as ctype
from nova import test


class TypeTestCase(test.NoDBTestCase):
    """Unit tests for the console connection-type value objects."""

    def test_console(self):
        console = ctype.Console(host='127.0.0.1', port=8945)

        # The base Console exposes host, port and internal_access_path.
        for attr in ('host', 'port', 'internal_access_path'):
            self.assertTrue(hasattr(console, attr))

        self.assertEqual('127.0.0.1', console.host)
        self.assertEqual(8945, console.port)
        self.assertIsNone(console.internal_access_path)

        expected = {
            'host': '127.0.0.1',
            'port': 8945,
            'internal_access_path': None,
            'token': 'a-token',
            'access_url': 'an-url',
        }
        self.assertEqual(
            expected, console.get_connection_info('a-token', 'an-url'))

    def test_console_vnc(self):
        console = ctype.ConsoleVNC(host='127.0.0.1', port=8945)
        self.assertIsInstance(console, ctype.Console)

    def test_console_rdp(self):
        console = ctype.ConsoleRDP(host='127.0.0.1', port=8945)
        self.assertIsInstance(console, ctype.Console)

    def test_console_spice(self):
        console = ctype.ConsoleSpice(host='127.0.0.1', port=8945,
                                     tlsPort=6547)
        self.assertIsInstance(console, ctype.Console)
        # SPICE adds a tlsPort field that must also flow into the
        # connection-info dict.
        self.assertEqual(6547, console.tlsPort)
        info = console.get_connection_info('a-token', 'an-url')
        self.assertEqual(6547, info['tlsPort'])

    def test_console_serial(self):
        console = ctype.ConsoleSerial(host='127.0.0.1', port=8945)
        self.assertIsInstance(console, ctype.Console)
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tests For Console proxy."""

import mock
from oslo_config import cfg
from oslo_utils import importutils

from nova.compute import rpcapi as compute_rpcapi
from nova.console import api as console_api
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova import test

CONF = cfg.CONF
CONF.import_opt('console_manager', 'nova.service')
CONF.import_opt('console_driver', 'nova.console.manager')


class ConsoleTestCase(test.TestCase):
    """Test case for console proxy manager."""

    def setUp(self):
        super(ConsoleTestCase, self).setUp()
        # Use the fake console driver and stub out compute so the manager
        # can run without a real hypervisor.
        self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
                   stub_compute=True)
        self.console = importutils.import_object(CONF.console_manager)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.host = 'test_compute_host'

    def test_reset(self):
        # reset() must construct a brand-new compute RPC API client.
        with mock.patch('nova.compute.rpcapi.ComputeAPI') as mock_rpc:
            old_rpcapi = self.console.compute_rpcapi
            self.console.reset()
            mock_rpc.assert_called_once_with()
            self.assertNotEqual(old_rpcapi, self.console.compute_rpcapi)

    def _create_instance(self):
        """Create a test instance."""
        inst = {}
        inst['image_id'] = 1
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user_id
        inst['project_id'] = self.project_id
        inst['instance_type_id'] = 1
        inst['ami_launch_index'] = 0
        return db.instance_create(self.context, inst)

    def test_get_pool_for_instance_host(self):
        pool = self.console._get_pool_for_instance_host(self.context,
                                                        self.host)
        self.assertEqual(pool['compute_host'], self.host)

    def test_get_pool_creates_new_pool_if_needed(self):
        # Before the first lookup no pool exists for this host/type.
        self.assertRaises(exception.NotFound,
                          db.console_pool_get_by_host_type,
                          self.context,
                          self.host,
                          self.console.host,
                          self.console.driver.console_type)
        pool = self.console._get_pool_for_instance_host(self.context,
                                                        self.host)
        # The lookup should have created the pool as a side effect.
        pool2 = db.console_pool_get_by_host_type(self.context,
                                                 self.host,
                                                 self.console.host,
                                                 self.console.driver.console_type)
        self.assertEqual(pool['id'], pool2['id'])

    def test_get_pool_does_not_create_new_pool_if_exists(self):
        pool_info = {'address': '127.0.0.1',
                     'username': 'test',
                     'password': '1234pass',
                     'host': self.console.host,
                     'console_type': self.console.driver.console_type,
                     'compute_host': 'sometesthostname'}
        new_pool = db.console_pool_create(self.context, pool_info)
        # The pre-created pool must be reused, not duplicated.
        pool = self.console._get_pool_for_instance_host(self.context,
                                                        'sometesthostname')
        self.assertEqual(pool['id'], new_pool['id'])

    def test_add_console(self):
        instance = self._create_instance()
        self.console.add_console(self.context, instance['id'])
        instance = db.instance_get(self.context, instance['id'])
        pool = db.console_pool_get_by_host_type(self.context,
                                                instance['host'],
                                                self.console.host,
                                                self.console.driver.console_type)
        console_instances = [con['instance_uuid'] for con in pool['consoles']]
        self.assertIn(instance['uuid'], console_instances)
        db.instance_destroy(self.context, instance['uuid'])

    def test_add_console_does_not_duplicate(self):
        instance = self._create_instance()
        # Adding a console twice for the same instance returns the same id.
        cons1 = self.console.add_console(self.context, instance['id'])
        cons2 = self.console.add_console(self.context, instance['id'])
        self.assertEqual(cons1, cons2)
        db.instance_destroy(self.context, instance['uuid'])

    def test_remove_console(self):
        instance = self._create_instance()
        console_id = self.console.add_console(self.context, instance['id'])
        self.console.remove_console(self.context, console_id)
        # The console record must be gone from the database.
        self.assertRaises(exception.NotFound,
                          db.console_get,
                          self.context,
                          console_id)
        db.instance_destroy(self.context, instance['uuid'])


class ConsoleAPITestCase(test.NoDBTestCase):
    """Test case for console API."""

    def setUp(self):
        super(ConsoleAPITestCase, self).setUp()

        self.context = context.RequestContext('fake', 'fake')
        self.console_api = console_api.API()
        self.fake_uuid = '00000000-aaaa-bbbb-cccc-000000000000'
        self.fake_instance = {
            'id': 1,
            'uuid': self.fake_uuid,
            'host': 'fake_host'
        }
        self.fake_console = {
            'pool': {'host': 'fake_host'},
            'id': 'fake_id'
        }

        # Stub DB lookups so the NoDB test case never touches a database.
        def _fake_db_console_get(_ctxt, _console_uuid, _instance_uuid):
            return self.fake_console
        self.stub_out('nova.db.console_get', _fake_db_console_get)

        def _fake_db_console_get_all_by_instance(_ctxt, _instance_uuid,
                                                 columns_to_join):
            return [self.fake_console]
        self.stub_out('nova.db.console_get_all_by_instance',
                      _fake_db_console_get_all_by_instance)

    def test_get_consoles(self):
        console = self.console_api.get_consoles(self.context, self.fake_uuid)
        self.assertEqual(console, [self.fake_console])

    def test_get_console(self):
        console = self.console_api.get_console(self.context, self.fake_uuid,
                                               'fake_id')
        self.assertEqual(console, self.fake_console)

    @mock.patch('nova.console.rpcapi.ConsoleAPI.remove_console')
    def test_delete_console(self, mock_remove):
        self.console_api.delete_console(self.context, self.fake_uuid,
                                        'fake_id')
        mock_remove.assert_called_once_with(self.context, 'fake_id')

    @mock.patch.object(compute_rpcapi.ComputeAPI, 'get_console_topic',
                       return_value='compute.fake_host')
    @mock.patch.object(objects.Instance, 'get_by_uuid')
    def test_create_console(self, mock_get_instance_by_uuid,
                            mock_get_console_topic):
        mock_get_instance_by_uuid.return_value = objects.Instance(
            **self.fake_instance)

        self.console_api.create_console(self.context, self.fake_uuid)
        # The console topic must be resolved against the instance's host.
        mock_get_console_topic.assert_called_once_with(self.context,
                                                       'fake_host')
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for nova websocketproxy.""" import mock from nova.console import websocketproxy from nova import exception from nova import test class NovaProxyRequestHandlerBaseTestCase(test.NoDBTestCase): def setUp(self): super(NovaProxyRequestHandlerBaseTestCase, self).setUp() self.flags(console_allowed_origins = ['allowed-origin-example-1.net', 'allowed-origin-example-2.net']) self.wh = websocketproxy.NovaProxyRequestHandlerBase() self.wh.socket = mock.MagicMock() self.wh.msg = mock.MagicMock() self.wh.do_proxy = mock.MagicMock() self.wh.headers = mock.MagicMock() def _fake_getheader(self, header): if header == 'cookie': return 'token="123-456-789"' elif header == 'Origin': return 'https://example.net:6080' elif header == 'Host': return 'example.net:6080' else: return def _fake_getheader_ipv6(self, header): if header == 'cookie': return 'token="123-456-789"' elif header == 'Origin': return 'https://[2001:db8::1]:6080' elif header == 'Host': return '[2001:db8::1]:6080' else: return def _fake_getheader_bad_token(self, header): if header == 'cookie': return 'token="XXX"' elif header == 'Origin': return 'https://example.net:6080' elif header == 'Host': return 'example.net:6080' else: return def _fake_getheader_bad_origin(self, header): if header == 'cookie': return 'token="123-456-789"' elif header == 'Origin': return 'https://bad-origin-example.net:6080' elif header == 'Host': return 'example.net:6080' else: return def 
_fake_getheader_allowed_origin(self, header): if header == 'cookie': return 'token="123-456-789"' elif header == 'Origin': return 'https://allowed-origin-example-2.net:6080' elif header == 'Host': return 'example.net:6080' else: return def _fake_getheader_blank_origin(self, header): if header == 'cookie': return 'token="123-456-789"' elif header == 'Origin': return '' elif header == 'Host': return 'example.net:6080' else: return def _fake_getheader_no_origin(self, header): if header == 'cookie': return 'token="123-456-789"' elif header == 'Origin': return None elif header == 'Host': return 'any-example.net:6080' else: return def _fake_getheader_http(self, header): if header == 'cookie': return 'token="123-456-789"' elif header == 'Origin': return 'http://example.net:6080' elif header == 'Host': return 'example.net:6080' else: return @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client(self, check_token): check_token.return_value = { 'host': 'node1', 'port': '10000', 'console_type': 'novnc', 'access_url': 'https://example.net:6080' } self.wh.socket.return_value = '' self.wh.path = "http://127.0.0.1/?token=123-456-789" self.wh.headers.getheader = self._fake_getheader self.wh.new_websocket_client() check_token.assert_called_with(mock.ANY, token="123-456-789") self.wh.socket.assert_called_with('node1', 10000, connect=True) self.wh.do_proxy.assert_called_with('') @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_ipv6_url(self, check_token): check_token.return_value = { 'host': 'node1', 'port': '10000', 'console_type': 'novnc', 'access_url': 'https://[2001:db8::1]:6080' } self.wh.socket.return_value = '' self.wh.path = "http://[2001:db8::1]/?token=123-456-789" self.wh.headers.getheader = self._fake_getheader_ipv6 self.wh.new_websocket_client() check_token.assert_called_with(mock.ANY, token="123-456-789") self.wh.socket.assert_called_with('node1', 10000, connect=True) 
self.wh.do_proxy.assert_called_with('') @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_token_invalid(self, check_token): check_token.return_value = False self.wh.path = "http://127.0.0.1/?token=XXX" self.wh.headers.getheader = self._fake_getheader_bad_token self.assertRaises(exception.InvalidToken, self.wh.new_websocket_client) check_token.assert_called_with(mock.ANY, token="XXX") @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_internal_access_path(self, check_token): check_token.return_value = { 'host': 'node1', 'port': '10000', 'internal_access_path': 'vmid', 'console_type': 'novnc', 'access_url': 'https://example.net:6080' } tsock = mock.MagicMock() tsock.recv.return_value = "HTTP/1.1 200 OK\r\n\r\n" self.wh.socket.return_value = tsock self.wh.path = "http://127.0.0.1/?token=123-456-789" self.wh.headers.getheader = self._fake_getheader self.wh.new_websocket_client() check_token.assert_called_with(mock.ANY, token="123-456-789") self.wh.socket.assert_called_with('node1', 10000, connect=True) self.wh.do_proxy.assert_called_with(tsock) @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_internal_access_path_err(self, check_token): check_token.return_value = { 'host': 'node1', 'port': '10000', 'internal_access_path': 'xxx', 'console_type': 'novnc', 'access_url': 'https://example.net:6080' } tsock = mock.MagicMock() tsock.recv.return_value = "HTTP/1.1 500 Internal Server Error\r\n\r\n" self.wh.socket.return_value = tsock self.wh.path = "http://127.0.0.1/?token=123-456-789" self.wh.headers.getheader = self._fake_getheader self.assertRaises(exception.InvalidConnectionInfo, self.wh.new_websocket_client) check_token.assert_called_with(mock.ANY, token="123-456-789") @mock.patch('sys.version_info') @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_py273_good_scheme( self, check_token, 
version_info): version_info.return_value = (2, 7, 3) check_token.return_value = { 'host': 'node1', 'port': '10000', 'console_type': 'novnc', 'access_url': 'https://example.net:6080' } self.wh.socket.return_value = '' self.wh.path = "http://127.0.0.1/?token=123-456-789" self.wh.headers.getheader = self._fake_getheader self.wh.new_websocket_client() check_token.assert_called_with(mock.ANY, token="123-456-789") self.wh.socket.assert_called_with('node1', 10000, connect=True) self.wh.do_proxy.assert_called_with('') @mock.patch('sys.version_info') @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_py273_special_scheme( self, check_token, version_info): version_info.return_value = (2, 7, 3) check_token.return_value = { 'host': 'node1', 'port': '10000', 'console_type': 'novnc' } self.wh.socket.return_value = '' self.wh.path = "ws://127.0.0.1/?token=123-456-789" self.wh.headers.getheader = self._fake_getheader self.assertRaises(exception.NovaException, self.wh.new_websocket_client) @mock.patch('socket.getfqdn') def test_address_string_doesnt_do_reverse_dns_lookup(self, getfqdn): request_mock = mock.MagicMock() request_mock.makefile().readline.side_effect = [ 'GET /vnc.html?token=123-456-789 HTTP/1.1\r\n', '' ] server_mock = mock.MagicMock() client_address = ('8.8.8.8', 54321) handler = websocketproxy.NovaProxyRequestHandler( request_mock, client_address, server_mock) handler.log_message('log message using client address context info') self.assertFalse(getfqdn.called) # no reverse dns look up self.assertEqual(handler.address_string(), '8.8.8.8') # plain address @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_novnc_bad_origin_header(self, check_token): check_token.return_value = { 'host': 'node1', 'port': '10000', 'console_type': 'novnc' } self.wh.path = "http://127.0.0.1/" self.wh.headers.getheader = self._fake_getheader_bad_origin self.assertRaises(exception.ValidationError, 
self.wh.new_websocket_client) @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_novnc_allowed_origin_header(self, check_token): check_token.return_value = { 'host': 'node1', 'port': '10000', 'console_type': 'novnc', 'access_url': 'https://example.net:6080' } self.wh.socket.return_value = '' self.wh.path = "http://127.0.0.1/" self.wh.headers.getheader = self._fake_getheader_allowed_origin self.wh.new_websocket_client() check_token.assert_called_with(mock.ANY, token="123-456-789") self.wh.socket.assert_called_with('node1', 10000, connect=True) self.wh.do_proxy.assert_called_with('') @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_novnc_blank_origin_header(self, check_token): check_token.return_value = { 'host': 'node1', 'port': '10000', 'console_type': 'novnc' } self.wh.path = "http://127.0.0.1/" self.wh.headers.getheader = self._fake_getheader_blank_origin self.assertRaises(exception.ValidationError, self.wh.new_websocket_client) @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_novnc_no_origin_header(self, check_token): check_token.return_value = { 'host': 'node1', 'port': '10000', 'console_type': 'novnc' } self.wh.socket.return_value = '' self.wh.path = "http://127.0.0.1/" self.wh.headers.getheader = self._fake_getheader_no_origin self.wh.new_websocket_client() check_token.assert_called_with(mock.ANY, token="123-456-789") self.wh.socket.assert_called_with('node1', 10000, connect=True) self.wh.do_proxy.assert_called_with('') @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_novnc_https_origin_proto_http(self, check_token): check_token.return_value = { 'host': 'node1', 'port': '10000', 'console_type': 'novnc', 'access_url': 'http://example.net:6080' } self.wh.path = "https://127.0.0.1/" self.wh.headers.getheader = self._fake_getheader self.assertRaises(exception.ValidationError, 
self.wh.new_websocket_client) @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_novnc_https_origin_proto_ws(self, check_token): check_token.return_value = { 'host': 'node1', 'port': '10000', 'console_type': 'serial', 'access_url': 'ws://example.net:6080' } self.wh.path = "https://127.0.0.1/" self.wh.headers.getheader = self._fake_getheader self.assertRaises(exception.ValidationError, self.wh.new_websocket_client) @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token') def test_new_websocket_client_novnc_bad_console_type(self, check_token): check_token.return_value = { 'host': 'node1', 'port': '10000', 'console_type': 'bad-console-type' } self.wh.path = "http://127.0.0.1/" self.wh.headers.getheader = self._fake_getheader self.assertRaises(exception.ValidationError, self.wh.new_websocket_client) nova-13.0.0/nova/tests/unit/console/test_rpcapi.py0000664000567000056710000000430212701407773023323 0ustar jenkinsjenkins00000000000000# Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit Tests for nova.console.rpcapi """ import mock from oslo_config import cfg from nova.console import rpcapi as console_rpcapi from nova import context from nova import test CONF = cfg.CONF class ConsoleRpcAPITestCase(test.NoDBTestCase): def _test_console_api(self, method, rpc_method, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = console_rpcapi.ConsoleAPI() self.assertIsNotNone(rpcapi.client) self.assertEqual(rpcapi.client.target.topic, CONF.console_topic) orig_prepare = rpcapi.client.prepare with test.nested( mock.patch.object(rpcapi.client, rpc_method), mock.patch.object(rpcapi.client, 'prepare'), mock.patch.object(rpcapi.client, 'can_send_version'), ) as ( rpc_mock, prepare_mock, csv_mock ): prepare_mock.return_value = rpcapi.client rpc_mock.return_value = 'foo' if rpc_method == 'call' else None csv_mock.side_effect = ( lambda v: orig_prepare().can_send_version()) retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(retval, rpc_mock.return_value) prepare_mock.assert_called_once_with() rpc_mock.assert_called_once_with(ctxt, method, **kwargs) def test_add_console(self): self._test_console_api('add_console', instance_id='i', rpc_method='cast') def test_remove_console(self): self._test_console_api('remove_console', console_id='i', rpc_method='cast') nova-13.0.0/nova/tests/unit/console/test_serial.py0000664000567000056710000001143512701407773023331 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tests for Serial Console."""

import socket

import mock
import six.moves

from nova.console import serial
from nova import exception
from nova import test


class SerialTestCase(test.NoDBTestCase):
    """Unit tests for serial-console port-range parsing and allocation."""

    def setUp(self):
        super(SerialTestCase, self).setUp()
        # Start every test with an empty allocation table.
        serial.ALLOCATED_PORTS = set()

    def test_get_port_range(self):
        # Default range when nothing is configured.
        start, stop = serial._get_port_range()
        self.assertEqual(10000, start)
        self.assertEqual(20000, stop)

    def test_get_port_range_customized(self):
        self.flags(port_range='30000:40000', group='serial_console')
        start, stop = serial._get_port_range()
        self.assertEqual(30000, start)
        self.assertEqual(40000, stop)

    def test_get_port_range_bad_range(self):
        # start > stop falls back to the default range.
        self.flags(port_range='40000:30000', group='serial_console')
        start, stop = serial._get_port_range()
        self.assertEqual(10000, start)
        self.assertEqual(20000, stop)

    def test_get_port_range_not_numeric(self):
        self.flags(port_range='xxx:yyy', group='serial_console')
        start, stop = serial._get_port_range()
        self.assertEqual(10000, start)
        self.assertEqual(20000, stop)

    def test_get_port_range_invalid_syntax(self):
        # More than one ':' separator is rejected; defaults apply.
        self.flags(port_range='10:20:30', group='serial_console')
        start, stop = serial._get_port_range()
        self.assertEqual(10000, start)
        self.assertEqual(20000, stop)

    @mock.patch('socket.socket')
    def test_verify_port(self, fake_socket):
        s = mock.MagicMock()
        fake_socket.return_value = s

        serial._verify_port('127.0.0.1', 10)

        s.bind.assert_called_once_with(('127.0.0.1', 10))

    @mock.patch('socket.socket')
    def test_verify_port_in_use(self, fake_socket):
        s = mock.MagicMock()
        # A failed bind() means the port is already taken.
        s.bind.side_effect = socket.error()
        fake_socket.return_value = s

        self.assertRaises(
            exception.SocketPortInUseException,
            serial._verify_port, '127.0.0.1', 10)

        s.bind.assert_called_once_with(('127.0.0.1', 10))

    @mock.patch('nova.console.serial._verify_port', lambda x, y: None)
    def test_acquire_port(self):
        start, stop = 15, 20
        self.flags(
            port_range='%d:%d' % (start, stop),
            group='serial_console')

        # Ports are handed out sequentially and tracked per host, so two
        # hosts can each use the full range.
        for port in six.moves.range(start, stop):
            self.assertEqual(port, serial.acquire_port('127.0.0.1'))

        for port in six.moves.range(start, stop):
            self.assertEqual(port, serial.acquire_port('127.0.0.2'))

        self.assertEqual(10, len(serial.ALLOCATED_PORTS))

    @mock.patch('nova.console.serial._verify_port')
    def test_acquire_port_in_use(self, fake_verify_port):
        def port_10000_already_used(host, port):
            if port == 10000 and host == '127.0.0.1':
                raise exception.SocketPortInUseException(
                    port=port, host=host, error="already in use")
        fake_verify_port.side_effect = port_10000_already_used

        # 127.0.0.1 skips its busy port; 127.0.0.2 still gets 10000.
        self.assertEqual(10001, serial.acquire_port('127.0.0.1'))
        self.assertEqual(10000, serial.acquire_port('127.0.0.2'))

        self.assertNotIn(('127.0.0.1', 10000), serial.ALLOCATED_PORTS)
        self.assertIn(('127.0.0.1', 10001), serial.ALLOCATED_PORTS)
        self.assertIn(('127.0.0.2', 10000), serial.ALLOCATED_PORTS)

    @mock.patch('nova.console.serial._verify_port')
    def test_acquire_port_not_able_to_bind_at_any_port(self,
                                                       fake_verify_port):
        # NOTE(review): renamed from test_acquire_port_not_ble_... to fix
        # the typo in the method name; discovery by 'test_' prefix is
        # unaffected.
        start, stop = 15, 20
        self.flags(
            port_range='%d:%d' % (start, stop),
            group='serial_console')

        fake_verify_port.side_effect = (
            exception.SocketPortRangeExhaustedException(host='127.0.0.1'))

        self.assertRaises(
            exception.SocketPortRangeExhaustedException,
            serial.acquire_port, '127.0.0.1')

    def test_release_port(self):
        serial.ALLOCATED_PORTS.add(('127.0.0.1', 100))
        serial.ALLOCATED_PORTS.add(('127.0.0.2', 100))
        self.assertEqual(2, len(serial.ALLOCATED_PORTS))

        serial.release_port('127.0.0.1', 100)
        self.assertEqual(1, len(serial.ALLOCATED_PORTS))

        serial.release_port('127.0.0.2', 100)
        self.assertEqual(0, len(serial.ALLOCATED_PORTS))
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import oslo_messaging as messaging class CastAsCall(fixtures.Fixture): """Make RPC 'cast' behave like a 'call'. This is a little hack for tests that need to know when a cast operation has completed. The idea is that we wait for the RPC endpoint method to complete and return before continuing on the caller. See Ia7f40718533e450f00cd3e7d753ac65755c70588 for more background. """ def __init__(self, stubs): super(CastAsCall, self).__init__() self.stubs = stubs @staticmethod def _stub_out(stubs, obj): orig_prepare = obj.prepare def prepare(self, *args, **kwargs): # Casts with fanout=True would throw errors if its monkeypatched to # the call method, so we must override fanout to False if 'fanout' in kwargs: kwargs['fanout'] = False cctxt = orig_prepare(self, *args, **kwargs) CastAsCall._stub_out(stubs, cctxt) # woo, recurse! return cctxt stubs.Set(obj, 'prepare', prepare) stubs.Set(obj, 'cast', obj.call) def setUp(self): super(CastAsCall, self).setUp() self._stub_out(self.stubs, messaging.RPCClient) nova-13.0.0/nova/tests/unit/test_hacking.py0000664000567000056710000007365312701410011022001 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import textwrap import mock import pep8 from nova.hacking import checks from nova import test class HackingTestCase(test.NoDBTestCase): """This class tests the hacking checks in nova.hacking.checks by passing strings to the check methods like the pep8/flake8 parser would. The parser loops over each line in the file and then passes the parameters to the check method. The parameter names in the check method dictate what type of object is passed to the check method. The parameter types are:: logical_line: A processed line with the following modifications: - Multi-line statements converted to a single line. - Stripped left and right. - Contents of strings replaced with "xxx" of same length. - Comments removed. physical_line: Raw line of text from the input file. lines: a list of the raw lines from the input file tokens: the tokens that contribute to this logical line line_number: line number in the input file total_lines: number of lines in the input file blank_lines: blank lines before this one indent_char: indentation character in this file (" " or "\t") indent_level: indentation (with tabs expanded to multiples of 8) previous_indent_level: indentation on previous line previous_logical: previous logical line filename: Path of the file being run through pep8 When running a test on a check method the return will be False/None if there is no violation in the sample input. If there is an error a tuple is returned with a position in the line, and a message. So to check the result just assertTrue if the check is expected to fail and assertFalse if it should pass. 
""" def test_virt_driver_imports(self): expect = (0, "N311: importing code from other virt drivers forbidden") self.assertEqual(expect, checks.import_no_virt_driver_import_deps( "from nova.virt.libvirt import utils as libvirt_utils", "./nova/virt/xenapi/driver.py")) self.assertEqual(expect, checks.import_no_virt_driver_import_deps( "import nova.virt.libvirt.utils as libvirt_utils", "./nova/virt/xenapi/driver.py")) self.assertIsNone(checks.import_no_virt_driver_import_deps( "from nova.virt.libvirt import utils as libvirt_utils", "./nova/virt/libvirt/driver.py")) self.assertIsNone(checks.import_no_virt_driver_import_deps( "import nova.virt.firewall", "./nova/virt/libvirt/firewall.py")) def test_virt_driver_config_vars(self): self.assertIsInstance(checks.import_no_virt_driver_config_deps( "CONF.import_opt('volume_drivers', " "'nova.virt.libvirt.driver', group='libvirt')", "./nova/virt/xenapi/driver.py"), tuple) self.assertIsNone(checks.import_no_virt_driver_config_deps( "CONF.import_opt('volume_drivers', " "'nova.virt.libvirt.driver', group='libvirt')", "./nova/virt/libvirt/volume.py")) def test_no_vi_headers(self): lines = ['Line 1\n', 'Line 2\n', 'Line 3\n', 'Line 4\n', 'Line 5\n', 'Line 6\n', 'Line 7\n', 'Line 8\n', 'Line 9\n', 'Line 10\n', 'Line 11\n', 'Line 12\n', 'Line 13\n', 'Line14\n', 'Line15\n'] self.assertIsNone(checks.no_vi_headers( "Test string foo", 1, lines)) self.assertEqual(len(list(checks.no_vi_headers( "# vim: et tabstop=4 shiftwidth=4 softtabstop=4", 2, lines))), 2) self.assertIsNone(checks.no_vi_headers( "# vim: et tabstop=4 shiftwidth=4 softtabstop=4", 6, lines)) self.assertIsNone(checks.no_vi_headers( "# vim: et tabstop=4 shiftwidth=4 softtabstop=4", 9, lines)) self.assertEqual(len(list(checks.no_vi_headers( "# vim: et tabstop=4 shiftwidth=4 softtabstop=4", 14, lines))), 2) self.assertIsNone(checks.no_vi_headers( "Test end string for vi", 15, lines)) def test_assert_true_instance(self): self.assertEqual(len(list(checks.assert_true_instance( 
"self.assertTrue(isinstance(e, " "exception.BuildAbortException))"))), 1) self.assertEqual( len(list(checks.assert_true_instance("self.assertTrue()"))), 0) def test_assert_equal_type(self): self.assertEqual(len(list(checks.assert_equal_type( "self.assertEqual(type(als['QuicAssist']), list)"))), 1) self.assertEqual( len(list(checks.assert_equal_type("self.assertTrue()"))), 0) def test_assert_equal_in(self): self.assertEqual(len(list(checks.assert_equal_in( "self.assertEqual(a in b, True)"))), 1) self.assertEqual(len(list(checks.assert_equal_in( "self.assertEqual('str' in 'string', True)"))), 1) self.assertEqual(len(list(checks.assert_equal_in( "self.assertEqual(any(a==1 for a in b), True)"))), 0) self.assertEqual(len(list(checks.assert_equal_in( "self.assertEqual(True, a in b)"))), 1) self.assertEqual(len(list(checks.assert_equal_in( "self.assertEqual(True, 'str' in 'string')"))), 1) self.assertEqual(len(list(checks.assert_equal_in( "self.assertEqual(True, any(a==1 for a in b))"))), 0) self.assertEqual(len(list(checks.assert_equal_in( "self.assertEqual(a in b, False)"))), 1) self.assertEqual(len(list(checks.assert_equal_in( "self.assertEqual('str' in 'string', False)"))), 1) self.assertEqual(len(list(checks.assert_equal_in( "self.assertEqual(any(a==1 for a in b), False)"))), 0) self.assertEqual(len(list(checks.assert_equal_in( "self.assertEqual(False, a in b)"))), 1) self.assertEqual(len(list(checks.assert_equal_in( "self.assertEqual(False, 'str' in 'string')"))), 1) self.assertEqual(len(list(checks.assert_equal_in( "self.assertEqual(False, any(a==1 for a in b))"))), 0) def test_assert_equal_none(self): self.assertEqual(len(list(checks.assert_equal_none( "self.assertEqual(A, None)"))), 1) self.assertEqual(len(list(checks.assert_equal_none( "self.assertEqual(None, A)"))), 1) self.assertEqual( len(list(checks.assert_equal_none("self.assertIsNone()"))), 0) def test_assert_true_or_false_with_in_or_not_in(self): self.assertEqual(len(list(checks.assert_equal_none( 
"self.assertEqual(A, None)"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertTrue(A in B)"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertFalse(A in B)"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertTrue(A not in B)"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertFalse(A not in B)"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertTrue(A in B, 'some message')"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertFalse(A in B, 'some message')"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertTrue(A not in B, 'some message')"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertFalse(A not in B, 'some message')"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertTrue(A in 'some string with spaces')"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertTrue(A in 'some string with spaces')"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertTrue(A in ['1', '2', '3'])"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertTrue(A in [1, 2, 3])"))), 1) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertTrue(any(A > 5 for A in B))"))), 0) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertTrue(any(A > 5 for A in B), 'some message')"))), 0) self.assertEqual(len(list(checks.assert_true_or_false_with_in( "self.assertFalse(some in list1 and some2 in list2)"))), 0) def test_no_translate_debug_logs(self): self.assertEqual(len(list(checks.no_translate_debug_logs( "LOG.debug(_('foo'))", "nova/scheduler/foo.py"))), 1) self.assertEqual(len(list(checks.no_translate_debug_logs( "LOG.debug('foo')", "nova/scheduler/foo.py"))), 0) 
self.assertEqual(len(list(checks.no_translate_debug_logs( "LOG.info(_('foo'))", "nova/scheduler/foo.py"))), 0) def test_no_setting_conf_directly_in_tests(self): self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests( "CONF.option = 1", "nova/tests/test_foo.py"))), 1) self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests( "CONF.group.option = 1", "nova/tests/test_foo.py"))), 1) self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests( "CONF.option = foo = 1", "nova/tests/test_foo.py"))), 1) # Shouldn't fail with comparisons self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests( "CONF.option == 'foo'", "nova/tests/test_foo.py"))), 0) self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests( "CONF.option != 1", "nova/tests/test_foo.py"))), 0) # Shouldn't fail since not in nova/tests/ self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests( "CONF.option = 1", "nova/compute/foo.py"))), 0) def test_log_translations(self): logs = ['audit', 'error', 'info', 'warning', 'critical', 'warn', 'exception'] levels = ['_LI', '_LW', '_LE', '_LC'] debug = "LOG.debug('OK')" self.assertEqual( 0, len(list(checks.validate_log_translations(debug, debug, 'f')))) for log in logs: bad = 'LOG.%s("Bad")' % log self.assertEqual(1, len(list( checks.validate_log_translations(bad, bad, 'f')))) ok = "LOG.%s('OK') # noqa" % log self.assertEqual(0, len(list( checks.validate_log_translations(ok, ok, 'f')))) ok = "LOG.%s(variable)" % log self.assertEqual(0, len(list( checks.validate_log_translations(ok, ok, 'f')))) for level in levels: ok = "LOG.%s(%s('OK'))" % (log, level) self.assertEqual(0, len(list( checks.validate_log_translations(ok, ok, 'f')))) def test_no_mutable_default_args(self): self.assertEqual(1, len(list(checks.no_mutable_default_args( "def get_info_from_bdm(virt_type, bdm, mapping=[])")))) self.assertEqual(0, len(list(checks.no_mutable_default_args( "defined = []")))) self.assertEqual(0, 
len(list(checks.no_mutable_default_args( "defined, undefined = [], {}")))) def test_check_explicit_underscore_import(self): self.assertEqual(len(list(checks.check_explicit_underscore_import( "LOG.info(_('My info message'))", "cinder/tests/other_files.py"))), 1) self.assertEqual(len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder/tests/other_files.py"))), 1) self.assertEqual(len(list(checks.check_explicit_underscore_import( "from cinder.i18n import _", "cinder/tests/other_files.py"))), 0) self.assertEqual(len(list(checks.check_explicit_underscore_import( "LOG.info(_('My info message'))", "cinder/tests/other_files.py"))), 0) self.assertEqual(len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder/tests/other_files.py"))), 0) self.assertEqual(len(list(checks.check_explicit_underscore_import( "from cinder.i18n import _, _LW", "cinder/tests/other_files2.py"))), 0) self.assertEqual(len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder/tests/other_files2.py"))), 0) self.assertEqual(len(list(checks.check_explicit_underscore_import( "_ = translations.ugettext", "cinder/tests/other_files3.py"))), 0) self.assertEqual(len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder/tests/other_files3.py"))), 0) def test_use_jsonutils(self): def __get_msg(fun): msg = ("N324: jsonutils.%(fun)s must be used instead of " "json.%(fun)s" % {'fun': fun}) return [(0, msg)] for method in ('dump', 'dumps', 'load', 'loads'): self.assertEqual( __get_msg(method), list(checks.use_jsonutils("json.%s(" % method, "./nova/virt/xenapi/driver.py"))) self.assertEqual(0, len(list(checks.use_jsonutils("json.%s(" % method, "./plugins/xenserver/script.py")))) self.assertEqual(0, len(list(checks.use_jsonutils("jsonx.%s(" % method, "./nova/virt/xenapi/driver.py")))) self.assertEqual(0, len(list(checks.use_jsonutils("json.dumb", "./nova/virt/xenapi/driver.py")))) # We are patching pep8 so that only 
the check under test is actually # installed. @mock.patch('pep8._checks', {'physical_line': {}, 'logical_line': {}, 'tree': {}}) def _run_check(self, code, checker, filename=None): pep8.register_check(checker) lines = textwrap.dedent(code).strip().splitlines(True) checker = pep8.Checker(filename=filename, lines=lines) checker.check_all() checker.report._deferred_print.sort() return checker.report._deferred_print def _assert_has_errors(self, code, checker, expected_errors=None, filename=None): actual_errors = [e[:3] for e in self._run_check(code, checker, filename)] self.assertEqual(expected_errors or [], actual_errors) def _assert_has_no_errors(self, code, checker, filename=None): self._assert_has_errors(code, checker, filename=filename) def test_str_unicode_exception(self): checker = checks.CheckForStrUnicodeExc code = """ def f(a, b): try: p = str(a) + str(b) except ValueError as e: p = str(e) return p """ errors = [(5, 16, 'N325')] self._assert_has_errors(code, checker, expected_errors=errors) code = """ def f(a, b): try: p = unicode(a) + str(b) except ValueError as e: p = e return p """ self._assert_has_no_errors(code, checker) code = """ def f(a, b): try: p = str(a) + str(b) except ValueError as e: p = unicode(e) return p """ errors = [(5, 20, 'N325')] self._assert_has_errors(code, checker, expected_errors=errors) code = """ def f(a, b): try: p = str(a) + str(b) except ValueError as e: try: p = unicode(a) + unicode(b) except ValueError as ve: p = str(e) + str(ve) p = e return p """ errors = [(8, 20, 'N325'), (8, 29, 'N325')] self._assert_has_errors(code, checker, expected_errors=errors) code = """ def f(a, b): try: p = str(a) + str(b) except ValueError as e: try: p = unicode(a) + unicode(b) except ValueError as ve: p = str(e) + unicode(ve) p = str(e) return p """ errors = [(8, 20, 'N325'), (8, 33, 'N325'), (9, 16, 'N325')] self._assert_has_errors(code, checker, expected_errors=errors) def test_api_version_decorator_check(self): code = """ @some_other_decorator 
@wsgi.api_version("2.5") def my_method(): pass """ self._assert_has_errors(code, checks.check_api_version_decorator, expected_errors=[(2, 0, "N332")]) def test_oslo_assert_raises_regexp(self): code = """ self.assertRaisesRegexp(ValueError, "invalid literal for.*XYZ'$", int, 'XYZ') """ self._assert_has_errors(code, checks.assert_raises_regexp, expected_errors=[(1, 0, "N335")]) def test_api_version_decorator_check_no_errors(self): code = """ class ControllerClass(): @wsgi.api_version("2.5") def my_method(): pass """ self._assert_has_no_errors(code, checks.check_api_version_decorator) def test_trans_add(self): checker = checks.CheckForTransAdd code = """ def fake_tran(msg): return msg _ = fake_tran _LI = _ _LW = _ _LE = _ _LC = _ def f(a, b): msg = _('test') + 'add me' msg = _LI('test') + 'add me' msg = _LW('test') + 'add me' msg = _LE('test') + 'add me' msg = _LC('test') + 'add me' msg = 'add to me' + _('test') return msg """ errors = [(13, 10, 'N326'), (14, 10, 'N326'), (15, 10, 'N326'), (16, 10, 'N326'), (17, 10, 'N326'), (18, 24, 'N326')] self._assert_has_errors(code, checker, expected_errors=errors) code = """ def f(a, b): msg = 'test' + 'add me' return msg """ self._assert_has_no_errors(code, checker) def test_dict_constructor_with_list_copy(self): self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([(i, connect_info[i])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " attrs = dict([(k, _from_json(v))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " type_names = dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( "foo(param=dict((k, v) for k, v in bar.items()))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([[i,i] for i in range(3)])")))) self.assertEqual(1, 
len(list(checks.dict_constructor_with_list_copy( " dd = dict([i,i] for i in range(3))")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " create_kwargs = dict(snapshot=snapshot,")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " self._render_dict(xml, data_el, data.__dict__)")))) def test_check_http_not_implemented(self): code = """ except NotImplementedError: common.raise_http_not_implemented_error() """ filename = "nova/api/openstack/compute/v21/test.py" self._assert_has_no_errors(code, checks.check_http_not_implemented, filename=filename) code = """ except NotImplementedError: msg = _("Unable to set password on instance") raise exc.HTTPNotImplemented(explanation=msg) """ errors = [(3, 4, 'N339')] self._assert_has_errors(code, checks.check_http_not_implemented, expected_errors=errors, filename=filename) filename = "nova/api/openstack/compute/legacy_v2/test.py" self._assert_has_no_errors(code, checks.check_http_not_implemented, filename=filename) def test_check_contextlib_use(self): code = """ with test.nested( mock.patch.object(network_model.NetworkInfo, 'hydrate'), mock.patch.object(objects.InstanceInfoCache, 'save'), ) as ( hydrate_mock, save_mock ) """ filename = "nova/api/openstack/compute/v21/test.py" self._assert_has_no_errors(code, checks.check_no_contextlib_nested, filename=filename) code = """ with contextlib.nested( mock.patch.object(network_model.NetworkInfo, 'hydrate'), mock.patch.object(objects.InstanceInfoCache, 'save'), ) as ( hydrate_mock, save_mock ) """ filename = "nova/api/openstack/compute/legacy_v2/test.py" errors = [(1, 0, 'N341')] self._assert_has_errors(code, checks.check_no_contextlib_nested, expected_errors=errors, filename=filename) def test_check_greenthread_spawns(self): errors = [(1, 0, "N340")] code = "greenthread.spawn(func, arg1, kwarg1=kwarg1)" self._assert_has_errors(code, checks.check_greenthread_spawns, expected_errors=errors) code = "greenthread.spawn_n(func, arg1, 
kwarg1=kwarg1)" self._assert_has_errors(code, checks.check_greenthread_spawns, expected_errors=errors) code = "eventlet.greenthread.spawn(func, arg1, kwarg1=kwarg1)" self._assert_has_errors(code, checks.check_greenthread_spawns, expected_errors=errors) code = "eventlet.spawn(func, arg1, kwarg1=kwarg1)" self._assert_has_errors(code, checks.check_greenthread_spawns, expected_errors=errors) code = "eventlet.spawn_n(func, arg1, kwarg1=kwarg1)" self._assert_has_errors(code, checks.check_greenthread_spawns, expected_errors=errors) code = "nova.utils.spawn(func, arg1, kwarg1=kwarg1)" self._assert_has_no_errors(code, checks.check_greenthread_spawns) code = "nova.utils.spawn_n(func, arg1, kwarg1=kwarg1)" self._assert_has_no_errors(code, checks.check_greenthread_spawns) def test_config_option_regex_match(self): def should_match(code): self.assertTrue(checks.cfg_opt_re.match(code)) def should_not_match(code): self.assertFalse(checks.cfg_opt_re.match(code)) should_match("opt = cfg.StrOpt('opt_name')") should_match("opt = cfg.IntOpt('opt_name')") should_match("opt = cfg.DictOpt('opt_name')") should_match("opt = cfg.Opt('opt_name')") should_match("opts=[cfg.Opt('opt_name')]") should_match(" cfg.Opt('opt_name')") should_not_match("opt_group = cfg.OptGroup('opt_group_name')") def test_check_config_option_in_central_place(self): errors = [(1, 0, "N342")] code = """ opts = [ cfg.StrOpt('random_opt', default='foo', help='I am here to do stuff'), ] """ # option at the right place in the tree self._assert_has_no_errors(code, checks.check_config_option_in_central_place, filename="nova/conf/serial_console.py") # option at a location which is not in scope right now # TODO(markus_z): This is remporary until all config options are # moved to /nova/conf self._assert_has_no_errors(code, checks.check_config_option_in_central_place, filename="nova/dummy/non_existent.py") # option at the wrong place in the tree self._assert_has_errors(code, checks.check_config_option_in_central_place, 
filename="nova/cmd/serialproxy.py", expected_errors=errors) def test_check_doubled_words(self): errors = [(1, 0, "N343")] # Artificial break to stop pep8 detecting the test ! code = "This is the" + " the best comment" self._assert_has_errors(code, checks.check_doubled_words, expected_errors=errors) code = "This is the then best comment" self._assert_has_no_errors(code, checks.check_doubled_words) def test_dict_iteritems(self): self.assertEqual(1, len(list(checks.check_python3_no_iteritems( "obj.iteritems()")))) self.assertEqual(0, len(list(checks.check_python3_no_iteritems( "six.iteritems(ob))")))) def test_dict_iterkeys(self): self.assertEqual(1, len(list(checks.check_python3_no_iterkeys( "obj.iterkeys()")))) self.assertEqual(0, len(list(checks.check_python3_no_iterkeys( "six.iterkeys(ob))")))) def test_dict_itervalues(self): self.assertEqual(1, len(list(checks.check_python3_no_itervalues( "obj.itervalues()")))) self.assertEqual(0, len(list(checks.check_python3_no_itervalues( "six.itervalues(ob))")))) def test_cfg_help_with_enough_text(self): errors = [(1, 0, 'N347')] # Doesn't have help text at all => should raise error code1 = """ opt = cfg.StrOpt("opt1") """ self._assert_has_errors(code1, checks.cfg_help_with_enough_text, expected_errors=errors) # Explicitly sets an empty string => should raise error code2 = """ opt = cfg.StrOpt("opt2", help="") """ self._assert_has_errors(code2, checks.cfg_help_with_enough_text, expected_errors=errors) # Has help text but too few characters => should raise error code3 = """ opt = cfg.StrOpt("opt3", help="meh") """ self._assert_has_errors(code3, checks.cfg_help_with_enough_text, expected_errors=errors) # Has long enough help text => should *not* raise an error code4 = """ opt = cfg.StrOpt("opt4", help="This option does stuff") """ self._assert_has_no_errors(code4, checks.cfg_help_with_enough_text) # OptGroup objects help is optional => should *not* raise error code5 = """ opt_group = cfg.OptGroup(name="group1", title="group 
title") """ self._assert_has_no_errors(code5, checks.cfg_help_with_enough_text) # The help text gets translated code6 = """ opt = cfg.StrOpt("opt6", help=_("help with translation usage")) """ self._assert_has_no_errors(code6, checks.cfg_help_with_enough_text) # The help text uses a paranthesis (weird, but produces a valid string) code7 = """ opt = cfg.StrOpt("opt7", help=("help text uses extra paranthesis")) """ self._assert_has_no_errors(code7, checks.cfg_help_with_enough_text) # Ignore deprecated options. They should be in the release notes code8 = """ opt = cfg.DeprecatedOpt('opt8') """ self._assert_has_no_errors(code8, checks.cfg_help_with_enough_text) code9 = """ opt = cfg.StrOpt("opt9", help=\"\"\" This is multiline help text. \"\"\") """ self._assert_has_no_errors(code9, checks.cfg_help_with_enough_text) def test_no_os_popen(self): code = """ import os foobar_cmd = "foobar -get -beer" answer = os.popen(foobar_cmd).read() if answer == nok": try: os.popen(os.popen('foobar -beer -please')).read() except ValueError: go_home() """ errors = [(4, 0, 'N348'), (8, 8, 'N348')] self._assert_has_errors(code, checks.no_os_popen, expected_errors=errors) nova-13.0.0/nova/tests/unit/test_uuid_sentinels.py0000664000567000056710000000177712701407773023452 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import testtools from oslo_utils import uuidutils from nova.tests import uuidsentinel class TestUUIDSentinels(testtools.TestCase): def test_different_sentinel(self): uuid1 = uuidsentinel.foobar uuid2 = uuidsentinel.barfoo self.assertNotEqual(uuid1, uuid2) def test_returns_uuid(self): self.assertTrue(uuidutils.is_uuid_like(uuidsentinel.foo)) def test_returns_string(self): self.assertIsInstance(uuidsentinel.foo, str) nova-13.0.0/nova/tests/unit/cmd/0000775000567000056710000000000012701410205017516 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/cmd/test_manage.py0000664000567000056710000000354512701407773022406 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from nova.cmd import manage from nova import objects from nova import test class ServiceCommandsTestCase(test.NoDBTestCase): def setUp(self): super(ServiceCommandsTestCase, self).setUp() self.svc_cmds = manage.ServiceCommands() @mock.patch('nova.db.instance_get_all_by_host') @mock.patch.object(objects.ComputeNode, 'get_first_node_by_host_for_old_compat') def test__show_host_resources(self, mock_cn_get, mock_inst_get): resources = {'vcpus': 4, 'memory_mb': 65536, 'local_gb': 100, 'vcpus_used': 1, 'memory_mb_used': 16384, 'local_gb_used': 20} mock_cn_get.return_value = objects.ComputeNode(**resources) mock_inst_get.return_value = [] result = self.svc_cmds._show_host_resources(mock.sentinel.ctxt, mock.sentinel.host) mock_cn_get.assert_called_once_with(mock.sentinel.ctxt, mock.sentinel.host) mock_inst_get.assert_called_once_with(mock.sentinel.ctxt, mock.sentinel.host) self.assertEqual(resources, result['resource']) self.assertEqual({}, result['usage']) nova-13.0.0/nova/tests/unit/cmd/__init__.py0000664000567000056710000000000012701407773021635 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/cmd/test_idmapshift.py0000664000567000056710000006203512701407773023305 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace, Andrew Melton # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import argparse import mock from nova.cmd import idmapshift from nova import test def join_side_effect(root, *args): path = root if root != '/': path += '/' path += '/'.join(args) return path class FakeStat(object): def __init__(self, uid, gid): self.st_uid = uid self.st_gid = gid class BaseTestCase(test.NoDBTestCase): def __init__(self, *args, **kwargs): super(BaseTestCase, self).__init__(*args, **kwargs) self.uid_maps = [(0, 10000, 10), (10, 20000, 1000)] self.gid_maps = [(0, 10000, 10), (10, 20000, 1000)] class FindTargetIDTestCase(BaseTestCase): def test_find_target_id_range_1_first(self): actual_target = idmapshift.find_target_id(0, self.uid_maps, idmapshift.NOBODY_ID, dict()) self.assertEqual(10000, actual_target) def test_find_target_id_inside_range_1(self): actual_target = idmapshift.find_target_id(2, self.uid_maps, idmapshift.NOBODY_ID, dict()) self.assertEqual(10002, actual_target) def test_find_target_id_range_2_first(self): actual_target = idmapshift.find_target_id(10, self.uid_maps, idmapshift.NOBODY_ID, dict()) self.assertEqual(20000, actual_target) def test_find_target_id_inside_range_2(self): actual_target = idmapshift.find_target_id(100, self.uid_maps, idmapshift.NOBODY_ID, dict()) self.assertEqual(20090, actual_target) def test_find_target_id_outside_range(self): actual_target = idmapshift.find_target_id(10000, self.uid_maps, idmapshift.NOBODY_ID, dict()) self.assertEqual(idmapshift.NOBODY_ID, actual_target) def test_find_target_id_no_mappings(self): actual_target = idmapshift.find_target_id(0, [], idmapshift.NOBODY_ID, dict()) self.assertEqual(idmapshift.NOBODY_ID, actual_target) def test_find_target_id_updates_memo(self): memo = dict() idmapshift.find_target_id(0, self.uid_maps, idmapshift.NOBODY_ID, memo) self.assertIn(0, memo) self.assertEqual(10000, memo[0]) def test_find_target_guest_id_greater_than_count(self): uid_maps = [(500, 10000, 10)] # Below range actual_target = idmapshift.find_target_id(499, uid_maps, idmapshift.NOBODY_ID, dict()) 
self.assertEqual(idmapshift.NOBODY_ID, actual_target) # Match actual_target = idmapshift.find_target_id(501, uid_maps, idmapshift.NOBODY_ID, dict()) self.assertEqual(10001, actual_target) # Beyond range actual_target = idmapshift.find_target_id(510, uid_maps, idmapshift.NOBODY_ID, dict()) self.assertEqual(idmapshift.NOBODY_ID, actual_target) class ShiftPathTestCase(BaseTestCase): @mock.patch('os.lchown') @mock.patch('os.lstat') def test_shift_path(self, mock_lstat, mock_lchown): mock_lstat.return_value = FakeStat(0, 0) idmapshift.shift_path('/test/path', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID, dict(), dict()) mock_lstat.assert_has_calls([mock.call('/test/path')]) mock_lchown.assert_has_calls([mock.call('/test/path', 10000, 10000)]) @mock.patch('os.lchown') @mock.patch('os.lstat') def test_shift_path_dry_run(self, mock_lstat, mock_lchown): mock_lstat.return_value = FakeStat(0, 0) idmapshift.shift_path('/test/path', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID, dict(), dict(), dry_run=True) mock_lstat.assert_has_calls([mock.call('/test/path')]) self.assertEqual(0, len(mock_lchown.mock_calls)) @mock.patch('os.lchown') @mock.patch('nova.cmd.idmapshift.print_chown') @mock.patch('os.lstat') def test_shift_path_verbose(self, mock_lstat, mock_print, mock_lchown): mock_lstat.return_value = FakeStat(0, 0) idmapshift.shift_path('/test/path', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID, dict(), dict(), verbose=True) mock_lstat.assert_has_calls([mock.call('/test/path')]) mock_print_call = mock.call('/test/path', 0, 0, 10000, 10000) mock_print.assert_has_calls([mock_print_call]) mock_lchown.assert_has_calls([mock.call('/test/path', 10000, 10000)]) class ShiftDirTestCase(BaseTestCase): @mock.patch('nova.cmd.idmapshift.shift_path') @mock.patch('os.path.join') @mock.patch('os.walk') def test_shift_dir(self, mock_walk, mock_join, mock_shift_path): mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])] mock_join.side_effect = join_side_effect 
idmapshift.shift_dir('/', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) files = ['a', 'b', 'c', 'd'] mock_walk.assert_has_calls([mock.call('/')]) mock_join_calls = [mock.call('/', x) for x in files] mock_join.assert_has_calls(mock_join_calls) args = (self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) kwargs = dict(dry_run=False, verbose=False, uid_memo=dict(), gid_memo=dict()) shift_path_calls = [mock.call('/', *args, **kwargs)] shift_path_calls += [mock.call('/' + x, *args, **kwargs) for x in files] mock_shift_path.assert_has_calls(shift_path_calls) @mock.patch('nova.cmd.idmapshift.shift_path') @mock.patch('os.path.join') @mock.patch('os.walk') def test_shift_dir_dry_run(self, mock_walk, mock_join, mock_shift_path): mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])] mock_join.side_effect = join_side_effect idmapshift.shift_dir('/', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID, dry_run=True) mock_walk.assert_has_calls([mock.call('/')]) files = ['a', 'b', 'c', 'd'] mock_join_calls = [mock.call('/', x) for x in files] mock_join.assert_has_calls(mock_join_calls) args = (self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) kwargs = dict(dry_run=True, verbose=False, uid_memo=dict(), gid_memo=dict()) shift_path_calls = [mock.call('/', *args, **kwargs)] shift_path_calls += [mock.call('/' + x, *args, **kwargs) for x in files] mock_shift_path.assert_has_calls(shift_path_calls) class ConfirmPathTestCase(test.NoDBTestCase): @mock.patch('os.lstat') def test_confirm_path(self, mock_lstat): uid_ranges = [(1000, 1999)] gid_ranges = [(300, 399)] mock_lstat.return_value = FakeStat(1000, 301) result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges, 50000) mock_lstat.assert_has_calls([mock.call('/test/path')]) self.assertTrue(result) @mock.patch('os.lstat') def test_confirm_path_nobody(self, mock_lstat): uid_ranges = [(1000, 1999)] gid_ranges = [(300, 399)] mock_lstat.return_value = FakeStat(50000, 50000) result = idmapshift.confirm_path('/test/path', 
uid_ranges, gid_ranges, 50000) mock_lstat.assert_has_calls([mock.call('/test/path')]) self.assertTrue(result) @mock.patch('os.lstat') def test_confirm_path_uid_mismatch(self, mock_lstat): uid_ranges = [(1000, 1999)] gid_ranges = [(300, 399)] mock_lstat.return_value = FakeStat(0, 301) result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges, 50000) mock_lstat.assert_has_calls([mock.call('/test/path')]) self.assertFalse(result) @mock.patch('os.lstat') def test_confirm_path_gid_mismatch(self, mock_lstat): uid_ranges = [(1000, 1999)] gid_ranges = [(300, 399)] mock_lstat.return_value = FakeStat(1000, 0) result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges, 50000) mock_lstat.assert_has_calls([mock.call('/test/path')]) self.assertFalse(result) @mock.patch('os.lstat') def test_confirm_path_uid_nobody(self, mock_lstat): uid_ranges = [(1000, 1999)] gid_ranges = [(300, 399)] mock_lstat.return_value = FakeStat(50000, 301) result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges, 50000) mock_lstat.assert_has_calls([mock.call('/test/path')]) self.assertTrue(result) @mock.patch('os.lstat') def test_confirm_path_gid_nobody(self, mock_lstat): uid_ranges = [(1000, 1999)] gid_ranges = [(300, 399)] mock_lstat.return_value = FakeStat(1000, 50000) result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges, 50000) mock_lstat.assert_has_calls([mock.call('/test/path')]) self.assertTrue(result) class ConfirmDirTestCase(BaseTestCase): def setUp(self): super(ConfirmDirTestCase, self).setUp() self.uid_map_ranges = idmapshift.get_ranges(self.uid_maps) self.gid_map_ranges = idmapshift.get_ranges(self.gid_maps) @mock.patch('nova.cmd.idmapshift.confirm_path') @mock.patch('os.path.join') @mock.patch('os.walk') def test_confirm_dir(self, mock_walk, mock_join, mock_confirm_path): mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])] mock_join.side_effect = join_side_effect mock_confirm_path.return_value = True idmapshift.confirm_dir('/', 
self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) files = ['a', 'b', 'c', 'd'] mock_walk.assert_has_calls([mock.call('/')]) mock_join_calls = [mock.call('/', x) for x in files] mock_join.assert_has_calls(mock_join_calls) args = (self.uid_map_ranges, self.gid_map_ranges, idmapshift.NOBODY_ID) confirm_path_calls = [mock.call('/', *args)] confirm_path_calls += [mock.call('/' + x, *args) for x in files] mock_confirm_path.assert_has_calls(confirm_path_calls) @mock.patch('nova.cmd.idmapshift.confirm_path') @mock.patch('os.path.join') @mock.patch('os.walk') def test_confirm_dir_short_circuit_root(self, mock_walk, mock_join, mock_confirm_path): mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])] mock_join.side_effect = join_side_effect mock_confirm_path.return_value = False idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) args = (self.uid_map_ranges, self.gid_map_ranges, idmapshift.NOBODY_ID) confirm_path_calls = [mock.call('/', *args)] mock_confirm_path.assert_has_calls(confirm_path_calls) @mock.patch('nova.cmd.idmapshift.confirm_path') @mock.patch('os.path.join') @mock.patch('os.walk') def test_confirm_dir_short_circuit_file(self, mock_walk, mock_join, mock_confirm_path): mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])] mock_join.side_effect = join_side_effect def confirm_path_side_effect(path, *args): if 'a' in path: return False return True mock_confirm_path.side_effect = confirm_path_side_effect idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) mock_walk.assert_has_calls([mock.call('/')]) mock_join.assert_has_calls([mock.call('/', 'a')]) args = (self.uid_map_ranges, self.gid_map_ranges, idmapshift.NOBODY_ID) confirm_path_calls = [mock.call('/', *args), mock.call('/' + 'a', *args)] mock_confirm_path.assert_has_calls(confirm_path_calls) @mock.patch('nova.cmd.idmapshift.confirm_path') @mock.patch('os.path.join') @mock.patch('os.walk') def test_confirm_dir_short_circuit_dir(self, mock_walk, 
mock_join, mock_confirm_path): mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])] mock_join.side_effect = join_side_effect def confirm_path_side_effect(path, *args): if 'c' in path: return False return True mock_confirm_path.side_effect = confirm_path_side_effect idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) files = ['a', 'b', 'c'] mock_walk.assert_has_calls([mock.call('/')]) mock_join_calls = [mock.call('/', x) for x in files] mock_join.assert_has_calls(mock_join_calls) args = (self.uid_map_ranges, self.gid_map_ranges, idmapshift.NOBODY_ID) confirm_path_calls = [mock.call('/', *args)] confirm_path_calls += [mock.call('/' + x, *args) for x in files] mock_confirm_path.assert_has_calls(confirm_path_calls) class IDMapTypeTestCase(test.NoDBTestCase): def test_id_map_type(self): result = idmapshift.id_map_type("1:1:1,2:2:2") self.assertEqual([(1, 1, 1), (2, 2, 2)], result) def test_id_map_type_not_int(self): self.assertRaises(argparse.ArgumentTypeError, idmapshift.id_map_type, "a:1:1") def test_id_map_type_not_proper_format(self): self.assertRaises(argparse.ArgumentTypeError, idmapshift.id_map_type, "1:1") class MainTestCase(BaseTestCase): @mock.patch('nova.cmd.idmapshift.shift_dir') @mock.patch('argparse.ArgumentParser') def test_main(self, mock_parser_class, mock_shift_dir): mock_parser = mock.MagicMock() mock_parser.parse_args.return_value = mock_parser mock_parser.idempotent = False mock_parser.confirm = False mock_parser.path = '/test/path' mock_parser.uid = self.uid_maps mock_parser.gid = self.gid_maps mock_parser.nobody = idmapshift.NOBODY_ID mock_parser.dry_run = False mock_parser.verbose = False mock_parser_class.return_value = mock_parser idmapshift.main() mock_shift_dir_call = mock.call('/test/path', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID, dry_run=False, verbose=False) mock_shift_dir.assert_has_calls([mock_shift_dir_call]) @mock.patch('nova.cmd.idmapshift.shift_dir') 
@mock.patch('nova.cmd.idmapshift.confirm_dir') @mock.patch('argparse.ArgumentParser') def test_main_confirm_dir_idempotent_unshifted(self, mock_parser_class, mock_confirm_dir, mock_shift_dir): mock_parser = mock.MagicMock() mock_parser.parse_args.return_value = mock_parser mock_parser.idempotent = True mock_parser.confirm = False mock_parser.path = '/test/path' mock_parser.uid = self.uid_maps mock_parser.gid = self.gid_maps mock_parser.nobody = idmapshift.NOBODY_ID mock_parser.dry_run = False mock_parser.verbose = False mock_parser_class.return_value = mock_parser mock_confirm_dir.return_value = False idmapshift.main() mock_confirm_dir_call = mock.call('/test/path', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) mock_confirm_dir.assert_has_calls([mock_confirm_dir_call]) mock_shift_dir_call = mock.call('/test/path', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID, dry_run=False, verbose=False) mock_shift_dir.assert_has_calls([mock_shift_dir_call]) @mock.patch('nova.cmd.idmapshift.shift_dir') @mock.patch('nova.cmd.idmapshift.confirm_dir') @mock.patch('argparse.ArgumentParser') def test_main_confirm_dir_idempotent_shifted(self, mock_parser_class, mock_confirm_dir, mock_shift_dir): mock_parser = mock.MagicMock() mock_parser.parse_args.return_value = mock_parser mock_parser.idempotent = True mock_parser.confirm = False mock_parser.path = '/test/path' mock_parser.uid = self.uid_maps mock_parser.gid = self.gid_maps mock_parser.nobody = idmapshift.NOBODY_ID mock_parser.dry_run = False mock_parser.verbose = False mock_parser_class.return_value = mock_parser mock_confirm_dir.return_value = True try: idmapshift.main() except SystemExit as sys_exit: self.assertEqual(sys_exit.code, 0) mock_confirm_dir_call = mock.call('/test/path', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) mock_confirm_dir.assert_has_calls([mock_confirm_dir_call]) mock_shift_dir.assert_has_calls([]) @mock.patch('nova.cmd.idmapshift.shift_dir') @mock.patch('nova.cmd.idmapshift.confirm_dir') 
@mock.patch('argparse.ArgumentParser') def test_main_confirm_dir_confirm_unshifted(self, mock_parser_class, mock_confirm_dir, mock_shift_dir): mock_parser = mock.MagicMock() mock_parser.parse_args.return_value = mock_parser mock_parser.idempotent = False mock_parser.confirm = True mock_parser.exit_on_fail = True mock_parser.path = '/test/path' mock_parser.uid = self.uid_maps mock_parser.gid = self.gid_maps mock_parser.nobody = idmapshift.NOBODY_ID mock_parser.dry_run = False mock_parser.verbose = False mock_parser_class.return_value = mock_parser mock_confirm_dir.return_value = False try: idmapshift.main() except SystemExit as sys_exit: self.assertEqual(sys_exit.code, 1) mock_confirm_dir_call = mock.call('/test/path', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) mock_confirm_dir.assert_has_calls([mock_confirm_dir_call]) mock_shift_dir.assert_has_calls([]) @mock.patch('nova.cmd.idmapshift.shift_dir') @mock.patch('nova.cmd.idmapshift.confirm_dir') @mock.patch('argparse.ArgumentParser') def test_main_confirm_dir_confirm_shifted(self, mock_parser_class, mock_confirm_dir, mock_shift_dir): mock_parser = mock.MagicMock() mock_parser.parse_args.return_value = mock_parser mock_parser.idempotent = False mock_parser.confirm = True mock_parser.exit_on_fail = True mock_parser.path = '/test/path' mock_parser.uid = self.uid_maps mock_parser.gid = self.gid_maps mock_parser.nobody = idmapshift.NOBODY_ID mock_parser.dry_run = False mock_parser.verbose = False mock_parser_class.return_value = mock_parser mock_confirm_dir.return_value = True try: idmapshift.main() except SystemExit as sys_exit: self.assertEqual(sys_exit.code, 0) mock_confirm_dir_call = mock.call('/test/path', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) mock_confirm_dir.assert_has_calls([mock_confirm_dir_call]) mock_shift_dir.assert_has_calls([]) class IntegrationTestCase(BaseTestCase): @mock.patch('os.lchown') @mock.patch('os.lstat') @mock.patch('os.path.join') @mock.patch('os.walk') def 
test_integrated_shift_dir(self, mock_walk, mock_join, mock_lstat, mock_lchown): mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']), ('/tmp/test/d', ['1', '2'], [])] mock_join.side_effect = join_side_effect def lstat(path): stats = { 't': FakeStat(0, 0), 'a': FakeStat(0, 0), 'b': FakeStat(0, 2), 'c': FakeStat(30000, 30000), 'd': FakeStat(100, 100), '1': FakeStat(0, 100), '2': FakeStat(100, 100), } return stats[path[-1]] mock_lstat.side_effect = lstat idmapshift.shift_dir('/tmp/test', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID, verbose=True) lchown_calls = [ mock.call('/tmp/test', 10000, 10000), mock.call('/tmp/test/a', 10000, 10000), mock.call('/tmp/test/b', 10000, 10002), mock.call('/tmp/test/c', idmapshift.NOBODY_ID, idmapshift.NOBODY_ID), mock.call('/tmp/test/d', 20090, 20090), mock.call('/tmp/test/d/1', 10000, 20090), mock.call('/tmp/test/d/2', 20090, 20090), ] mock_lchown.assert_has_calls(lchown_calls) @mock.patch('os.lchown') @mock.patch('os.lstat') @mock.patch('os.path.join') @mock.patch('os.walk') def test_integrated_shift_dir_dry_run(self, mock_walk, mock_join, mock_lstat, mock_lchown): mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']), ('/tmp/test/d', ['1', '2'], [])] mock_join.side_effect = join_side_effect def lstat(path): stats = { 't': FakeStat(0, 0), 'a': FakeStat(0, 0), 'b': FakeStat(0, 2), 'c': FakeStat(30000, 30000), 'd': FakeStat(100, 100), '1': FakeStat(0, 100), '2': FakeStat(100, 100), } return stats[path[-1]] mock_lstat.side_effect = lstat idmapshift.shift_dir('/tmp/test', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID, dry_run=True, verbose=True) self.assertEqual(0, len(mock_lchown.mock_calls)) @mock.patch('os.lstat') @mock.patch('os.path.join') @mock.patch('os.walk') def test_integrated_confirm_dir_shifted(self, mock_walk, mock_join, mock_lstat): mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']), ('/tmp/test/d', ['1', '2'], [])] mock_join.side_effect = join_side_effect def lstat(path): 
stats = { 't': FakeStat(10000, 10000), 'a': FakeStat(10000, 10000), 'b': FakeStat(10000, 10002), 'c': FakeStat(idmapshift.NOBODY_ID, idmapshift.NOBODY_ID), 'd': FakeStat(20090, 20090), '1': FakeStat(10000, 20090), '2': FakeStat(20090, 20090), } return stats[path[-1]] mock_lstat.side_effect = lstat result = idmapshift.confirm_dir('/tmp/test', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) self.assertTrue(result) @mock.patch('os.lstat') @mock.patch('os.path.join') @mock.patch('os.walk') def test_integrated_confirm_dir_unshifted(self, mock_walk, mock_join, mock_lstat): mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']), ('/tmp/test/d', ['1', '2'], [])] mock_join.side_effect = join_side_effect def lstat(path): stats = { 't': FakeStat(0, 0), 'a': FakeStat(0, 0), 'b': FakeStat(0, 2), 'c': FakeStat(30000, 30000), 'd': FakeStat(100, 100), '1': FakeStat(0, 100), '2': FakeStat(100, 100), } return stats[path[-1]] mock_lstat.side_effect = lstat result = idmapshift.confirm_dir('/tmp/test', self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID) self.assertFalse(result) nova-13.0.0/nova/tests/unit/cmd/test_baseproxy.py0000664000567000056710000000676512701410011023154 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from nova.cmd import baseproxy from nova import config from nova.console import websocketproxy from nova import test from nova import version @mock.patch.object(config, 'parse_args', new=lambda *args, **kwargs: None) class BaseProxyTestCase(test.NoDBTestCase): @mock.patch('os.path.exists', return_value=False) # NOTE(mriedem): sys.exit raises TestingException so we can actually exit # the test normally. @mock.patch('sys.exit', side_effect=test.TestingException) def test_proxy_ssl_without_cert(self, mock_exit, mock_exists): self.flags(ssl_only=True) self.assertRaises(test.TestingException, baseproxy.proxy, '0.0.0.0', '6080') mock_exit.assert_called_once_with(-1) @mock.patch('os.path.exists', return_value=False) @mock.patch('sys.exit', side_effect=test.TestingException) def test_proxy_web_dir_does_not_exist(self, mock_exit, mock_exists): self.flags(web='/my/fake/webserver/') self.assertRaises(test.TestingException, baseproxy.proxy, '0.0.0.0', '6080') mock_exit.assert_called_once_with(-1) @mock.patch('os.path.exists', return_value=True) @mock.patch.object(logging, 'setup') @mock.patch.object(gmr.TextGuruMeditation, 'setup_autorun') @mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.__init__', return_value=None) @mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.start_server') def test_proxy(self, mock_start, mock_init, mock_gmr, mock_log, mock_exists): # Force verbose=False so something else testing nova.cmd.baseproxy # doesn't impact the call to mocked NovaWebSocketProxy.__init__. 
self.flags(verbose=False) baseproxy.proxy('0.0.0.0', '6080') mock_log.assert_called_once_with(baseproxy.CONF, 'nova') mock_gmr.mock_assert_called_once_with(version) mock_init.assert_called_once_with( listen_host='0.0.0.0', listen_port='6080', source_is_ipv6=False, verbose=False, cert='self.pem', key=None, ssl_only=False, daemon=False, record=False, traffic=False, web='/usr/share/spice-html5', file_only=True, RequestHandlerClass=websocketproxy.NovaProxyRequestHandler) mock_start.assert_called_once_with() @mock.patch('sys.stderr.write') @mock.patch('os.path.exists', return_value=False) @mock.patch('sys.exit', side_effect=test.TestingException) def test_proxy_exit_with_error(self, mock_exit, mock_exists, mock_stderr): self.flags(ssl_only=True) self.assertRaises(test.TestingException, baseproxy.proxy, '0.0.0.0', '6080') mock_stderr.assert_called_once_with( 'SSL only and self.pem not found\n') mock_exit.assert_called_once_with(-1) nova-13.0.0/nova/tests/unit/test_utils.py0000664000567000056710000015312212701407773021550 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import hashlib import importlib import logging import os import os.path import socket import struct import tempfile import eventlet import mock import netaddr from oslo_concurrency import processutils from oslo_config import cfg from oslo_context import context as common_context from oslo_context import fixture as context_fixture from oslo_utils import encodeutils from oslo_utils import fixture as utils_fixture from oslo_utils import units import six import nova from nova import context from nova import exception from nova import test from nova import utils CONF = cfg.CONF class GenericUtilsTestCase(test.NoDBTestCase): def test_parse_server_string(self): result = utils.parse_server_string('::1') self.assertEqual(('::1', ''), result) result = utils.parse_server_string('[::1]:8773') self.assertEqual(('::1', '8773'), result) result = utils.parse_server_string('2001:db8::192.168.1.1') self.assertEqual(('2001:db8::192.168.1.1', ''), result) result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773') self.assertEqual(('2001:db8::192.168.1.1', '8773'), result) result = utils.parse_server_string('192.168.1.1') self.assertEqual(('192.168.1.1', ''), result) result = utils.parse_server_string('192.168.1.2:8773') self.assertEqual(('192.168.1.2', '8773'), result) result = utils.parse_server_string('192.168.1.3') self.assertEqual(('192.168.1.3', ''), result) result = utils.parse_server_string('www.example.com:8443') self.assertEqual(('www.example.com', '8443'), result) result = utils.parse_server_string('www.example.com') self.assertEqual(('www.example.com', ''), result) # error case result = utils.parse_server_string('www.exa:mple.com:8443') self.assertEqual(('', ''), result) result = utils.parse_server_string('') self.assertEqual(('', ''), result) def test_hostname_unicode_sanitization(self): hostname = u"\u7684.test.example.com" self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_periods(self): hostname = 
"....test.example.com..." self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_dashes(self): hostname = "----test.example.com---" self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_characters(self): hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+" self.assertEqual("91----test-host.example.com-0", utils.sanitize_hostname(hostname)) def test_hostname_translate(self): hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>" self.assertEqual("hello", utils.sanitize_hostname(hostname)) def test_hostname_has_default(self): hostname = u"\u7684hello" defaultname = "Server-1" self.assertEqual("hello", utils.sanitize_hostname(hostname, defaultname)) def test_hostname_empty_has_default(self): hostname = u"\u7684" defaultname = "Server-1" self.assertEqual(defaultname, utils.sanitize_hostname(hostname, defaultname)) def test_hostname_empty_has_default_too_long(self): hostname = u"\u7684" defaultname = "a" * 64 self.assertEqual("a" * 63, utils.sanitize_hostname(hostname, defaultname)) def test_hostname_empty_no_default(self): hostname = u"\u7684" self.assertEqual("", utils.sanitize_hostname(hostname)) def test_hostname_empty_minus_period(self): hostname = "---..." 
self.assertEqual("", utils.sanitize_hostname(hostname)) def test_hostname_with_space(self): hostname = " a b c " self.assertEqual("a-b-c", utils.sanitize_hostname(hostname)) def test_hostname_too_long(self): hostname = "a" * 64 self.assertEqual(63, len(utils.sanitize_hostname(hostname))) def test_hostname_truncated_no_hyphen(self): hostname = "a" * 62 hostname = hostname + '-' + 'a' res = utils.sanitize_hostname(hostname) # we trim to 63 and then trim the trailing dash self.assertEqual(62, len(res)) self.assertFalse(res.endswith('-'), 'The hostname ends with a -') def test_generate_password(self): password = utils.generate_password() self.assertTrue([c for c in password if c in '0123456789']) self.assertTrue([c for c in password if c in 'abcdefghijklmnopqrstuvwxyz']) self.assertTrue([c for c in password if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']) def test_read_file_as_root(self): def fake_execute(*args, **kwargs): if args[1] == 'bad': raise processutils.ProcessExecutionError() return 'fakecontents', None self.stub_out('nova.utils.execute', fake_execute) contents = utils.read_file_as_root('good') self.assertEqual(contents, 'fakecontents') self.assertRaises(exception.FileNotFound, utils.read_file_as_root, 'bad') def test_temporary_chown(self): def fake_execute(*args, **kwargs): if args[0] == 'chown': fake_execute.uid = args[1] self.stub_out('nova.utils.execute', fake_execute) with tempfile.NamedTemporaryFile() as f: with utils.temporary_chown(f.name, owner_uid=2): self.assertEqual(fake_execute.uid, 2) self.assertEqual(fake_execute.uid, os.getuid()) def test_xhtml_escape(self): self.assertEqual('"foo"', utils.xhtml_escape('"foo"')) self.assertEqual(''foo'', utils.xhtml_escape("'foo'")) self.assertEqual('&', utils.xhtml_escape('&')) self.assertEqual('>', utils.xhtml_escape('>')) self.assertEqual('<', utils.xhtml_escape('<')) self.assertEqual('<foo>', utils.xhtml_escape('')) def test_is_valid_ipv6_cidr(self): self.assertTrue(utils.is_valid_ipv6_cidr("2600::/64")) 
self.assertTrue(utils.is_valid_ipv6_cidr( "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254/48")) self.assertTrue(utils.is_valid_ipv6_cidr( "0000:0000:0000:0000:0000:0000:0000:0001/32")) self.assertTrue(utils.is_valid_ipv6_cidr( "0000:0000:0000:0000:0000:0000:0000:0001")) self.assertFalse(utils.is_valid_ipv6_cidr("foo")) self.assertFalse(utils.is_valid_ipv6_cidr("127.0.0.1")) def test_get_shortened_ipv6(self): self.assertEqual("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe", utils.get_shortened_ipv6( "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254")) self.assertEqual("::1", utils.get_shortened_ipv6( "0000:0000:0000:0000:0000:0000:0000:0001")) self.assertEqual("caca::caca:0:babe:201:102", utils.get_shortened_ipv6( "caca:0000:0000:caca:0000:babe:0201:0102")) self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6, "127.0.0.1") self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6, "failure") def test_get_shortened_ipv6_cidr(self): self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr( "2600:0000:0000:0000:0000:0000:0000:0000/64")) self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr( "2600::1/64")) self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6_cidr, "127.0.0.1") self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6_cidr, "failure") def test_safe_ip_format(self): self.assertEqual("[::1]", utils.safe_ip_format("::1")) self.assertEqual("127.0.0.1", utils.safe_ip_format("127.0.0.1")) self.assertEqual("[::ffff:127.0.0.1]", utils.safe_ip_format( "::ffff:127.0.0.1")) self.assertEqual("localhost", utils.safe_ip_format("localhost")) def test_get_hash_str(self): base_str = b"foo" base_unicode = u"foo" value = hashlib.md5(base_str).hexdigest() self.assertEqual( value, utils.get_hash_str(base_str)) self.assertEqual( value, utils.get_hash_str(base_unicode)) def test_use_rootwrap(self): self.flags(disable_rootwrap=False, group='workarounds') self.flags(rootwrap_config='foo') cmd = utils.get_root_helper() 
self.assertEqual('sudo nova-rootwrap foo', cmd) @mock.patch('nova.utils.RootwrapProcessHelper') def test_get_root_helper_proc(self, mock_proc_helper): self.flags(use_rootwrap_daemon=False) self.flags(rootwrap_config="/path/to/conf") utils._get_rootwrap_helper() mock_proc_helper.assert_called_once_with() @mock.patch('nova.utils.RootwrapDaemonHelper') def test_get_root_helper_daemon(self, mock_daemon_helper): conf_path = '/path/to/conf' self.flags(use_rootwrap_daemon=True) self.flags(rootwrap_config=conf_path) utils._get_rootwrap_helper() mock_daemon_helper.assert_called_once_with(conf_path) def test_use_sudo(self): self.flags(disable_rootwrap=True, group='workarounds') cmd = utils.get_root_helper() self.assertEqual('sudo', cmd) def test_ssh_execute(self): expected_args = ('ssh', '-o', 'BatchMode=yes', 'remotehost', 'ls', '-l') with mock.patch('nova.utils.execute') as mock_method: utils.ssh_execute('remotehost', 'ls', '-l') mock_method.assert_called_once_with(*expected_args) class TestCachedFile(test.NoDBTestCase): @mock.patch('os.path.getmtime', return_value=1) def test_read_cached_file(self, getmtime): utils._FILE_CACHE = { '/this/is/a/fake': {"data": 1123, "mtime": 1} } fresh, data = utils.read_cached_file("/this/is/a/fake") fdata = utils._FILE_CACHE['/this/is/a/fake']["data"] self.assertEqual(fdata, data) @mock.patch('os.path.getmtime', return_value=2) def test_read_modified_cached_file(self, getmtime): utils._FILE_CACHE = { '/this/is/a/fake': {"data": 1123, "mtime": 1} } fake_contents = "lorem ipsum" with mock.patch('six.moves.builtins.open', mock.mock_open(read_data=fake_contents)): fresh, data = utils.read_cached_file("/this/is/a/fake") self.assertEqual(data, fake_contents) self.assertTrue(fresh) def test_delete_cached_file(self): filename = '/this/is/a/fake/deletion/of/cached/file' utils._FILE_CACHE = { filename: {"data": 1123, "mtime": 1} } self.assertIn(filename, utils._FILE_CACHE) utils.delete_cached_file(filename) self.assertNotIn(filename, 
class RootwrapDaemonTesetCase(test.NoDBTestCase):
    """Tests for utils.RootwrapDaemonHelper.

    NOTE(review): the class name misspells "TestCase"; it is kept as-is
    because unittest discovers tests per-method, so renaming would only
    churn history for no behavioral gain.
    """

    @mock.patch('oslo_rootwrap.client.Client')
    def test_get_client(self, mock_client):
        mock_conf = mock.MagicMock()
        utils.RootwrapDaemonHelper(mock_conf)
        # The daemon client is launched through sudo with the given config.
        mock_client.assert_called_once_with(
            ["sudo", "nova-rootwrap-daemon", mock_conf])

    @mock.patch('nova.utils.LOG.info')
    def test_execute(self, mock_info):
        mock_conf = mock.MagicMock()
        daemon = utils.RootwrapDaemonHelper(mock_conf)
        daemon.client = mock.MagicMock()
        daemon.client.execute = mock.Mock(return_value=(0, None, None))

        daemon.execute('a', 1, foo='bar', run_as_root=True)
        daemon.client.execute.assert_called_once_with(['a', '1'], None)
        mock_info.assert_has_calls([mock.call(
            u'Executing RootwrapDaemonHelper.execute cmd=[%(cmd)r] '
            u'kwargs=[%(kwargs)r]',
            {'cmd': u'a 1', 'kwargs': {'run_as_root': True, 'foo': 'bar'}})])

    def test_execute_with_kwargs(self):
        mock_conf = mock.MagicMock()
        daemon = utils.RootwrapDaemonHelper(mock_conf)
        daemon.client = mock.MagicMock()
        daemon.client.execute = mock.Mock(return_value=(0, None, None))

        # process_input is forwarded as the second positional argument.
        daemon.execute('a', 1, foo='bar', run_as_root=True,
                       process_input=True)
        daemon.client.execute.assert_called_once_with(['a', '1'], True)

    def test_execute_fail(self):
        mock_conf = mock.MagicMock()
        daemon = utils.RootwrapDaemonHelper(mock_conf)
        daemon.client = mock.MagicMock()
        daemon.client.execute = mock.Mock(return_value=(-2, None, None))

        self.assertRaises(processutils.ProcessExecutionError,
                          daemon.execute, 'b', 2)

    def test_execute_pass_with_check_exit_code(self):
        mock_conf = mock.MagicMock()
        daemon = utils.RootwrapDaemonHelper(mock_conf)
        daemon.client = mock.MagicMock()
        daemon.client.execute = mock.Mock(return_value=(-2, None, None))

        # -2 is whitelisted, so no exception must be raised.
        daemon.execute('b', 2, check_exit_code=[-2])

    def test_execute_fail_with_retry(self):
        mock_conf = mock.MagicMock()
        daemon = utils.RootwrapDaemonHelper(mock_conf)
        daemon.client = mock.MagicMock()
        daemon.client.execute = mock.Mock(return_value=(-2, None, None))

        self.assertRaises(processutils.ProcessExecutionError,
                          daemon.execute, 'b', 2, attempts=2)
        daemon.client.execute.assert_has_calls(
            [mock.call(['b', '2'], None),
             mock.call(['b', '2'], None)])

    @mock.patch('nova.utils.LOG.log')
    def test_execute_fail_and_logging(self, mock_log):
        mock_conf = mock.MagicMock()
        daemon = utils.RootwrapDaemonHelper(mock_conf)
        daemon.client = mock.MagicMock()
        daemon.client.execute = mock.Mock(return_value=(-2, None, None))

        self.assertRaises(processutils.ProcessExecutionError,
                          daemon.execute, 'b', 2,
                          attempts=2,
                          loglevel=logging.CRITICAL,
                          log_errors=processutils.LOG_ALL_ERRORS)
        # Two attempts -> two full "run / returned / details" sequences,
        # the first ending in "Retrying", the second in "Not Retrying".
        mock_log.assert_has_calls(
            [
                mock.call(logging.CRITICAL, u'Running cmd (subprocess): %s',
                          u'b 2'),
                mock.call(logging.CRITICAL,
                          'CMD "%(sanitized_cmd)s" returned: %(return_code)s '
                          'in %(end_time)0.3fs',
                          {'sanitized_cmd': u'b 2', 'return_code': -2,
                           'end_time': mock.ANY}),
                mock.call(logging.CRITICAL,
                          u'%(desc)r\ncommand: %(cmd)r\nexit code: %(code)r'
                          u'\nstdout: %(stdout)r\nstderr: %(stderr)r',
                          {'code': -2, 'cmd': u'b 2', 'stdout': u'None',
                           'stderr': u'None', 'desc': None}),
                mock.call(logging.CRITICAL, u'%r failed. Retrying.', u'b 2'),
                mock.call(logging.CRITICAL, u'Running cmd (subprocess): %s',
                          u'b 2'),
                mock.call(logging.CRITICAL,
                          'CMD "%(sanitized_cmd)s" returned: %(return_code)s '
                          'in %(end_time)0.3fs',
                          {'sanitized_cmd': u'b 2', 'return_code': -2,
                           'end_time': mock.ANY}),
                mock.call(logging.CRITICAL,
                          u'%(desc)r\ncommand: %(cmd)r\nexit code: %(code)r'
                          u'\nstdout: %(stdout)r\nstderr: %(stderr)r',
                          {'code': -2, 'cmd': u'b 2', 'stdout': u'None',
                           'stderr': u'None', 'desc': None}),
                mock.call(logging.CRITICAL, u'%r failed. Not Retrying.',
                          u'b 2')]
        )

    def test_trycmd(self):
        mock_conf = mock.MagicMock()
        daemon = utils.RootwrapDaemonHelper(mock_conf)
        daemon.client = mock.MagicMock()
        daemon.client.execute = mock.Mock(return_value=(0, None, None))

        daemon.trycmd('a', 1, foo='bar', run_as_root=True)
        daemon.client.execute.assert_called_once_with(['a', '1'], None)

    def test_trycmd_with_kwargs(self):
        mock_conf = mock.MagicMock()
        daemon = utils.RootwrapDaemonHelper(mock_conf)
        daemon.execute = mock.Mock(return_value=('out', 'err'))

        daemon.trycmd('a', 1, foo='bar', run_as_root=True,
                      loglevel=logging.WARN,
                      log_errors=True,
                      process_input=True,
                      delay_on_retry=False,
                      attempts=5,
                      check_exit_code=[200])
        daemon.execute.assert_called_once_with('a', 1, attempts=5,
                                               check_exit_code=[200],
                                               delay_on_retry=False,
                                               foo='bar',
                                               log_errors=True, loglevel=30,
                                               process_input=True,
                                               run_as_root=True)

    def test_trycmd_fail(self):
        mock_conf = mock.MagicMock()
        daemon = utils.RootwrapDaemonHelper(mock_conf)
        daemon.client = mock.MagicMock()
        daemon.client.execute = mock.Mock(return_value=(-2, None, None))

        expected_err = six.text_type('''\
Unexpected error while running command.
Command: a 1
Exit code: -2''')

        out, err = daemon.trycmd('a', 1, foo='bar', run_as_root=True)
        daemon.client.execute.assert_called_once_with(['a', '1'], None)
        self.assertIn(expected_err, err)

    def test_trycmd_fail_with_retry(self):
        # NOTE: renamed from test_trycmd_fail_with_rety ("rety" typo).
        mock_conf = mock.MagicMock()
        daemon = utils.RootwrapDaemonHelper(mock_conf)
        daemon.client = mock.MagicMock()
        daemon.client.execute = mock.Mock(return_value=(-2, None, None))

        expected_err = six.text_type('''\
Unexpected error while running command.
Command: a 1
Exit code: -2''')

        out, err = daemon.trycmd('a', 1, foo='bar', run_as_root=True,
                                 attempts=3)
        self.assertIn(expected_err, err)
        daemon.client.execute.assert_has_calls(
            [mock.call(['a', '1'], None),
             mock.call(['a', '1'], None),
             mock.call(['a', '1'], None)])
class VPNPingTestCase(test.NoDBTestCase):
    """Unit tests for utils.vpn_ping()."""

    def setUp(self):
        super(VPNPingTestCase, self).setUp()
        self.port = 'fake'
        self.address = 'fake'
        self.session_id = 0x1234
        self.fmt = '!BQxxxxxQxxxx'

    def fake_reply_packet(self, pkt_id=0x40):
        # Build a server reply with the given packet id and our session id.
        return struct.pack(self.fmt, pkt_id, 0x0, self.session_id)

    def setup_socket(self, mock_socket, return_value, side_effect=None):
        socket_obj = mock.MagicMock()
        if side_effect is not None:
            socket_obj.recv.side_effect = side_effect
        else:
            socket_obj.recv.return_value = return_value
        mock_socket.return_value = socket_obj

    def _ping(self):
        # All tests ping the same fake endpoint with the same session id.
        return utils.vpn_ping(self.address, self.port,
                              session_id=self.session_id)

    @mock.patch.object(socket, 'socket')
    def test_vpn_ping_timeout(self, mock_socket):
        """Server doesn't reply within timeout."""
        self.setup_socket(mock_socket, None, socket.timeout)
        self.assertFalse(self._ping())

    @mock.patch.object(socket, 'socket')
    def test_vpn_ping_bad_len(self, mock_socket):
        """Test a short/invalid server reply."""
        self.setup_socket(mock_socket, 'fake_reply')
        self.assertFalse(self._ping())

    @mock.patch.object(socket, 'socket')
    def test_vpn_ping_bad_id(self, mock_socket):
        """Server sends an unknown packet ID."""
        self.setup_socket(mock_socket, self.fake_reply_packet(pkt_id=0x41))
        self.assertFalse(self._ping())

    @mock.patch.object(socket, 'socket')
    def test_vpn_ping_ok(self, mock_socket):
        """A well-formed reply with the expected id counts as success."""
        self.setup_socket(mock_socket, self.fake_reply_packet())
        self.assertTrue(self._ping())
class MonkeyPatchTestCase(test.NoDBTestCase):
    """Unit test for utils.monkey_patch()."""

    def setUp(self):
        super(MonkeyPatchTestCase, self).setUp()
        self.example_package = 'nova.tests.unit.monkey_patch_example.'
        # Patch only example_a with the example decorator; example_b is
        # deliberately left untouched so we can check both outcomes.
        self.flags(
            monkey_patch=True,
            monkey_patch_modules=[self.example_package + 'example_a' + ':'
                                  + self.example_package
                                  + 'example_decorator'])

    def test_monkey_patch(self):
        utils.monkey_patch()
        nova.tests.unit.monkey_patch_example.CALLED_FUNCTION = []
        from nova.tests.unit.monkey_patch_example import example_a
        from nova.tests.unit.monkey_patch_example import example_b

        # Both modules must still behave normally after patching.
        self.assertEqual('Example function', example_a.example_function_a())
        instance_a = example_a.ExampleClassA()
        instance_a.example_method()
        self.assertEqual(8, instance_a.example_method_add(3, 5))

        self.assertEqual('Example function', example_b.example_function_b())
        instance_b = example_b.ExampleClassB()
        instance_b.example_method()
        self.assertEqual(8, instance_b.example_method_add(3, 5))

        called = nova.tests.unit.monkey_patch_example.CALLED_FUNCTION
        # example_a was patched, so its calls were recorded...
        prefix_a = self.example_package + 'example_a.'
        for name in ('example_function_a',
                     'ExampleClassA.example_method',
                     'ExampleClassA.example_method_add'):
            self.assertIn(prefix_a + name, called)
        # ...while example_b's calls were not.
        prefix_b = self.example_package + 'example_b.'
        for name in ('example_function_b',
                     'ExampleClassB.example_method',
                     'ExampleClassB.example_method_add'):
            self.assertNotIn(prefix_b + name, called)
class MonkeyPatchDefaultTestCase(test.NoDBTestCase):
    """Unit test for default monkey_patch_modules value."""

    def setUp(self):
        super(MonkeyPatchDefaultTestCase, self).setUp()
        self.flags(monkey_patch=True)

    def test_monkey_patch_default_mod(self):
        # Each monkey_patch_modules entry is "<module>:<decorator>".
        # Verify both halves of every default entry resolve to real
        # importable objects.
        for entry in CONF.monkey_patch_modules:
            target_module, _, decorator_path = entry.partition(':')
            # The module to be patched must be importable.
            importlib.import_module(target_module)
            # The decorator must exist in its module.
            decorator_module, _, decorator_name = (
                decorator_path.rpartition('.'))
            getattr(importlib.import_module(decorator_module),
                    decorator_name)
class AuditPeriodTest(test.NoDBTestCase):
    """Tests for utils.last_completed_audit_period() across all units
    and with offsets before/after the frozen "current" time."""

    def setUp(self):
        super(AuditPeriodTest, self).setUp()
        # a fairly random time to test with
        self.useFixture(utils_fixture.TimeFixture(
            datetime.datetime(second=23, minute=12, hour=8,
                              day=5, month=3, year=2012)))

    def _check(self, unit, expected_begin, expected_end):
        # Shared assertion helper: the audit period for `unit` must span
        # exactly [expected_begin, expected_end).
        begin, end = utils.last_completed_audit_period(unit=unit)
        self.assertEqual(expected_begin, begin)
        self.assertEqual(expected_end, end)

    def test_hour(self):
        self._check('hour',
                    datetime.datetime(hour=7, day=5, month=3, year=2012),
                    datetime.datetime(hour=8, day=5, month=3, year=2012))

    def test_hour_with_offset_before_current(self):
        self._check('hour@10',
                    datetime.datetime(minute=10, hour=7, day=5, month=3,
                                      year=2012),
                    datetime.datetime(minute=10, hour=8, day=5, month=3,
                                      year=2012))

    def test_hour_with_offset_after_current(self):
        self._check('hour@30',
                    datetime.datetime(minute=30, hour=6, day=5, month=3,
                                      year=2012),
                    datetime.datetime(minute=30, hour=7, day=5, month=3,
                                      year=2012))

    def test_day(self):
        self._check('day',
                    datetime.datetime(day=4, month=3, year=2012),
                    datetime.datetime(day=5, month=3, year=2012))

    def test_day_with_offset_before_current(self):
        self._check('day@6',
                    datetime.datetime(hour=6, day=4, month=3, year=2012),
                    datetime.datetime(hour=6, day=5, month=3, year=2012))

    def test_day_with_offset_after_current(self):
        self._check('day@10',
                    datetime.datetime(hour=10, day=3, month=3, year=2012),
                    datetime.datetime(hour=10, day=4, month=3, year=2012))

    def test_month(self):
        self._check('month',
                    datetime.datetime(day=1, month=2, year=2012),
                    datetime.datetime(day=1, month=3, year=2012))

    def test_month_with_offset_before_current(self):
        self._check('month@2',
                    datetime.datetime(day=2, month=2, year=2012),
                    datetime.datetime(day=2, month=3, year=2012))

    def test_month_with_offset_after_current(self):
        self._check('month@15',
                    datetime.datetime(day=15, month=1, year=2012),
                    datetime.datetime(day=15, month=2, year=2012))

    def test_year(self):
        self._check('year',
                    datetime.datetime(day=1, month=1, year=2011),
                    datetime.datetime(day=1, month=1, year=2012))

    def test_year_with_offset_before_current(self):
        self._check('year@2',
                    datetime.datetime(day=1, month=2, year=2011),
                    datetime.datetime(day=1, month=2, year=2012))

    def test_year_with_offset_after_current(self):
        self._check('year@6',
                    datetime.datetime(day=1, month=6, year=2010),
                    datetime.datetime(day=1, month=6, year=2011))
class MkfsTestCase(test.NoDBTestCase):
    """utils.mkfs() must shell out to the right mkfs/mkswap invocation."""

    @mock.patch('nova.utils.execute')
    def test_mkfs_ext4(self, fake_execute):
        utils.mkfs('ext4', '/my/block/dev')
        # ext4 gets -F to force formatting a non-partition device.
        fake_execute.assert_called_once_with(
            'mkfs', '-t', 'ext4', '-F', '/my/block/dev', run_as_root=False)

    @mock.patch('nova.utils.execute')
    def test_mkfs_msdos(self, fake_execute):
        utils.mkfs('msdos', '/my/msdos/block/dev')
        fake_execute.assert_called_once_with(
            'mkfs', '-t', 'msdos', '/my/msdos/block/dev', run_as_root=False)

    @mock.patch('nova.utils.execute')
    def test_mkfs_swap(self, fake_execute):
        utils.mkfs('swap', '/my/swap/block/dev')
        # swap is special-cased to mkswap rather than mkfs -t swap.
        fake_execute.assert_called_once_with(
            'mkswap', '/my/swap/block/dev', run_as_root=False)

    @mock.patch('nova.utils.execute')
    def test_mkfs_ext4_withlabel(self, fake_execute):
        utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
        fake_execute.assert_called_once_with(
            'mkfs', '-t', 'ext4', '-F', '-L', 'ext4-vol', '/my/block/dev',
            run_as_root=False)

    @mock.patch('nova.utils.execute')
    def test_mkfs_msdos_withlabel(self, fake_execute):
        utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
        # msdos labels use -n, not -L.
        fake_execute.assert_called_once_with(
            'mkfs', '-t', 'msdos', '-n', 'msdos-vol', '/my/msdos/block/dev',
            run_as_root=False)

    @mock.patch('nova.utils.execute')
    def test_mkfs_swap_withlabel(self, fake_execute):
        utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol')
        fake_execute.assert_called_once_with(
            'mkswap', '-L', 'swap-vol', '/my/swap/block/dev',
            run_as_root=False)
class LastBytesTestCase(test.NoDBTestCase):
    """Test the last_bytes() utility method."""

    def setUp(self):
        super(LastBytesTestCase, self).setUp()
        self.f = six.BytesIO(b'1234567890')

    def test_truncated(self):
        self.f.seek(0, os.SEEK_SET)
        out, remaining = utils.last_bytes(self.f, 5)
        self.assertEqual(b'67890', out)
        # assertGreater reports the actual value on failure, unlike the
        # previous assertTrue(remaining > 0).
        self.assertGreater(remaining, 0)

    def test_read_all(self):
        self.f.seek(0, os.SEEK_SET)
        out, remaining = utils.last_bytes(self.f, 1000)
        self.assertEqual(b'1234567890', out)
        # Nothing left once the whole file fits in the request.
        self.assertLessEqual(remaining, 0)

    def test_seek_too_far_real_file(self):
        # BytesIO doesn't raise IOError if you seek past the start of the
        # file, so use a real file to exercise that error path.
        # (The old comment said "StringIO ... see past" - the fixture is a
        # BytesIO and the operation is a seek.)
        with tempfile.TemporaryFile() as flo:
            content = b'1234567890'
            flo.write(content)
            self.assertEqual((content, 0), utils.last_bytes(flo, 1000))
class StringLengthTestCase(test.NoDBTestCase):
    """check_string_length() returns None on success and raises
    InvalidInput for non-strings or out-of-range lengths."""

    def test_check_string_length(self):
        self.assertIsNone(utils.check_string_length(
            'test', 'name', max_length=255))
        # Non-string, too-short and too-long inputs all fail.
        for value, limits in ((11, {'max_length': 255}),
                              ('', {'min_length': 1}),
                              ('a' * 256, {'max_length': 255})):
            self.assertRaises(exception.InvalidInput,
                              utils.check_string_length,
                              value, 'name', **limits)

    def test_check_string_length_noname(self):
        # Same checks, but without supplying a field name.
        self.assertIsNone(utils.check_string_length(
            'test', max_length=255))
        for value, limits in ((11, {'max_length': 255}),
                              ('', {'min_length': 1}),
                              ('a' * 256, {'max_length': 255})):
            self.assertRaises(exception.InvalidInput,
                              utils.check_string_length,
                              value, **limits)
class AutoDiskConfigUtilTestCase(test.NoDBTestCase):
    # Tests for utils.is_auto_disk_config_disabled().
    # NOTE(review): the "Disabled " input (capitalized, trailing space)
    # suggests the helper matches "disabled" ignoring case and
    # surrounding whitespace -- confirm against nova.utils before
    # relying on that.
    def test_is_auto_disk_config_disabled(self):
        self.assertTrue(utils.is_auto_disk_config_disabled("Disabled "))

    def test_is_auto_disk_config_disabled_none(self):
        # None (value absent) does not count as disabled.
        self.assertFalse(utils.is_auto_disk_config_disabled(None))

    def test_is_auto_disk_config_disabled_false(self):
        # Only "disabled" disables; other strings such as "false" do not.
        self.assertFalse(utils.is_auto_disk_config_disabled("false"))
class GetSystemMetadataFromImageTestCase(test.NoDBTestCase):
    """Tests for utils.get_system_metadata_from_image()."""

    def get_image(self):
        return {
            "id": "fake-image",
            "name": "fake-name",
            "min_ram": 1,
            "min_disk": 1,
            "disk_format": "raw",
            "container_format": "bare",
        }

    def get_flavor(self):
        return {
            "id": "fake.flavor",
            "root_gb": 10,
        }

    def test_base_image_properties(self):
        image = self.get_image()
        sys_meta = utils.get_system_metadata_from_image(image)

        # Every inheritable key is copied over with the image-prop prefix.
        for prop in utils.SM_INHERITABLE_KEYS:
            self.assertEqual(image[prop],
                             sys_meta.get(utils.SM_IMAGE_PROP_PREFIX + prop))

        # And nothing beyond the inheritable keys makes it through.
        self.assertEqual(len(utils.SM_INHERITABLE_KEYS), len(sys_meta))

    def test_inherit_image_properties(self):
        image = self.get_image()
        image["properties"] = {"foo1": "bar", "foo2": "baz"}
        sys_meta = utils.get_system_metadata_from_image(image)

        # Custom image properties are all inherited.
        for prop, expected in six.iteritems(image["properties"]):
            self.assertEqual(expected,
                             sys_meta[utils.SM_IMAGE_PROP_PREFIX + prop])

    def test_skip_image_properties(self):
        image = self.get_image()
        image["properties"] = {
            "foo1": "bar", "foo2": "baz",
            "mappings": "wizz",
            "img_block_device_mapping": "eek",
        }
        sys_meta = utils.get_system_metadata_from_image(image)

        for prop, expected in six.iteritems(image["properties"]):
            sys_key = utils.SM_IMAGE_PROP_PREFIX + prop
            if prop in utils.SM_SKIP_KEYS:
                # Block-device related keys must never be inherited.
                self.assertNotIn(sys_key, sys_meta)
            else:
                self.assertEqual(expected, sys_meta[sys_key])

    def test_vhd_min_disk_image(self):
        image = self.get_image()
        flavor = self.get_flavor()
        image["disk_format"] = "vhd"
        sys_meta = utils.get_system_metadata_from_image(image, flavor)

        # For vhd images min_disk is taken from the flavor's root_gb.
        self.assertEqual(flavor["root_gb"],
                         sys_meta[utils.SM_IMAGE_PROP_PREFIX + "min_disk"])

    def test_dont_inherit_empty_values(self):
        image = self.get_image()
        for prop in utils.SM_INHERITABLE_KEYS:
            image[prop] = None
        sys_meta = utils.get_system_metadata_from_image(image)

        # Empty (None) properties are dropped, not inherited.
        for prop in utils.SM_INHERITABLE_KEYS:
            self.assertNotIn(utils.SM_IMAGE_PROP_PREFIX + prop, sys_meta)
class GetImageFromSystemMetadataTestCase(test.NoDBTestCase):
    """Tests for utils.get_image_from_system_metadata()."""

    def get_system_metadata(self):
        return {
            "image_min_ram": 1,
            "image_min_disk": 1,
            "image_disk_format": "raw",
            "image_container_format": "bare",
        }

    def test_image_from_system_metadata(self):
        sys_meta = self.get_system_metadata()
        sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"
        sys_meta["%soo2" % utils.SM_IMAGE_PROP_PREFIX] = "baz"
        sys_meta["%simg_block_device_mapping" %
                 utils.SM_IMAGE_PROP_PREFIX] = "eek"

        image = utils.get_image_from_system_metadata(sys_meta)

        # Inheritable keys come back as top-level image keys.
        for prop in utils.SM_INHERITABLE_KEYS:
            self.assertEqual(image[prop],
                             sys_meta.get(utils.SM_IMAGE_PROP_PREFIX + prop))

        # Everything else is reconstructed under "properties"...
        self.assertIn("properties", image)
        for prop in image["properties"]:
            sys_key = utils.SM_IMAGE_PROP_PREFIX + prop
            self.assertEqual(sys_meta[sys_key], image["properties"][prop])
        # ...except block-device mappings, which are dropped.
        self.assertNotIn("img_block_device_mapping", image["properties"])

    def test_dont_inherit_empty_values(self):
        sys_meta = self.get_system_metadata()
        for prop in utils.SM_INHERITABLE_KEYS:
            sys_meta[utils.SM_IMAGE_PROP_PREFIX + prop] = None

        image = utils.get_image_from_system_metadata(sys_meta)

        # Empty (None) values must not reappear on the image.
        for prop in utils.SM_INHERITABLE_KEYS:
            self.assertNotIn(prop, image)
class GetImageMetadataFromVolumeTestCase(test.NoDBTestCase):
    """Tests for utils.get_image_metadata_from_volume()."""

    def test_inherit_image_properties(self):
        props = {"fake_prop": "fake_value"}
        image_meta = utils.get_image_metadata_from_volume(
            {"volume_image_metadata": props})
        self.assertEqual(props, image_meta["properties"])

    def test_image_size(self):
        image_meta = utils.get_image_metadata_from_volume({"size": 10})
        # Volume sizes are expressed in GiB, image sizes in bytes.
        self.assertEqual(10 * units.Gi, image_meta["size"])

    def test_image_status(self):
        image_meta = utils.get_image_metadata_from_volume({})
        # Volume-backed images always report as active.
        self.assertEqual("active", image_meta["status"])

    def test_values_conversion(self):
        props = {"min_ram": "5", "min_disk": "7"}
        image_meta = utils.get_image_metadata_from_volume(
            {"volume_image_metadata": props})
        # String values are coerced back to integers.
        self.assertEqual(5, image_meta["min_ram"])
        self.assertEqual(7, image_meta["min_disk"])

    def test_suppress_not_image_properties(self):
        props = {"min_ram": "256", "min_disk": "128",
                 "image_id": "fake_id", "image_name": "fake_name",
                 "container_format": "ami", "disk_format": "ami",
                 "size": "1234", "checksum": "fake_checksum"}
        volume = {"volume_image_metadata": props}
        image_meta = utils.get_image_metadata_from_volume(volume)
        # Reserved image attributes are stripped from "properties".
        self.assertEqual({}, image_meta["properties"])
        self.assertEqual(0, image_meta["size"])
        # volume's properties should not be touched
        self.assertNotEqual({}, props)
self._assert_filtering(rl, {'key': 'split', 'value': 'banana'}, []) # Multiple values # Only i2 should have tags with key 'baz' and values in the set # ['quux', 'wibble'] self._assert_filtering(rl, {'key': 'baz', 'value': ['quux', 'wibble']}, [i22]) # But when specified as two different filters, no tags should be # returned. This is because, the filter will mean "return tags which # have (key=baz AND value=quux) AND (key=baz AND value=wibble) self._assert_filtering(rl, [{'key': 'baz', 'value': 'quux'}, {'key': 'baz', 'value': 'wibble'}], []) # Test for regex self._assert_filtering(rl, {'value': '\\Aqu..*\\Z(?s)'}, [i22]) # Make sure bug #1365887 is fixed i1['metadata']['key3'] = 'a' self._assert_filtering(rl, {'value': 'banana'}, []) class SafeTruncateTestCase(test.NoDBTestCase): def test_exception_to_dict_with_long_message_3_bytes(self): # Generate Chinese byte string whose length is 300. This Chinese UTF-8 # character occupies 3 bytes. After truncating, the byte string length # should be 255. msg = u'\u8d75' * 100 truncated_msg = utils.safe_truncate(msg, 255) byte_message = encodeutils.safe_encode(truncated_msg) self.assertEqual(255, len(byte_message)) def test_exception_to_dict_with_long_message_2_bytes(self): # Generate Russian byte string whose length is 300. This Russian UTF-8 # character occupies 2 bytes. After truncating, the byte string length # should be 254. 
class SafeTruncateTestCase(test.NoDBTestCase):
    """Tests for utils.safe_truncate(), which truncates on UTF-8 byte
    length without splitting a multi-byte character.

    NOTE: the methods were previously named test_exception_to_dict_* -
    copy-paste residue from the exception tests; renamed to describe what
    they actually exercise.
    """

    def test_safe_truncate_with_long_message_3_bytes(self):
        # Generate Chinese byte string whose length is 300. This Chinese
        # UTF-8 character occupies 3 bytes. 255 is divisible by 3, so the
        # truncated byte string length is exactly 255.
        msg = u'\u8d75' * 100
        truncated_msg = utils.safe_truncate(msg, 255)
        byte_message = encodeutils.safe_encode(truncated_msg)
        self.assertEqual(255, len(byte_message))

    def test_safe_truncate_with_long_message_2_bytes(self):
        # Generate Russian byte string whose length is 300. This Russian
        # UTF-8 character occupies 2 bytes. 255 is odd, so truncation
        # backs off to 254 bytes rather than split a character in half.
        msg = encodeutils.safe_decode('\xd0\x92' * 150)
        truncated_msg = utils.safe_truncate(msg, 255)
        byte_message = encodeutils.safe_encode(truncated_msg)
        self.assertEqual(254, len(byte_message))
class UT8TestCase(test.NoDBTestCase):
    """Tests for utils.utf8()."""

    def test_none_value(self):
        # None passes through untouched.
        self.assertIsNone(utils.utf8(None))

    def test_bytes_value(self):
        payload = b"fake data"
        encoded = utils.utf8(payload)
        # Already-bytes input is returned with its type unchanged.
        self.assertIsInstance(encoded, type(payload))
        self.assertEqual(payload, encoded)

    def test_not_text_type(self):
        # Non-text input is stringified then encoded.
        encoded = utils.utf8(1)
        self.assertEqual(b"1", encoded)
        self.assertIsInstance(encoded, six.binary_type)

    def test_text_type_with_encoding(self):
        # Round-tripping through utf8() preserves non-ASCII text.
        original = 'test\u2026config'
        self.assertEqual(original, utils.utf8(original).decode("utf-8"))
class TestWeigher(test.NoDBTestCase):
    def test_no_multiplier(self):
        # A weigher without an explicit multiplier defaults to 1.0.
        class FakeWeigher(weights.BaseWeigher):
            def _weigh_object(self, *args, **kwargs):
                pass

        self.assertEqual(1.0, FakeWeigher().weight_multiplier())

    def test_no_weight_object(self):
        # _weigh_object is abstract: omitting it makes the class
        # uninstantiable.
        class FakeWeigher(weights.BaseWeigher):
            def weight_multiplier(self, *args, **kwargs):
                pass

        self.assertRaises(TypeError, FakeWeigher)

    def test_normalization(self):
        # (input weights, expected normalized weights, minval, maxval)
        scenarios = (
            ((), (), None, None),
            ((0.0, 0.0), (0.0, 0.0), None, None),
            ((1.0, 1.0), (0.0, 0.0), None, None),
            ((20.0, 50.0), (0.0, 1.0), None, None),
            ((20.0, 50.0), (0.0, 0.375), None, 100.0),
            ((20.0, 50.0), (0.4, 1.0), 0.0, None),
            ((20.0, 50.0), (0.2, 0.5), 0.0, 100.0),
        )
        for raw, expected, lower, upper in scenarios:
            normalized = weights.normalize(raw, minval=lower, maxval=upper)
            self.assertEqual(expected, tuple(normalized))

    @mock.patch('nova.weights.BaseWeigher.weigh_objects')
    def test_only_one_host(self, mock_weigh):
        host_states = [fakes.FakeHostState('host1', 'node1',
                                           {'free_ram_mb': 512})]
        handler = scheduler_weights.HostWeightHandler()
        weighed = handler.get_weighed_objects([ram.RAMWeigher()],
                                              host_states, {})
        self.assertEqual(1, len(weighed))
        self.assertEqual('host1', weighed[0].obj.host)
        # With a single host there is nothing to compare against, so
        # weighing is skipped entirely.
        self.assertFalse(mock_weigh.called)
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for remote procedure calls using queue """ import sys import mock from mox3 import mox from oslo_concurrency import processutils from oslo_config import cfg from oslo_service import service as _service import testtools from nova import exception from nova import manager from nova import objects from nova import rpc from nova import service from nova import test from nova.tests.unit import utils from nova import wsgi test_service_opts = [ cfg.StrOpt("fake_manager", default="nova.tests.unit.test_service.FakeManager", help="Manager for testing"), cfg.StrOpt("test_service_listen", default='127.0.0.1', help="Host to bind test service to"), cfg.IntOpt("test_service_listen_port", default=0, help="Port number to bind test service to"), ] CONF = cfg.CONF CONF.register_opts(test_service_opts) class FakeManager(manager.Manager): """Fake manager for tests.""" def test_method(self): return 'manager' class ExtendedService(service.Service): def test_method(self): return 'service' class ServiceManagerTestCase(test.NoDBTestCase): """Test cases for Services.""" def test_message_gets_to_manager(self): serv = service.Service('test', 'test', 'test', 'nova.tests.unit.test_service.FakeManager') self.assertEqual('manager', serv.test_method()) def test_override_manager_method(self): serv = ExtendedService('test', 'test', 'test', 'nova.tests.unit.test_service.FakeManager') self.assertEqual('service', serv.test_method()) def 
test_service_with_min_down_time(self): # TODO(hanlind): This really tests code in the servicegroup api. self.flags(service_down_time=10, report_interval=10) service.Service('test', 'test', 'test', 'nova.tests.unit.test_service.FakeManager') self.assertEqual(25, CONF.service_down_time) class ServiceTestCase(test.NoDBTestCase): """Test cases for Services.""" def setUp(self): super(ServiceTestCase, self).setUp() self.host = 'foo' self.binary = 'nova-fake' self.topic = 'fake' self.flags(use_local=True, group='conductor') def test_create(self): # NOTE(vish): Create was moved out of mox replay to make sure that # the looping calls are created in StartService. app = service.Service.create(host=self.host, binary=self.binary, topic=self.topic) self.assertTrue(app) def _service_start_mocks(self): self.mox.StubOutWithMock(objects.Service, 'create') self.mox.StubOutWithMock(objects.Service, 'get_by_host_and_binary') objects.Service.get_by_host_and_binary(mox.IgnoreArg(), self.host, self.binary) objects.Service.create() def test_init_and_start_hooks(self): self.manager_mock = self.mox.CreateMock(FakeManager) self.mox.StubOutWithMock(sys.modules[__name__], 'FakeManager', use_mock_anything=True) self.mox.StubOutWithMock(self.manager_mock, 'init_host') self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook') self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook') FakeManager(host=self.host).AndReturn(self.manager_mock) self.manager_mock.service_name = self.topic self.manager_mock.additional_endpoints = [] # init_host is called before any service record is created self.manager_mock.init_host() self._service_start_mocks() # pre_start_hook is called after service record is created, # but before RPC consumer is created self.manager_mock.pre_start_hook() # post_start_hook is called after RPC consumer is created. 
self.manager_mock.post_start_hook() self.mox.ReplayAll() serv = service.Service(self.host, self.binary, self.topic, 'nova.tests.unit.test_service.FakeManager') serv.start() def _test_service_check_create_race(self, ex): self.manager_mock = self.mox.CreateMock(FakeManager) self.mox.StubOutWithMock(sys.modules[__name__], 'FakeManager', use_mock_anything=True) self.mox.StubOutWithMock(self.manager_mock, 'init_host') self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook') self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook') self.mox.StubOutWithMock(objects.Service, 'create') self.mox.StubOutWithMock(objects.Service, 'get_by_host_and_binary') FakeManager(host=self.host).AndReturn(self.manager_mock) # init_host is called before any service record is created self.manager_mock.init_host() objects.Service.get_by_host_and_binary(mox.IgnoreArg(), self.host, self.binary) objects.Service.create().AndRaise(ex) class TestException(Exception): pass objects.Service.get_by_host_and_binary( mox.IgnoreArg(), self.host, self.binary).AndRaise(TestException()) self.mox.ReplayAll() serv = service.Service(self.host, self.binary, self.topic, 'nova.tests.unit.test_service.FakeManager') self.assertRaises(TestException, serv.start) def test_service_check_create_race_topic_exists(self): ex = exception.ServiceTopicExists(host='foo', topic='bar') self._test_service_check_create_race(ex) def test_service_check_create_race_binary_exists(self): ex = exception.ServiceBinaryExists(host='foo', binary='bar') self._test_service_check_create_race(ex) def test_parent_graceful_shutdown(self): self.manager_mock = self.mox.CreateMock(FakeManager) self.mox.StubOutWithMock(sys.modules[__name__], 'FakeManager', use_mock_anything=True) self.mox.StubOutWithMock(self.manager_mock, 'init_host') self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook') self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook') self.mox.StubOutWithMock(_service.Service, 'stop') 
FakeManager(host=self.host).AndReturn(self.manager_mock) self.manager_mock.service_name = self.topic self.manager_mock.additional_endpoints = [] # init_host is called before any service record is created self.manager_mock.init_host() self._service_start_mocks() # pre_start_hook is called after service record is created, # but before RPC consumer is created self.manager_mock.pre_start_hook() # post_start_hook is called after RPC consumer is created. self.manager_mock.post_start_hook() _service.Service.stop() self.mox.ReplayAll() serv = service.Service(self.host, self.binary, self.topic, 'nova.tests.unit.test_service.FakeManager') serv.start() serv.stop() @mock.patch('nova.servicegroup.API') @mock.patch('nova.objects.service.Service.get_by_host_and_binary') def test_parent_graceful_shutdown_with_cleanup_host( self, mock_svc_get_by_host_and_binary, mock_API): mock_manager = mock.Mock() serv = service.Service(self.host, self.binary, self.topic, 'nova.tests.unit.test_service.FakeManager') serv.manager = mock_manager serv.manager.additional_endpoints = [] serv.start() serv.manager.init_host.assert_called_with() serv.stop() serv.manager.cleanup_host.assert_called_with() @mock.patch('nova.servicegroup.API') @mock.patch('nova.objects.service.Service.get_by_host_and_binary') @mock.patch.object(rpc, 'get_server') def test_service_stop_waits_for_rpcserver( self, mock_rpc, mock_svc_get_by_host_and_binary, mock_API): serv = service.Service(self.host, self.binary, self.topic, 'nova.tests.unit.test_service.FakeManager') serv.start() serv.stop() serv.rpcserver.start.assert_called_once_with() serv.rpcserver.stop.assert_called_once_with() serv.rpcserver.wait.assert_called_once_with() def test_reset(self): serv = service.Service(self.host, self.binary, self.topic, 'nova.tests.unit.test_service.FakeManager') with mock.patch.object(serv.manager, 'reset') as mock_reset: serv.reset() mock_reset.assert_called_once_with() class TestWSGIService(test.NoDBTestCase): def setUp(self): 
super(TestWSGIService, self).setUp() self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) @mock.patch('nova.objects.Service.get_by_host_and_binary') @mock.patch('nova.objects.Service.create') def test_service_start_creates_record(self, mock_create, mock_get): mock_get.return_value = None test_service = service.WSGIService("test_service") test_service.start() self.assertTrue(mock_create.called) @mock.patch('nova.objects.Service.get_by_host_and_binary') @mock.patch('nova.objects.Service.create') def test_service_start_does_not_create_record(self, mock_create, mock_get): test_service = service.WSGIService("test_service") test_service.start() self.assertFalse(mock_create.called) @mock.patch('nova.objects.Service.get_by_host_and_binary') def test_service_random_port(self, mock_get): test_service = service.WSGIService("test_service") test_service.start() self.assertNotEqual(0, test_service.port) test_service.stop() def test_workers_set_default(self): test_service = service.WSGIService("osapi_compute") self.assertEqual(test_service.workers, processutils.get_worker_count()) def test_workers_set_good_user_setting(self): CONF.set_override('osapi_compute_workers', 8) test_service = service.WSGIService("osapi_compute") self.assertEqual(test_service.workers, 8) def test_workers_set_zero_user_setting(self): CONF.set_override('osapi_compute_workers', 0) test_service = service.WSGIService("osapi_compute") # If a value less than 1 is used, defaults to number of procs available self.assertEqual(test_service.workers, processutils.get_worker_count()) def test_service_start_with_illegal_workers(self): CONF.set_override("osapi_compute_workers", -1) self.assertRaises(exception.InvalidInput, service.WSGIService, "osapi_compute") def test_openstack_compute_api_workers_set_default(self): test_service = service.WSGIService("openstack_compute_api_v2") self.assertEqual(test_service.workers, processutils.get_worker_count()) def 
test_openstack_compute_api_workers_set_good_user_setting(self): CONF.set_override('osapi_compute_workers', 8) test_service = service.WSGIService("openstack_compute_api_v2") self.assertEqual(test_service.workers, 8) def test_openstack_compute_api_workers_set_zero_user_setting(self): CONF.set_override('osapi_compute_workers', 0) test_service = service.WSGIService("openstack_compute_api_v2") # If a value less than 1 is used, defaults to number of procs available self.assertEqual(test_service.workers, processutils.get_worker_count()) def test_openstack_compute_api_service_start_with_illegal_workers(self): CONF.set_override("osapi_compute_workers", -1) self.assertRaises(exception.InvalidInput, service.WSGIService, "openstack_compute_api_v2") @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support") @mock.patch('nova.objects.Service.get_by_host_and_binary') def test_service_random_port_with_ipv6(self, mock_get): CONF.set_default("test_service_listen", "::1") test_service = service.WSGIService("test_service") test_service.start() self.assertEqual("::1", test_service.host) self.assertNotEqual(0, test_service.port) test_service.stop() @mock.patch('nova.objects.Service.get_by_host_and_binary') def test_reset_pool_size_to_default(self, mock_get): test_service = service.WSGIService("test_service") test_service.start() # Stopping the service, which in turn sets pool size to 0 test_service.stop() self.assertEqual(test_service.server._pool.size, 0) # Resetting pool size to default test_service.reset() test_service.start() self.assertEqual(test_service.server._pool.size, CONF.wsgi_default_pool_size) class TestLauncher(test.NoDBTestCase): @mock.patch.object(_service, 'launch') def test_launch_app(self, mock_launch): service._launcher = None service.serve(mock.sentinel.service) mock_launch.assert_called_once_with(mock.ANY, mock.sentinel.service, workers=None) @mock.patch.object(_service, 'launch') def test_launch_app_with_workers(self, mock_launch): service._launcher = 
None service.serve(mock.sentinel.service, workers=mock.sentinel.workers) mock_launch.assert_called_once_with(mock.ANY, mock.sentinel.service, workers=mock.sentinel.workers) @mock.patch.object(_service, 'launch') def test_launch_app_more_than_once_raises(self, mock_launch): service._launcher = None service.serve(mock.sentinel.service) self.assertRaises(RuntimeError, service.serve, mock.sentinel.service) nova-13.0.0/nova/tests/unit/api_samples_test_base/0000775000567000056710000000000012701410205023301 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/api_samples_test_base/__init__.py0000664000567000056710000000000012701407773025420 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/api_samples_test_base/test_compare_result.py0000664000567000056710000003761112701407773027766 0ustar jenkinsjenkins00000000000000# Copyright 2015 HPE, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock import testtools from nova import test from nova.tests.functional import api_samples_test_base class TestCompareResult(test.NoDBTestCase): """Provide test coverage for result comparison logic in functional tests. _compare_result two types of comparisons, template data and sample data. Template data means the response is checked against a regex that is referenced by the template name. The template name is specified in the format %(name) Sample data is a normal value comparison. 
""" def getApiSampleTestBaseHelper(self): """Build an instance without running any unwanted test methods""" # NOTE(auggy): TestCase takes a "test" method name to run in __init__ # calling this way prevents additional test methods from running ast_instance = api_samples_test_base.ApiSampleTestBase('setUp') # required by ApiSampleTestBase ast_instance.api_major_version = 'v2' ast_instance._project_id = 'True' # automagically create magic methods usually handled by test classes ast_instance.compute = mock.MagicMock() ast_instance.subs = ast_instance._get_regexes() return ast_instance def setUp(self): super(TestCompareResult, self).setUp() self.ast = self.getApiSampleTestBaseHelper() def test_bare_strings_match(self): """compare 2 bare strings that match""" sample_data = u'foo' response_data = u'foo' result = self.ast._compare_result( expected=sample_data, result=response_data, result_str="Test") # NOTE(auggy): _compare_result will not return a matched value in the # case of bare strings. If they don't match it will throw an exception, # otherwise it returns "None". 
self.assertEqual( expected=None, observed=result, message='Check _compare_result of 2 bare strings') def test_bare_strings_no_match(self): """check 2 bare strings that don't match""" sample_data = u'foo' response_data = u'bar' with testtools.ExpectedException(api_samples_test_base.NoMatch): self.ast._compare_result( expected=sample_data, result=response_data, result_str="Test") def test_template_strings_match(self): """compare 2 template strings (contain %) that match""" template_data = u'%(id)s' response_data = u'858f295a-8543-45fa-804a-08f8356d616d' result = self.ast._compare_result( expected=template_data, result=response_data, result_str="Test") self.assertEqual( expected=response_data, observed=result, message='Check _compare_result of 2 template strings') def test_template_strings_no_match(self): """check 2 template strings (contain %) that don't match""" template_data = u'%(id)s' response_data = u'$58f295a-8543-45fa-804a-08f8356d616d' with testtools.ExpectedException(api_samples_test_base.NoMatch): self.ast._compare_result( expected=template_data, result=response_data, result_str="Test") # TODO(auggy): _compare_result needs a consistent return value # In some cases it returns the value if it matched, in others it returns # None. In all cases, it throws an exception if there's no match. 
def test_bare_int_match(self): """check 2 bare ints that match""" sample_data = 42 response_data = 42 result = self.ast._compare_result( expected=sample_data, result=response_data, result_str="Test") self.assertEqual( expected=None, observed=result, message='Check _compare_result of 2 bare ints') def test_bare_int_no_match(self): """check 2 bare ints that don't match""" sample_data = 42 response_data = 43 with testtools.ExpectedException(api_samples_test_base.NoMatch): self.ast._compare_result( expected=sample_data, result=response_data, result_str="Test") # TODO(auggy): _compare_result needs a consistent return value def test_template_int_match(self): """check template int against string containing digits""" template_data = u'%(int)s' response_data = u'42' result = self.ast._compare_result( expected=template_data, result=response_data, result_str="Test") self.assertEqual( expected=None, observed=result, message='Check _compare_result of template ints') def test_template_int_no_match(self): """check template int against a string containing no digits""" template_data = u'%(int)s' response_data = u'foo' with testtools.ExpectedException(api_samples_test_base.NoMatch): self.ast._compare_result( expected=template_data, result=response_data, result_str="Test") def test_template_int_value(self): """check an int value of a template int throws exception""" # template_data = u'%(int_test)' # response_data = 42 # use an int instead of a string as the subs value local_subs = copy.deepcopy(self.ast.subs) local_subs.update({'int_test': 42}) with testtools.ExpectedException(TypeError): self.ast.subs = local_subs # TODO(auggy): _compare_result needs a consistent return value def test_dict_match(self): """check 2 matching dictionaries""" template_data = { u'server': { u'id': u'%(id)s', u'adminPass': u'%(password)s' } } response_data = { u'server': { u'id': u'858f295a-8543-45fa-804a-08f8356d616d', u'adminPass': u'4ZQ3bb6WYbC2'} } result = self.ast._compare_result( 
expected=template_data, result=response_data, result_str="Test") self.assertEqual( expected=u'858f295a-8543-45fa-804a-08f8356d616d', observed=result, message='Check _compare_result of 2 dictionaries') def test_dict_no_match_value(self): """check 2 dictionaries where one has a different value""" sample_data = { u'server': { u'id': u'858f295a-8543-45fa-804a-08f8356d616d', u'adminPass': u'foo' } } response_data = { u'server': { u'id': u'858f295a-8543-45fa-804a-08f8356d616d', u'adminPass': u'4ZQ3bb6WYbC2'} } with testtools.ExpectedException(api_samples_test_base.NoMatch): self.ast._compare_result( expected=sample_data, result=response_data, result_str="Test") def test_dict_no_match_extra_key(self): """check 2 dictionaries where one has an extra key""" template_data = { u'server': { u'id': u'%(id)s', u'adminPass': u'%(password)s', u'foo': u'foo' } } response_data = { u'server': { u'id': u'858f295a-8543-45fa-804a-08f8356d616d', u'adminPass': u'4ZQ3bb6WYbC2'} } with testtools.ExpectedException(api_samples_test_base.NoMatch): self.ast._compare_result( expected=template_data, result=response_data, result_str="Test") def test_dict_result_type_mismatch(self): """check expected is a dictionary and result is not a dictionary""" template_data = { u'server': { u'id': u'%(id)s', u'adminPass': u'%(password)s', } } response_data = u'foo' with testtools.ExpectedException(api_samples_test_base.NoMatch): self.ast._compare_result( expected=template_data, result=response_data, result_str="Test") # TODO(auggy): _compare_result needs a consistent return value def test_list_match(self): """check 2 matching lists""" template_data = { u'links': [ { u'href': u'%(versioned_compute_endpoint)s/server/%(uuid)s', u'rel': u'self' }, { u'href': u'%(compute_endpoint)s/servers/%(uuid)s', u'rel': u'bookmark' } ] } response_data = { u'links': [ { u'href': (u'http://openstack.example.com/v2/%s/server/' '858f295a-8543-45fa-804a-08f8356d616d' % api_samples_test_base.PROJECT_ID ), u'rel': u'self' }, { 
u'href': (u'http://openstack.example.com/%s/servers/' '858f295a-8543-45fa-804a-08f8356d616d' % api_samples_test_base.PROJECT_ID ), u'rel': u'bookmark' } ] } result = self.ast._compare_result( expected=template_data, result=response_data, result_str="Test") self.assertEqual( expected=None, observed=result, message='Check _compare_result of 2 lists') def test_list_match_extra_item_result(self): """check extra list items in result """ template_data = { u'links': [ { u'href': u'%(versioned_compute_endpoint)s/server/%(uuid)s', u'rel': u'self' }, { u'href': u'%(compute_endpoint)s/servers/%(uuid)s', u'rel': u'bookmark' } ] } response_data = { u'links': [ { u'href': (u'http://openstack.example.com/v2/openstack/server/' '858f295a-8543-45fa-804a-08f8356d616d'), u'rel': u'self' }, { u'href': (u'http://openstack.example.com/openstack/servers/' '858f295a-8543-45fa-804a-08f8356d616d'), u'rel': u'bookmark' }, u'foo' ] } with testtools.ExpectedException(api_samples_test_base.NoMatch): self.ast._compare_result( expected=template_data, result=response_data, result_str="Test") def test_list_match_extra_item_template(self): """check extra list items in template """ template_data = { u'links': [ { u'href': u'%(versioned_compute_endpoint)s/server/%(uuid)s', u'rel': u'self' }, { u'href': u'%(compute_endpoint)s/servers/%(uuid)s', u'rel': u'bookmark' }, u'foo' # extra field ] } response_data = { u'links': [ { u'href': (u'http://openstack.example.com/v2/openstack/server/' '858f295a-8543-45fa-804a-08f8356d616d'), u'rel': u'self' }, { u'href': (u'http://openstack.example.com/openstack/servers/' '858f295a-8543-45fa-804a-08f8356d616d'), u'rel': u'bookmark' } ] } with testtools.ExpectedException(api_samples_test_base.NoMatch): self.ast._compare_result( expected=template_data, result=response_data, result_str="Test") def test_list_no_match(self): """check 2 matching lists""" template_data = { u'things': [ { u'foo': u'bar', u'baz': 0 }, { u'foo': u'zod', u'baz': 1 } ] } response_data = { 
u'things': [ { u'foo': u'bar', u'baz': u'0' }, { u'foo': u'zod', u'baz': 1 } ] } # TODO(auggy): This error returns "extra list items" # it should show the item/s in the list that didn't match with testtools.ExpectedException(api_samples_test_base.NoMatch): self.ast._compare_result( expected=template_data, result=response_data, result_str="Test") def test_none_match(self): """check that None matches""" sample_data = None response_data = None result = self.ast._compare_result( expected=sample_data, result=response_data, result_str="Test") # NOTE(auggy): _compare_result will not return a matched value in the # case of bare strings. If they don't match it will throw an exception, # otherwise it returns "None". self.assertEqual( expected=None, observed=result, message='Check _compare_result of None') def test_none_no_match(self): """check expected none and non-None response don't match""" sample_data = None response_data = u'bar' with testtools.ExpectedException(api_samples_test_base.NoMatch): self.ast._compare_result( expected=sample_data, result=response_data, result_str="Test") def test_none_result_no_match(self): """check result none and expected non-None response don't match""" sample_data = u'foo' response_data = None with testtools.ExpectedException(api_samples_test_base.NoMatch): self.ast._compare_result( expected=sample_data, result=response_data, result_str="Test") def test_template_no_subs_key(self): """check an int value of a template int throws exception""" template_data = u'%(foo)' response_data = 'bar' with testtools.ExpectedException(KeyError): self.ast._compare_result( expected=template_data, result=response_data, result_str="Test") nova-13.0.0/nova/tests/unit/fake_pci_device_pools.py0000664000567000056710000000266112701407773023646 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.objects import pci_device_pool # This represents the format that PCI device pool info was stored in the DB # before this info was made into objects. fake_pool_dict = { 'product_id': 'fake-product', 'vendor_id': 'fake-vendor', 'numa_node': 1, 't1': 'v1', 't2': 'v2', 'count': 2, } fake_pool = pci_device_pool.PciDevicePool(count=5, product_id='foo', vendor_id='bar', numa_node=0, tags={'t1': 'v1', 't2': 'v2'}) fake_pool_primitive = fake_pool.obj_to_primitive() fake_pool_list = pci_device_pool.PciDevicePoolList(objects=[fake_pool]) fake_pool_list_primitive = fake_pool_list.obj_to_primitive() nova-13.0.0/nova/tests/unit/monkey_patch_example/0000775000567000056710000000000012701410205023147 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/monkey_patch_example/example_a.py0000664000567000056710000000162512701407773025500 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Example Module A for testing utils.monkey_patch().""" def example_function_a(): return 'Example function' class ExampleClassA(object): def example_method(self): return 'Example method' def example_method_add(self, arg1, arg2): return arg1 + arg2 nova-13.0.0/nova/tests/unit/monkey_patch_example/__init__.py0000664000567000056710000000212712701407773025302 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module for testing utils.monkey_patch().""" CALLED_FUNCTION = [] def example_decorator(name, function): """decorator for notify which is used from utils.monkey_patch() :param name: name of the function :param function: - object of the function :returns: function -- decorated function """ def wrapped_func(*args, **kwarg): CALLED_FUNCTION.append(name) return function(*args, **kwarg) return wrapped_func nova-13.0.0/nova/tests/unit/monkey_patch_example/example_b.py0000664000567000056710000000162612701407773025502 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module B for testing utils.monkey_patch().""" def example_function_b(): return 'Example function' class ExampleClassB(object): def example_method(self): return 'Example method' def example_method_add(self, arg1, arg2): return arg1 + arg2 nova-13.0.0/nova/tests/unit/test_notifier.py0000664000567000056710000000422412701407773022225 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import rpc from nova import test class TestNotifier(test.NoDBTestCase): @mock.patch('oslo_messaging.get_transport') @mock.patch('oslo_messaging.get_notification_transport') @mock.patch('oslo_messaging.Notifier') def test_notification_format_affects_notification_driver(self, mock_notifier, mock_noti_trans, mock_transport): conf = mock.Mock() cases = { 'unversioned': [ mock.call(mock.ANY, serializer=mock.ANY), mock.call(mock.ANY, serializer=mock.ANY, driver='noop')], 'both': [ mock.call(mock.ANY, serializer=mock.ANY), mock.call(mock.ANY, serializer=mock.ANY, topic='versioned_notifications')], 'versioned': [ mock.call(mock.ANY, serializer=mock.ANY, driver='noop'), mock.call(mock.ANY, serializer=mock.ANY, topic='versioned_notifications')]} for config in cases: mock_notifier.reset_mock() mock_notifier.side_effect = ['first', 'second'] conf.notification_format = config rpc.init(conf) self.assertEqual(cases[config], mock_notifier.call_args_list) self.assertEqual('first', rpc.LEGACY_NOTIFIER) self.assertEqual('second', rpc.NOTIFIER) nova-13.0.0/nova/tests/unit/network/0000775000567000056710000000000012701410205020444 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/network/interfaces-override.template0000664000567000056710000000235012701407773026161 0ustar jenkinsjenkins00000000000000# Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). 
# The loopback network interface auto lo iface lo inet loopback {% for ifc in interfaces %} auto {{ ifc.name }} iface {{ ifc.name }} inet static address {{ ifc.address }} netmask {{ ifc.netmask }} broadcast {{ ifc.broadcast }} {% if ifc.gateway %} gateway {{ ifc.gateway }} {% endif %} {% if ifc.dns %} dns-nameservers {{ ifc.dns }} {% endif %} {% for route in ifc.routes %} post-up ip route add {{ route.cidr }} via {{ route.gateway }} dev {{ ifc.name }} pre-down ip route del {{ route.cidr }} via {{ route.gateway }} dev {{ ifc.name }} {% endfor %} {% if use_ipv6 %} {% if libvirt_virt_type == 'lxc' %} {% if ifc.address_v6 %} post-up ip -6 addr add {{ ifc.address_v6 }}/{{ifc.netmask_v6 }} dev ${IFACE} {% endif %} {% if ifc.gateway_v6 %} post-up ip -6 route add default via {{ ifc.gateway_v6 }} dev ${IFACE} {% endif %} {% else %} iface {{ ifc.name }} inet6 static address {{ ifc.address_v6 }} netmask {{ ifc.netmask_v6 }} {% if ifc.gateway_v6 %} gateway {{ ifc.gateway_v6 }} {% endif %} {% endif %} {% endif %} {% endfor %} nova-13.0.0/nova/tests/unit/network/test_linux_net.py0000664000567000056710000017253512701410011024072 0ustar jenkinsjenkins00000000000000# Copyright 2011 NTT # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import calendar import datetime import os import re import time import mock from mox3 import mox import netifaces from oslo_concurrency import processutils from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import fileutils from oslo_utils import timeutils from nova import context from nova import db from nova import exception from nova.network import driver from nova.network import linux_net from nova.network import model as network_model from nova import objects from nova import test from nova import utils CONF = cfg.CONF CONF.import_opt('share_dhcp_address', 'nova.objects.network') CONF.import_opt('network_device_mtu', 'nova.objects.network') HOST = "testhost" instances = {'00000000-0000-0000-0000-0000000000000000': {'id': 0, 'uuid': '00000000-0000-0000-0000-0000000000000000', 'host': 'fake_instance00', 'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0), 'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0), 'hostname': 'fake_instance00'}, '00000000-0000-0000-0000-0000000000000001': {'id': 1, 'uuid': '00000000-0000-0000-0000-0000000000000001', 'host': 'fake_instance01', 'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0), 'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0), 'hostname': 'fake_instance01'}, '00000000-0000-0000-0000-0000000000000002': {'id': 2, 'uuid': '00000000-0000-0000-0000-0000000000000002', 'host': 'fake_instance02', 'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0), 'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0), 'hostname': 'really_long_fake_instance02_to_test_hostname_' 'truncation_when_too_long'}} addresses = [{"address": "10.0.0.1"}, {"address": "10.0.0.2"}, {"address": "10.0.0.3"}, {"address": "10.0.0.4"}, {"address": "10.0.0.5"}, {"address": "10.0.0.6"}] networks = [{'id': 0, 'uuid': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", 'label': 'test0', 'injected': False, 'multi_host': False, 'cidr': '192.168.0.0/24', 'cidr_v6': '2001:db8::/64', 'gateway_v6': '2001:db8::1', 'netmask_v6': 
'64', 'netmask': '255.255.255.0', 'bridge': 'fa0', 'bridge_interface': 'fake_fa0', 'gateway': '192.168.0.1', 'broadcast': '192.168.0.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'dhcp_server': '192.168.0.1', 'dhcp_start': '192.168.100.1', 'vlan': None, 'host': None, 'project_id': 'fake_project', 'vpn_public_address': '192.168.0.2', 'mtu': None, 'enable_dhcp': True, 'share_address': False}, {'id': 1, 'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb", 'label': 'test1', 'injected': False, 'multi_host': True, 'cidr': '192.168.1.0/24', 'cidr_v6': '2001:db9::/64', 'gateway_v6': '2001:db9::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa1', 'bridge_interface': 'fake_fa1', 'gateway': '192.168.1.1', 'broadcast': '192.168.1.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'dhcp_server': '192.168.1.1', 'dhcp_start': '192.168.100.1', 'vlan': None, 'host': None, 'project_id': 'fake_project', 'vpn_public_address': '192.168.1.2', 'mtu': None, 'enable_dhcp': True, 'share_address': False}, {'id': 2, 'uuid': "cccccccc-cccc-cccc-cccc-cccccccccccc", 'label': 'test2', 'injected': False, 'multi_host': True, 'cidr': '192.168.2.0/24', 'cidr_v6': '2001:db10::/64', 'gateway_v6': '2001:db10::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa2', 'bridge_interface': 'fake_fa2', 'gateway': '192.168.2.1', 'broadcast': '192.168.2.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'dhcp_server': '192.168.2.1', 'dhcp_start': '192.168.100.1', 'vlan': None, 'host': None, 'project_id': 'fake_project', 'vpn_public_address': '192.168.2.2', 'mtu': None, 'enable_dhcp': True, 'share_address': False}] fixed_ips = [{'id': 0, 'network_id': 0, 'address': '192.168.0.100', 'instance_id': 0, 'allocated': True, 'leased': True, 'virtual_interface_id': 0, 'default_route': True, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000', 'floating_ips': []}, {'id': 1, 'network_id': 1, 'address': '192.168.1.100', 'instance_id': 0, 'allocated': True, 'leased': True, 
'virtual_interface_id': 1, 'default_route': False, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000', 'floating_ips': []}, {'id': 2, 'network_id': 1, 'address': '192.168.0.101', 'instance_id': 1, 'allocated': True, 'leased': True, 'virtual_interface_id': 2, 'default_route': True, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001', 'floating_ips': []}, {'id': 3, 'network_id': 0, 'address': '192.168.1.101', 'instance_id': 1, 'allocated': True, 'leased': True, 'virtual_interface_id': 3, 'default_route': False, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001', 'floating_ips': []}, {'id': 4, 'network_id': 0, 'address': '192.168.0.102', 'instance_id': 0, 'allocated': True, 'leased': False, 'virtual_interface_id': 4, 'default_route': False, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000', 'floating_ips': []}, {'id': 5, 'network_id': 1, 'address': '192.168.1.102', 'instance_id': 1, 'allocated': True, 'leased': False, 'virtual_interface_id': 5, 'default_route': False, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001', 'floating_ips': []}, {'id': 6, 'network_id': 1, 'address': '192.168.1.103', 'instance_id': 1, 'allocated': False, 'leased': True, 'virtual_interface_id': 6, 'default_route': False, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001', 'floating_ips': []}, {'id': 7, 'network_id': 2, 'address': '192.168.2.100', 'instance_id': 2, 'allocated': True, 'leased': False, 'virtual_interface_id': 7, 'default_route': False, 'instance_uuid': '00000000-0000-0000-0000-0000000000000002', 'floating_ips': []}] vifs = [{'id': 0, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:00', 'uuid': '00000000-0000-0000-0000-0000000000000000', 'network_id': 0, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000'}, {'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:01', 'uuid': 
'00000000-0000-0000-0000-0000000000000001', 'network_id': 1, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000'}, {'id': 2, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:02', 'uuid': '00000000-0000-0000-0000-0000000000000002', 'network_id': 1, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}, {'id': 3, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:03', 'uuid': '00000000-0000-0000-0000-0000000000000003', 'network_id': 0, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}, {'id': 4, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:04', 'uuid': '00000000-0000-0000-0000-0000000000000004', 'network_id': 0, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000'}, {'id': 5, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:05', 'uuid': '00000000-0000-0000-0000-0000000000000005', 'network_id': 1, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}, {'id': 6, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:06', 'uuid': '00000000-0000-0000-0000-0000000000000006', 'network_id': 1, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}, {'id': 7, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:07', 'uuid': '00000000-0000-0000-0000-0000000000000007', 'network_id': 2, 'instance_uuid': '00000000-0000-0000-0000-0000000000000002'}] def get_associated(context, network_id, host=None, address=None): result = [] for datum in fixed_ips: if (datum['network_id'] == network_id and datum['instance_uuid'] is not None and datum['virtual_interface_id'] is not None): instance = instances[datum['instance_uuid']] if host and host != instance['host']: continue if address and address != datum['address']: continue cleaned = {} 
cleaned['address'] = datum['address'] cleaned['instance_uuid'] = datum['instance_uuid'] cleaned['network_id'] = datum['network_id'] cleaned['vif_id'] = datum['virtual_interface_id'] vif = vifs[datum['virtual_interface_id']] cleaned['vif_address'] = vif['address'] cleaned['instance_hostname'] = instance['hostname'] cleaned['instance_updated'] = instance['updated_at'] cleaned['instance_created'] = instance['created_at'] cleaned['allocated'] = datum['allocated'] cleaned['leased'] = datum['leased'] cleaned['default_route'] = datum['default_route'] result.append(cleaned) return result class LinuxNetworkUtilsTestCase(test.NoDBTestCase): def test_is_pid_cmdline_correct(self): # Negative general case fake_open = mock.mock_open(read_data='no-such-process') with mock.patch.object(linux_net, 'open', fake_open, create=True): self.assertFalse(linux_net.is_pid_cmdline_correct(1, "foo"), "foo should not be in 'no-such-process'") # Negative case that would be a thing we would want to skip fake_open = mock.mock_open( read_data=('/usr/sbin/dnsmasq ' '--conf-file=/var/run/NetworkManager/dnsmasq.conf')) with mock.patch.object(linux_net, 'open', fake_open, create=True): self.assertFalse( linux_net.is_pid_cmdline_correct(1, "nova-br100.conf"), "nova-br100.conf should not have been found") # Positive matching case fake_open = mock.mock_open( read_data=('/usr/sbin/dnsmasq ' '--dhcp-hostsfile=' '/opt/stack/data/nova/networks/nova-br100.conf')) with mock.patch.object(linux_net, 'open', fake_open, create=True): self.assertTrue( linux_net.is_pid_cmdline_correct(1, "nova-br100.conf"), 'nova-br100.conf should have been found') # Negative case. This would match except we throw an IOError/OSError # because the file couldn't be read or opened, this should then fail. 
for err in (IOError, OSError): fake_open = mock.mock_open( read_data=('/usr/sbin/dnsmasq ' '--dhcp-hostsfile=' '/opt/stack/data/nova/networks/nova-br100.conf')) fake_open.side_effect = err with mock.patch.object(linux_net, 'open', fake_open, create=True): self.assertFalse( linux_net.is_pid_cmdline_correct(1, "nova-br100.conf"), 'nova-br100.conf should not have been found') class LinuxNetworkTestCase(test.NoDBTestCase): REQUIRES_LOCKING = True def setUp(self): super(LinuxNetworkTestCase, self).setUp() self.driver = driver.load_network_driver() self.driver.db = db self.context = context.RequestContext('testuser', 'testproject', is_admin=True) def get_vifs(_context, instance_uuid, use_slave): return [vif for vif in vifs if vif['instance_uuid'] == instance_uuid] def get_instance(_context, instance_id): return instances[instance_id] self.stub_out('nova.db.virtual_interface_get_by_instance', get_vifs) self.stub_out('nova.db.instance_get', get_instance) self.stub_out('nova.db.network_get_associated_fixed_ips', get_associated) def _test_add_snat_rule(self, expected, is_external): def verify_add_rule(chain, rule): self.assertEqual('snat', chain) self.assertEqual(expected, rule) self.called = True self.stubs.Set(linux_net.iptables_manager.ipv4['nat'], 'add_rule', verify_add_rule) self.called = False linux_net.add_snat_rule('10.0.0.0/24', is_external) if expected: self.assertTrue(self.called) def test_add_snat_rule_no_ext(self): self.flags(routing_source_ip='10.10.10.1') expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 ' '-j SNAT --to-source 10.10.10.1 -o eth0') self._test_add_snat_rule(expected, False) def test_add_snat_rule_ext(self): self.flags(routing_source_ip='10.10.10.1') expected = () self._test_add_snat_rule(expected, True) def test_add_snat_rule_snat_range_no_ext(self): self.flags(routing_source_ip='10.10.10.1', force_snat_range=['10.10.10.0/24']) expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 ' '-j SNAT --to-source 10.10.10.1 -o eth0') self._test_add_snat_rule(expected, False) 
def test_add_snat_rule_snat_range_ext(self): self.flags(routing_source_ip='10.10.10.1', force_snat_range=['10.10.10.0/24']) expected = ('-s 10.0.0.0/24 -d 10.10.10.0/24 ' '-j SNAT --to-source 10.10.10.1') self._test_add_snat_rule(expected, True) def test_update_dhcp_for_nw00(self): self.flags(use_single_default_gateway=True) self.mox.StubOutWithMock(self.driver, 'write_to_file') self.mox.StubOutWithMock(fileutils, 'ensure_tree') self.mox.StubOutWithMock(os, 'chmod') self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) os.chmod(mox.IgnoreArg(), mox.IgnoreArg()) os.chmod(mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.driver.update_dhcp(self.context, "eth0", networks[0]) def test_update_dhcp_for_nw01(self): self.flags(use_single_default_gateway=True) self.mox.StubOutWithMock(self.driver, 'write_to_file') self.mox.StubOutWithMock(fileutils, 'ensure_tree') self.mox.StubOutWithMock(os, 'chmod') self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) os.chmod(mox.IgnoreArg(), mox.IgnoreArg()) os.chmod(mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.driver.update_dhcp(self.context, "eth0", networks[0]) def _get_fixedips(self, network, host=None): return objects.FixedIPList.get_by_network(self.context, network, host=host) def test_get_dhcp_hosts_for_nw00(self): 
self.flags(use_single_default_gateway=True) expected = ( "DE:AD:BE:EF:00:00,fake_instance00.novalocal," "192.168.0.100,net:NW-0\n" "DE:AD:BE:EF:00:03,fake_instance01.novalocal," "192.168.1.101,net:NW-3\n" "DE:AD:BE:EF:00:04,fake_instance00.novalocal," "192.168.0.102,net:NW-4" ) fixedips = self._get_fixedips(networks[0]) actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[0], fixedips) self.assertEqual(expected, actual_hosts) def test_get_dhcp_hosts_for_nw01(self): self.flags(use_single_default_gateway=True) expected = ( "DE:AD:BE:EF:00:02,fake_instance01.novalocal," "192.168.0.101,net:NW-2\n" "DE:AD:BE:EF:00:05,fake_instance01.novalocal," "192.168.1.102,net:NW-5" ) fixedips = self._get_fixedips(networks[1], host='fake_instance01') actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[1], fixedips) self.assertEqual(expected, actual_hosts) def test_get_dns_hosts_for_nw00(self): expected = ( "192.168.0.100\tfake_instance00.novalocal\n" "192.168.1.101\tfake_instance01.novalocal\n" "192.168.0.102\tfake_instance00.novalocal" ) actual_hosts = self.driver.get_dns_hosts(self.context, networks[0]) self.assertEqual(expected, actual_hosts) def test_get_dns_hosts_for_nw01(self): expected = ( "192.168.1.100\tfake_instance00.novalocal\n" "192.168.0.101\tfake_instance01.novalocal\n" "192.168.1.102\tfake_instance01.novalocal" ) actual_hosts = self.driver.get_dns_hosts(self.context, networks[1]) self.assertEqual(expected, actual_hosts) def test_get_dhcp_opts_for_nw00(self): self.flags(use_single_default_gateway=True) expected_opts = 'NW-0,3,192.168.0.1\nNW-3,3\nNW-4,3' fixedips = self._get_fixedips(networks[0]) actual_opts = self.driver.get_dhcp_opts(self.context, networks[0], fixedips) self.assertEqual(expected_opts, actual_opts) def test_get_dhcp_opts_for_nw00_no_single_default_gateway(self): self.flags(use_single_default_gateway=False) expected_opts = '3,192.168.0.1' fixedips = self._get_fixedips(networks[0]) actual_opts = 
self.driver.get_dhcp_opts(self.context, networks[0], fixedips) self.assertEqual(expected_opts, actual_opts) def test_get_dhcp_opts_for_nw01(self): self.flags(use_single_default_gateway=True) expected_opts = "NW-2,3,192.168.1.1\nNW-5,3" fixedips = self._get_fixedips(networks[1], 'fake_instance01') actual_opts = self.driver.get_dhcp_opts(self.context, networks[1], fixedips) self.assertEqual(expected_opts, actual_opts) def test_get_dhcp_leases_for_nw00(self): timestamp = timeutils.utcnow() seconds_since_epoch = calendar.timegm(timestamp.utctimetuple()) leases = self.driver.get_dhcp_leases(self.context, networks[0]) leases = leases.split('\n') for lease in leases: lease = lease.split(' ') data = get_associated(self.context, 0, address=lease[2])[0] self.assertTrue(data['allocated']) self.assertTrue(data['leased']) self.assertTrue(int(lease[0]) > seconds_since_epoch) self.assertEqual(data['vif_address'], lease[1]) self.assertEqual(data['address'], lease[2]) self.assertEqual(data['instance_hostname'], lease[3]) self.assertEqual('*', lease[4]) def test_get_dhcp_leases_for_nw01(self): self.flags(host='fake_instance01') timestamp = timeutils.utcnow() seconds_since_epoch = calendar.timegm(timestamp.utctimetuple()) leases = self.driver.get_dhcp_leases(self.context, networks[1]) leases = leases.split('\n') for lease in leases: lease = lease.split(' ') data = get_associated(self.context, 1, address=lease[2])[0] self.assertTrue(data['leased']) self.assertTrue(int(lease[0]) > seconds_since_epoch) self.assertEqual(data['vif_address'], lease[1]) self.assertEqual(data['address'], lease[2]) self.assertEqual(data['instance_hostname'], lease[3]) self.assertEqual('*', lease[4]) def test_dhcp_opts_not_default_gateway_network(self): expected = "NW-0,3" fixedip = objects.FixedIPList.get_by_network(self.context, {'id': 0})[0] actual = self.driver._host_dhcp_opts(fixedip.virtual_interface_id) self.assertEqual(expected, actual) def test_host_dhcp_without_default_gateway_network(self): expected 
= ','.join(['DE:AD:BE:EF:00:00', 'fake_instance00.novalocal', '192.168.0.100']) fixedip = objects.FixedIPList.get_by_network(self.context, {'id': 0})[0] actual = self.driver._host_dhcp(fixedip) self.assertEqual(expected, actual) def test_host_dhcp_truncated_hostname(self): expected = ','.join(['DE:AD:BE:EF:00:07', 're-ng_fake_instance02_to_test_hostname_' 'truncation_when_too_long.novalocal', '192.168.2.100']) fixedip = objects.FixedIPList.get_by_network(self.context, {'id': 2})[0] actual = self.driver._host_dhcp(fixedip) self.assertEqual(expected, actual) def test_host_dns_without_default_gateway_network(self): expected = "192.168.0.100\tfake_instance00.novalocal" fixedip = objects.FixedIPList.get_by_network(self.context, {'id': 0})[0] actual = self.driver._host_dns(fixedip) self.assertEqual(expected, actual) def test_linux_bridge_driver_plug(self): """Makes sure plug doesn't drop FORWARD by default. Ensures bug 890195 doesn't reappear. """ def fake_execute(*args, **kwargs): return "", "" self.stubs.Set(utils, 'execute', fake_execute) def verify_add_rule(chain, rule): self.assertEqual('FORWARD', chain) self.assertIn('ACCEPT', rule) self.stubs.Set(linux_net.iptables_manager.ipv4['filter'], 'add_rule', verify_add_rule) driver = linux_net.LinuxBridgeInterfaceDriver() driver.plug({"bridge": "br100", "bridge_interface": "eth0", "share_address": False}, "fakemac") def test_linux_ovs_driver_plug_exception(self): self.flags(fake_network=False) def fake_execute(*args, **kwargs): raise processutils.ProcessExecutionError('specific_error') def fake_device_exists(*args, **kwargs): return False self.stubs.Set(utils, 'execute', fake_execute) self.stubs.Set(linux_net, 'device_exists', fake_device_exists) driver = linux_net.LinuxOVSInterfaceDriver() exc = self.assertRaises(exception.OvsConfigurationFailure, driver.plug, {'uuid': 'fake_network_uuid'}, 'fake_mac') self.assertRegex( str(exc), re.compile("OVS configuration failed with: .*specific_error.*", re.DOTALL)) 
self.assertIsInstance(exc.kwargs['inner_exception'], processutils.ProcessExecutionError) def test_vlan_override(self): """Makes sure vlan_interface flag overrides network bridge_interface. Allows heterogeneous networks a la bug 833426 """ driver = linux_net.LinuxBridgeInterfaceDriver() info = {} @staticmethod def test_ensure(vlan, bridge, interface, network, mac_address, mtu): info['passed_interface'] = interface self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver, 'ensure_vlan_bridge', test_ensure) network = { "bridge": "br100", "bridge_interface": "base_interface", "share_address": False, "vlan": "fake" } self.flags(vlan_interface="") driver.plug(network, "fakemac") self.assertEqual("base_interface", info['passed_interface']) self.flags(vlan_interface="override_interface") driver.plug(network, "fakemac") self.assertEqual("override_interface", info['passed_interface']) driver.plug(network, "fakemac") def test_flat_override(self): """Makes sure flat_interface flag overrides network bridge_interface. 
Allows heterogeneous networks a la bug 833426 """ driver = linux_net.LinuxBridgeInterfaceDriver() info = {} @staticmethod def test_ensure(bridge, interface, network, gateway): info['passed_interface'] = interface self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver, 'ensure_bridge', test_ensure) network = { "bridge": "br100", "bridge_interface": "base_interface", "share_address": False, } driver.plug(network, "fakemac") self.assertEqual("base_interface", info['passed_interface']) self.flags(flat_interface="override_interface") driver.plug(network, "fakemac") self.assertEqual("override_interface", info['passed_interface']) def _test_dnsmasq_execute(self, extra_expected=None): network_ref = {'id': 'fake', 'label': 'fake', 'gateway': '10.0.0.1', 'multi_host': False, 'cidr': '10.0.0.0/24', 'netmask': '255.255.255.0', 'dns1': '8.8.4.4', 'dhcp_start': '1.0.0.2', 'dhcp_server': '10.0.0.1', 'share_address': False} def fake_execute(*args, **kwargs): executes.append(args) return "", "" def fake_add_dhcp_mangle_rule(*args, **kwargs): executes.append(args) self.stubs.Set(linux_net, '_execute', fake_execute) self.stubs.Set(linux_net, '_add_dhcp_mangle_rule', fake_add_dhcp_mangle_rule) self.stub_out('os.chmod', lambda *a, **kw: None) self.stubs.Set(linux_net, 'write_to_file', lambda *a, **kw: None) self.stubs.Set(linux_net, '_dnsmasq_pid_for', lambda *a, **kw: None) dev = 'br100' default_domain = CONF.dhcp_domain for domain in ('', default_domain): executes = [] self.flags(dhcp_domain=domain) fixedips = self._get_fixedips(network_ref) linux_net.restart_dhcp(self.context, dev, network_ref, fixedips) expected = ['env', 'CONFIG_FILE=%s' % jsonutils.dumps(CONF.dhcpbridge_flagfile), 'NETWORK_ID=fake', 'dnsmasq', '--strict-order', '--bind-interfaces', '--conf-file=%s' % CONF.dnsmasq_config_file, '--pid-file=%s' % linux_net._dhcp_file(dev, 'pid'), '--dhcp-optsfile=%s' % linux_net._dhcp_file(dev, 'opts'), '--listen-address=%s' % network_ref['dhcp_server'], '--except-interface=lo', 
"--dhcp-range=set:%s,%s,static,%s,%ss" % (network_ref['label'], network_ref['dhcp_start'], network_ref['netmask'], CONF.dhcp_lease_time), '--dhcp-lease-max=256', '--dhcp-hostsfile=%s' % linux_net._dhcp_file(dev, 'conf'), '--dhcp-script=%s' % CONF.dhcpbridge, '--no-hosts', '--leasefile-ro'] if CONF.dhcp_domain: expected.append('--domain=%s' % CONF.dhcp_domain) if extra_expected: expected += extra_expected self.assertEqual([(dev,), tuple(expected)], executes) def test_dnsmasq_execute(self): self._test_dnsmasq_execute() def test_dnsmasq_execute_dns_servers(self): self.flags(dns_server=['1.1.1.1', '2.2.2.2']) expected = [ '--no-resolv', '--server=1.1.1.1', '--server=2.2.2.2', ] self._test_dnsmasq_execute(expected) def test_dnsmasq_execute_use_network_dns_servers(self): self.flags(use_network_dns_servers=True) expected = [ '--no-resolv', '--server=8.8.4.4', ] self._test_dnsmasq_execute(expected) def test_isolated_host(self): self.flags(fake_network=False, share_dhcp_address=True) # NOTE(vish): use a fresh copy of the manager for each test self.stubs.Set(linux_net, 'iptables_manager', linux_net.IptablesManager()) self.stubs.Set(linux_net, 'binary_name', 'test') executes = [] def fake_execute(*args, **kwargs): executes.append(args) return "", "" self.stubs.Set(utils, 'execute', fake_execute) driver = linux_net.LinuxBridgeInterfaceDriver() @staticmethod def fake_ensure(bridge, interface, network, gateway): return bridge self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver, 'ensure_bridge', fake_ensure) iface = 'eth0' dhcp = '192.168.1.1' network = {'dhcp_server': dhcp, 'share_address': False, 'bridge': 'br100', 'bridge_interface': iface} driver.plug(network, 'fakemac') expected = [ ('ebtables', '--concurrent', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i', iface, '--arp-ip-dst', dhcp, '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-I', 'INPUT', '-p', 'ARP', '-i', iface, '--arp-ip-dst', dhcp, '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-D', 
'OUTPUT', '-p', 'ARP', '-o', iface, '--arp-ip-src', dhcp, '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-I', 'OUTPUT', '-p', 'ARP', '-o', iface, '--arp-ip-src', dhcp, '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-i', iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68', '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-I', 'FORWARD', '-p', 'IPv4', '-i', iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68', '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-o', iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68', '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-I', 'FORWARD', '-p', 'IPv4', '-o', iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68', '-j', 'DROP'), ('iptables-save', '-c'), ('iptables-restore', '-c'), ('ip6tables-save', '-c'), ('ip6tables-restore', '-c'), ] self.assertEqual(expected, executes) executes = [] @staticmethod def fake_remove(bridge, gateway): return self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver, 'remove_bridge', fake_remove) driver.unplug(network) expected = [ ('ebtables', '--concurrent', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i', iface, '--arp-ip-dst', dhcp, '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o', iface, '--arp-ip-src', dhcp, '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-i', iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68', '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-o', iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68', '-j', 'DROP'), ] self.assertEqual(expected, executes) def _test_initialize_gateway(self, existing, expected, routes=''): self.flags(fake_network=False) executes = [] def fake_execute(*args, **kwargs): executes.append(args) if args[0] == 'ip' and args[1] == 'addr' 
and args[2] == 'show': return existing, "" if args[0] == 'ip' and args[1] == 'route' and args[2] == 'show': return routes, "" if args[0] == 'sysctl': return '1\n', '' self.stubs.Set(utils, 'execute', fake_execute) network = {'dhcp_server': '192.168.1.1', 'cidr': '192.168.1.0/24', 'broadcast': '192.168.1.255', 'cidr_v6': '2001:db8::/64'} self.driver.initialize_gateway_device('eth0', network) self.assertEqual(expected, executes) def test_initialize_gateway_moves_wrong_ip(self): existing = ("2: eth0: " " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n" " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n" " inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n" " inet6 dead::beef:dead:beef:dead/64 scope link\n" " valid_lft forever preferred_lft forever\n") expected = [ ('sysctl', '-n', 'net.ipv4.ip_forward'), ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'), ('ip', 'route', 'show', 'dev', 'eth0'), ('ip', 'addr', 'del', '192.168.0.1/24', 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.1.1/24', 'brd', '192.168.1.255', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.0.1/24', 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'), ('ip', '-f', 'inet6', 'addr', 'change', '2001:db8::/64', 'dev', 'eth0'), ] self._test_initialize_gateway(existing, expected) def test_initialize_gateway_ip_with_dynamic_flag(self): existing = ("2: eth0: " " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n" " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n" " inet 192.168.0.1/24 brd 192.168.0.255 scope global " "dynamic eth0\n" " inet6 dead::beef:dead:beef:dead/64 scope link\n" " valid_lft forever preferred_lft forever\n") expected = [ ('sysctl', '-n', 'net.ipv4.ip_forward'), ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'), ('ip', 'route', 'show', 'dev', 'eth0'), ('ip', 'addr', 'del', '192.168.0.1/24', 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.1.1/24', 'brd', 
'192.168.1.255', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.0.1/24', 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'), ('ip', '-f', 'inet6', 'addr', 'change', '2001:db8::/64', 'dev', 'eth0'), ] self._test_initialize_gateway(existing, expected) def test_initialize_gateway_resets_route(self): routes = ("default via 192.168.0.1 dev eth0\n" "192.168.100.0/24 via 192.168.0.254 dev eth0 proto static\n") existing = ("2: eth0: " " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n" " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n" " inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n" " inet6 dead::beef:dead:beef:dead/64 scope link\n" " valid_lft forever preferred_lft forever\n") expected = [ ('sysctl', '-n', 'net.ipv4.ip_forward'), ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'), ('ip', 'route', 'show', 'dev', 'eth0'), ('ip', 'route', 'del', 'default', 'dev', 'eth0'), ('ip', 'route', 'del', '192.168.100.0/24', 'dev', 'eth0'), ('ip', 'addr', 'del', '192.168.0.1/24', 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.1.1/24', 'brd', '192.168.1.255', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.0.1/24', 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'), ('ip', 'route', 'add', 'default', 'via', '192.168.0.1', 'dev', 'eth0'), ('ip', 'route', 'add', '192.168.100.0/24', 'via', '192.168.0.254', 'dev', 'eth0', 'proto', 'static'), ('ip', '-f', 'inet6', 'addr', 'change', '2001:db8::/64', 'dev', 'eth0'), ] self._test_initialize_gateway(existing, expected, routes) def test_initialize_gateway_no_move_right_ip(self): existing = ("2: eth0: " " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n" " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n" " inet 192.168.1.1/24 brd 192.168.1.255 scope global eth0\n" " inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n" " inet6 dead::beef:dead:beef:dead/64 scope link\n" " valid_lft forever preferred_lft forever\n") expected = [ ('sysctl', '-n', 
'net.ipv4.ip_forward'), ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'), ('ip', '-f', 'inet6', 'addr', 'change', '2001:db8::/64', 'dev', 'eth0'), ] self._test_initialize_gateway(existing, expected) def test_initialize_gateway_add_if_blank(self): existing = ("2: eth0: " " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n" " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n" " inet6 dead::beef:dead:beef:dead/64 scope link\n" " valid_lft forever preferred_lft forever\n") expected = [ ('sysctl', '-n', 'net.ipv4.ip_forward'), ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'), ('ip', 'route', 'show', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.1.1/24', 'brd', '192.168.1.255', 'dev', 'eth0'), ('ip', '-f', 'inet6', 'addr', 'change', '2001:db8::/64', 'dev', 'eth0'), ] self._test_initialize_gateway(existing, expected) def test_ensure_floating_no_duplicate_forwards(self): ln = linux_net self.stubs.Set(ln.iptables_manager, 'apply', lambda: None) self.stubs.Set(ln, 'ensure_ebtables_rules', lambda *a, **kw: None) net = {'bridge': 'br100', 'cidr': '10.0.0.0/24'} ln.ensure_floating_forward('10.10.10.10', '10.0.0.1', 'eth0', net) ln.ensure_floating_forward('10.10.10.11', '10.0.0.10', 'eth0', net) two_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules) ln.ensure_floating_forward('10.10.10.10', '10.0.0.3', 'eth0', net) dup_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules) self.assertEqual(two_forward_rules, dup_forward_rules) def test_apply_ran(self): manager = linux_net.IptablesManager() manager.iptables_apply_deferred = False self.mox.StubOutWithMock(manager, '_apply') manager._apply() self.mox.ReplayAll() empty_ret = manager.apply() self.assertIsNone(empty_ret) def test_apply_not_run(self): manager = linux_net.IptablesManager() manager.iptables_apply_deferred = True self.mox.StubOutWithMock(manager, '_apply') self.mox.ReplayAll() manager.apply() def test_deferred_unset_apply_ran(self): manager = linux_net.IptablesManager() 
manager.iptables_apply_deferred = True self.mox.StubOutWithMock(manager, '_apply') manager._apply() self.mox.ReplayAll() manager.defer_apply_off() self.assertFalse(manager.iptables_apply_deferred) def _test_add_metadata_accept_rule(self, expected): def verify_add_rule(chain, rule): self.assertEqual('INPUT', chain) self.assertEqual(expected, rule) self.stubs.Set(linux_net.iptables_manager.ipv4['filter'], 'add_rule', verify_add_rule) linux_net.metadata_accept() def _test_add_metadata_accept_ipv6_rule(self, expected): def verify_add_rule(chain, rule): self.assertEqual('INPUT', chain) self.assertEqual(expected, rule) self.stubs.Set(linux_net.iptables_manager.ipv6['filter'], 'add_rule', verify_add_rule) linux_net.metadata_accept() def test_metadata_accept(self): self.flags(metadata_port='8775') self.flags(metadata_host='10.10.10.1') expected = ('-p tcp -m tcp --dport 8775 ' '-d 10.10.10.1 -j ACCEPT') self._test_add_metadata_accept_rule(expected) def test_metadata_accept_ipv6(self): self.flags(metadata_port='8775') self.flags(metadata_host='2600::') expected = ('-p tcp -m tcp --dport 8775 ' '-d 2600:: -j ACCEPT') self._test_add_metadata_accept_ipv6_rule(expected) def test_metadata_accept_localhost(self): self.flags(metadata_port='8775') self.flags(metadata_host='127.0.0.1') expected = ('-p tcp -m tcp --dport 8775 ' '-m addrtype --dst-type LOCAL -j ACCEPT') self._test_add_metadata_accept_rule(expected) def test_metadata_accept_ipv6_localhost(self): self.flags(metadata_port='8775') self.flags(metadata_host='::1') expected = ('-p tcp -m tcp --dport 8775 ' '-m addrtype --dst-type LOCAL -j ACCEPT') self._test_add_metadata_accept_ipv6_rule(expected) def _test_add_metadata_forward_rule(self, expected): def verify_add_rule(chain, rule): self.assertEqual('PREROUTING', chain) self.assertEqual(expected, rule) self.stubs.Set(linux_net.iptables_manager.ipv4['nat'], 'add_rule', verify_add_rule) linux_net.metadata_forward() def test_metadata_forward(self): 
self.flags(metadata_port='8775') self.flags(metadata_host='10.10.10.1') expected = ('-s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp ' '--dport 80 -j DNAT --to-destination 10.10.10.1:8775') self._test_add_metadata_forward_rule(expected) def test_metadata_forward_localhost(self): self.flags(metadata_port='8775') self.flags(metadata_host='127.0.0.1') expected = ('-s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp ' '--dport 80 -j REDIRECT --to-ports 8775') self._test_add_metadata_forward_rule(expected) def test_ensure_bridge_brings_up_interface(self): # We have to bypass the CONF.fake_network check so that netifaces # is actually called. self.flags(fake_network=False) fake_mac = 'aa:bb:cc:00:11:22' fake_ifaces = { netifaces.AF_LINK: [{'addr': fake_mac}] } calls = { 'device_exists': [mock.call('bridge')], '_execute': [ mock.call('brctl', 'addif', 'bridge', 'eth0', run_as_root=True, check_exit_code=False), mock.call('ip', 'link', 'set', 'bridge', 'address', fake_mac, run_as_root=True), mock.call('ip', 'link', 'set', 'eth0', 'up', run_as_root=True, check_exit_code=False), mock.call('ip', 'route', 'show', 'dev', 'eth0'), mock.call('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'), ] } with test.nested( mock.patch.object(linux_net, 'device_exists', return_value=True), mock.patch.object(linux_net, '_execute', return_value=('', '')), mock.patch.object(netifaces, 'ifaddresses') ) as (device_exists, _execute, ifaddresses): ifaddresses.return_value = fake_ifaces driver = linux_net.LinuxBridgeInterfaceDriver() driver.ensure_bridge('bridge', 'eth0') device_exists.assert_has_calls(calls['device_exists']) _execute.assert_has_calls(calls['_execute']) ifaddresses.assert_called_once_with('eth0') def test_ensure_bridge_brclt_addif_exception(self): def fake_execute(*cmd, **kwargs): if ('brctl', 'addif', 'bridge', 'eth0') == cmd: return ('', 'some error happens') else: return ('', '') with test.nested( mock.patch.object(linux_net, 'device_exists', return_value=True), 
mock.patch.object(linux_net, '_execute', fake_execute) ) as (device_exists, _): driver = linux_net.LinuxBridgeInterfaceDriver() self.assertRaises(exception.NovaException, driver.ensure_bridge, 'bridge', 'eth0') device_exists.assert_called_once_with('bridge') def test_ensure_bridge_brclt_addbr_neutron_race(self): def fake_execute(*cmd, **kwargs): if ('brctl', 'addbr', 'brq1234567-89') == cmd: return ('', "device brq1234567-89 already exists; " "can't create bridge with the same name\n") else: return ('', '') with test.nested( mock.patch.object(linux_net, 'device_exists', return_value=False), mock.patch.object(linux_net, '_execute', fake_execute) ) as (device_exists, _): driver = linux_net.LinuxBridgeInterfaceDriver() driver.ensure_bridge('brq1234567-89', '') device_exists.assert_called_once_with('brq1234567-89') def test_set_device_mtu_configured(self): self.flags(network_device_mtu=10000) calls = [ mock.call('ip', 'link', 'set', 'fake-dev', 'mtu', 10000, run_as_root=True, check_exit_code=[0, 2, 254]) ] with mock.patch.object(utils, 'execute', return_value=('', '')) as ex: linux_net._set_device_mtu('fake-dev') ex.assert_has_calls(calls) def test_set_device_mtu_default(self): calls = [] with mock.patch.object(utils, 'execute', return_value=('', '')) as ex: linux_net._set_device_mtu('fake-dev') ex.assert_has_calls(calls) def _ovs_vif_port(self, calls, interface_type=None): with mock.patch.object(utils, 'execute', return_value=('', '')) as ex: linux_net.create_ovs_vif_port('fake-bridge', 'fake-dev', 'fake-iface-id', 'fake-mac', 'fake-instance-uuid', interface_type=interface_type) ex.assert_has_calls(calls) def test_ovs_vif_port_cmd(self): expected = ['--', '--if-exists', 'del-port', 'fake-dev', '--', 'add-port', 'fake-bridge', 'fake-dev', '--', 'set', 'Interface', 'fake-dev', 'external-ids:iface-id=fake-iface-id', 'external-ids:iface-status=active', 'external-ids:attached-mac=fake-mac', 'external-ids:vm-uuid=fake-instance-uuid' ] cmd = 
linux_net._create_ovs_vif_cmd('fake-bridge', 'fake-dev', 'fake-iface-id', 'fake-mac', 'fake-instance-uuid') self.assertEqual(expected, cmd) expected += ['type=fake-type'] cmd = linux_net._create_ovs_vif_cmd('fake-bridge', 'fake-dev', 'fake-iface-id', 'fake-mac', 'fake-instance-uuid', 'fake-type') self.assertEqual(expected, cmd) def test_ovs_vif_port(self): calls = [ mock.call('ovs-vsctl', '--timeout=120', '--', '--if-exists', 'del-port', 'fake-dev', '--', 'add-port', 'fake-bridge', 'fake-dev', '--', 'set', 'Interface', 'fake-dev', 'external-ids:iface-id=fake-iface-id', 'external-ids:iface-status=active', 'external-ids:attached-mac=fake-mac', 'external-ids:vm-uuid=fake-instance-uuid', run_as_root=True) ] self._ovs_vif_port(calls) @mock.patch.object(linux_net, '_ovs_vsctl') @mock.patch.object(linux_net, '_create_ovs_vif_cmd') @mock.patch.object(linux_net, '_set_device_mtu') def test_ovs_vif_port_with_type_vhostuser(self, mock_set_device_mtu, mock_create_cmd, mock_vsctl): linux_net.create_ovs_vif_port( 'fake-bridge', 'fake-dev', 'fake-iface-id', 'fake-mac', "fake-instance-uuid", mtu=1500, interface_type=network_model.OVS_VHOSTUSER_INTERFACE_TYPE) mock_create_cmd.assert_called_once_with('fake-bridge', 'fake-dev', 'fake-iface-id', 'fake-mac', "fake-instance-uuid", network_model.OVS_VHOSTUSER_INTERFACE_TYPE) self.assertFalse(mock_set_device_mtu.called) self.assertTrue(mock_vsctl.called) def test_ovs_vif_port_with_mtu(self): self.flags(network_device_mtu=10000) calls = [ mock.call('ovs-vsctl', '--timeout=120', '--', '--if-exists', 'del-port', 'fake-dev', '--', 'add-port', 'fake-bridge', 'fake-dev', '--', 'set', 'Interface', 'fake-dev', 'external-ids:iface-id=fake-iface-id', 'external-ids:iface-status=active', 'external-ids:attached-mac=fake-mac', 'external-ids:vm-uuid=fake-instance-uuid', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev', 'mtu', 10000, run_as_root=True, check_exit_code=[0, 2, 254]) ] self._ovs_vif_port(calls) def _create_veth_pair(self, calls): 
with mock.patch.object(utils, 'execute', return_value=('', '')) as ex: linux_net._create_veth_pair('fake-dev1', 'fake-dev2') ex.assert_has_calls(calls) def test_create_veth_pair(self): calls = [ mock.call('ip', 'link', 'add', 'fake-dev1', 'type', 'veth', 'peer', 'name', 'fake-dev2', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev1', 'up', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev1', 'promisc', 'on', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev2', 'up', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev2', 'promisc', 'on', run_as_root=True) ] self._create_veth_pair(calls) def test_create_veth_pair_with_mtu(self): self.flags(network_device_mtu=10000) calls = [ mock.call('ip', 'link', 'add', 'fake-dev1', 'type', 'veth', 'peer', 'name', 'fake-dev2', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev1', 'up', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev1', 'promisc', 'on', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev1', 'mtu', 10000, run_as_root=True, check_exit_code=[0, 2, 254]), mock.call('ip', 'link', 'set', 'fake-dev2', 'up', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev2', 'promisc', 'on', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev2', 'mtu', 10000, run_as_root=True, check_exit_code=[0, 2, 254]) ] self._create_veth_pair(calls) def test_exec_ebtables_success(self): executes = [] def fake_execute(*args, **kwargs): executes.append(args) return "", "" self.stubs.Set(self.driver, '_execute', fake_execute) self.driver._exec_ebtables('fake') self.assertEqual(1, len(executes)) self.mox.UnsetStubs() def _ebtables_race_stderr(self): return (u"Unable to update the kernel. Two possible causes:\n" "1. Multiple ebtables programs were executing simultaneously." " The ebtables\n userspace tool doesn't by default support " "multiple ebtables programs running\n concurrently. 
The " "ebtables option --concurrent or a tool like flock can be\n " "used to support concurrent scripts that update the ebtables " "kernel tables.\n2. The kernel doesn't support a certain " "ebtables extension, consider\n recompiling your kernel or " "insmod the extension.\n.\n") def test_exec_ebtables_fail_all(self): executes = [] def fake_sleep(interval): pass def fake_execute(*args, **kwargs): executes.append(args) raise processutils.ProcessExecutionError('error', stderr=self._ebtables_race_stderr()) self.stubs.Set(time, 'sleep', fake_sleep) self.stubs.Set(self.driver, '_execute', fake_execute) self.assertRaises(processutils.ProcessExecutionError, self.driver._exec_ebtables, 'fake') max_calls = CONF.ebtables_exec_attempts self.assertEqual(max_calls, len(executes)) self.mox.UnsetStubs() def test_exec_ebtables_fail_no_retry(self): executes = [] def fake_sleep(interval): pass def fake_execute(*args, **kwargs): executes.append(args) raise processutils.ProcessExecutionError('error', stderr="Sorry, rule does not exist") self.stubs.Set(time, 'sleep', fake_sleep) self.stubs.Set(self.driver, '_execute', fake_execute) self.assertRaises(processutils.ProcessExecutionError, self.driver._exec_ebtables, 'fake') self.assertEqual(1, len(executes)) self.mox.UnsetStubs() def test_exec_ebtables_fail_once(self): executes = [] def fake_sleep(interval): pass def fake_execute(*args, **kwargs): executes.append(args) if len(executes) == 1: raise processutils.ProcessExecutionError('error', stderr=self._ebtables_race_stderr()) else: return "", "" self.stubs.Set(time, 'sleep', fake_sleep) self.stubs.Set(self.driver, '_execute', fake_execute) self.driver._exec_ebtables('fake') self.assertEqual(2, len(executes)) self.mox.UnsetStubs() @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_remove_bridge(self, mock_execute, mock_exists): linux_net.LinuxBridgeInterfaceDriver.remove_bridge('fake-bridge') expected_exists_args = 
mock.call('/sys/class/net/fake-bridge') expected_execute_args = [ mock.call('ip', 'link', 'set', 'fake-bridge', 'down', run_as_root=True), mock.call('brctl', 'delbr', 'fake-bridge', run_as_root=True)] self.assertIn(expected_exists_args, mock_exists.mock_calls) self.assertEqual(expected_execute_args, mock_execute.mock_calls) @mock.patch.object(linux_net, '_execute') @mock.patch.object(linux_net, 'device_exists', return_value=False) @mock.patch.object(linux_net, '_set_device_mtu') def test_ensure_vlan(self, mock_set_device_mtu, mock_device_exists, mock_execute): interface = linux_net.LinuxBridgeInterfaceDriver.ensure_vlan( 1, 'eth0', 'MAC', 'MTU', "vlan_name") self.assertEqual("vlan_name", interface) mock_device_exists.assert_called_once_with('vlan_name') expected_execute_args = [ mock.call('ip', 'link', 'add', 'link', 'eth0', 'name', 'vlan_name', 'type', 'vlan', 'id', 1, check_exit_code=[0, 2, 254], run_as_root=True), mock.call('ip', 'link', 'set', 'vlan_name', 'address', 'MAC', check_exit_code=[0, 2, 254], run_as_root=True), mock.call('ip', 'link', 'set', 'vlan_name', 'up', check_exit_code=[0, 2, 254], run_as_root=True)] self.assertEqual(expected_execute_args, mock_execute.mock_calls) mock_set_device_mtu.assert_called_once_with('vlan_name', 'MTU') @mock.patch.object(linux_net, '_execute') @mock.patch.object(linux_net, 'device_exists', return_value=True) @mock.patch.object(linux_net, '_set_device_mtu') def test_ensure_vlan_device_exists(self, mock_set_device_mtu, mock_device_exists, mock_execute): interface = linux_net.LinuxBridgeInterfaceDriver.ensure_vlan(1, 'eth0') self.assertEqual("vlan1", interface) mock_device_exists.assert_called_once_with('vlan1') self.assertFalse(mock_execute.called) mock_set_device_mtu.assert_called_once_with('vlan1', None) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute', side_effect=processutils.ProcessExecutionError()) def test_remove_bridge_negative(self, mock_execute, mock_exists): 
self.assertRaises(processutils.ProcessExecutionError, linux_net.LinuxBridgeInterfaceDriver.remove_bridge, 'fake-bridge') nova-13.0.0/nova/tests/unit/network/__init__.py0000664000567000056710000000000012701407773022563 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/network/test_api.py0000664000567000056710000007063312701407773022657 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for network API.""" import itertools import uuid import mock from mox3 import mox from oslo_policy import policy as oslo_policy from nova.compute import flavors from nova import context from nova import exception from nova import network from nova.network import api from nova.network import base_api from nova.network import floating_ips from nova.network import model as network_model from nova.network import rpcapi as network_rpcapi from nova import objects from nova.objects import fields from nova import policy from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_fixed_ip from nova.tests.unit.objects import test_virtual_interface FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16' fake_info_cache = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'instance_uuid': 'fake-uuid', 'network_info': '[]', } class NetworkPolicyTestCase(test.TestCase): def setUp(self): super(NetworkPolicyTestCase, self).setUp() 
policy.reset() policy.init() self.context = context.get_admin_context() def tearDown(self): super(NetworkPolicyTestCase, self).tearDown() policy.reset() def test_check_policy(self): self.mox.StubOutWithMock(policy, 'enforce') target = { 'project_id': self.context.project_id, 'user_id': self.context.user_id, } policy.enforce(self.context, 'network:get_all', target) self.mox.ReplayAll() api.check_policy(self.context, 'get_all') def test_skip_policy(self): policy.reset() rules = {'network:get_all': '!'} policy.set_rules(oslo_policy.Rules.from_dict(rules)) api = network.API() self.assertRaises(exception.PolicyNotAuthorized, api.get_all, self.context) api = network.API(skip_policy_check=True) api.get_all(self.context) class ApiTestCase(test.TestCase): def setUp(self): super(ApiTestCase, self).setUp() self.network_api = network.API() self.context = context.RequestContext('fake-user', 'fake-project') @mock.patch('nova.objects.NetworkList.get_all') def test_get_all(self, mock_get_all): mock_get_all.return_value = mock.sentinel.get_all self.assertEqual(mock.sentinel.get_all, self.network_api.get_all(self.context)) mock_get_all.assert_called_once_with(self.context, project_only=True) @mock.patch('nova.objects.NetworkList.get_all') def test_get_all_liberal(self, mock_get_all): self.flags(network_manager='nova.network.manager.FlatDHCPManaager') mock_get_all.return_value = mock.sentinel.get_all self.assertEqual(mock.sentinel.get_all, self.network_api.get_all(self.context)) mock_get_all.assert_called_once_with(self.context, project_only="allow_none") @mock.patch('nova.objects.NetworkList.get_all') def test_get_all_no_networks(self, mock_get_all): mock_get_all.side_effect = exception.NoNetworksFound self.assertEqual([], self.network_api.get_all(self.context)) mock_get_all.assert_called_once_with(self.context, project_only=True) @mock.patch('nova.objects.Network.get_by_uuid') def test_get(self, mock_get): mock_get.return_value = mock.sentinel.get_by_uuid 
self.assertEqual(mock.sentinel.get_by_uuid, self.network_api.get(self.context, 'fake-uuid')) @mock.patch('nova.objects.Network.get_by_id') @mock.patch('nova.db.virtual_interface_get_by_instance') def test_get_vifs_by_instance(self, mock_get_by_instance, mock_get_by_id): mock_get_by_instance.return_value = [ dict(test_virtual_interface.fake_vif, network_id=123)] mock_get_by_id.return_value = objects.Network() mock_get_by_id.return_value.uuid = mock.sentinel.network_uuid instance = objects.Instance(uuid=mock.sentinel.inst_uuid) vifs = self.network_api.get_vifs_by_instance(self.context, instance) self.assertEqual(1, len(vifs)) self.assertEqual(123, vifs[0].network_id) self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid) mock_get_by_instance.assert_called_once_with( self.context, str(mock.sentinel.inst_uuid)) mock_get_by_id.assert_called_once_with(self.context, 123, project_only='allow_none') @mock.patch('nova.objects.Network.get_by_id') @mock.patch('nova.db.virtual_interface_get_by_address') def test_get_vif_by_mac_address(self, mock_get_by_address, mock_get_by_id): mock_get_by_address.return_value = dict( test_virtual_interface.fake_vif, network_id=123) mock_get_by_id.return_value = objects.Network( uuid=mock.sentinel.network_uuid) vif = self.network_api.get_vif_by_mac_address(self.context, mock.sentinel.mac) self.assertEqual(123, vif.network_id) self.assertEqual(str(mock.sentinel.network_uuid), vif.net_uuid) mock_get_by_address.assert_called_once_with(self.context, mock.sentinel.mac) mock_get_by_id.assert_called_once_with(self.context, 123, project_only='allow_none') def test_allocate_for_instance_handles_macs_passed(self): # If a macs argument is supplied to the 'nova-network' API, it is just # ignored. This test checks that the call down to the rpcapi layer # doesn't pass macs down: nova-network doesn't support hypervisor # mac address limits (today anyhow). 
macs = set(['ab:cd:ef:01:23:34']) self.mox.StubOutWithMock( self.network_api.network_rpcapi, "allocate_for_instance") kwargs = dict(zip(['host', 'instance_id', 'project_id', 'requested_networks', 'rxtx_factor', 'vpn', 'macs', 'dhcp_options'], itertools.repeat(mox.IgnoreArg()))) self.network_api.network_rpcapi.allocate_for_instance( mox.IgnoreArg(), **kwargs).AndReturn([]) self.mox.ReplayAll() flavor = flavors.get_default_flavor() flavor['rxtx_factor'] = 0 instance = objects.Instance(id=1, uuid='uuid', project_id='project_id', host='host', system_metadata={}, flavor=flavor) self.network_api.allocate_for_instance( self.context, instance, 'vpn', 'requested_networks', macs=macs) def _do_test_associate_floating_ip(self, orig_instance_uuid): """Test post-association logic.""" new_instance = objects.Instance(uuid=FAKE_UUID) def fake_associate(*args, **kwargs): return orig_instance_uuid self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip', fake_associate) def fake_instance_get_by_uuid(context, instance_uuid, columns_to_join=None, use_slave=None): if instance_uuid == orig_instance_uuid: self.assertIn('extra.flavor', columns_to_join) return fake_instance.fake_db_instance(uuid=instance_uuid) self.stubs.Set(self.network_api.db, 'instance_get_by_uuid', fake_instance_get_by_uuid) def fake_get_nw_info(ctxt, instance): class FakeNWInfo(object): def json(self): pass return FakeNWInfo() self.stubs.Set(self.network_api, '_get_instance_nw_info', fake_get_nw_info) if orig_instance_uuid: expected_updated_instances = [new_instance.uuid, orig_instance_uuid] else: expected_updated_instances = [new_instance.uuid] def fake_instance_info_cache_update(context, instance_uuid, cache): self.assertEqual(instance_uuid, expected_updated_instances.pop()) return fake_info_cache self.stubs.Set(self.network_api.db, 'instance_info_cache_update', fake_instance_info_cache_update) def fake_update_instance_cache_with_nw_info(api, context, instance, nw_info=None, update_cells=True): return 
self.stubs.Set(base_api, "update_instance_cache_with_nw_info", fake_update_instance_cache_with_nw_info) self.network_api.associate_floating_ip(self.context, new_instance, '172.24.4.225', '10.0.0.2') def test_associate_preassociated_floating_ip(self): self._do_test_associate_floating_ip('orig-uuid') def test_associate_unassociated_floating_ip(self): self._do_test_associate_floating_ip(None) def test_get_floating_ip_invalid_id(self): self.assertRaises(exception.InvalidID, self.network_api.get_floating_ip, self.context, '123zzz') @mock.patch('nova.objects.FloatingIP.get_by_id') def test_get_floating_ip(self, mock_get): floating = mock.sentinel.floating mock_get.return_value = floating self.assertEqual(floating, self.network_api.get_floating_ip(self.context, 123)) mock_get.assert_called_once_with(self.context, 123) @mock.patch('nova.objects.FloatingIP.get_pool_names') def test_get_floating_ip_pools(self, mock_get): pools = ['foo', 'bar'] mock_get.return_value = pools self.assertEqual(pools, self.network_api.get_floating_ip_pools( self.context)) @mock.patch('nova.objects.FloatingIP.get_by_address') def test_get_floating_ip_by_address(self, mock_get): floating = mock.sentinel.floating mock_get.return_value = floating self.assertEqual(floating, self.network_api.get_floating_ip_by_address( self.context, mock.sentinel.address)) mock_get.assert_called_once_with(self.context, mock.sentinel.address) @mock.patch('nova.objects.FloatingIPList.get_by_project') def test_get_floating_ips_by_project(self, mock_get): floatings = mock.sentinel.floating_ips mock_get.return_value = floatings self.assertEqual(floatings, self.network_api.get_floating_ips_by_project( self.context)) mock_get.assert_called_once_with(self.context, self.context.project_id) def _stub_migrate_instance_calls(self, method, multi_host, info): fake_flavor = flavors.get_default_flavor() fake_flavor['rxtx_factor'] = 1.21 fake_instance = objects.Instance( uuid=uuid.uuid4().hex, project_id='fake_project_id', 
instance_type_id=fake_flavor['id'], flavor=fake_flavor, system_metadata={}) fake_migration = {'source_compute': 'fake_compute_source', 'dest_compute': 'fake_compute_dest'} def fake_mig_inst_method(*args, **kwargs): info['kwargs'] = kwargs def fake_get_multi_addresses(*args, **kwargs): return multi_host, ['fake_float1', 'fake_float2'] self.stubs.Set(network_rpcapi.NetworkAPI, method, fake_mig_inst_method) self.stubs.Set(self.network_api, '_get_multi_addresses', fake_get_multi_addresses) expected = {'instance_uuid': fake_instance.uuid, 'source_compute': 'fake_compute_source', 'dest_compute': 'fake_compute_dest', 'rxtx_factor': 1.21, 'project_id': 'fake_project_id', 'floating_addresses': None} if multi_host: expected['floating_addresses'] = ['fake_float1', 'fake_float2'] return fake_instance, fake_migration, expected def test_migrate_instance_start_with_multhost(self): info = {'kwargs': {}} arg1, arg2, expected = self._stub_migrate_instance_calls( 'migrate_instance_start', True, info) expected['host'] = 'fake_compute_source' self.network_api.migrate_instance_start(self.context, arg1, arg2) self.assertEqual(info['kwargs'], expected) def test_migrate_instance_start_without_multhost(self): info = {'kwargs': {}} arg1, arg2, expected = self._stub_migrate_instance_calls( 'migrate_instance_start', False, info) self.network_api.migrate_instance_start(self.context, arg1, arg2) self.assertEqual(info['kwargs'], expected) def test_migrate_instance_finish_with_multhost(self): info = {'kwargs': {}} arg1, arg2, expected = self._stub_migrate_instance_calls( 'migrate_instance_finish', True, info) expected['host'] = 'fake_compute_dest' self.network_api.migrate_instance_finish(self.context, arg1, arg2) self.assertEqual(info['kwargs'], expected) def test_migrate_instance_finish_without_multhost(self): info = {'kwargs': {}} arg1, arg2, expected = self._stub_migrate_instance_calls( 'migrate_instance_finish', False, info) self.network_api.migrate_instance_finish(self.context, arg1, arg2) 
self.assertEqual(info['kwargs'], expected) def test_is_multi_host_instance_has_no_fixed_ip(self): def fake_fixed_ip_get_by_instance(ctxt, uuid): raise exception.FixedIpNotFoundForInstance(instance_uuid=uuid) self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance', fake_fixed_ip_get_by_instance) instance = objects.Instance(uuid=FAKE_UUID) result, floats = self.network_api._get_multi_addresses(self.context, instance) self.assertFalse(result) @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid') def _test_is_multi_host_network_has_no_project_id(self, is_multi_host, fip_get): network = objects.Network( id=123, project_id=None, multi_host=is_multi_host) fip_get.return_value = [ objects.FixedIP(instance_uuid=FAKE_UUID, network=network, floating_ips=objects.FloatingIPList())] instance = objects.Instance(uuid=FAKE_UUID) result, floats = self.network_api._get_multi_addresses(self.context, instance) self.assertEqual(is_multi_host, result) def test_is_multi_host_network_has_no_project_id_multi(self): self._test_is_multi_host_network_has_no_project_id(True) def test_is_multi_host_network_has_no_project_id_non_multi(self): self._test_is_multi_host_network_has_no_project_id(False) @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid') def _test_is_multi_host_network_has_project_id(self, is_multi_host, fip_get): network = objects.Network( id=123, project_id=self.context.project_id, multi_host=is_multi_host) fip_get.return_value = [ objects.FixedIP(instance_uuid=FAKE_UUID, network=network, floating_ips=objects.FloatingIPList())] instance = objects.Instance(uuid=FAKE_UUID) result, floats = self.network_api._get_multi_addresses(self.context, instance) self.assertEqual(is_multi_host, result) def test_is_multi_host_network_has_project_id_multi(self): self._test_is_multi_host_network_has_project_id(True) def test_is_multi_host_network_has_project_id_non_multi(self): self._test_is_multi_host_network_has_project_id(False) 
@mock.patch('nova.objects.Network.get_by_uuid') @mock.patch('nova.objects.Network.disassociate') def test_network_disassociate_project(self, mock_disassociate, mock_get): net_obj = objects.Network(context=self.context, id=1) mock_get.return_value = net_obj self.network_api.associate(self.context, FAKE_UUID, project=None) mock_disassociate.assert_called_once_with(self.context, net_obj.id, host=False, project=True) @mock.patch('nova.objects.Network.get_by_uuid') @mock.patch('nova.objects.Network.disassociate') def test_network_disassociate_host(self, mock_disassociate, mock_get): net_obj = objects.Network(context=self.context, id=1) mock_get.return_value = net_obj self.network_api.associate(self.context, FAKE_UUID, host=None) mock_disassociate.assert_called_once_with(self.context, net_obj.id, host=True, project=False) @mock.patch('nova.objects.Network.get_by_uuid') @mock.patch('nova.objects.Network.associate') def test_network_associate_project(self, mock_associate, mock_get): net_obj = objects.Network(context=self.context, id=1) mock_get.return_value = net_obj project = mock.sentinel.project self.network_api.associate(self.context, FAKE_UUID, project=project) mock_associate.assert_called_once_with(self.context, project, network_id=net_obj.id, force=True) @mock.patch('nova.objects.Network.get_by_uuid') @mock.patch('nova.objects.Network.save') def test_network_associate_host(self, mock_save, mock_get): net_obj = objects.Network(context=self.context, id=1) mock_get.return_value = net_obj host = str(mock.sentinel.host) self.network_api.associate(self.context, FAKE_UUID, host=host) mock_save.assert_called_once_with() self.assertEqual(host, net_obj.host) @mock.patch('nova.objects.Network.get_by_uuid') @mock.patch('nova.objects.Network.disassociate') def test_network_disassociate(self, mock_disassociate, mock_get): mock_get.return_value = objects.Network(context=self.context, id=123) self.network_api.disassociate(self.context, FAKE_UUID) 
mock_disassociate.assert_called_once_with(self.context, 123, project=True, host=True) def _test_refresh_cache(self, method, *args, **kwargs): # This test verifies that no call to get_instance_nw_info() is made # from the @refresh_cache decorator for the tested method. with test.nested( mock.patch.object(self.network_api.network_rpcapi, method), mock.patch.object(self.network_api.network_rpcapi, 'get_instance_nw_info'), mock.patch.object(network_model.NetworkInfo, 'hydrate'), mock.patch.object(objects.InstanceInfoCache, 'save'), ) as ( method_mock, nwinfo_mock, hydrate_mock, save_mock ): nw_info = network_model.NetworkInfo([]) method_mock.return_value = nw_info hydrate_mock.return_value = nw_info getattr(self.network_api, method)(*args, **kwargs) hydrate_mock.assert_called_once_with(nw_info) self.assertFalse(nwinfo_mock.called) def test_allocate_for_instance_refresh_cache(self): instance = fake_instance.fake_instance_obj(self.context) vpn = 'fake-vpn' requested_networks = 'fake-networks' self._test_refresh_cache('allocate_for_instance', self.context, instance, vpn, requested_networks) def test_add_fixed_ip_to_instance_refresh_cache(self): instance = fake_instance.fake_instance_obj(self.context) network_id = 'fake-network-id' self._test_refresh_cache('add_fixed_ip_to_instance', self.context, instance, network_id) def test_remove_fixed_ip_from_instance_refresh_cache(self): instance = fake_instance.fake_instance_obj(self.context) address = 'fake-address' self._test_refresh_cache('remove_fixed_ip_from_instance', self.context, instance, address) @mock.patch('nova.db.fixed_ip_get_by_address') def test_get_fixed_ip_by_address(self, fip_get): fip_get.return_value = test_fixed_ip.fake_fixed_ip fip = self.network_api.get_fixed_ip_by_address(self.context, 'fake-addr') self.assertIsInstance(fip, objects.FixedIP) @mock.patch('nova.objects.FixedIP.get_by_id') def test_get_fixed_ip(self, mock_get_by_id): mock_get_by_id.return_value = mock.sentinel.fixed_ip 
self.assertEqual(mock.sentinel.fixed_ip, self.network_api.get_fixed_ip(self.context, mock.sentinel.id)) mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id) @mock.patch('nova.objects.FixedIP.get_by_floating_address') def test_get_instance_by_floating_address(self, mock_get_by_floating): mock_get_by_floating.return_value = objects.FixedIP( instance_uuid = mock.sentinel.instance_uuid) self.assertEqual(str(mock.sentinel.instance_uuid), self.network_api.get_instance_id_by_floating_address( self.context, mock.sentinel.floating)) mock_get_by_floating.assert_called_once_with(self.context, mock.sentinel.floating) @mock.patch('nova.objects.FixedIP.get_by_floating_address') def test_get_instance_by_floating_address_none(self, mock_get_by_floating): mock_get_by_floating.return_value = None self.assertIsNone( self.network_api.get_instance_id_by_floating_address( self.context, mock.sentinel.floating)) mock_get_by_floating.assert_called_once_with(self.context, mock.sentinel.floating) @mock.patch('nova.network.api.API.migrate_instance_start') def test_cleanup_instance_network_on_host(self, fake_migrate_start): instance = fake_instance.fake_instance_obj(self.context) self.network_api.cleanup_instance_network_on_host( self.context, instance, 'fake_compute_source') fake_migrate_start.assert_called_once_with( self.context, instance, {'source_compute': 'fake_compute_source', 'dest_compute': None}) @mock.patch('nova.network.api.API.migrate_instance_finish') def test_setup_instance_network_on_host(self, fake_migrate_finish): instance = fake_instance.fake_instance_obj(self.context) self.network_api.setup_instance_network_on_host( self.context, instance, 'fake_compute_source') fake_migrate_finish.assert_called_once_with( self.context, instance, {'source_compute': None, 'dest_compute': 'fake_compute_source'}) @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch.object(api.API, '_get_instance_nw_info') 
@mock.patch('nova.network.base_api.update_instance_cache_with_nw_info') def test_get_instance_nw_info(self, mock_update, mock_get, mock_lock): fake_result = mock.sentinel.get_nw_info_result mock_get.return_value = fake_result instance = fake_instance.fake_instance_obj(self.context) result = self.network_api.get_instance_nw_info(self.context, instance) mock_get.assert_called_once_with(self.context, instance) mock_update.assert_called_once_with(self.network_api, self.context, instance, nw_info=fake_result, update_cells=False) self.assertEqual(fake_result, result) @mock.patch('nova.network.api.API') @mock.patch('nova.db.instance_info_cache_update', return_value=fake_info_cache) class TestUpdateInstanceCache(test.NoDBTestCase): def setUp(self): super(TestUpdateInstanceCache, self).setUp() self.context = context.get_admin_context() self.instance = objects.Instance(uuid=FAKE_UUID) vifs = [network_model.VIF(id='super_vif')] self.nw_info = network_model.NetworkInfo(vifs) self.nw_json = fields.NetworkModel.to_primitive(self, 'network_info', self.nw_info) def test_update_nw_info_none(self, db_mock, api_mock): api_mock._get_instance_nw_info.return_value = self.nw_info base_api.update_instance_cache_with_nw_info(api_mock, self.context, self.instance, None) api_mock._get_instance_nw_info.assert_called_once_with(self.context, self.instance) db_mock.assert_called_once_with(self.context, self.instance.uuid, {'network_info': self.nw_json}) def test_update_nw_info_one_network(self, db_mock, api_mock): api_mock._get_instance_nw_info.return_value = self.nw_info base_api.update_instance_cache_with_nw_info(api_mock, self.context, self.instance, self.nw_info) self.assertFalse(api_mock._get_instance_nw_info.called) db_mock.assert_called_once_with(self.context, self.instance.uuid, {'network_info': self.nw_json}) def test_update_nw_info_empty_list(self, db_mock, api_mock): api_mock._get_instance_nw_info.return_value = self.nw_info base_api.update_instance_cache_with_nw_info(api_mock, 
self.context, self.instance, network_model.NetworkInfo([])) self.assertFalse(api_mock._get_instance_nw_info.called) db_mock.assert_called_once_with(self.context, self.instance.uuid, {'network_info': '[]'}) def test_decorator_return_object(self, db_mock, api_mock): @base_api.refresh_cache def func(self, context, instance): return network_model.NetworkInfo([]) func(api_mock, self.context, self.instance) self.assertFalse(api_mock._get_instance_nw_info.called) db_mock.assert_called_once_with(self.context, self.instance.uuid, {'network_info': '[]'}) def test_decorator_return_none(self, db_mock, api_mock): @base_api.refresh_cache def func(self, context, instance): pass api_mock._get_instance_nw_info.return_value = self.nw_info func(api_mock, self.context, self.instance) api_mock._get_instance_nw_info.assert_called_once_with(self.context, self.instance) db_mock.assert_called_once_with(self.context, self.instance.uuid, {'network_info': self.nw_json}) class NetworkHooksTestCase(test.BaseHookTestCase): def test_instance_network_info_hook(self): info_func = base_api.update_instance_cache_with_nw_info self.assert_has_hook('instance_network_info', info_func) nova-13.0.0/nova/tests/unit/network/test_network_info.py0000664000567000056710000012506712701407773024614 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from nova import exception from nova.network import model from nova import test from nova.tests.unit import fake_network_cache_model from nova.virt import netutils class RouteTests(test.NoDBTestCase): def test_create_route_with_attrs(self): route = fake_network_cache_model.new_route() fake_network_cache_model.new_ip(dict(address='192.168.1.1')) self.assertEqual('0.0.0.0/24', route['cidr']) self.assertEqual('192.168.1.1', route['gateway']['address']) self.assertEqual('eth0', route['interface']) def test_routes_equal(self): route1 = model.Route() route2 = model.Route() self.assertEqual(route1, route2) def test_routes_not_equal(self): route1 = model.Route(cidr='1.1.1.0/24') route2 = model.Route(cidr='2.2.2.0/24') self.assertNotEqual(route1, route2) route1 = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.1') route2 = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.2') self.assertNotEqual(route1, route2) route1 = model.Route(cidr='1.1.1.1/24', interface='tap0') route2 = model.Route(cidr='1.1.1.1/24', interface='tap1') self.assertNotEqual(route1, route2) def test_hydrate(self): route = model.Route.hydrate( {'gateway': fake_network_cache_model.new_ip( dict(address='192.168.1.1'))}) self.assertIsNone(route['cidr']) self.assertEqual('192.168.1.1', route['gateway']['address']) self.assertIsNone(route['interface']) class IPTests(test.NoDBTestCase): def test_ip_equal(self): ip1 = model.IP(address='127.0.0.1') ip2 = model.IP(address='127.0.0.1') self.assertEqual(ip1, ip2) def test_ip_not_equal(self): ip1 = model.IP(address='127.0.0.1') ip2 = model.IP(address='172.0.0.3') self.assertNotEqual(ip1, ip2) ip1 = model.IP(address='127.0.0.1', type=1) ip2 = model.IP(address='172.0.0.1', type=2) self.assertNotEqual(ip1, ip2) ip1 = model.IP(address='127.0.0.1', version=4) ip2 = model.IP(address='172.0.0.1', version=6) self.assertNotEqual(ip1, ip2) class FixedIPTests(test.NoDBTestCase): def test_createnew_fixed_ip_with_attrs(self): fixed_ip = 
model.FixedIP(address='192.168.1.100') self.assertEqual('192.168.1.100', fixed_ip['address']) self.assertEqual([], fixed_ip['floating_ips']) self.assertEqual('fixed', fixed_ip['type']) self.assertEqual(4, fixed_ip['version']) def test_create_fixed_ipv6(self): fixed_ip = model.FixedIP(address='::1') self.assertEqual('::1', fixed_ip['address']) self.assertEqual([], fixed_ip['floating_ips']) self.assertEqual('fixed', fixed_ip['type']) self.assertEqual(6, fixed_ip['version']) def test_create_fixed_bad_ip_fails(self): self.assertRaises(exception.InvalidIpAddressError, model.FixedIP, address='picklespicklespickles') def test_equate_two_fixed_ips(self): fixed_ip = model.FixedIP(address='::1') fixed_ip2 = model.FixedIP(address='::1') self.assertEqual(fixed_ip, fixed_ip2) def test_equate_two_dissimilar_fixed_ips_fails(self): fixed_ip = model.FixedIP(address='::1') fixed_ip2 = model.FixedIP(address='::2') self.assertNotEqual(fixed_ip, fixed_ip2) fixed_ip = model.FixedIP(address='::1', type='1') fixed_ip2 = model.FixedIP(address='::1', type='2') self.assertNotEqual(fixed_ip, fixed_ip2) fixed_ip = model.FixedIP(address='::1', version='6') fixed_ip2 = model.FixedIP(address='::1', version='4') self.assertNotEqual(fixed_ip, fixed_ip2) fixed_ip = model.FixedIP(address='::1', floating_ips='1.1.1.1') fixed_ip2 = model.FixedIP(address='::1', floating_ips='8.8.8.8') self.assertNotEqual(fixed_ip, fixed_ip2) def test_hydrate(self): fixed_ip = model.FixedIP.hydrate({}) self.assertEqual([], fixed_ip['floating_ips']) self.assertIsNone(fixed_ip['address']) self.assertEqual('fixed', fixed_ip['type']) self.assertIsNone(fixed_ip['version']) def test_add_floating_ip(self): fixed_ip = model.FixedIP(address='192.168.1.100') fixed_ip.add_floating_ip('192.168.1.101') self.assertEqual(['192.168.1.101'], fixed_ip['floating_ips']) def test_add_floating_ip_repeatedly_only_one_instance(self): fixed_ip = model.FixedIP(address='192.168.1.100') for i in range(10): fixed_ip.add_floating_ip('192.168.1.101') 
self.assertEqual(['192.168.1.101'], fixed_ip['floating_ips']) class SubnetTests(test.NoDBTestCase): def test_create_subnet_with_attrs(self): subnet = fake_network_cache_model.new_subnet() route1 = fake_network_cache_model.new_route() self.assertEqual('10.10.0.0/24', subnet['cidr']) self.assertEqual( [fake_network_cache_model.new_ip(dict(address='1.2.3.4')), fake_network_cache_model.new_ip(dict(address='2.3.4.5'))], subnet['dns']) self.assertEqual('10.10.0.1', subnet['gateway']['address']) self.assertEqual( [fake_network_cache_model.new_fixed_ip( dict(address='10.10.0.2')), fake_network_cache_model.new_fixed_ip( dict(address='10.10.0.3'))], subnet['ips']) self.assertEqual([route1], subnet['routes']) self.assertEqual(4, subnet['version']) def test_subnet_equal(self): subnet1 = fake_network_cache_model.new_subnet() subnet2 = fake_network_cache_model.new_subnet() self.assertEqual(subnet1, subnet2) def test_subnet_not_equal(self): subnet1 = model.Subnet(cidr='1.1.1.0/24') subnet2 = model.Subnet(cidr='2.2.2.0/24') self.assertNotEqual(subnet1, subnet2) subnet1 = model.Subnet(dns='1.1.1.0/24') subnet2 = model.Subnet(dns='2.2.2.0/24') self.assertNotEqual(subnet1, subnet2) subnet1 = model.Subnet(gateway='1.1.1.1/24') subnet2 = model.Subnet(gateway='2.2.2.1/24') self.assertNotEqual(subnet1, subnet2) subnet1 = model.Subnet(ips='1.1.1.0/24') subnet2 = model.Subnet(ips='2.2.2.0/24') self.assertNotEqual(subnet1, subnet2) subnet1 = model.Subnet(routes='1.1.1.0/24') subnet2 = model.Subnet(routes='2.2.2.0/24') self.assertNotEqual(subnet1, subnet2) subnet1 = model.Subnet(version='4') subnet2 = model.Subnet(version='6') self.assertNotEqual(subnet1, subnet2) def test_add_route(self): subnet = fake_network_cache_model.new_subnet() route1 = fake_network_cache_model.new_route() route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'}) subnet.add_route(route2) self.assertEqual([route1, route2], subnet['routes']) def test_add_route_a_lot(self): subnet = 
fake_network_cache_model.new_subnet() route1 = fake_network_cache_model.new_route() route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'}) for i in range(10): subnet.add_route(route2) self.assertEqual([route1, route2], subnet['routes']) def test_add_dns(self): subnet = fake_network_cache_model.new_subnet() dns = fake_network_cache_model.new_ip(dict(address='9.9.9.9')) subnet.add_dns(dns) self.assertEqual( [fake_network_cache_model.new_ip(dict(address='1.2.3.4')), fake_network_cache_model.new_ip(dict(address='2.3.4.5')), fake_network_cache_model.new_ip(dict(address='9.9.9.9'))], subnet['dns']) def test_add_dns_a_lot(self): subnet = fake_network_cache_model.new_subnet() for i in range(10): subnet.add_dns(fake_network_cache_model.new_ip( dict(address='9.9.9.9'))) self.assertEqual( [fake_network_cache_model.new_ip(dict(address='1.2.3.4')), fake_network_cache_model.new_ip(dict(address='2.3.4.5')), fake_network_cache_model.new_ip(dict(address='9.9.9.9'))], subnet['dns']) def test_add_ip(self): subnet = fake_network_cache_model.new_subnet() subnet.add_ip(fake_network_cache_model.new_ip( dict(address='192.168.1.102'))) self.assertEqual( [fake_network_cache_model.new_fixed_ip( dict(address='10.10.0.2')), fake_network_cache_model.new_fixed_ip( dict(address='10.10.0.3')), fake_network_cache_model.new_ip( dict(address='192.168.1.102'))], subnet['ips']) def test_add_ip_a_lot(self): subnet = fake_network_cache_model.new_subnet() for i in range(10): subnet.add_ip(fake_network_cache_model.new_fixed_ip( dict(address='192.168.1.102'))) self.assertEqual( [fake_network_cache_model.new_fixed_ip( dict(address='10.10.0.2')), fake_network_cache_model.new_fixed_ip( dict(address='10.10.0.3')), fake_network_cache_model.new_fixed_ip( dict(address='192.168.1.102'))], subnet['ips']) def test_hydrate(self): subnet_dict = { 'cidr': '255.255.255.0', 'dns': [fake_network_cache_model.new_ip(dict(address='1.1.1.1'))], 'ips': [fake_network_cache_model.new_fixed_ip( 
dict(address='2.2.2.2'))], 'routes': [fake_network_cache_model.new_route()], 'version': 4, 'gateway': fake_network_cache_model.new_ip( dict(address='3.3.3.3'))} subnet = model.Subnet.hydrate(subnet_dict) self.assertEqual('255.255.255.0', subnet['cidr']) self.assertEqual([fake_network_cache_model.new_ip( dict(address='1.1.1.1'))], subnet['dns']) self.assertEqual('3.3.3.3', subnet['gateway']['address']) self.assertEqual([fake_network_cache_model.new_fixed_ip( dict(address='2.2.2.2'))], subnet['ips']) self.assertEqual([fake_network_cache_model.new_route()], subnet['routes']) self.assertEqual(4, subnet['version']) class NetworkTests(test.NoDBTestCase): def test_create_network(self): network = fake_network_cache_model.new_network() self.assertEqual(1, network['id']) self.assertEqual('br0', network['bridge']) self.assertEqual('public', network['label']) self.assertEqual( [fake_network_cache_model.new_subnet(), fake_network_cache_model.new_subnet( dict(cidr='255.255.255.255'))], network['subnets']) def test_add_subnet(self): network = fake_network_cache_model.new_network() network.add_subnet(fake_network_cache_model.new_subnet( dict(cidr='0.0.0.0'))) self.assertEqual( [fake_network_cache_model.new_subnet(), fake_network_cache_model.new_subnet( dict(cidr='255.255.255.255')), fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))], network['subnets']) def test_add_subnet_a_lot(self): network = fake_network_cache_model.new_network() for i in range(10): network.add_subnet(fake_network_cache_model.new_subnet( dict(cidr='0.0.0.0'))) self.assertEqual( [fake_network_cache_model.new_subnet(), fake_network_cache_model.new_subnet( dict(cidr='255.255.255.255')), fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))], network['subnets']) def test_network_equal(self): network1 = model.Network() network2 = model.Network() self.assertEqual(network1, network2) def test_network_not_equal(self): network1 = model.Network(id='1') network2 = model.Network(id='2') 
self.assertNotEqual(network1, network2) network1 = model.Network(bridge='br-int') network2 = model.Network(bridge='br0') self.assertNotEqual(network1, network2) network1 = model.Network(label='net1') network2 = model.Network(label='net2') self.assertNotEqual(network1, network2) network1 = model.Network(subnets='1.1.1.0/24') network2 = model.Network(subnets='2.2.2.0/24') self.assertNotEqual(network1, network2) def test_hydrate(self): fake_network_cache_model.new_subnet() fake_network_cache_model.new_subnet(dict(cidr='255.255.255.255')) network = model.Network.hydrate(fake_network_cache_model.new_network()) self.assertEqual(1, network['id']) self.assertEqual('br0', network['bridge']) self.assertEqual('public', network['label']) self.assertEqual( [fake_network_cache_model.new_subnet(), fake_network_cache_model.new_subnet( dict(cidr='255.255.255.255'))], network['subnets']) class VIFTests(test.NoDBTestCase): def test_create_vif(self): vif = fake_network_cache_model.new_vif() self.assertEqual(1, vif['id']) self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address']) self.assertEqual(fake_network_cache_model.new_network(), vif['network']) def test_vif_equal(self): vif1 = model.VIF() vif2 = model.VIF() self.assertEqual(vif1, vif2) def test_vif_not_equal(self): vif1 = model.VIF(id=1) vif2 = model.VIF(id=2) self.assertNotEqual(vif1, vif2) vif1 = model.VIF(address='00:00:00:00:00:11') vif2 = model.VIF(address='00:00:00:00:00:22') self.assertNotEqual(vif1, vif2) vif1 = model.VIF(network='net1') vif2 = model.VIF(network='net2') self.assertNotEqual(vif1, vif2) vif1 = model.VIF(type='ovs') vif2 = model.VIF(type='linuxbridge') self.assertNotEqual(vif1, vif2) vif1 = model.VIF(devname='ovs1234') vif2 = model.VIF(devname='linuxbridge1234') self.assertNotEqual(vif1, vif2) vif1 = model.VIF(qbh_params=1) vif2 = model.VIF(qbh_params=None) self.assertNotEqual(vif1, vif2) vif1 = model.VIF(qbg_params=1) vif2 = model.VIF(qbg_params=None) self.assertNotEqual(vif1, vif2) vif1 = 
model.VIF(active=True) vif2 = model.VIF(active=False) self.assertNotEqual(vif1, vif2) vif1 = model.VIF(vnic_type=model.VNIC_TYPE_NORMAL) vif2 = model.VIF(vnic_type=model.VNIC_TYPE_DIRECT) self.assertNotEqual(vif1, vif2) vif1 = model.VIF(profile={'pci_slot': '0000:0a:00.1'}) vif2 = model.VIF(profile={'pci_slot': '0000:0a:00.2'}) self.assertNotEqual(vif1, vif2) vif1 = model.VIF(preserve_on_delete=True) vif2 = model.VIF(preserve_on_delete=False) self.assertNotEqual(vif1, vif2) def test_create_vif_with_type(self): vif_dict = dict( id=1, address='aa:aa:aa:aa:aa:aa', network=fake_network_cache_model.new_network(), type='bridge') vif = fake_network_cache_model.new_vif(vif_dict) self.assertEqual(1, vif['id']) self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address']) self.assertEqual('bridge', vif['type']) self.assertEqual(fake_network_cache_model.new_network(), vif['network']) def test_vif_get_fixed_ips(self): vif = fake_network_cache_model.new_vif() fixed_ips = vif.fixed_ips() ips = [ fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.2')), fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.3')) ] * 2 self.assertEqual(fixed_ips, ips) def test_vif_get_floating_ips(self): vif = fake_network_cache_model.new_vif() vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1') floating_ips = vif.floating_ips() self.assertEqual(['192.168.1.1'], floating_ips) def test_vif_get_labeled_ips(self): vif = fake_network_cache_model.new_vif() labeled_ips = vif.labeled_ips() ip_dict = { 'network_id': 1, 'ips': [fake_network_cache_model.new_ip( {'address': '10.10.0.2', 'type': 'fixed'}), fake_network_cache_model.new_ip( {'address': '10.10.0.3', 'type': 'fixed'})] * 2, 'network_label': 'public'} self.assertEqual(ip_dict, labeled_ips) def test_hydrate(self): fake_network_cache_model.new_network() vif = model.VIF.hydrate(fake_network_cache_model.new_vif()) self.assertEqual(1, vif['id']) self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address']) 
self.assertEqual(fake_network_cache_model.new_network(), vif['network']) def test_hydrate_vif_with_type(self): vif_dict = dict( id=1, address='aa:aa:aa:aa:aa:aa', network=fake_network_cache_model.new_network(), type='bridge') vif = model.VIF.hydrate(fake_network_cache_model.new_vif(vif_dict)) self.assertEqual(1, vif['id']) self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address']) self.assertEqual('bridge', vif['type']) self.assertEqual(fake_network_cache_model.new_network(), vif['network']) class NetworkInfoTests(test.NoDBTestCase): def test_create_model(self): ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(), fake_network_cache_model.new_vif( {'address': 'bb:bb:bb:bb:bb:bb'})]) self.assertEqual( [fake_network_cache_model.new_fixed_ip( {'address': '10.10.0.2'}), fake_network_cache_model.new_fixed_ip( {'address': '10.10.0.3'})] * 4, ninfo.fixed_ips()) def test_create_async_model(self): def async_wrapper(): return model.NetworkInfo( [fake_network_cache_model.new_vif(), fake_network_cache_model.new_vif( {'address': 'bb:bb:bb:bb:bb:bb'})]) ninfo = model.NetworkInfoAsyncWrapper(async_wrapper) self.assertEqual( [fake_network_cache_model.new_fixed_ip( {'address': '10.10.0.2'}), fake_network_cache_model.new_fixed_ip( {'address': '10.10.0.3'})] * 4, ninfo.fixed_ips()) def test_create_async_model_exceptions(self): def async_wrapper(): raise test.TestingException() ninfo = model.NetworkInfoAsyncWrapper(async_wrapper) self.assertRaises(test.TestingException, ninfo.wait) # 2nd one doesn't raise self.assertIsNone(ninfo.wait()) # Test that do_raise=False works on .wait() ninfo = model.NetworkInfoAsyncWrapper(async_wrapper) self.assertIsNone(ninfo.wait(do_raise=False)) # Test we also raise calling a method ninfo = model.NetworkInfoAsyncWrapper(async_wrapper) self.assertRaises(test.TestingException, ninfo.fixed_ips) def test_get_floating_ips(self): vif = fake_network_cache_model.new_vif() vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1') ninfo = 
model.NetworkInfo([vif, fake_network_cache_model.new_vif( {'address': 'bb:bb:bb:bb:bb:bb'})]) self.assertEqual(['192.168.1.1'], ninfo.floating_ips()) def test_hydrate(self): ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(), fake_network_cache_model.new_vif( {'address': 'bb:bb:bb:bb:bb:bb'})]) model.NetworkInfo.hydrate(ninfo) self.assertEqual( [fake_network_cache_model.new_fixed_ip( {'address': '10.10.0.2'}), fake_network_cache_model.new_fixed_ip( {'address': '10.10.0.3'})] * 4, ninfo.fixed_ips()) def _setup_injected_network_scenario(self, should_inject=True, use_ipv4=True, use_ipv6=False, gateway=True, dns=True, two_interfaces=False, libvirt_virt_type=None): """Check that netutils properly decides whether to inject based on whether the supplied subnet is static or dynamic. """ network = fake_network_cache_model.new_network({'subnets': []}) subnet_dict = {} if not gateway: subnet_dict['gateway'] = None if not dns: subnet_dict['dns'] = None if not should_inject: subnet_dict['dhcp_server'] = '10.10.0.1' if use_ipv4: network.add_subnet( fake_network_cache_model.new_subnet(subnet_dict)) if should_inject and use_ipv6: gateway_ip = fake_network_cache_model.new_ip(dict( address='1234:567::1')) ip = fake_network_cache_model.new_ip(dict( address='1234:567::2')) ipv6_subnet_dict = dict( cidr='1234:567::/48', gateway=gateway_ip, dns=[fake_network_cache_model.new_ip( dict(address='2001:4860:4860::8888')), fake_network_cache_model.new_ip( dict(address='2001:4860:4860::8844'))], ips=[ip]) if not gateway: ipv6_subnet_dict['gateway'] = None network.add_subnet(fake_network_cache_model.new_subnet( ipv6_subnet_dict)) # Behave as though CONF.flat_injected is True network['meta']['injected'] = True vif = fake_network_cache_model.new_vif({'network': network}) vifs = [vif] if two_interfaces: vifs.append(vif) nwinfo = model.NetworkInfo(vifs) return netutils.get_injected_network_template( nwinfo, use_ipv6=use_ipv6, libvirt_virt_type=libvirt_virt_type) def 
test_injection_dynamic(self): expected = None template = self._setup_injected_network_scenario(should_inject=False) self.assertEqual(expected, template) def test_injection_static(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 """ template = self._setup_injected_network_scenario() self.assertEqual(expected, template) def test_injection_static_no_gateway(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 dns-nameservers 1.2.3.4 2.3.4.5 """ template = self._setup_injected_network_scenario(gateway=False) self.assertEqual(expected, template) def test_injection_static_no_dns(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). 
# The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 """ template = self._setup_injected_network_scenario(dns=False) self.assertEqual(expected, template) def test_injection_static_overriden_template(self): cfg.CONF.set_override( 'injected_network_template', 'nova/tests/unit/network/interfaces-override.template') expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 post-up ip route add 0.0.0.0/24 via 192.168.1.1 dev eth0 pre-down ip route del 0.0.0.0/24 via 192.168.1.1 dev eth0 """ template = self._setup_injected_network_scenario() self.assertEqual(expected, template) def test_injection_static_ipv6(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). 
# The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 iface eth0 inet6 static hwaddress ether aa:aa:aa:aa:aa:aa address 1234:567::2 netmask 48 gateway 1234:567::1 dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844 """ template = self._setup_injected_network_scenario(use_ipv6=True) self.assertEqual(expected, template) def test_injection_static_ipv6_no_gateway(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 dns-nameservers 1.2.3.4 2.3.4.5 iface eth0 inet6 static hwaddress ether aa:aa:aa:aa:aa:aa address 1234:567::2 netmask 48 dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844 """ template = self._setup_injected_network_scenario(use_ipv6=True, gateway=False) self.assertEqual(expected, template) def test_injection_static_with_ipv4_off(self): expected = None template = self._setup_injected_network_scenario(use_ipv4=False) self.assertEqual(expected, template) def test_injection_ipv6_two_interfaces(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). 
# The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 iface eth0 inet6 static hwaddress ether aa:aa:aa:aa:aa:aa address 1234:567::2 netmask 48 gateway 1234:567::1 dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844 auto eth1 iface eth1 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 iface eth1 inet6 static hwaddress ether aa:aa:aa:aa:aa:aa address 1234:567::2 netmask 48 gateway 1234:567::1 dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844 """ template = self._setup_injected_network_scenario(use_ipv6=True, two_interfaces=True) self.assertEqual(expected, template) def test_injection_ipv6_with_lxc(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). 
# The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 post-up ip -6 addr add 1234:567::2/48 dev ${IFACE} post-up ip -6 route add default via 1234:567::1 dev ${IFACE} auto eth1 iface eth1 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 post-up ip -6 addr add 1234:567::2/48 dev ${IFACE} post-up ip -6 route add default via 1234:567::1 dev ${IFACE} """ template = self._setup_injected_network_scenario( use_ipv6=True, two_interfaces=True, libvirt_virt_type='lxc') self.assertEqual(expected, template) def test_injection_ipv6_with_lxc_no_gateway(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). 
# The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 dns-nameservers 1.2.3.4 2.3.4.5 post-up ip -6 addr add 1234:567::2/48 dev ${IFACE} auto eth1 iface eth1 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 dns-nameservers 1.2.3.4 2.3.4.5 post-up ip -6 addr add 1234:567::2/48 dev ${IFACE} """ template = self._setup_injected_network_scenario( use_ipv6=True, gateway=False, two_interfaces=True, libvirt_virt_type='lxc') self.assertEqual(expected, template) class TestNetworkMetadata(test.NoDBTestCase): def setUp(self): super(TestNetworkMetadata, self).setUp() self.netinfo = model.NetworkInfo([fake_network_cache_model.new_vif( {'type': 'ethernet'})]) # Give this vif ipv4 and ipv6 dhcp subnets ipv4_subnet = fake_network_cache_model.new_subnet(version=4) ipv6_subnet = fake_network_cache_model.new_subnet(version=6) self.netinfo[0]['network']['subnets'][0] = ipv4_subnet self.netinfo[0]['network']['subnets'][1] = ipv6_subnet self.netinfo[0]['network']['mtu'] = 1500 def test_get_network_metadata_json(self): net_metadata = netutils.get_network_metadata(self.netinfo, use_ipv6=True) # Physical Ethernet self.assertEqual( { 'id': 'interface0', 'type': 'phy', 'ethernet_mac_address': 'aa:aa:aa:aa:aa:aa', 'vif_id': 1, 'mtu': 1500 }, net_metadata['links'][0]) # IPv4 Network self.assertEqual( { 'id': 'network0', 'link': 'interface0', 'type': 'ipv4', 'ip_address': '10.10.0.2', 'netmask': '255.255.255.0', 'routes': [ { 'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '10.10.0.1' }, { 'network': '0.0.0.0', 'netmask': '255.255.255.0', 'gateway': '192.168.1.1' } ], 'network_id': 1 }, net_metadata['networks'][0]) self.assertEqual( { 'id': 'network1', 'link': 'interface0', 'type': 'ipv6', 'ip_address': 'fd00::2', 'netmask': 'ffff:ffff:ffff::', 'routes': [ { 'network': '::', 'netmask': '::', 
'gateway': 'fd00::1' }, { 'network': '::', 'netmask': 'ffff:ffff:ffff::', 'gateway': 'fd00::1:1' } ], 'network_id': 1 }, net_metadata['networks'][1]) def test_get_network_metadata_json_dhcp(self): ipv4_subnet = fake_network_cache_model.new_subnet( subnet_dict=dict(dhcp_server='1.1.1.1'), version=4) ipv6_subnet = fake_network_cache_model.new_subnet( subnet_dict=dict(dhcp_server='1234:567::'), version=6) self.netinfo[0]['network']['subnets'][0] = ipv4_subnet self.netinfo[0]['network']['subnets'][1] = ipv6_subnet net_metadata = netutils.get_network_metadata(self.netinfo, use_ipv6=True) # IPv4 Network self.assertEqual( { 'id': 'network0', 'link': 'interface0', 'type': 'ipv4_dhcp', 'network_id': 1 }, net_metadata['networks'][0]) # IPv6 Network self.assertEqual( { 'id': 'network1', 'link': 'interface0', 'type': 'ipv6_dhcp', 'network_id': 1 }, net_metadata['networks'][1]) def test__get_nets(self): expected_net = { 'id': 'network0', 'ip_address': '10.10.0.2', 'link': 1, 'netmask': '255.255.255.0', 'network_id': 1, 'routes': [ { 'gateway': '10.10.0.1', 'netmask': '0.0.0.0', 'network': '0.0.0.0'}, { 'gateway': '192.168.1.1', 'netmask': '255.255.255.0', 'network': '0.0.0.0'}], 'type': 'ipv4' } net = netutils._get_nets( self.netinfo[0], self.netinfo[0]['network']['subnets'][0], 4, 0, 1) self.assertEqual(expected_net, net) def test__get_eth_link(self): expected_link = { 'id': 'interface0', 'vif_id': 1, 'type': 'vif', 'ethernet_mac_address': 'aa:aa:aa:aa:aa:aa', 'mtu': 1500 } self.netinfo[0]['type'] = 'vif' link = netutils._get_eth_link(self.netinfo[0], 0) self.assertEqual(expected_link, link) def test__get_eth_link_physical(self): expected_link = { 'id': 'interface1', 'vif_id': 1, 'type': 'phy', 'ethernet_mac_address': 'aa:aa:aa:aa:aa:aa', 'mtu': 1500 } link = netutils._get_eth_link(self.netinfo[0], 1) self.assertEqual(expected_link, link) def test__get_default_route(self): v4_expected = [{ 'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '10.10.0.1', }] v6_expected = [{ 
'network': '::', 'netmask': '::', 'gateway': 'fd00::1' }] v4 = netutils._get_default_route( 4, self.netinfo[0]['network']['subnets'][0]) self.assertEqual(v4_expected, v4) v6 = netutils._get_default_route( 6, self.netinfo[0]['network']['subnets'][1]) self.assertEqual(v6_expected, v6) # Test for no gateway self.netinfo[0]['network']['subnets'][0]['gateway'] = None no_route = netutils._get_default_route( 4, self.netinfo[0]['network']['subnets'][0]) self.assertEqual([], no_route) def test__get_dns_services(self): expected_dns = [ {'type': 'dns', 'address': '1.2.3.4'}, {'type': 'dns', 'address': '2.3.4.5'}, {'type': 'dns', 'address': '3.4.5.6'} ] subnet = fake_network_cache_model.new_subnet(version=4) subnet['dns'].append(fake_network_cache_model.new_ip( {'address': '3.4.5.6'})) dns = netutils._get_dns_services(subnet) self.assertEqual(expected_dns, dns) def test_get_network_metadata(self): expected_json = { "links": [ { "ethernet_mac_address": "aa:aa:aa:aa:aa:aa", "id": "interface0", "type": "phy", "vif_id": 1, "mtu": 1500 }, { "ethernet_mac_address": "aa:aa:aa:aa:aa:ab", "id": "interface1", "type": "phy", "vif_id": 1, "mtu": 1500 }, ], "networks": [ { "id": "network0", "ip_address": "10.10.0.2", "link": "interface0", "netmask": "255.255.255.0", "network_id": "00000000-0000-0000-0000-000000000000", "routes": [ { "gateway": "10.10.0.1", "netmask": "0.0.0.0", "network": "0.0.0.0" }, { "gateway": "192.168.1.1", "netmask": "255.255.255.0", "network": "0.0.0.0" } ], "type": "ipv4" }, { 'id': 'network1', 'ip_address': 'fd00::2', 'link': 'interface0', 'netmask': 'ffff:ffff:ffff::', 'network_id': '00000000-0000-0000-0000-000000000000', 'routes': [{'gateway': 'fd00::1', 'netmask': '::', 'network': '::'}, {'gateway': 'fd00::1:1', 'netmask': 'ffff:ffff:ffff::', 'network': '::'}], 'type': 'ipv6' }, { "id": "network2", "ip_address": "192.168.0.2", "link": "interface1", "netmask": "255.255.255.0", "network_id": "11111111-1111-1111-1111-111111111111", "routes": [ { "gateway": 
"192.168.0.1", "netmask": "0.0.0.0", "network": "0.0.0.0" } ], "type": "ipv4" } ], 'services': [ {'address': '1.2.3.4', 'type': 'dns'}, {'address': '2.3.4.5', 'type': 'dns'}, {'address': '1:2:3:4::', 'type': 'dns'}, {'address': '2:3:4:5::', 'type': 'dns'} ] } self.netinfo[0]['network']['id'] = ( '00000000-0000-0000-0000-000000000000') # Add a second NIC self.netinfo.append(fake_network_cache_model.new_vif({ 'type': 'ethernet', 'address': 'aa:aa:aa:aa:aa:ab'})) address = fake_network_cache_model.new_ip({'address': '192.168.0.2'}) gateway_address = fake_network_cache_model.new_ip( {'address': '192.168.0.1'}) ipv4_subnet = fake_network_cache_model.new_subnet( {'cidr': '192.168.0.0/24', 'gateway': gateway_address, 'ips': [address], 'routes': []}) self.netinfo[1]['network']['id'] = ( '11111111-1111-1111-1111-111111111111') self.netinfo[1]['network']['subnets'][0] = ipv4_subnet self.netinfo[1]['network']['mtu'] = 1500 network_json = netutils.get_network_metadata(self.netinfo) self.assertEqual(expected_json, network_json) def test_get_network_metadata_no_ipv4(self): expected_json = { "services": [ { "type": "dns", "address": "1:2:3:4::" }, { "type": "dns", "address": "2:3:4:5::" } ], "networks": [ { "network_id": 1, "type": "ipv6", "netmask": "ffff:ffff:ffff::", "link": "interface0", "routes": [ { "netmask": "::", "network": "::", "gateway": "fd00::1" }, { "netmask": "ffff:ffff:ffff::", "network": "::", "gateway": "fd00::1:1" } ], "ip_address": "fd00::2", "id": "network0" } ], "links": [ { "ethernet_mac_address": "aa:aa:aa:aa:aa:aa", "mtu": 1500, "type": "phy", "id": "interface0", "vif_id": 1 } ] } # drop the ipv4 subnet self.netinfo[0]['network']['subnets'].pop(0) network_json = netutils.get_network_metadata(self.netinfo) self.assertEqual(expected_json, network_json) nova-13.0.0/nova/tests/unit/network/test_manager.py0000664000567000056710000050622312701410011023472 0ustar jenkinsjenkins00000000000000# Copyright 2011 Rackspace # Copyright (c) 2011 X.commerce, a business 
unit of eBay Inc. # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import mock from mox3 import mox import netaddr from oslo_concurrency import processutils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import importutils from oslo_utils import netutils import six import testtools from nova import context from nova import db from nova.db.sqlalchemy import models from nova import exception from nova import ipv6 from nova.network import floating_ips from nova.network import linux_net from nova.network import manager as network_manager from nova.network import model as net_model from nova import objects from nova.objects import network as network_obj from nova.objects import virtual_interface as vif_obj from nova import quota from nova import test from nova.tests.unit import fake_instance from nova.tests.unit import fake_ldap from nova.tests.unit import fake_network from nova.tests.unit import matchers from nova.tests.unit.objects import test_fixed_ip from nova.tests.unit.objects import test_floating_ip from nova.tests.unit.objects import test_network from nova.tests.unit.objects import test_service from nova.tests.unit import utils as test_utils from nova.tests import uuidsentinel as uuids from nova import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) HOST = "testhost" FAKEUUID = 
"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" fake_inst = fake_instance.fake_db_instance networks = [{'id': 0, 'uuid': FAKEUUID, 'label': 'test0', 'injected': False, 'multi_host': False, 'cidr': '192.168.0.0/24', 'cidr_v6': '2001:db8::/64', 'gateway_v6': '2001:db8::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa0', 'bridge_interface': 'fake_fa0', 'gateway': '192.168.0.1', 'dhcp_server': '192.168.0.1', 'broadcast': '192.168.0.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'vlan': None, 'host': HOST, 'project_id': 'fake_project', 'vpn_public_address': '192.168.0.2', 'vpn_public_port': '22', 'vpn_private_address': '10.0.0.2'}, {'id': 1, 'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'label': 'test1', 'injected': False, 'multi_host': False, 'cidr': '192.168.1.0/24', 'cidr_v6': '2001:db9::/64', 'gateway_v6': '2001:db9::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa1', 'bridge_interface': 'fake_fa1', 'gateway': '192.168.1.1', 'dhcp_server': '192.168.1.1', 'broadcast': '192.168.1.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'vlan': None, 'host': HOST, 'project_id': 'fake_project', 'vpn_public_address': '192.168.1.2', 'vpn_public_port': '22', 'vpn_private_address': '10.0.0.2'}] fixed_ips = [{'id': 0, 'network_id': 0, 'address': '192.168.0.100', 'instance_uuid': 0, 'allocated': False, 'virtual_interface_id': 0, 'floating_ips': []}, {'id': 0, 'network_id': 1, 'address': '192.168.1.100', 'instance_uuid': 0, 'allocated': False, 'virtual_interface_id': 0, 'floating_ips': []}, {'id': 0, 'network_id': 1, 'address': '2001:db9:0:1::10', 'instance_uuid': 0, 'allocated': False, 'virtual_interface_id': 0, 'floating_ips': []}] flavor = {'id': 0, 'rxtx_cap': 3} floating_ip_fields = {'id': 0, 'address': '192.168.10.100', 'pool': 'nova', 'interface': 'eth0', 'fixed_ip_id': 0, 'project_id': None, 'auto_assigned': False} vifs = [{'id': 0, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:00', 
'uuid': '00000000-0000-0000-0000-0000000000000000', 'network_id': 0, 'instance_uuid': 0}, {'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:01', 'uuid': '00000000-0000-0000-0000-0000000000000001', 'network_id': 1, 'instance_uuid': 0}, {'id': 2, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:02', 'uuid': '00000000-0000-0000-0000-0000000000000002', 'network_id': 2, 'instance_uuid': 0}] class FlatNetworkTestCase(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(FlatNetworkTestCase, self).setUp() self.tempdir = self.useFixture(fixtures.TempDir()).path self.flags(log_dir=self.tempdir) self.network = network_manager.FlatManager(host=HOST) self.network.instance_dns_domain = '' self.network.db = db self.context = context.RequestContext('testuser', 'testproject', is_admin=False) @testtools.skipIf(test_utils.is_osx(), 'IPv6 pretty-printing broken on OSX, see bug 1409135') def test_get_instance_nw_info_fake(self): fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info nw_info = fake_get_instance_nw_info(self, 0, 2) self.assertFalse(nw_info) nw_info = fake_get_instance_nw_info(self, 1, 2) for i, vif in enumerate(nw_info): nid = i + 1 check = {'bridge': 'fake_br%d' % nid, 'cidr': '192.168.%s.0/24' % nid, 'cidr_v6': '2001:db8:0:%x::/64' % nid, 'id': getattr(uuids, 'vif%i' % nid), 'multi_host': False, 'injected': False, 'bridge_interface': None, 'vlan': None, 'broadcast': '192.168.%d.255' % nid, 'dhcp_server': '192.168.1.1', 'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid], 'gateway': '192.168.%d.1' % nid, 'gateway_v6': '2001:db8:0:1::1', 'label': 'test%d' % nid, 'mac': 'DE:AD:BE:EF:00:%02x' % nid, 'rxtx_cap': 30, 'vif_type': net_model.VIF_TYPE_BRIDGE, 'vif_devname': None, 'vif_uuid': getattr(uuids, 'vif%i' % nid), 'ovs_interfaceid': None, 'qbh_params': None, 'qbg_params': None, 'should_create_vlan': False, 'should_create_bridge': False, 
'ip': '192.168.%d.%03d' % (nid, nid + 99), 'ip_v6': '2001:db8:0:1:dcad:beff:feef:%x' % nid, 'netmask': '255.255.255.0', 'netmask_v6': 64, 'physical_network': None, } network = vif['network'] net_v4 = vif['network']['subnets'][0] net_v6 = vif['network']['subnets'][1] vif_dict = dict(bridge=network['bridge'], cidr=net_v4['cidr'], cidr_v6=net_v6['cidr'], id=vif['id'], multi_host=network.get_meta('multi_host', False), injected=network.get_meta('injected', False), bridge_interface= network.get_meta('bridge_interface'), vlan=network.get_meta('vlan'), broadcast=str(net_v4.as_netaddr().broadcast), dhcp_server=network.get_meta('dhcp_server', net_v4['gateway']['address']), dns=[ip['address'] for ip in net_v4['dns']], gateway=net_v4['gateway']['address'], gateway_v6=net_v6['gateway']['address'], label=network['label'], mac=vif['address'], rxtx_cap=vif.get_meta('rxtx_cap'), vif_type=vif['type'], vif_devname=vif.get('devname'), vif_uuid=vif['id'], ovs_interfaceid=vif.get('ovs_interfaceid'), qbh_params=vif.get('qbh_params'), qbg_params=vif.get('qbg_params'), should_create_vlan= network.get_meta('should_create_vlan', False), should_create_bridge= network.get_meta('should_create_bridge', False), ip=net_v4['ips'][i]['address'], ip_v6=net_v6['ips'][i]['address'], netmask=str(net_v4.as_netaddr().netmask), netmask_v6=net_v6.as_netaddr()._prefixlen, physical_network= network.get_meta('physical_network', None)) self.assertThat(vif_dict, matchers.DictMatches(check)) def test_validate_networks(self): self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address') requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '192.168.1.100'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '192.168.0.100')] ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1]) ip['network'] = dict(test_network.fake_network, **networks[1]) ip['instance_uuid'] = None db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=mox.IgnoreArg() ).AndReturn(ip) ip = dict(test_fixed_ip.fake_fixed_ip, 
**fixed_ips[0]) ip['network'] = dict(test_network.fake_network, **networks[0]) ip['instance_uuid'] = None db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=mox.IgnoreArg() ).AndReturn(ip) self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_networks_valid_fixed_ipv6(self): self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address') requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '2001:db9:0:1::10')] ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2]) ip['network'] = dict(test_network.fake_network, **networks[1]) ip['instance_uuid'] = None db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=mox.IgnoreArg() ).AndReturn(ip) self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_reserved(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) nets = self.network.create_networks(context_admin, 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None, None) self.assertEqual(1, len(nets)) network = nets[0] self.assertEqual(4, db.network_count_reserved_ips(context_admin, network['id'])) def test_validate_reserved_start_end(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) nets = self.network.create_networks(context_admin, 'fake', '192.168.0.0/24', False, 1, 256, dhcp_server='192.168.0.11', allowed_start='192.168.0.10', allowed_end='192.168.0.245') self.assertEqual(1, len(nets)) network = nets[0] # gateway defaults to beginning of allowed_start self.assertEqual('192.168.0.10', network['gateway']) # vpn_server doesn't conflict with dhcp_start self.assertEqual('192.168.0.12', network['vpn_private_address']) # dhcp_start doesn't conflict with dhcp_server self.assertEqual('192.168.0.13', network['dhcp_start']) # NOTE(vish): 10 from the beginning, 10 from the end, and # 1 for the gateway, 1 for the dhcp server, # 1 for the vpn server 
self.assertEqual(23, db.network_count_reserved_ips(context_admin, network['id'])) def test_validate_reserved_start_out_of_range(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) self.assertRaises(exception.AddressOutOfRange, self.network.create_networks, context_admin, 'fake', '192.168.0.0/24', False, 1, 256, allowed_start='192.168.1.10') def test_validate_reserved_end_invalid(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) self.assertRaises(exception.InvalidAddress, self.network.create_networks, context_admin, 'fake', '192.168.0.0/24', False, 1, 256, allowed_end='invalid') def test_validate_cidr_invalid(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) self.assertRaises(exception.InvalidCidr, self.network.create_networks, context_admin, 'fake', 'invalid', False, 1, 256) def test_validate_non_int_size(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) self.assertRaises(exception.InvalidIntValue, self.network.create_networks, context_admin, 'fake', '192.168.0.0/24', False, 1, 'invalid') def test_validate_networks_none_requested_networks(self): self.network.validate_networks(self.context, None) def test_validate_networks_empty_requested_networks(self): requested_networks = [] self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_networks_invalid_fixed_ip(self): requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '192.168.1.100.1'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '192.168.0.100.1')] self.mox.ReplayAll() self.assertRaises(exception.FixedIpInvalid, self.network.validate_networks, self.context, requested_networks) def test_validate_networks_empty_fixed_ip(self): requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')] self.mox.ReplayAll() self.assertRaises(exception.FixedIpInvalid, 
self.network.validate_networks, self.context, requested_networks) def test_validate_networks_none_fixed_ip(self): requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)] self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid') def test_get_instance_nw_info(self, get): def make_ip(index): vif = objects.VirtualInterface(uuid=index, address=index) network = objects.Network(uuid=index, bridge=index, label=index, project_id=index, injected=False, netmask='255.255.255.0', dns1=None, dns2=None, cidr_v6=None, gateway_v6=None, broadcast_v6=None, netmask_v6=None, rxtx_base=None, gateway='192.168.%s.1' % index, dhcp_server='192.168.%s.1' % index, broadcast='192.168.%s.255' % index, cidr='192.168.%s.0/24' % index) return objects.FixedIP(virtual_interface=vif, network=network, floating_ips=objects.FloatingIPList(), address='192.168.%s.2' % index) objs = [make_ip(index) for index in ('3', '1', '2')] get.return_value = objects.FixedIPList(objects=objs) nw_info = self.network.get_instance_nw_info(self.context, None, None, None) for i, vif in enumerate(nw_info): self.assertEqual(objs[i].network.bridge, vif['network']['bridge']) @mock.patch.object(objects.Network, 'get_by_id') def test_add_fixed_ip_instance_using_id_without_vpn(self, get_by_id): # Allocate a fixed ip from a network and assign it to an instance. # Network is given by network id. network_id = networks[0]['id'] with mock.patch.object(self.network, 'allocate_fixed_ip') as allocate_fixed_ip: self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST, network_id) # Assert that we fetched the network by id, not uuid get_by_id.assert_called_once_with(self.context, network_id, project_only='allow_none') # Assert that we called allocate_fixed_ip for the given network and # instance. We should not have requested a specific address from the # network. 
allocate_fixed_ip.assert_called_once_with(self.context, FAKEUUID, get_by_id.return_value, address=None) @mock.patch.object(objects.Network, 'get_by_uuid') def test_add_fixed_ip_instance_using_uuid_without_vpn(self, get_by_uuid): # Allocate a fixed ip from a network and assign it to an instance. # Network is given by network uuid. network_uuid = networks[0]['uuid'] with mock.patch.object(self.network, 'allocate_fixed_ip') as allocate_fixed_ip,\ mock.patch.object(self.context, 'elevated', return_value=mock.sentinel.elevated): self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST, network_uuid) # Assert that we fetched the network by uuid, not id, and with elevated # context get_by_uuid.assert_called_once_with(mock.sentinel.elevated, network_uuid) # Assert that we called allocate_fixed_ip for the given network and # instance. We should not have requested a specific address from the # network. allocate_fixed_ip.assert_called_once_with(self.context, FAKEUUID, get_by_uuid.return_value, address=None) def test_mini_dns_driver(self): zone1 = "example.org" zone2 = "example.com" driver = self.network.instance_dns_manager driver.create_entry("hostone", "10.0.0.1", "A", zone1) driver.create_entry("hosttwo", "10.0.0.2", "A", zone1) driver.create_entry("hostthree", "10.0.0.3", "A", zone1) driver.create_entry("hostfour", "10.0.0.4", "A", zone1) driver.create_entry("hostfive", "10.0.0.5", "A", zone2) driver.delete_entry("hostone", zone1) driver.modify_address("hostfour", "10.0.0.1", zone1) driver.modify_address("hostthree", "10.0.0.1", zone1) names = driver.get_entries_by_address("10.0.0.1", zone1) self.assertEqual(2, len(names)) self.assertIn('hostthree', names) self.assertIn('hostfour', names) names = driver.get_entries_by_address("10.0.0.5", zone2) self.assertEqual(1, len(names)) self.assertIn('hostfive', names) addresses = driver.get_entries_by_name("hosttwo", zone1) self.assertEqual(1, len(addresses)) self.assertIn('10.0.0.2', addresses) 
self.assertRaises(exception.InvalidInput, driver.create_entry, "hostname", "10.10.10.10", "invalidtype", zone1) def test_mini_dns_driver_with_mixed_case(self): zone1 = "example.org" driver = self.network.instance_dns_manager driver.create_entry("HostTen", "10.0.0.10", "A", zone1) addresses = driver.get_entries_by_address("10.0.0.10", zone1) self.assertEqual(1, len(addresses)) for n in addresses: driver.delete_entry(n, zone1) addresses = driver.get_entries_by_address("10.0.0.10", zone1) self.assertEqual(0, len(addresses)) def test_allocate_fixed_ip_instance_dns(self): # Test DNS entries are created when allocating a fixed IP. # Allocate a fixed IP to an instance. Ensure that dns entries have been # created for the instance's name and uuid. network = network_obj.Network._from_db_object( self.context, network_obj.Network(), test_network.fake_network) network.save = mock.MagicMock() # Create a minimal instance object instance_params = { 'display_name': HOST, 'security_groups': [] } instance = fake_instance.fake_instance_obj( context.RequestContext('ignore', 'ignore'), expected_attrs=instance_params.keys(), **instance_params) instance.save = mock.MagicMock() # We don't specify a specific address, so we should get a FixedIP # automatically allocated from the pool. Fix its value here. 
fip = objects.FixedIP(address='192.168.0.101') fip.save = mock.MagicMock() with mock.patch.object(objects.Instance, 'get_by_uuid', return_value=instance),\ mock.patch.object(objects.FixedIP, 'associate_pool', return_value=fip): self.network.allocate_fixed_ip(self.context, FAKEUUID, network) instance_manager = self.network.instance_dns_manager expected_addresses = ['192.168.0.101'] # Assert that we have a correct entry by instance display name addresses = instance_manager.get_entries_by_name(HOST, self.network.instance_dns_domain) self.assertEqual(expected_addresses, addresses) # Assert that we have a correct entry by instance uuid addresses = instance_manager.get_entries_by_name(FAKEUUID, self.network.instance_dns_domain) self.assertEqual(expected_addresses, addresses) def test_allocate_floating_ip(self): self.assertIsNone(self.network.allocate_floating_ip(self.context, 1, None)) def test_deallocate_floating_ip(self): self.assertIsNone(self.network.deallocate_floating_ip(self.context, 1, None)) def test_associate_floating_ip(self): self.assertIsNone(self.network.associate_floating_ip(self.context, None, None)) def test_disassociate_floating_ip(self): self.assertIsNone(self.network.disassociate_floating_ip(self.context, None, None)) def test_get_networks_by_uuids_ordering(self): self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn( [dict(test_network.fake_network, **net) for net in networks]) self.mox.ReplayAll() res = self.network._get_networks_by_uuids(self.context, requested_networks) self.assertEqual(1, res[0]['id']) self.assertEqual(0, res[1]['id']) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.quotas.Quotas.reserve') @mock.patch('nova.objects.quotas.ids_from_instance') def test_allocate_calculates_quota_auth(self, util_method, 
reserve, get_by_uuid): inst = objects.Instance() inst['uuid'] = 'nosuch' get_by_uuid.return_value = inst usages = {'fixed_ips': {'in_use': 10, 'reserved': 1}} reserve.side_effect = exception.OverQuota(overs='testing', quotas={'fixed_ips': 10}, usages=usages) util_method.return_value = ('foo', 'bar') self.assertRaises(exception.FixedIpLimitExceeded, self.network.allocate_fixed_ip, self.context, 123, {'uuid': 'nosuch'}) util_method.assert_called_once_with(self.context, inst) @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address') @mock.patch('nova.objects.quotas.Quotas.reserve') @mock.patch('nova.objects.quotas.ids_from_instance') def test_deallocate_calculates_quota_auth(self, util_method, reserve, get_by_address): inst = objects.Instance(uuid='fake-uuid') fip = objects.FixedIP(instance_uuid='fake-uuid', virtual_interface_id=1) get_by_address.return_value = fip util_method.return_value = ('foo', 'bar') # This will fail right after the reserve call when it tries # to look up the fake instance we created above self.assertRaises(exception.InstanceNotFound, self.network.deallocate_fixed_ip, self.context, '1.2.3.4', instance=inst) util_method.assert_called_once_with(self.context, inst) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.fixed_ip.FixedIP.associate') def test_allocate_fixed_ip_passes_string_address(self, mock_associate, mock_get): mock_associate.side_effect = test.TestingException instance = objects.Instance(context=self.context) instance.create() mock_get.return_value = instance self.assertRaises(test.TestingException, self.network.allocate_fixed_ip, self.context, instance.uuid, {'cidr': '24', 'id': 1, 'uuid': 'nosuch'}, address=netaddr.IPAddress('1.2.3.4')) mock_associate.assert_called_once_with(self.context, '1.2.3.4', instance.uuid, 1, vif_id=1) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.virtual_interface.VirtualInterface' '.get_by_instance_and_network') 
@mock.patch('nova.objects.fixed_ip.FixedIP.disassociate') @mock.patch('nova.objects.fixed_ip.FixedIP.associate') @mock.patch('nova.objects.fixed_ip.FixedIP.save') def test_allocate_fixed_ip_cleanup(self, mock_fixedip_save, mock_fixedip_associate, mock_fixedip_disassociate, mock_vif_get, mock_instance_get): address = netaddr.IPAddress('1.2.3.4') fip = objects.FixedIP(instance_uuid='fake-uuid', address=address, virtual_interface_id=1) mock_fixedip_associate.return_value = fip instance = objects.Instance(context=self.context) instance.create() mock_instance_get.return_value = instance mock_vif_get.return_value = vif_obj.VirtualInterface( instance_uuid='fake-uuid', id=1) with test.nested( mock.patch.object(self.network, '_setup_network_on_host'), mock.patch.object(self.network, 'instance_dns_manager'), mock.patch.object(self.network, '_do_trigger_security_group_members_refresh_for_instance') ) as (mock_setup_network, mock_dns_manager, mock_ignored): mock_setup_network.side_effect = test.TestingException self.assertRaises(test.TestingException, self.network.allocate_fixed_ip, self.context, instance.uuid, {'cidr': '24', 'id': 1, 'uuid': 'nosuch'}, address=address) mock_dns_manager.delete_entry.assert_has_calls([ mock.call(instance.display_name, ''), mock.call(instance.uuid, '') ]) mock_fixedip_disassociate.assert_called_once_with(self.context) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.virtual_interface.VirtualInterface' '.get_by_instance_and_network') @mock.patch('nova.objects.fixed_ip.FixedIP.disassociate') @mock.patch('nova.objects.fixed_ip.FixedIP.associate_pool') @mock.patch('nova.network.manager.NetworkManager._add_virtual_interface') def test_allocate_fixed_ip_create_new_vifs(self, mock_add, mock_fixedip_associate, mock_fixedip_disassociate, mock_vif_get, mock_instance_get): address = netaddr.IPAddress('1.2.3.4') fip = objects.FixedIP(instance_uuid='fake-uuid', address=address, virtual_interface_id=1000) net = {'cidr': 
'24', 'id': 1, 'uuid': 'nosuch'} instance = objects.Instance(context=self.context) instance.create() vif = objects.VirtualInterface(context, id=1000, address='00:00:00:00:00:00', instance_uuid=instance.uuid, network_id=net['id'], uuid='nosuch') mock_fixedip_associate.return_value = fip mock_add.return_value = vif mock_instance_get.return_value = instance mock_vif_get.return_value = None with test.nested( mock.patch.object(self.network, '_setup_network_on_host'), mock.patch.object(self.network, 'instance_dns_manager'), mock.patch.object(self.network, '_do_trigger_security_group_members_refresh_for_instance') ) as (mock_setup_network, mock_dns_manager, mock_ignored): self.network.allocate_fixed_ip(self.context, instance['uuid'], net) mock_add.assert_called_once_with(self.context, instance['uuid'], net['id']) self.assertEqual(fip.virtual_interface_id, vif.id) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch.object(db, 'virtual_interface_get_by_instance_and_network', return_value=None) @mock.patch('nova.objects.fixed_ip.FixedIP') def test_allocate_fixed_ip_add_vif_fails(self, mock_fixedip, mock_get_vif, mock_instance_get): # Tests that we don't try to do anything with fixed IPs if # _add_virtual_interface fails. 
instance = fake_instance.fake_instance_obj(self.context) mock_instance_get.return_value = instance network = {'cidr': '24', 'id': 1, 'uuid': '398399b3-f696-4859-8695-a6560e14cb02'} vif_error = exception.VirtualInterfaceMacAddressException() # mock out quotas because we don't care in this test with mock.patch.object(self.network, 'quotas_cls', objects.QuotasNoOp): with mock.patch.object(self.network, '_add_virtual_interface', side_effect=vif_error): self.assertRaises( exception.VirtualInterfaceMacAddressException, self.network.allocate_fixed_ip, self.context, '9d2ee1e3-ffad-4e5f-81ff-c96dd97b0ee0', network) self.assertFalse(mock_fixedip.called, str(mock_fixedip.mock_calls)) class FlatDHCPNetworkTestCase(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(FlatDHCPNetworkTestCase, self).setUp() self.useFixture(test.SampleNetworks()) self.flags(use_local=True, group='conductor') self.network = network_manager.FlatDHCPManager(host=HOST) self.network.db = db self.context = context.RequestContext('testuser', 'testproject', is_admin=False) self.context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id') @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host') @mock.patch('nova.network.linux_net.iptables_manager._apply') def test_init_host_iptables_defer_apply(self, iptable_apply, floating_get_by_host, fixed_get_by_id): def get_by_id(context, fixed_ip_id, **kwargs): net = objects.Network(bridge='testbridge', cidr='192.168.1.0/24') if fixed_ip_id == 1: return objects.FixedIP(address='192.168.1.4', network=net) elif fixed_ip_id == 2: return objects.FixedIP(address='192.168.1.5', network=net) def fake_apply(): fake_apply.count += 1 fake_apply.count = 0 ctxt = context.RequestContext('testuser', 'testproject', is_admin=True) float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1) float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2) float1._context = ctxt 
float2._context = ctxt iptable_apply.side_effect = fake_apply floating_get_by_host.return_value = [float1, float2] fixed_get_by_id.side_effect = get_by_id self.network.init_host() self.assertEqual(1, fake_apply.count) class VlanNetworkTestCase(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(VlanNetworkTestCase, self).setUp() self.useFixture(test.SampleNetworks()) self.network = network_manager.VlanManager(host=HOST) self.network.db = db self.context = context.RequestContext('testuser', 'testproject', is_admin=False) self.context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) def test_quota_driver_type(self): self.assertEqual(objects.QuotasNoOp, self.network.quotas_cls) def test_vpn_allocate_fixed_ip(self): self.mox.StubOutWithMock(db, 'fixed_ip_associate') self.mox.StubOutWithMock(db, 'fixed_ip_update') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') fixed = dict(test_fixed_ip.fake_fixed_ip, address='192.168.0.1') db.fixed_ip_associate(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), network_id=mox.IgnoreArg(), reserved=True, virtual_interface_id=vifs[0]['id'] ).AndReturn(fixed) db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0]) db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst(display_name=HOST, uuid=FAKEUUID)) self.mox.ReplayAll() network = objects.Network._from_db_object( self.context, objects.Network(), dict(test_network.fake_network, **networks[0])) network.vpn_private_address = '192.168.0.2' self.network.allocate_fixed_ip(self.context, FAKEUUID, network, vpn=True) def test_allocate_fixed_ip(self): self.stubs.Set(self.network, '_do_trigger_security_group_members_refresh_for_instance', lambda *a, **kw: None) self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') self.mox.StubOutWithMock(db, 
'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') fixed = dict(test_fixed_ip.fake_fixed_ip, address='192.168.0.1') db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), instance_uuid=mox.IgnoreArg(), host=None, virtual_interface_id=vifs[0]['id'] ).AndReturn(fixed) db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0]) db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst(display_name=HOST, uuid=FAKEUUID)) self.mox.ReplayAll() network = objects.Network._from_db_object( self.context, objects.Network(), dict(test_network.fake_network, **networks[0])) network.vpn_private_address = '192.168.0.2' self.network.allocate_fixed_ip(self.context, FAKEUUID, network) @mock.patch('nova.network.manager.VlanManager._setup_network_on_host') @mock.patch('nova.network.manager.VlanManager.' '_validate_instance_zone_for_dns_domain') @mock.patch('nova.network.manager.VlanManager.' 
'_do_trigger_security_group_members_refresh_for_instance') @mock.patch('nova.network.manager.VlanManager._add_virtual_interface') @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.fixed_ip.FixedIP.associate') @mock.patch('nova.objects.VirtualInterface.get_by_instance_and_network') def test_allocate_fixed_ip_return_none(self, mock_get, mock_associate, mock_get_uuid, mock_add, mock_trigger, mock_validate, mock_setup): net = {'cidr': '24', 'id': 1, 'uuid': 'nosuch'} fip = objects.FixedIP(instance_uuid='fake-uuid', address=netaddr.IPAddress('1.2.3.4'), virtual_interface_id=1) instance = objects.Instance(context=self.context) instance.create() vif = objects.VirtualInterface(self.context, id=1000, address='00:00:00:00:00:00', instance_uuid=instance.uuid, network_id=net['id'], uuid='nosuch') mock_associate.return_value = fip mock_add.return_value = vif mock_get.return_value = None mock_get_uuid.return_value = instance mock_validate.return_value = False self.network.allocate_fixed_ip(self.context_admin, instance.uuid, net) mock_add.assert_called_once_with(self.context_admin, instance.uuid, net['id']) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.fixed_ip.FixedIP.associate') def test_allocate_fixed_ip_passes_string_address(self, mock_associate, mock_get): mock_associate.side_effect = test.TestingException instance = objects.Instance(context=self.context) instance.create() mock_get.return_value = instance self.assertRaises(test.TestingException, self.network.allocate_fixed_ip, self.context, instance.uuid, {'cidr': '24', 'id': 1, 'uuid': 'nosuch'}, address=netaddr.IPAddress('1.2.3.4')) mock_associate.assert_called_once_with(self.context, '1.2.3.4', instance.uuid, 1, vif_id=1) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.fixed_ip.FixedIP.associate') def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate, mock_get): mock_associate.side_effect = 
test.TestingException instance = objects.Instance(context=self.context) instance.create() mock_get.return_value = instance self.assertRaises(test.TestingException, self.network.allocate_fixed_ip, self.context, instance.uuid, {'cidr': '24', 'id': 1, 'uuid': 'nosuch', 'vpn_private_address': netaddr.IPAddress('1.2.3.4') }, vpn=1) mock_associate.assert_called_once_with(self.context, '1.2.3.4', instance.uuid, 1, reserved=True, vif_id=1) @mock.patch.object(db, 'virtual_interface_get_by_instance_and_network', return_value=None) @mock.patch('nova.objects.fixed_ip.FixedIP') def test_allocate_fixed_ip_add_vif_fails(self, mock_fixedip, mock_get_vif): # Tests that we don't try to do anything with fixed IPs if # _add_virtual_interface fails. vif_error = exception.VirtualInterfaceMacAddressException() with mock.patch.object(self.network, '_add_virtual_interface', side_effect=vif_error): self.assertRaises(exception.VirtualInterfaceMacAddressException, self.network.allocate_fixed_ip, self.context, '9d2ee1e3-ffad-4e5f-81ff-c96dd97b0ee0', networks[0]) self.assertFalse(mock_fixedip.called, str(mock_fixedip.mock_calls)) def test_create_networks_too_big(self): self.assertRaises(ValueError, self.network.create_networks, None, num_networks=4094, vlan_start=1) def test_create_networks_too_many(self): self.assertRaises(ValueError, self.network.create_networks, None, num_networks=100, vlan_start=1, cidr='192.168.0.1/24', network_size=100) def test_duplicate_vlan_raises(self): # VLAN 100 is already used and we force the network to be created # in that vlan (vlan=100). 
self.assertRaises(exception.DuplicateVlan, self.network.create_networks, self.context_admin, label="fake", num_networks=1, vlan=100, cidr='192.168.0.1/24', network_size=100) def test_vlan_start(self): # VLAN 100 and 101 are used, so this network shoud be created in 102 networks = self.network.create_networks( self.context_admin, label="fake", num_networks=1, vlan_start=100, cidr='192.168.3.1/24', network_size=100) self.assertEqual(102, networks[0]["vlan"]) def test_vlan_start_multiple(self): # VLAN 100 and 101 are used, so these networks shoud be created in 102 # and 103 networks = self.network.create_networks( self.context_admin, label="fake", num_networks=2, vlan_start=100, cidr='192.168.3.1/24', network_size=100) self.assertEqual(102, networks[0]["vlan"]) self.assertEqual(103, networks[1]["vlan"]) def test_vlan_start_used(self): # VLAN 100 and 101 are used, but vlan_start=99. networks = self.network.create_networks( self.context_admin, label="fake", num_networks=1, vlan_start=99, cidr='192.168.3.1/24', network_size=100) self.assertEqual(102, networks[0]["vlan"]) def test_vlan_parameter(self): # vlan parameter could not be greater than 4094 exc = self.assertRaises(ValueError, self.network.create_networks, self.context_admin, label="fake", num_networks=1, vlan=4095, cidr='192.168.0.1/24') error_msg = 'The vlan number cannot be greater than 4094' self.assertIn(error_msg, six.text_type(exc)) # vlan parameter could not be less than 1 exc = self.assertRaises(ValueError, self.network.create_networks, self.context_admin, label="fake", num_networks=1, vlan=0, cidr='192.168.0.1/24') error_msg = 'The vlan number cannot be less than 1' self.assertIn(error_msg, six.text_type(exc)) def test_vlan_be_integer(self): # vlan must be an integer exc = self.assertRaises(ValueError, self.network.create_networks, self.context_admin, label="fake", num_networks=1, vlan='fake', cidr='192.168.0.1/24') error_msg = 'vlan must be an integer' self.assertIn(error_msg, six.text_type(exc)) def 
test_vlan_multiple_without_dhcp_server(self): networks = self.network.create_networks( self.context_admin, label="fake", num_networks=2, vlan_start=100, cidr='192.168.3.1/24', network_size=100) self.assertEqual("192.168.3.1", networks[0]["dhcp_server"]) self.assertEqual("192.168.3.129", networks[1]["dhcp_server"]) def test_vlan_multiple_with_dhcp_server(self): networks = self.network.create_networks( self.context_admin, label="fake", num_networks=2, vlan_start=100, cidr='192.168.3.1/24', network_size=100, dhcp_server='192.168.3.1') self.assertEqual("192.168.3.1", networks[0]["dhcp_server"]) self.assertEqual("192.168.3.1", networks[1]["dhcp_server"]) def test_validate_networks(self): self.mox.StubOutWithMock(db, "fixed_ip_get_by_address") requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '192.168.1.100'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '192.168.0.100')] db_fixed1 = dict(test_fixed_ip.fake_fixed_ip, network_id=networks[1]['id'], network=dict(test_network.fake_network, **networks[1]), instance_uuid=None) db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=mox.IgnoreArg() ).AndReturn(db_fixed1) db_fixed2 = dict(test_fixed_ip.fake_fixed_ip, network_id=networks[0]['id'], network=dict(test_network.fake_network, **networks[0]), instance_uuid=None) db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=mox.IgnoreArg() ).AndReturn(db_fixed2) self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_networks_none_requested_networks(self): self.network.validate_networks(self.context, None) def test_validate_networks_empty_requested_networks(self): requested_networks = [] self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_networks_invalid_fixed_ip(self): requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '192.168.1.100.1'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '192.168.0.100.1')] 
self.mox.ReplayAll() self.assertRaises(exception.FixedIpInvalid, self.network.validate_networks, self.context, requested_networks) def test_validate_networks_empty_fixed_ip(self): requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')] self.mox.ReplayAll() self.assertRaises(exception.FixedIpInvalid, self.network.validate_networks, self.context, requested_networks) def test_validate_networks_none_fixed_ip(self): requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)] self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_floating_ip_owned_by_project(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) # raises because floating_ip project_id is None floating_ip = objects.FloatingIP(address='10.0.0.1', project_id=None) self.assertRaises(exception.Forbidden, self.network._floating_ip_owned_by_project, ctxt, floating_ip) # raises because floating_ip project_id is not equal to ctxt project_id floating_ip = objects.FloatingIP(address='10.0.0.1', project_id=ctxt.project_id + '1') self.assertRaises(exception.Forbidden, self.network._floating_ip_owned_by_project, ctxt, floating_ip) # does not raise (floating ip is owned by ctxt project) floating_ip = objects.FloatingIP(address='10.0.0.1', project_id=ctxt.project_id) self.network._floating_ip_owned_by_project(ctxt, floating_ip) ctxt = context.RequestContext(None, None, is_admin=True) # does not raise (ctxt is admin) floating_ip = objects.FloatingIP(address='10.0.0.1', project_id=None) self.network._floating_ip_owned_by_project(ctxt, floating_ip) # does not raise (ctxt is admin) floating_ip = objects.FloatingIP(address='10.0.0.1', project_id='testproject') self.network._floating_ip_owned_by_project(ctxt, floating_ip) def test_allocate_floating_ip(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) 
self.stubs.Set(self.network, '_floating_ip_pool_exists', lambda _x, _y: True) def fake_allocate_address(*args, **kwargs): return {'address': '10.0.0.1', 'project_id': ctxt.project_id} self.stubs.Set(self.network.db, 'floating_ip_allocate_address', fake_allocate_address) self.network.allocate_floating_ip(ctxt, ctxt.project_id) @mock.patch('nova.quota.QUOTAS.reserve') @mock.patch('nova.quota.QUOTAS.commit') def test_deallocate_floating_ip(self, mock_commit, mock_reserve): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake1(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip) def fake2(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', fixed_ip_id=1) def fake3(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', fixed_ip_id=None, project_id=ctxt.project_id) self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1) self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1) # this time should raise because floating ip is associated to fixed_ip self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2) self.assertRaises(exception.FloatingIpAssociated, self.network.deallocate_floating_ip, ctxt, mox.IgnoreArg()) mock_reserve.return_value = 'reserve' # this time should not raise self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3) self.network.deallocate_floating_ip(ctxt, ctxt.project_id) mock_commit.assert_called_once_with(ctxt, 'reserve', project_id='testproject') @mock.patch('nova.db.fixed_ip_get') def test_associate_floating_ip(self, fixed_get): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake1(*args, **kwargs): return dict(test_fixed_ip.fake_fixed_ip, address='10.0.0.1', network=test_network.fake_network) # floating ip that's already associated def fake2(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', pool='nova', interface='eth0', fixed_ip_id=1) # 
floating ip that isn't associated def fake3(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', pool='nova', interface='eth0', fixed_ip_id=None) # fixed ip with remote host def fake4(*args, **kwargs): return dict(test_fixed_ip.fake_fixed_ip, address='10.0.0.1', pool='nova', instance_uuid=FAKEUUID, interface='eth0', network_id=123) def fake4_network(*args, **kwargs): return dict(test_network.fake_network, multi_host=False, host='jibberjabber') # fixed ip with local host def fake5(*args, **kwargs): return dict(test_fixed_ip.fake_fixed_ip, address='10.0.0.1', pool='nova', instance_uuid=FAKEUUID, interface='eth0', network_id=1234) def fake5_network(*args, **kwargs): return dict(test_network.fake_network, multi_host=False, host='testhost') def fake6(ctxt, method, **kwargs): self.local = False def fake7(*args, **kwargs): self.local = True def fake8(*args, **kwargs): raise processutils.ProcessExecutionError('', 'Cannot find device "em0"\n') def fake9(*args, **kwargs): raise test.TestingException() # raises because interface doesn't exist self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate', fake1) self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1) self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8) self.assertRaises(exception.NoFloatingIpInterface, self.network._associate_floating_ip, ctxt, '1.2.3.4', '1.2.3.5', mox.IgnoreArg(), mox.IgnoreArg()) self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1) # raises because floating_ip is already associated to a fixed_ip self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2) self.stubs.Set(self.network, 'disassociate_floating_ip', fake9) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address='1.2.3.4', instance_uuid='fake_uuid', network=test_network.fake_network) # doesn't raise because we exit early if the address is the same self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4') # raises because 
we call disassociate which is mocked self.assertRaises(test.TestingException, self.network.associate_floating_ip, ctxt, mox.IgnoreArg(), 'new') self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3) # does not raise and makes call remotely self.local = True self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4) self.stubs.Set(self.network.db, 'network_get', fake4_network) self.stubs.Set(self.network.network_rpcapi.client, 'prepare', lambda **kw: self.network.network_rpcapi.client) self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6) self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), mox.IgnoreArg()) self.assertFalse(self.local) # does not raise and makes call locally self.local = False self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5) self.stubs.Set(self.network.db, 'network_get', fake5_network) self.stubs.Set(self.network, '_associate_floating_ip', fake7) self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), mox.IgnoreArg()) self.assertTrue(self.local) def test_add_floating_ip_nat_before_bind(self): # Tried to verify order with documented mox record/verify # functionality, but it doesn't seem to work since I can't make it # fail. I'm using stubs and a flag for now, but if this mox feature # can be made to work, it would be a better way to test this. 
# # self.mox.StubOutWithMock(self.network.driver, # 'ensure_floating_forward') # self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip') # # self.network.driver.ensure_floating_forward(mox.IgnoreArg(), # mox.IgnoreArg(), # mox.IgnoreArg(), # mox.IgnoreArg()) # self.network.driver.bind_floating_ip(mox.IgnoreArg(), # mox.IgnoreArg()) # self.mox.ReplayAll() nat_called = [False] def fake_nat(*args, **kwargs): nat_called[0] = True def fake_bind(*args, **kwargs): self.assertTrue(nat_called[0]) self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake_nat) self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind) self.network.l3driver.add_floating_ip('fakefloat', 'fakefixed', 'fakeiface', 'fakenet') @mock.patch('nova.db.floating_ip_get_all_by_host') @mock.patch('nova.db.fixed_ip_get') def _test_floating_ip_init_host(self, fixed_get, floating_get, public_interface, expected_arg): floating_get.return_value = [ dict(test_floating_ip.fake_floating_ip, interface='foo', address='1.2.3.4'), dict(test_floating_ip.fake_floating_ip, interface='fakeiface', address='1.2.3.5', fixed_ip_id=1), dict(test_floating_ip.fake_floating_ip, interface='bar', address='1.2.3.6', fixed_ip_id=2), ] def fixed_ip_get(_context, fixed_ip_id, get_network): if fixed_ip_id == 1: return dict(test_fixed_ip.fake_fixed_ip, address='1.2.3.4', network=test_network.fake_network) raise exception.FixedIpNotFound(id=fixed_ip_id) fixed_get.side_effect = fixed_ip_get self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip') self.flags(public_interface=public_interface) self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'), netaddr.IPAddress('1.2.3.4'), expected_arg, mox.IsA(objects.Network)) self.mox.ReplayAll() self.network.init_host_floating_ips() self.mox.UnsetStubs() self.mox.VerifyAll() def test_floating_ip_init_host_without_public_interface(self): self._test_floating_ip_init_host(public_interface=False, expected_arg='fakeiface') def 
test_floating_ip_init_host_with_public_interface(self): self._test_floating_ip_init_host(public_interface='fooiface', expected_arg='fooiface') def test_disassociate_floating_ip(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake1(*args, **kwargs): pass # floating ip that isn't associated def fake2(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', pool='nova', interface='eth0', fixed_ip_id=None) # floating ip that is associated def fake3(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', pool='nova', interface='eth0', fixed_ip_id=1, project_id=ctxt.project_id) # fixed ip with remote host def fake4(*args, **kwargs): return dict(test_fixed_ip.fake_fixed_ip, address='10.0.0.1', pool='nova', instance_uuid=FAKEUUID, interface='eth0', network_id=123) def fake4_network(*args, **kwargs): return dict(test_network.fake_network, multi_host=False, host='jibberjabber') # fixed ip with local host def fake5(*args, **kwargs): return dict(test_fixed_ip.fake_fixed_ip, address='10.0.0.1', pool='nova', instance_uuid=FAKEUUID, interface='eth0', network_id=1234) def fake5_network(*args, **kwargs): return dict(test_network.fake_network, multi_host=False, host='testhost') def fake6(ctxt, method, **kwargs): self.local = False def fake7(*args, **kwargs): self.local = True def fake8(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', pool='nova', interface='eth0', fixed_ip_id=1, auto_assigned=True, project_id=ctxt.project_id) self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1) # raises because floating_ip is not associated to a fixed_ip self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2) self.assertRaises(exception.FloatingIpNotAssociated, self.network.disassociate_floating_ip, ctxt, mox.IgnoreArg()) self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3) # does not raise and makes call remotely self.local = 
True self.stubs.Set(self.network.db, 'fixed_ip_get', fake4) self.stubs.Set(self.network.db, 'network_get', fake4_network) self.stubs.Set(self.network.network_rpcapi.client, 'prepare', lambda **kw: self.network.network_rpcapi.client) self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6) self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg()) self.assertFalse(self.local) # does not raise and makes call locally self.local = False self.stubs.Set(self.network.db, 'fixed_ip_get', fake5) self.stubs.Set(self.network.db, 'network_get', fake5_network) self.stubs.Set(self.network, '_disassociate_floating_ip', fake7) self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg()) self.assertTrue(self.local) # raises because auto_assigned floating IP cannot be disassociated self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8) self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP, self.network.disassociate_floating_ip, ctxt, mox.IgnoreArg()) def test_add_fixed_ip_instance_without_vpn_requested_networks(self): self.stubs.Set(self.network, '_do_trigger_security_group_members_refresh_for_instance', lambda *a, **kw: None) self.mox.StubOutWithMock(db, 'network_get') self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(self.network, 'get_instance_nw_info') db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0]) fixed = dict(test_fixed_ip.fake_fixed_ip, address='192.168.0.101') db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), instance_uuid=mox.IgnoreArg(), host=None, virtual_interface_id=vifs[0]['id'] ).AndReturn(fixed) db.network_get(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg() ).AndReturn(dict(test_network.fake_network, **networks[0])) db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(), 
columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst(display_name=HOST, uuid=FAKEUUID)) self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST, networks[0]['id']) @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') def test_ip_association_and_allocation_of_other_project(self, net_get, fixed_get): """Makes sure that we cannot deallocaate or disassociate a public IP of other project. """ net_get.return_value = dict(test_network.fake_network, **networks[1]) context1 = context.RequestContext('user', 'project1') context2 = context.RequestContext('user', 'project2') float_ip = db.floating_ip_create(context1.elevated(), {'address': '1.2.3.4', 'project_id': context1.project_id}) float_addr = float_ip['address'] instance = db.instance_create(context1, {'project_id': 'project1'}) fix_addr = db.fixed_ip_associate_pool(context1.elevated(), 1, instance['uuid']).address fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr, instance_uuid=instance.uuid, network=dict(test_network.fake_network, **networks[1])) # Associate the IP with non-admin user context self.assertRaises(exception.Forbidden, self.network.associate_floating_ip, context2, float_addr, fix_addr) # Deallocate address from other project self.assertRaises(exception.Forbidden, self.network.deallocate_floating_ip, context2, float_addr) # Now Associates the address to the actual project self.network.associate_floating_ip(context1, float_addr, fix_addr) # Now try dis-associating from other project self.assertRaises(exception.Forbidden, self.network.disassociate_floating_ip, context2, float_addr) # Clean up the ip addresses self.network.disassociate_floating_ip(context1, float_addr) self.network.deallocate_floating_ip(context1, float_addr) self.network.deallocate_fixed_ip(context1, fix_addr, 'fake') 
db.floating_ip_destroy(context1.elevated(), float_addr) db.fixed_ip_disassociate(context1.elevated(), fix_addr) @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def test_deallocate_fixed(self, fixed_update, net_get, fixed_get): """Verify that release is called properly. Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return """ net_get.return_value = dict(test_network.fake_network, **networks[1]) def vif_get(_context, _vif_id): return vifs[0] self.stub_out('nova.db.virtual_interface_get', vif_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, instance_uuid=instance.uuid, allocated=True, virtual_interface_id=3, network=dict(test_network.fake_network, **networks[1])) self.flags(force_dhcp_release=True) self.mox.StubOutWithMock(linux_net, 'release_dhcp') linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address, 'DE:AD:BE:EF:00:00') self.mox.ReplayAll() self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake') fixed_update.assert_called_once_with(context1, fix_addr.address, {'allocated': False}) @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def _deallocate_fixed_with_dhcp(self, mock_dev_exists, fixed_update, net_get, fixed_get): net_get.return_value = dict(test_network.fake_network, **networks[1]) def vif_get(_context, _vif_id): return vifs[0] with test.nested( mock.patch.object(db, 'virtual_interface_get', vif_get), mock.patch.object( utils, 'execute', side_effect=processutils.ProcessExecutionError()), ) as (_vif_get, _execute): context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 
'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, instance_uuid=instance.uuid, allocated=True, virtual_interface_id=3, network=dict( test_network.fake_network, **networks[1])) self.flags(force_dhcp_release=True) self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake') fixed_update.assert_called_once_with(context1, fix_addr.address, {'allocated': False}) mock_dev_exists.assert_called_once_with(networks[1]['bridge']) if mock_dev_exists.return_value: _execute.assert_called_once_with('dhcp_release', networks[1]['bridge'], fix_addr.address, 'DE:AD:BE:EF:00:00', run_as_root=True) @mock.patch('nova.network.linux_net.device_exists', return_value=True) def test_deallocate_fixed_with_dhcp(self, mock_dev_exists): self._deallocate_fixed_with_dhcp(mock_dev_exists) @mock.patch('nova.network.linux_net.device_exists', return_value=False) def test_deallocate_fixed_without_dhcp(self, mock_dev_exists): self._deallocate_fixed_with_dhcp(mock_dev_exists) def test_deallocate_fixed_deleted(self): # Verify doesn't deallocate deleted fixed_ip from deleted network. 
def teardown_network_on_host(_context, network): if network['id'] == 0: raise test.TestingException() self.stubs.Set(self.network, '_teardown_network_on_host', teardown_network_on_host) context1 = context.RequestContext('user', 'project1') elevated = context1.elevated() instance = db.instance_create(context1, {'project_id': 'project1'}) network = db.network_create_safe(elevated, networks[0]) _fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fix_addr = _fix_addr.address db.fixed_ip_update(elevated, fix_addr, {'deleted': 1}) elevated.read_deleted = 'yes' delfixed = db.fixed_ip_get_by_address(elevated, fix_addr) values = {'address': fix_addr, 'network_id': network.id, 'instance_uuid': delfixed['instance_uuid']} db.fixed_ip_create(elevated, values) elevated.read_deleted = 'no' elevated.read_deleted = 'yes' deallocate = self.network.deallocate_fixed_ip self.assertRaises(test.TestingException, deallocate, context1, fix_addr, 'fake') @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get): """Verify that deallocate doesn't raise when no vif is returned. 
Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return """ net_get.return_value = dict(test_network.fake_network, **networks[1]) def vif_get(_context, _vif_id): return None self.stub_out('nova.db.virtual_interface_get', vif_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, allocated=True, virtual_interface_id=3, instance_uuid=instance.uuid, network=dict(test_network.fake_network, **networks[1])) self.flags(force_dhcp_release=True) fixed_update.return_value = fixed_get.return_value self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake') fixed_update.assert_called_once_with(context1, fix_addr.address, {'allocated': False}) @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get): # Verify IP is not deallocated if the security group refresh fails. 
net_get.return_value = dict(test_network.fake_network, **networks[1]) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = objects.FixedIP.associate_pool(elevated, 1, instance['uuid']) def fake_refresh(instance_uuid): raise test.TestingException() self.stubs.Set(self.network, '_do_trigger_security_group_members_refresh_for_instance', fake_refresh) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, allocated=True, virtual_interface_id=3, instance_uuid=instance.uuid, network=dict(test_network.fake_network, **networks[1])) self.assertRaises(test.TestingException, self.network.deallocate_fixed_ip, context1, str(fix_addr.address), 'fake') self.assertFalse(fixed_update.called) def test_get_networks_by_uuids_ordering(self): self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn( [dict(test_network.fake_network, **net) for net in networks]) self.mox.ReplayAll() res = self.network._get_networks_by_uuids(self.context, requested_networks) self.assertEqual(1, res[0]['id']) self.assertEqual(0, res[1]['id']) @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id') @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host') @mock.patch('nova.network.linux_net.iptables_manager._apply') def test_init_host_iptables_defer_apply(self, iptable_apply, floating_get_by_host, fixed_get_by_id): def get_by_id(context, fixed_ip_id, **kwargs): net = objects.Network(bridge='testbridge', cidr='192.168.1.0/24') if fixed_ip_id == 1: return objects.FixedIP(address='192.168.1.4', network=net) elif fixed_ip_id == 2: return objects.FixedIP(address='192.168.1.5', network=net) def fake_apply(): fake_apply.count += 1 fake_apply.count = 0 ctxt = 
context.RequestContext('testuser', 'testproject', is_admin=True) float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1) float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2) float1._context = ctxt float2._context = ctxt iptable_apply.side_effect = fake_apply floating_get_by_host.return_value = [float1, float2] fixed_get_by_id.side_effect = get_by_id self.network.init_host() self.assertEqual(1, fake_apply.count) class _TestDomainObject(object): def __init__(self, **kwargs): for k, v in six.iteritems(kwargs): self.__setattr__(k, v) class CommonNetworkTestCase(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(CommonNetworkTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.flags(ipv6_backend='rfc2462') ipv6.reset_backend() def test_validate_instance_zone_for_dns_domain(self): domain = 'example.com' az = 'test_az' domains = { domain: _TestDomainObject( domain=domain, availability_zone=az)} def dnsdomain_get(context, instance_domain): return domains.get(instance_domain) self.stub_out('nova.db.dnsdomain_get', dnsdomain_get) fake_instance = {'uuid': FAKEUUID, 'availability_zone': az} manager = network_manager.NetworkManager() res = manager._validate_instance_zone_for_dns_domain(self.context, fake_instance) self.assertTrue(res) def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None, extra_reserved=None, bottom_reserved=0, top_reserved=0): return None def test_get_instance_nw_info_client_exceptions(self): manager = network_manager.NetworkManager() self.mox.StubOutWithMock(manager.db, 'fixed_ip_get_by_instance') manager.db.fixed_ip_get_by_instance( self.context, FAKEUUID).AndRaise(exception.InstanceNotFound( instance_id=FAKEUUID)) self.mox.ReplayAll() self.assertRaises(messaging.ExpectedException, manager.get_instance_nw_info, self.context, FAKEUUID, 'fake_rxtx_factor', HOST) @mock.patch('nova.db.instance_get') @mock.patch('nova.db.fixed_ip_get_by_instance') def 
test_deallocate_for_instance_passes_host_info(self, fixed_get, instance_get): manager = fake_network.FakeNetworkManager() db = manager.db instance_get.return_value = fake_inst(uuid='ignoreduuid') db.virtual_interface_delete_by_instance = lambda _x, _y: None ctx = context.RequestContext('igonre', 'igonre') fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip, address='1.2.3.4', network_id=123)] manager.deallocate_for_instance( ctx, instance=objects.Instance._from_db_object(self.context, objects.Instance(), instance_get.return_value)) self.assertEqual([ (ctx, '1.2.3.4', 'fake-host') ], manager.deallocate_fixed_ip_calls) @mock.patch('nova.db.fixed_ip_get_by_instance') def test_deallocate_for_instance_passes_host_info_with_update_dns_entries( self, fixed_get): self.flags(update_dns_entries=True) manager = fake_network.FakeNetworkManager() db = manager.db db.virtual_interface_delete_by_instance = lambda _x, _y: None ctx = context.RequestContext('igonre', 'igonre') fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip, address='1.2.3.4', network_id=123)] with mock.patch.object(manager.network_rpcapi, 'update_dns') as mock_update_dns: manager.deallocate_for_instance( ctx, instance=fake_instance.fake_instance_obj(ctx)) mock_update_dns.assert_called_once_with(ctx, ['123']) self.assertEqual([ (ctx, '1.2.3.4', 'fake-host') ], manager.deallocate_fixed_ip_calls) def test_deallocate_for_instance_with_requested_networks(self): manager = fake_network.FakeNetworkManager() db = manager.db db.virtual_interface_delete_by_instance = mock.Mock() ctx = context.RequestContext('igonre', 'igonre') requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest.from_tuple(t) for t in [('123', '1.2.3.4'), ('123', '4.3.2.1'), ('123', None)]]) manager.deallocate_for_instance( ctx, instance=fake_instance.fake_instance_obj(ctx), requested_networks=requested_networks) self.assertEqual([ (ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host') ], 
manager.deallocate_fixed_ip_calls) def test_deallocate_for_instance_with_update_dns_entries(self): self.flags(update_dns_entries=True) manager = fake_network.FakeNetworkManager() db = manager.db db.virtual_interface_delete_by_instance = mock.Mock() ctx = context.RequestContext('igonre', 'igonre') requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest.from_tuple(t) for t in [('123', '1.2.3.4'), ('123', '4.3.2.1')]]) with mock.patch.object(manager.network_rpcapi, 'update_dns') as mock_update_dns: manager.deallocate_for_instance( ctx, instance=fake_instance.fake_instance_obj(ctx), requested_networks=requested_networks) mock_update_dns.assert_called_once_with(ctx, ['123']) self.assertEqual([ (ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host') ], manager.deallocate_fixed_ip_calls) @mock.patch('nova.db.fixed_ip_get_by_instance') @mock.patch('nova.db.fixed_ip_disassociate') def test_remove_fixed_ip_from_instance(self, disassociate, get): manager = fake_network.FakeNetworkManager() get.return_value = [ dict(test_fixed_ip.fake_fixed_ip, **x) for x in manager.db.fixed_ip_get_by_instance(None, FAKEUUID)] manager.remove_fixed_ip_from_instance(self.context, FAKEUUID, HOST, '10.0.0.1') self.assertEqual('10.0.0.1', manager.deallocate_called) disassociate.assert_called_once_with(self.context, '10.0.0.1') @mock.patch('nova.db.fixed_ip_get_by_instance') def test_remove_fixed_ip_from_instance_bad_input(self, get): manager = fake_network.FakeNetworkManager() get.return_value = [] self.assertRaises(exception.FixedIpNotFoundForSpecificInstance, manager.remove_fixed_ip_from_instance, self.context, 99, HOST, 'bad input') def test_validate_cidrs(self): manager = fake_network.FakeNetworkManager() nets = manager.create_networks(self.context.elevated(), 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None, None) self.assertEqual(1, len(nets)) cidrs = [str(net['cidr']) for net in nets] self.assertIn('192.168.0.0/24', cidrs) def 
test_validate_cidrs_split_exact_in_half(self): manager = fake_network.FakeNetworkManager() nets = manager.create_networks(self.context.elevated(), 'fake', '192.168.0.0/24', False, 2, 128, None, None, None, None, None) self.assertEqual(2, len(nets)) cidrs = [str(net['cidr']) for net in nets] self.assertIn('192.168.0.0/25', cidrs) self.assertIn('192.168.0.128/25', cidrs) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all): manager = fake_network.FakeNetworkManager() get_all.return_value = [dict(test_network.fake_network, id=1, cidr='192.168.2.0/24')] nets = manager.create_networks(self.context.elevated(), 'fake', '192.168.0.0/16', False, 4, 256, None, None, None, None, None) self.assertEqual(4, len(nets)) cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', '192.168.4.0/24'] for exp_cidr in exp_cidrs: self.assertIn(exp_cidr, cidrs) self.assertNotIn('192.168.2.0/24', cidrs) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_smaller_subnet_in_use(self, get_all): manager = fake_network.FakeNetworkManager() get_all.return_value = [dict(test_network.fake_network, id=1, cidr='192.168.2.9/25')] # CidrConflict: requested cidr (192.168.2.0/24) conflicts with # existing smaller cidr args = (self.context.elevated(), 'fake', '192.168.2.0/24', False, 1, 256, None, None, None, None, None) self.assertRaises(exception.CidrConflict, manager.create_networks, *args) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all): manager = fake_network.FakeNetworkManager() get_all.return_value = [dict(test_network.fake_network, id=1, cidr='192.168.2.0/25')] nets = manager.create_networks(self.context.elevated(), 'fake', '192.168.0.0/16', False, 4, 256, None, None, None, None, None) self.assertEqual(4, len(nets)) cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', 
'192.168.4.0/24'] for exp_cidr in exp_cidrs: self.assertIn(exp_cidr, cidrs) self.assertNotIn('192.168.2.0/24', cidrs) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all): manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') get_all.return_value = [dict(test_network.fake_network, id=1, cidr='192.168.2.9/29')] nets = manager.create_networks(self.context.elevated(), 'fake', '192.168.2.0/24', False, 3, 32, None, None, None, None, None) self.assertEqual(3, len(nets)) cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27'] for exp_cidr in exp_cidrs: self.assertIn(exp_cidr, cidrs) self.assertNotIn('192.168.2.0/27', cidrs) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_split_all_in_use(self, get_all): manager = fake_network.FakeNetworkManager() in_use = [dict(test_network.fake_network, **values) for values in [{'id': 1, 'cidr': '192.168.2.9/29'}, {'id': 2, 'cidr': '192.168.2.64/26'}, {'id': 3, 'cidr': '192.168.2.128/26'}]] get_all.return_value = in_use args = (self.context.elevated(), 'fake', '192.168.2.0/24', False, 3, 64, None, None, None, None, None) # CidrConflict: Not enough subnets avail to satisfy requested num_ # networks - some subnets in requested range already # in use self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_validate_cidrs_one_in_use(self): manager = fake_network.FakeNetworkManager() args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None, None, None, None) # ValueError: network_size * num_networks exceeds cidr size self.assertRaises(ValueError, manager.create_networks, *args) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_already_used(self, get_all): manager = fake_network.FakeNetworkManager() get_all.return_value = [dict(test_network.fake_network, cidr='192.168.0.0/24')] # CidrConflict: cidr already in use args = 
(self.context.elevated(), 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None, None) self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_validate_cidrs_too_many(self): manager = fake_network.FakeNetworkManager() args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None, None, None, None) # ValueError: Not enough subnets avail to satisfy requested # num_networks self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_split_partial(self): manager = fake_network.FakeNetworkManager() nets = manager.create_networks(self.context.elevated(), 'fake', '192.168.0.0/16', False, 2, 256, None, None, None, None, None) returned_cidrs = [str(net['cidr']) for net in nets] self.assertIn('192.168.0.0/24', returned_cidrs) self.assertIn('192.168.1.0/24', returned_cidrs) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_conflict_existing_supernet(self, get_all): manager = fake_network.FakeNetworkManager() get_all.return_value = [dict(test_network.fake_network, id=1, cidr='192.168.0.0/8')] args = (self.context.elevated(), 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None, None) # CidrConflict: requested cidr (192.168.0.0/24) conflicts # with existing supernet self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_create_networks(self): cidr = '192.168.0.0/24' manager = fake_network.FakeNetworkManager() self.stubs.Set(manager, '_create_fixed_ips', self.fake_create_fixed_ips) args = [self.context.elevated(), 'foo', cidr, None, 1, 256, 'fd00::/48', None, None, None, None, None] self.assertTrue(manager.create_networks(*args)) def test_create_networks_with_uuid(self): cidr = '192.168.0.0/24' uuid = FAKEUUID manager = fake_network.FakeNetworkManager() self.stubs.Set(manager, '_create_fixed_ips', self.fake_create_fixed_ips) args = [self.context.elevated(), 'foo', cidr, None, 1, 256, 'fd00::/48', None, None, None, None, None] kwargs = {'uuid': uuid} nets = 
manager.create_networks(*args, **kwargs) self.assertEqual(1, len(nets)) net = nets[0] self.assertEqual(uuid, net['uuid']) @mock.patch('nova.db.network_get_all') def test_create_networks_cidr_already_used(self, get_all): manager = fake_network.FakeNetworkManager() get_all.return_value = [dict(test_network.fake_network, id=1, cidr='192.168.0.0/24')] args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256, 'fd00::/48', None, None, None, None, None] self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_create_networks_many(self): cidr = '192.168.0.0/16' manager = fake_network.FakeNetworkManager() self.stubs.Set(manager, '_create_fixed_ips', self.fake_create_fixed_ips) args = [self.context.elevated(), 'foo', cidr, None, 10, 256, 'fd00::/48', None, None, None, None, None] self.assertTrue(manager.create_networks(*args)) @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ips_by_virtual_interface') def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get): manager = fake_network.FakeNetworkManager(self.stubs) fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface _vifs = manager.db.virtual_interface_get_all(None) fake_context = context.RequestContext('user', 'project') network_get.return_value = dict(test_network.fake_network, **manager.db.network_get(None, 1)) # Greedy get eveything res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '.*'}) self.assertEqual(len(_vifs), len(res)) # Doesn't exist res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '10.0.0.1'}) self.assertFalse(res) # Get instance 1 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '172.16.0.2'}) self.assertTrue(res) self.assertEqual(1, len(res)) self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid']) # Get instance 2 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '173.16.0.2'}) self.assertTrue(res) self.assertEqual(1, len(res)) 
self.assertEqual(_vifs[2]['instance_uuid'], res[0]['instance_uuid']) # Get instance 0 and 1 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '172.16.0.*'}) self.assertTrue(res) self.assertEqual(2, len(res)) self.assertEqual(_vifs[0]['instance_uuid'], res[0]['instance_uuid']) self.assertEqual(_vifs[1]['instance_uuid'], res[1]['instance_uuid']) # Get instance 1 and 2 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '17..16.0.2'}) self.assertTrue(res) self.assertEqual(2, len(res)) self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid']) self.assertEqual(_vifs[2]['instance_uuid'], res[1]['instance_uuid']) @mock.patch('nova.db.network_get') def test_get_instance_uuids_by_ipv6_regex(self, network_get): manager = fake_network.FakeNetworkManager(self.stubs) _vifs = manager.db.virtual_interface_get_all(None) fake_context = context.RequestContext('user', 'project') def _network_get(context, network_id, **args): return dict(test_network.fake_network, **manager.db.network_get(context, network_id)) network_get.side_effect = _network_get # Greedy get eveything res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': '.*'}) self.assertEqual(len(_vifs), len(res)) # Doesn't exist res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': '.*1034.*'}) self.assertFalse(res) # Get instance 1 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': '2001:.*2'}) self.assertTrue(res) self.assertEqual(1, len(res)) self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid']) # Get instance 2 ip6 = '2001:db8:69:1f:dead:beff:feff:ef03' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': ip6}) self.assertTrue(res) self.assertEqual(1, len(res)) self.assertEqual(_vifs[2]['instance_uuid'], res[0]['instance_uuid']) # Get instance 0 and 1 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': '.*ef0[1,2]'}) self.assertTrue(res) self.assertEqual(2, len(res)) 
self.assertEqual(_vifs[0]['instance_uuid'], res[0]['instance_uuid']) self.assertEqual(_vifs[1]['instance_uuid'], res[1]['instance_uuid']) # Get instance 1 and 2 ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': ip6}) self.assertTrue(res) self.assertEqual(2, len(res)) self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid']) self.assertEqual(_vifs[2]['instance_uuid'], res[1]['instance_uuid']) @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ips_by_virtual_interface') def test_get_instance_uuids_by_ip(self, fixed_get, network_get): manager = fake_network.FakeNetworkManager(self.stubs) fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface _vifs = manager.db.virtual_interface_get_all(None) fake_context = context.RequestContext('user', 'project') network_get.return_value = dict(test_network.fake_network, **manager.db.network_get(None, 1)) # No regex for you! res = manager.get_instance_uuids_by_ip_filter(fake_context, {'fixed_ip': '.*'}) self.assertFalse(res) # Doesn't exist ip = '10.0.0.1' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'fixed_ip': ip}) self.assertFalse(res) # Get instance 1 ip = '172.16.0.2' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'fixed_ip': ip}) self.assertTrue(res) self.assertEqual(1, len(res)) self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid']) # Get instance 2 ip = '173.16.0.2' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'fixed_ip': ip}) self.assertTrue(res) self.assertEqual(1, len(res)) self.assertEqual(_vifs[2]['instance_uuid'], res[0]['instance_uuid']) @mock.patch('nova.db.network_get_by_uuid') def test_get_network(self, get): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') get.return_value = dict(test_network.fake_network, **networks[0]) uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' network = manager.get_network(fake_context, 
uuid) self.assertEqual(uuid, network['uuid']) @mock.patch('nova.db.network_get_by_uuid') def test_get_network_not_found(self, get): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo') uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.assertRaises(exception.NetworkNotFound, manager.get_network, fake_context, uuid) @mock.patch('nova.db.network_get_all') def test_get_all_networks(self, get_all): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') get_all.return_value = [dict(test_network.fake_network, **net) for net in networks] output = manager.get_all_networks(fake_context) self.assertEqual(2, len(networks)) self.assertEqual('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', output[0]['uuid']) self.assertEqual('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', output[1]['uuid']) @mock.patch('nova.db.network_get_by_uuid') @mock.patch('nova.db.network_disassociate') def test_disassociate_network(self, disassociate, get): manager = fake_network.FakeNetworkManager() disassociate.return_value = True fake_context = context.RequestContext('user', 'project') get.return_value = dict(test_network.fake_network, **networks[0]) uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' manager.disassociate_network(fake_context, uuid) @mock.patch('nova.db.network_get_by_uuid') def test_disassociate_network_not_found(self, get): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake') uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.assertRaises(exception.NetworkNotFound, manager.disassociate_network, fake_context, uuid) def _test_init_host_dynamic_fixed_range(self, net_manager): self.flags(fake_network=True, routing_source_ip='172.16.0.1', metadata_host='172.16.0.1', public_interface='eth1', dmz_cidr=['10.0.3.0/24']) binary_name = 
linux_net.get_binary_name() # Stub out calls we don't want to really run, mock the db self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None) self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips', lambda *args: None) self.stubs.Set(net_manager.l3driver, 'initialize_gateway', lambda *args: None) self.mox.StubOutWithMock(db, 'network_get_all_by_host') fake_networks = [dict(test_network.fake_network, **n) for n in networks] db.network_get_all_by_host(mox.IgnoreArg(), mox.IgnoreArg() ).MultipleTimes().AndReturn(fake_networks) self.mox.ReplayAll() net_manager.init_host() # Get the iptables rules that got created current_lines = [] new_lines = linux_net.iptables_manager._modify_rules(current_lines, linux_net.iptables_manager.ipv4['nat'], table_name='nat') expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 ' '-j SNAT --to-source %s -o %s' % (binary_name, networks[0]['cidr'], CONF.routing_source_ip, CONF.public_interface), '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' % (binary_name, networks[0]['cidr'], CONF.metadata_host), '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' % (binary_name, networks[0]['cidr'], CONF.dmz_cidr[0]), '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! ' '--ctstate DNAT -j ACCEPT' % (binary_name, networks[0]['cidr'], networks[0]['cidr']), '[0:0] -A %s-snat -s %s -d 0.0.0.0/0 ' '-j SNAT --to-source %s -o %s' % (binary_name, networks[1]['cidr'], CONF.routing_source_ip, CONF.public_interface), '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' % (binary_name, networks[1]['cidr'], CONF.metadata_host), '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' % (binary_name, networks[1]['cidr'], CONF.dmz_cidr[0]), '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! 
' '--ctstate DNAT -j ACCEPT' % (binary_name, networks[1]['cidr'], networks[1]['cidr'])] # Compare the expected rules against the actual ones for line in expected_lines: self.assertIn(line, new_lines) # Add an additional network and ensure the rules get configured new_network = {'id': 2, 'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc', 'label': 'test2', 'injected': False, 'multi_host': False, 'cidr': '192.168.2.0/24', 'cidr_v6': '2001:dba::/64', 'gateway_v6': '2001:dba::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa1', 'bridge_interface': 'fake_fa1', 'gateway': '192.168.2.1', 'dhcp_server': '192.168.2.1', 'broadcast': '192.168.2.255', 'dns1': '192.168.2.1', 'dns2': '192.168.2.2', 'vlan': None, 'host': HOST, 'project_id': 'fake_project', 'vpn_public_address': '192.168.2.2', 'vpn_public_port': '22', 'vpn_private_address': '10.0.0.2'} new_network_obj = objects.Network._from_db_object( self.context, objects.Network(), dict(test_network.fake_network, **new_network)) ctxt = context.get_admin_context() net_manager._setup_network_on_host(ctxt, new_network_obj) # Get the new iptables rules that got created from adding a new network current_lines = [] new_lines = linux_net.iptables_manager._modify_rules(current_lines, linux_net.iptables_manager.ipv4['nat'], table_name='nat') # Add the new expected rules to the old ones expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 ' '-j SNAT --to-source %s -o %s' % (binary_name, new_network['cidr'], CONF.routing_source_ip, CONF.public_interface), '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' % (binary_name, new_network['cidr'], CONF.metadata_host), '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' % (binary_name, new_network['cidr'], CONF.dmz_cidr[0]), '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ' '! 
--ctstate DNAT -j ACCEPT' % (binary_name, new_network['cidr'], new_network['cidr'])] # Compare the expected rules (with new network) against the actual ones for line in expected_lines: self.assertIn(line, new_lines) def test_flatdhcpmanager_dynamic_fixed_range(self): """Test FlatDHCPManager NAT rules for fixed_range.""" # Set the network manager self.network = network_manager.FlatDHCPManager(host=HOST) self.network.db = db # Test new behavior: # CONF.fixed_range is not set, defaults to None # Determine networks to NAT based on lookup self._test_init_host_dynamic_fixed_range(self.network) def test_vlanmanager_dynamic_fixed_range(self): """Test VlanManager NAT rules for fixed_range.""" # Set the network manager self.network = network_manager.VlanManager(host=HOST) self.network.db = db # Test new behavior: # CONF.fixed_range is not set, defaults to None # Determine networks to NAT based on lookup self._test_init_host_dynamic_fixed_range(self.network) @mock.patch('nova.objects.quotas.Quotas.rollback') @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address') @mock.patch('nova.network.manager.NetworkManager.' 
'_do_trigger_security_group_members_refresh_for_instance') def test_fixed_ip_cleanup_rollback(self, fake_trig, fixed_get, rollback): manager = network_manager.NetworkManager() fake_trig.side_effect = test.TestingException self.assertRaises(test.TestingException, manager.deallocate_fixed_ip, self.context, 'fake', 'fake', instance=fake_inst(uuid='ignoreduuid')) rollback.assert_called_once_with() def test_fixed_cidr_out_of_range(self): manager = network_manager.NetworkManager() ctxt = context.get_admin_context() self.assertRaises(exception.AddressOutOfRange, manager.create_networks, ctxt, label="fake", cidr='10.1.0.0/24', fixed_cidr='10.1.1.0/25') class TestRPCFixedManager(network_manager.RPCAllocateFixedIP, network_manager.NetworkManager): """Dummy manager that implements RPCAllocateFixedIP.""" class RPCAllocateTestCase(test.NoDBTestCase): """Tests nova.network.manager.RPCAllocateFixedIP.""" def setUp(self): super(RPCAllocateTestCase, self).setUp() self.rpc_fixed = TestRPCFixedManager() self.context = context.RequestContext('fake', 'fake') def test_rpc_allocate(self): """Test to verify bug 855030 doesn't resurface. Mekes sure _rpc_allocate_fixed_ip returns a value so the call returns properly and the greenpool completes. 
""" address = '10.10.10.10' def fake_allocate(*args, **kwargs): return address def fake_network_get(*args, **kwargs): return test_network.fake_network self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate) self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get) rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context, 'fake_instance', 'fake_network') self.assertEqual(address, rval) class TestFloatingIPManager(floating_ips.FloatingIP, network_manager.NetworkManager): """Dummy manager that implements FloatingIP.""" class AllocateTestCase(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(AllocateTestCase, self).setUp() dns = 'nova.network.noop_dns_driver.NoopDNSDriver' self.flags(instance_dns_manager=dns) self.useFixture(test.SampleNetworks()) self.network = network_manager.VlanManager(host=HOST) self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True) self.user_context = context.RequestContext('testuser', 'testproject') def test_allocate_for_instance(self): address = "10.10.10.10" self.flags(auto_assign_floating_ip=True) db.floating_ip_create(self.context, {'address': address, 'pool': 'nova'}) inst = objects.Instance(context=self.context) inst.host = HOST inst.display_name = HOST inst.instance_type_id = 1 inst.uuid = FAKEUUID inst.create() networks = db.network_get_all(self.context) for network in networks: db.network_update(self.context, network['id'], {'host': HOST}) project_id = self.user_context.project_id nw_info = self.network.allocate_for_instance(self.user_context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=project_id, macs=None) self.assertEqual(1, len(nw_info)) fixed_ip = nw_info.fixed_ips()[0]['address'] self.assertTrue(netutils.is_valid_ipv4(fixed_ip)) self.network.deallocate_for_instance(self.context, instance=inst) def test_allocate_for_instance_illegal_network(self): networks = 
db.network_get_all(self.context) requested_networks = [] for network in networks: # set all networks to other projects db.network_update(self.context, network['id'], {'host': HOST, 'project_id': 'otherid'}) requested_networks.append((network['uuid'], None)) # set the first network to our project db.network_update(self.context, networks[0]['id'], {'project_id': self.user_context.project_id}) inst = objects.Instance(context=self.context) inst.host = HOST inst.display_name = HOST inst.instance_type_id = 1 inst.uuid = FAKEUUID inst.create() self.assertRaises(exception.NetworkNotFoundForProject, self.network.allocate_for_instance, self.user_context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=self.context.project_id, macs=None, requested_networks=requested_networks) def test_allocate_for_instance_with_mac(self): available_macs = set(['ca:fe:de:ad:be:ef']) inst = db.instance_create(self.context, {'host': HOST, 'display_name': HOST, 'instance_type_id': 1}) networks = db.network_get_all(self.context) for network in networks: db.network_update(self.context, network['id'], {'host': HOST}) project_id = self.context.project_id nw_info = self.network.allocate_for_instance(self.user_context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=project_id, macs=available_macs) assigned_macs = [vif['address'] for vif in nw_info] self.assertEqual(1, len(assigned_macs)) self.assertEqual(available_macs.pop(), assigned_macs[0]) self.network.deallocate_for_instance(self.context, instance_id=inst['id'], host=self.network.host, project_id=project_id) def test_allocate_for_instance_not_enough_macs(self): available_macs = set() inst = db.instance_create(self.context, {'host': HOST, 'display_name': HOST, 'instance_type_id': 1}) networks = db.network_get_all(self.context) for network in networks: db.network_update(self.context, network['id'], {'host': self.network.host}) 
project_id = self.context.project_id self.assertRaises(exception.VirtualInterfaceCreateException, self.network.allocate_for_instance, self.user_context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=project_id, macs=available_macs) class FloatingIPTestCase(test.TestCase): """Tests nova.network.manager.FloatingIP.""" REQUIRES_LOCKING = True def setUp(self): super(FloatingIPTestCase, self).setUp() self.tempdir = self.useFixture(fixtures.TempDir()).path self.flags(log_dir=self.tempdir) self.network = TestFloatingIPManager() self.network.db = db self.project_id = 'testproject' self.context = context.RequestContext('testuser', self.project_id, is_admin=False) @mock.patch('nova.db.fixed_ip_get') @mock.patch('nova.db.network_get') @mock.patch('nova.db.instance_get_by_uuid') @mock.patch('nova.db.service_get_by_host_and_binary') @mock.patch('nova.db.floating_ip_get_by_address') def test_disassociate_floating_ip_multi_host_calls(self, floating_get, service_get, inst_get, net_get, fixed_get): floating_ip = dict(test_floating_ip.fake_floating_ip, fixed_ip_id=12) fixed_ip = dict(test_fixed_ip.fake_fixed_ip, network_id=None, instance_uuid='instance-uuid') network = dict(test_network.fake_network, multi_host=True) instance = dict(fake_instance.fake_db_instance(host='some-other-host')) ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) self.stubs.Set(self.network, '_floating_ip_owned_by_project', lambda _x, _y: True) floating_get.return_value = floating_ip fixed_get.return_value = fixed_ip net_get.return_value = network inst_get.return_value = instance service_get.return_value = test_service.fake_service self.stubs.Set(self.network.servicegroup_api, 'service_is_up', lambda _x: True) self.mox.StubOutWithMock( self.network.network_rpcapi, '_disassociate_floating_ip') self.network.network_rpcapi._disassociate_floating_ip( ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid') 
self.mox.ReplayAll() self.network.disassociate_floating_ip(ctxt, 'fl_ip', True) @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.instance_get_by_uuid') @mock.patch('nova.db.floating_ip_get_by_address') def test_associate_floating_ip_multi_host_calls(self, floating_get, inst_get, net_get, fixed_get): floating_ip = dict(test_floating_ip.fake_floating_ip, fixed_ip_id=None) fixed_ip = dict(test_fixed_ip.fake_fixed_ip, network_id=None, instance_uuid='instance-uuid') network = dict(test_network.fake_network, multi_host=True) instance = dict(fake_instance.fake_db_instance(host='some-other-host')) ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) self.stubs.Set(self.network, '_floating_ip_owned_by_project', lambda _x, _y: True) floating_get.return_value = floating_ip fixed_get.return_value = fixed_ip net_get.return_value = network inst_get.return_value = instance self.mox.StubOutWithMock( self.network.network_rpcapi, '_associate_floating_ip') self.network.network_rpcapi._associate_floating_ip( ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid') self.mox.ReplayAll() self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True) def test_double_deallocation(self): instance_ref = db.instance_create(self.context, {"project_id": self.project_id}) # Run it twice to make it fault if it does not handle # instances without fixed networks # If this fails in either, it does not handle having no addresses self.network.deallocate_for_instance(self.context, instance_id=instance_ref['id']) self.network.deallocate_for_instance(self.context, instance_id=instance_ref['id']) def test_deallocate_floating_ip_quota_rollback(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', fixed_ip_id=None, project_id=ctxt.project_id) self.stubs.Set(self.network.db, 
'floating_ip_get_by_address', fake) self.mox.StubOutWithMock(db, 'floating_ip_deallocate') self.mox.StubOutWithMock(self.network, '_floating_ip_owned_by_project') self.mox.StubOutWithMock(quota.QUOTAS, 'reserve') self.mox.StubOutWithMock(quota.QUOTAS, 'rollback') quota.QUOTAS.reserve(self.context, floating_ips=-1, project_id='testproject').AndReturn('fake-rsv') self.network._floating_ip_owned_by_project(self.context, mox.IgnoreArg()) db.floating_ip_deallocate(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None) quota.QUOTAS.rollback(self.context, 'fake-rsv', project_id='testproject') self.mox.ReplayAll() self.network.deallocate_floating_ip(self.context, '10.0.0.1') def test_deallocation_deleted_instance(self): self.stubs.Set(self.network, '_teardown_network_on_host', lambda *args, **kwargs: None) instance = objects.Instance(context=self.context) instance.project_id = self.project_id instance.deleted = True instance.create() network = db.network_create_safe(self.context.elevated(), { 'project_id': self.project_id, 'host': CONF.host, 'label': 'foo'}) fixed = db.fixed_ip_create(self.context, {'allocated': True, 'instance_uuid': instance.uuid, 'address': '10.1.1.1', 'network_id': network['id']}) db.floating_ip_create(self.context, { 'address': '10.10.10.10', 'instance_uuid': instance.uuid, 'fixed_ip_id': fixed['id'], 'project_id': self.project_id}) self.network.deallocate_for_instance(self.context, instance=instance) def test_deallocation_duplicate_floating_ip(self): self.stubs.Set(self.network, '_teardown_network_on_host', lambda *args, **kwargs: None) instance = objects.Instance(context=self.context) instance.project_id = self.project_id instance.create() network = db.network_create_safe(self.context.elevated(), { 'project_id': self.project_id, 'host': CONF.host, 'label': 'foo'}) fixed = db.fixed_ip_create(self.context, {'allocated': True, 'instance_uuid': instance.uuid, 'address': '10.1.1.1', 'network_id': network['id']}) db.floating_ip_create(self.context, { 
'address': '10.10.10.10', 'deleted': True}) db.floating_ip_create(self.context, { 'address': '10.10.10.10', 'instance_uuid': instance.uuid, 'fixed_ip_id': fixed['id'], 'project_id': self.project_id}) self.network.deallocate_for_instance(self.context, instance=instance) @mock.patch('nova.db.fixed_ip_get') @mock.patch('nova.db.floating_ip_get_by_address') @mock.patch('nova.db.floating_ip_update') def test_migrate_instance_start(self, floating_update, floating_get, fixed_get): called = {'count': 0} def fake_floating_ip_get_by_address(context, address): return dict(test_floating_ip.fake_floating_ip, address=address, fixed_ip_id=0) def fake_is_stale_floating_ip_address(context, floating_ip): return str(floating_ip.address) == '172.24.4.23' floating_get.side_effect = fake_floating_ip_get_by_address fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, instance_uuid='fake_uuid', address='10.0.0.2', network=test_network.fake_network) floating_update.return_value = fake_floating_ip_get_by_address( None, '1.2.3.4') def fake_remove_floating_ip(floating_addr, fixed_addr, interface, network): called['count'] += 1 def fake_clean_conntrack(fixed_ip): if not str(fixed_ip) == "10.0.0.2": raise exception.FixedIpInvalid(address=fixed_ip) self.stubs.Set(self.network, '_is_stale_floating_ip_address', fake_is_stale_floating_ip_address) self.stubs.Set(self.network.l3driver, 'remove_floating_ip', fake_remove_floating_ip) self.stubs.Set(self.network.driver, 'clean_conntrack', fake_clean_conntrack) self.mox.ReplayAll() addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25'] self.network.migrate_instance_start(self.context, instance_uuid=FAKEUUID, floating_addresses=addresses, rxtx_factor=3, project_id=self.project_id, source='fake_source', dest='fake_dest') self.assertEqual(2, called['count']) @mock.patch('nova.db.fixed_ip_get') @mock.patch('nova.db.floating_ip_update') def test_migrate_instance_finish(self, floating_update, fixed_get): called = {'count': 0} def 
fake_floating_ip_get_by_address(context, address): return dict(test_floating_ip.fake_floating_ip, address=address, fixed_ip_id=0) def fake_is_stale_floating_ip_address(context, floating_ip): return str(floating_ip.address) == '172.24.4.23' fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, instance_uuid='fake_uuid', address='10.0.0.2', network=test_network.fake_network) floating_update.return_value = fake_floating_ip_get_by_address( None, '1.2.3.4') def fake_add_floating_ip(floating_addr, fixed_addr, interface, network): called['count'] += 1 self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake_floating_ip_get_by_address) self.stubs.Set(self.network, '_is_stale_floating_ip_address', fake_is_stale_floating_ip_address) self.stubs.Set(self.network.l3driver, 'add_floating_ip', fake_add_floating_ip) self.mox.ReplayAll() addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25'] self.network.migrate_instance_finish(self.context, instance_uuid=FAKEUUID, floating_addresses=addresses, host='fake_dest', rxtx_factor=3, project_id=self.project_id, source='fake_source') self.assertEqual(2, called['count']) def test_floating_dns_create_conflict(self): zone = "example.org" address1 = "10.10.10.11" name1 = "foo" self.network.add_dns_entry(self.context, address1, name1, "A", zone) self.assertRaises(exception.FloatingIpDNSExists, self.network.add_dns_entry, self.context, address1, name1, "A", zone) def test_floating_create_and_get(self): zone = "example.org" address1 = "10.10.10.11" name1 = "foo" name2 = "bar" entries = self.network.get_dns_entries_by_address(self.context, address1, zone) self.assertFalse(entries) self.network.add_dns_entry(self.context, address1, name1, "A", zone) self.network.add_dns_entry(self.context, address1, name2, "A", zone) entries = self.network.get_dns_entries_by_address(self.context, address1, zone) self.assertEqual(2, len(entries)) self.assertEqual(name1, entries[0]) self.assertEqual(name2, entries[1]) entries = 
self.network.get_dns_entries_by_name(self.context, name1, zone) self.assertEqual(1, len(entries)) self.assertEqual(address1, entries[0]) def test_floating_dns_delete(self): zone = "example.org" address1 = "10.10.10.11" name1 = "foo" name2 = "bar" self.network.add_dns_entry(self.context, address1, name1, "A", zone) self.network.add_dns_entry(self.context, address1, name2, "A", zone) self.network.delete_dns_entry(self.context, name1, zone) entries = self.network.get_dns_entries_by_address(self.context, address1, zone) self.assertEqual(1, len(entries)) self.assertEqual(name2, entries[0]) self.assertRaises(exception.NotFound, self.network.delete_dns_entry, self.context, name1, zone) def test_floating_dns_domains_public(self): domain1 = "example.org" domain2 = "example.com" address1 = '10.10.10.10' entryname = 'testentry' self.network.create_public_dns_domain(self.context, domain1, 'testproject') self.network.create_public_dns_domain(self.context, domain2, 'fakeproject') domains = self.network.get_dns_domains(self.context) self.assertEqual(2, len(domains)) self.assertEqual(domain1, domains[0]['domain']) self.assertEqual(domain2, domains[1]['domain']) self.assertEqual('testproject', domains[0]['project']) self.assertEqual('fakeproject', domains[1]['project']) self.network.add_dns_entry(self.context, address1, entryname, 'A', domain1) entries = self.network.get_dns_entries_by_name(self.context, entryname, domain1) self.assertEqual(1, len(entries)) self.assertEqual(address1, entries[0]) self.network.delete_dns_domain(self.context, domain1) self.network.delete_dns_domain(self.context, domain2) # Verify that deleting the domain deleted the associated entry entries = self.network.get_dns_entries_by_name(self.context, entryname, domain1) self.assertFalse(entries) def test_delete_all_by_ip(self): domain1 = "example.org" domain2 = "example.com" address = "10.10.10.10" name1 = "foo" name2 = "bar" def fake_domains(context): return [{'domain': 'example.org', 'scope': 'public'}, 
{'domain': 'example.com', 'scope': 'public'}, {'domain': 'test.example.org', 'scope': 'public'}] self.stubs.Set(self.network, 'get_dns_domains', fake_domains) context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) self.network.create_public_dns_domain(context_admin, domain1, 'testproject') self.network.create_public_dns_domain(context_admin, domain2, 'fakeproject') domains = self.network.get_dns_domains(self.context) for domain in domains: self.network.add_dns_entry(self.context, address, name1, "A", domain['domain']) self.network.add_dns_entry(self.context, address, name2, "A", domain['domain']) entries = self.network.get_dns_entries_by_address(self.context, address, domain['domain']) self.assertEqual(2, len(entries)) self.network._delete_all_entries_for_ip(self.context, address) for domain in domains: entries = self.network.get_dns_entries_by_address(self.context, address, domain['domain']) self.assertFalse(entries) self.network.delete_dns_domain(context_admin, domain1) self.network.delete_dns_domain(context_admin, domain2) def test_mac_conflicts(self): # Make sure MAC collisions are retried. 
self.flags(create_unique_mac_address_attempts=3)
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=True)
        macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']

        # Create a VIF with aa:aa:aa:aa:aa:aa
        crash_test_dummy_vif = {
            'address': macs[1],
            'instance_uuid': 'fake_uuid',
            'network_id': 123,
            'uuid': 'fake_uuid',
            }
        self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)

        # Hand out a collision first, then a legit MAC
        def fake_gen_mac():
            return macs.pop()
        self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)

        # SQLite doesn't seem to honor the uniqueness constraint on the
        # address column, so fake the collision-avoidance here
        def fake_vif_save(vif, session=None):
            if vif.address == crash_test_dummy_vif['address']:
                raise db_exc.DBError("If you're smart, you'll retry!")
            # NOTE(russellb) The VirtualInterface object requires an ID to be
            # set, and we expect it to get set automatically when we do the
            # save.
            vif.id = 1
        self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)

        # Attempt to add another and make sure that both MACs are consumed
        # by the retry loop
        self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
        self.assertEqual([], macs)

    def test_deallocate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db,
                                 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
            exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        # The manager must re-raise as an RPC ExpectedException so the
        # caller's client sees the original error rather than a traceback.
        self.assertRaises(messaging.ExpectedException,
                          self.network.deallocate_floating_ip,
                          self.context, '1.2.3.4')

    def test_associate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db,
                                 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
            exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.associate_floating_ip,
                          self.context, '1.2.3.4', '10.0.0.1')

    def test_disassociate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db,
                                 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
            exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.disassociate_floating_ip,
                          self.context, '1.2.3.4')

    def test_get_floating_ip_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
        self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
            exception.FloatingIpNotFound(id='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.get_floating_ip,
                          self.context, 'fake-id')

    def _test_associate_floating_ip_failure(self, stdout, expected_exception):
        # Drive _associate_floating_ip into the l3 driver failure path and
        # verify the ProcessExecutionError is translated as expected.
        def _fake_catchall(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        network=test_network.fake_network)

        def _fake_add_floating_ip(*args, **kwargs):
            raise processutils.ProcessExecutionError(stdout)

        self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
                       _fake_catchall)
        self.stubs.Set(self.network.db, 'floating_ip_disassociate',
                       _fake_catchall)
        self.stubs.Set(self.network.l3driver, 'add_floating_ip',
                       _fake_add_floating_ip)

        self.assertRaises(expected_exception,
                          self.network._associate_floating_ip, self.context,
                          '1.2.3.4', '1.2.3.5', '', '')

    def test_associate_floating_ip_failure(self):
        # Generic driver failure surfaces as ProcessExecutionError.
        self._test_associate_floating_ip_failure(None,
                processutils.ProcessExecutionError)

    def
test_associate_floating_ip_failure_interface_not_found(self): self._test_associate_floating_ip_failure('Cannot find device', exception.NoFloatingIpInterface) @mock.patch('nova.objects.FloatingIP.get_by_address') def test_get_floating_ip_by_address(self, mock_get): mock_get.return_value = mock.sentinel.floating self.assertEqual(mock.sentinel.floating, self.network.get_floating_ip_by_address( self.context, mock.sentinel.address)) mock_get.assert_called_once_with(self.context, mock.sentinel.address) @mock.patch('nova.objects.FloatingIPList.get_by_project') def test_get_floating_ips_by_project(self, mock_get): mock_get.return_value = mock.sentinel.floatings self.assertEqual(mock.sentinel.floatings, self.network.get_floating_ips_by_project( self.context)) mock_get.assert_called_once_with(self.context, self.context.project_id) @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address') def test_get_floating_ips_by_fixed_address(self, mock_get): mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'), objects.FloatingIP(address='5.6.7.8')] self.assertEqual(['1.2.3.4', '5.6.7.8'], self.network.get_floating_ips_by_fixed_address( self.context, mock.sentinel.address)) mock_get.assert_called_once_with(self.context, mock.sentinel.address) @mock.patch('nova.db.floating_ip_get_pools') def test_floating_ip_pool_exists(self, floating_ip_get_pools): floating_ip_get_pools.return_value = [{'name': 'public'}] self.assertTrue(self.network._floating_ip_pool_exists(self.context, 'public')) @mock.patch('nova.db.floating_ip_get_pools') def test_floating_ip_pool_does_not_exist(self, floating_ip_get_pools): floating_ip_get_pools.return_value = [] self.assertFalse(self.network._floating_ip_pool_exists(self.context, 'public')) class InstanceDNSTestCase(test.TestCase): """Tests nova.network.manager instance DNS.""" def setUp(self): super(InstanceDNSTestCase, self).setUp() self.tempdir = self.useFixture(fixtures.TempDir()).path self.flags(log_dir=self.tempdir) self.network = 
TestFloatingIPManager() self.network.db = db self.project_id = 'testproject' self.context = context.RequestContext('testuser', self.project_id, is_admin=False) def test_dns_domains_private(self): zone1 = 'testzone' domain1 = 'example.org' self.network.create_private_dns_domain(self.context, domain1, zone1) domains = self.network.get_dns_domains(self.context) self.assertEqual(1, len(domains)) self.assertEqual(domain1, domains[0]['domain']) self.assertEqual(zone1, domains[0]['availability_zone']) self.network.delete_dns_domain(self.context, domain1) domain1 = "example.org" domain2 = "example.com" class LdapDNSTestCase(test.NoDBTestCase): """Tests nova.network.ldapdns.LdapDNS.""" def setUp(self): super(LdapDNSTestCase, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'nova.network.ldapdns.ldap', fake_ldap)) dns_class = 'nova.network.ldapdns.LdapDNS' self.driver = importutils.import_object(dns_class) attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain', 'domain', 'dcobject', 'top'], 'associateddomain': ['root'], 'dc': ['root']} self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items()) self.driver.create_domain(domain1) self.driver.create_domain(domain2) def tearDown(self): self.driver.delete_domain(domain1) self.driver.delete_domain(domain2) super(LdapDNSTestCase, self).tearDown() def test_ldap_dns_domains(self): domains = self.driver.get_domains() self.assertEqual(2, len(domains)) self.assertIn(domain1, domains) self.assertIn(domain2, domains) def test_ldap_dns_create_conflict(self): address1 = "10.10.10.11" name1 = "foo" self.driver.create_entry(name1, address1, "A", domain1) self.assertRaises(exception.FloatingIpDNSExists, self.driver.create_entry, name1, address1, "A", domain1) def test_ldap_dns_create_and_get(self): address1 = "10.10.10.11" name1 = "foo" name2 = "bar" entries = self.driver.get_entries_by_address(address1, domain1) self.assertFalse(entries) self.driver.create_entry(name1, address1, "A", domain1) self.driver.create_entry(name2, 
address1, "A", domain1) entries = self.driver.get_entries_by_address(address1, domain1) self.assertEqual(2, len(entries)) self.assertEqual(name1, entries[0]) self.assertEqual(name2, entries[1]) entries = self.driver.get_entries_by_name(name1, domain1) self.assertEqual(1, len(entries)) self.assertEqual(address1, entries[0]) def test_ldap_dns_delete(self): address1 = "10.10.10.11" name1 = "foo" name2 = "bar" self.driver.create_entry(name1, address1, "A", domain1) self.driver.create_entry(name2, address1, "A", domain1) entries = self.driver.get_entries_by_address(address1, domain1) self.assertEqual(2, len(entries)) self.driver.delete_entry(name1, domain1) entries = self.driver.get_entries_by_address(address1, domain1) LOG.debug("entries: %s" % entries) self.assertEqual(1, len(entries)) self.assertEqual(name2, entries[0]) self.assertRaises(exception.NotFound, self.driver.delete_entry, name1, domain1) class NetworkManagerNoDBTestCase(test.NoDBTestCase): """Tests nova.network.manager.NetworkManager without a database.""" def setUp(self): super(NetworkManagerNoDBTestCase, self).setUp() self.context = context.RequestContext('fake-user', 'fake-project') self.manager = network_manager.NetworkManager() @mock.patch.object(objects.FixedIP, 'get_by_address') def test_release_fixed_ip_not_associated(self, mock_fip_get_by_addr): # Tests that the method is a no-op when the fixed IP is not associated # to an instance. fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fake_network.next_fixed_ip(1)) fip.instance_uuid = None with mock.patch.object(fip, 'disassociate') as mock_disassociate: self.manager.release_fixed_ip(self.context, fip.address) self.assertFalse(mock_disassociate.called, str(mock_disassociate.mock_calls)) @mock.patch.object(objects.FixedIP, 'get_by_address') def test_release_fixed_ip_allocated(self, mock_fip_get_by_addr): # Tests that the fixed IP is not disassociated if it's allocated. 
fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fake_network.next_fixed_ip(1)) fip.leased = False fip.allocated = True with mock.patch.object(fip, 'disassociate') as mock_disassociate: self.manager.release_fixed_ip(self.context, fip.address) self.assertFalse(mock_disassociate.called, str(mock_disassociate.mock_calls)) @mock.patch.object(objects.FixedIP, 'get_by_address') @mock.patch.object(objects.VirtualInterface, 'get_by_address') def test_release_fixed_ip_mac_matches_associated_instance(self, mock_vif_get_by_addr, mock_fip_get_by_addr): # Tests that the fixed IP is disassociated when the mac passed to # release_fixed_ip matches the VIF which has the same instance_uuid # as the instance associated to the FixedIP object. Also tests # that the fixed IP is marked as not leased in the database if it was # currently leased. instance = fake_instance.fake_instance_obj(self.context) fip = fake_network.next_fixed_ip(1) fip['instance_uuid'] = instance.uuid fip['leased'] = True vif = fip['virtual_interface'] vif['instance_uuid'] = instance.uuid vif = objects.VirtualInterface._from_db_object( self.context, objects.VirtualInterface(), vif) fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fip) mock_fip_get_by_addr.return_value = fip mock_vif_get_by_addr.return_value = vif with mock.patch.object(fip, 'save') as mock_fip_save: with mock.patch.object(fip, 'disassociate') as mock_disassociate: self.manager.release_fixed_ip( self.context, fip.address, vif.address) mock_fip_save.assert_called_once_with() self.assertFalse(fip.leased) mock_vif_get_by_addr.assert_called_once_with(self.context, vif.address) mock_disassociate.assert_called_once_with() @mock.patch.object(objects.FixedIP, 'get_by_address') @mock.patch.object(objects.VirtualInterface, 'get_by_address', return_value=None) def test_release_fixed_ip_vif_not_found_for_mac(self, mock_vif_get_by_addr, mock_fip_get_by_addr): # Tests that the fixed IP is disassociated when the fixed IP 
is marked # as deallocated and there is no VIF found in the database for the mac # passed in. fip = fake_network.next_fixed_ip(1) fip['leased'] = False mac = fip['virtual_interface']['address'] fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fip) mock_fip_get_by_addr.return_value = fip with mock.patch.object(fip, 'disassociate') as mock_disassociate: self.manager.release_fixed_ip(self.context, fip.address, mac) mock_vif_get_by_addr.assert_called_once_with(self.context, mac) mock_disassociate.assert_called_once_with() @mock.patch.object(objects.FixedIP, 'get_by_address') def test_release_fixed_ip_no_mac(self, mock_fip_get_by_addr): # Tests that the fixed IP is disassociated when the fixed IP is # deallocated and there is no mac address passed in (like before # the network rpc api version bump to pass it in). fip = fake_network.next_fixed_ip(1) fip['leased'] = False fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fip) mock_fip_get_by_addr.return_value = fip with mock.patch.object(fip, 'disassociate') as mock_disassociate: self.manager.release_fixed_ip(self.context, fip.address) mock_disassociate.assert_called_once_with() @mock.patch.object(objects.FixedIP, 'get_by_address') @mock.patch.object(objects.VirtualInterface, 'get_by_address') def test_release_fixed_ip_mac_mismatch_associated_instance(self, mock_vif_get_by_addr, mock_fip_get_by_addr): # Tests that the fixed IP is not disassociated when the VIF for the mac # passed to release_fixed_ip does not have an instance_uuid that # matches fixed_ip.instance_uuid. 
old_instance = fake_instance.fake_instance_obj(self.context) new_instance = fake_instance.fake_instance_obj(self.context) fip = fake_network.next_fixed_ip(1) fip['instance_uuid'] = new_instance.uuid fip['leased'] = False vif = fip['virtual_interface'] vif['instance_uuid'] = old_instance.uuid vif = objects.VirtualInterface._from_db_object( self.context, objects.VirtualInterface(), vif) fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fip) mock_fip_get_by_addr.return_value = fip mock_vif_get_by_addr.return_value = vif with mock.patch.object(fip, 'disassociate') as mock_disassociate: self.manager.release_fixed_ip( self.context, fip.address, vif.address) mock_vif_get_by_addr.assert_called_once_with(self.context, vif.address) self.assertFalse(mock_disassociate.called, str(mock_disassociate.mock_calls)) @mock.patch.object(objects.FixedIP, 'get_by_address') @mock.patch.object(objects.VirtualInterface, 'get_by_id') @mock.patch.object(objects.Quotas, 'reserve') def test_deallocate_fixed_ip_explicit_disassociate(self, mock_quota_reserve, mock_vif_get_by_id, mock_fip_get_by_addr): # Tests that we explicitly call FixedIP.disassociate when the fixed IP # is not leased and has an associated instance (race with dnsmasq). 
self.flags(force_dhcp_release=True) fake_inst = fake_instance.fake_instance_obj(self.context) fip = fake_network.next_fixed_ip(1) fip['instance_uuid'] = fake_inst.uuid fip['leased'] = False vif = fip['virtual_interface'] vif['instance_uuid'] = fake_inst.uuid vif = objects.VirtualInterface._from_db_object( self.context, objects.VirtualInterface(), vif) fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fip) fip.network = fake_network.fake_network_obj(self.context, fip.network_id) mock_fip_get_by_addr.return_value = fip mock_vif_get_by_id.return_value = vif @mock.patch.object(self.manager, '_do_trigger_security_group_members_refresh_for_instance') @mock.patch.object(self.manager, '_validate_instance_zone_for_dns_domain', return_value=False) @mock.patch.object(self.manager, '_teardown_network_on_host') @mock.patch.object(fip, 'save') @mock.patch.object(fip, 'disassociate') def do_test(mock_disassociate, mock_fip_save, mock_teardown_network_on_host, mock_validate_zone, mock_trigger_secgroup_refresh): self.assertEqual(fake_inst.uuid, fip.instance_uuid) self.assertFalse(fip.leased) self.manager.deallocate_fixed_ip( self.context, fip['address'], instance=fake_inst) mock_trigger_secgroup_refresh.assert_called_once_with( fake_inst.uuid) mock_teardown_network_on_host.assert_called_once_with(self.context, fip.network) mock_disassociate.assert_called_once_with() do_test() nova-13.0.0/nova/tests/unit/network/test_rpcapi.py0000664000567000056710000003710512701407773023361 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for nova.network.rpcapi """ import collections import mock from mox3 import mox from oslo_config import cfg from nova import context from nova.network import rpcapi as network_rpcapi from nova.objects import base as objects_base from nova import test from nova.tests.unit import fake_instance from nova.tests.unit import fake_network CONF = cfg.CONF class NetworkRpcAPITestCase(test.NoDBTestCase): def setUp(self): super(NetworkRpcAPITestCase, self).setUp() self.flags(multi_host=True) # Used to specify the default value expected if no real value is passed DefaultArg = collections.namedtuple('DefaultArg', ['value']) def _test_network_api(self, method, rpc_method, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = network_rpcapi.NetworkAPI() self.assertIsNotNone(rpcapi.client) self.assertEqual(CONF.network_topic, rpcapi.client.target.topic) expected_retval = 'foo' if rpc_method == 'call' else None expected_version = kwargs.pop('version', None) expected_fanout = kwargs.pop('fanout', None) expected_kwargs = kwargs.copy() for k, v in expected_kwargs.items(): if isinstance(v, self.DefaultArg): expected_kwargs[k] = v.value kwargs.pop(k) prepare_kwargs = {} if expected_version: prepare_kwargs['version'] = expected_version if expected_fanout: prepare_kwargs['fanout'] = True if 'source_compute' in expected_kwargs: # Fix up for migrate_instance_* calls. 
expected_kwargs['source'] = expected_kwargs.pop('source_compute') expected_kwargs['dest'] = expected_kwargs.pop('dest_compute') targeted_methods = [ 'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host', '_rpc_allocate_fixed_ip', 'deallocate_fixed_ip', 'update_dns', '_associate_floating_ip', '_disassociate_floating_ip', 'lease_fixed_ip', 'release_fixed_ip', 'migrate_instance_start', 'migrate_instance_finish', 'allocate_for_instance', 'deallocate_for_instance', ] targeted_by_instance = ['deallocate_for_instance'] if method in targeted_methods and ('host' in expected_kwargs or 'instance' in expected_kwargs): if method in targeted_by_instance: host = expected_kwargs['instance']['host'] else: host = expected_kwargs['host'] if method not in ['allocate_for_instance', 'deallocate_fixed_ip']: expected_kwargs.pop('host') if CONF.multi_host: prepare_kwargs['server'] = host self.mox.StubOutWithMock(rpcapi, 'client') version_check = [ 'deallocate_for_instance', 'deallocate_fixed_ip', 'allocate_for_instance', 'release_fixed_ip', 'set_network_host', 'setup_networks_on_host' ] if method in version_check: rpcapi.client.can_send_version(mox.IgnoreArg()).AndReturn(True) if prepare_kwargs: rpcapi.client.prepare(**prepare_kwargs).AndReturn(rpcapi.client) rpc_method = getattr(rpcapi.client, rpc_method) rpc_method(ctxt, method, **expected_kwargs).AndReturn('foo') self.mox.ReplayAll() retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(expected_retval, retval) def test_create_networks(self): self._test_network_api('create_networks', rpc_method='call', arg1='arg', arg2='arg') def test_delete_network(self): self._test_network_api('delete_network', rpc_method='call', uuid='fake_uuid', fixed_range='range') def test_allocate_for_instance(self): self._test_network_api('allocate_for_instance', rpc_method='call', instance_id='fake_id', project_id='fake_id', host='fake_host', rxtx_factor='fake_factor', vpn=False, requested_networks={}, macs=[], version='1.13') def 
test_deallocate_for_instance(self): instance = fake_instance.fake_instance_obj(context.get_admin_context()) self._test_network_api('deallocate_for_instance', rpc_method='call', requested_networks=self.DefaultArg(None), instance=instance, version='1.11') def test_deallocate_for_instance_with_expected_networks(self): instance = fake_instance.fake_instance_obj(context.get_admin_context()) self._test_network_api('deallocate_for_instance', rpc_method='call', instance=instance, requested_networks={}, version='1.11') def test_add_fixed_ip_to_instance(self): self._test_network_api('add_fixed_ip_to_instance', rpc_method='call', instance_id='fake_id', rxtx_factor='fake_factor', host='fake_host', network_id='fake_id', version='1.9') def test_remove_fixed_ip_from_instance(self): self._test_network_api('remove_fixed_ip_from_instance', rpc_method='call', instance_id='fake_id', rxtx_factor='fake_factor', host='fake_host', address='fake_address', version='1.9') def test_add_network_to_project(self): self._test_network_api('add_network_to_project', rpc_method='call', project_id='fake_id', network_uuid='fake_uuid') def test_get_instance_nw_info(self): self._test_network_api('get_instance_nw_info', rpc_method='call', instance_id='fake_id', rxtx_factor='fake_factor', host='fake_host', project_id='fake_id', version='1.9') def test_validate_networks(self): self._test_network_api('validate_networks', rpc_method='call', networks={}) def test_get_dns_domains(self): self._test_network_api('get_dns_domains', rpc_method='call') def test_add_dns_entry(self): self._test_network_api('add_dns_entry', rpc_method='call', address='addr', name='name', dns_type='foo', domain='domain') def test_modify_dns_entry(self): self._test_network_api('modify_dns_entry', rpc_method='call', address='addr', name='name', domain='domain') def test_delete_dns_entry(self): self._test_network_api('delete_dns_entry', rpc_method='call', name='name', domain='domain') def test_delete_dns_domain(self): 
        self._test_network_api('delete_dns_domain', rpc_method='call',
                domain='fake_domain')

    def test_get_dns_entries_by_address(self):
        self._test_network_api('get_dns_entries_by_address',
                rpc_method='call', address='fake_address',
                domain='fake_domain')

    def test_get_dns_entries_by_name(self):
        self._test_network_api('get_dns_entries_by_name', rpc_method='call',
                name='fake_name', domain='fake_domain')

    def test_create_private_dns_domain(self):
        self._test_network_api('create_private_dns_domain', rpc_method='call',
                domain='fake_domain', av_zone='fake_zone')

    def test_create_public_dns_domain(self):
        self._test_network_api('create_public_dns_domain', rpc_method='call',
                domain='fake_domain', project='fake_project')

    def test_setup_networks_on_host(self):
        ctxt = context.RequestContext('fake_user', 'fake_project')
        instance = fake_instance.fake_instance_obj(ctxt)
        self._test_network_api('setup_networks_on_host', rpc_method='call',
                instance_id=instance.id, host='fake_host', teardown=False,
                instance=instance, version='1.16')

    def test_setup_networks_on_host_v1_0(self):
        # Tests the fallback path: when the client cannot send version 1.16
        # the instance object must be dropped and only instance_id sent.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        instance = fake_instance.fake_instance_obj(ctxt)
        host = 'fake_host'
        teardown = True
        rpcapi = network_rpcapi.NetworkAPI()
        call_mock = mock.Mock()
        cctxt_mock = mock.Mock(call=call_mock)
        with test.nested(
            mock.patch.object(rpcapi.client, 'can_send_version',
                              return_value=False),
            mock.patch.object(rpcapi.client, 'prepare',
                              return_value=cctxt_mock)
        ) as (
            can_send_mock, prepare_mock
        ):
            rpcapi.setup_networks_on_host(ctxt, instance.id, host, teardown,
                                          instance)
        # assert our mocks were called as expected
        can_send_mock.assert_called_once_with('1.16')
        prepare_mock.assert_called_once_with(version='1.0')
        call_mock.assert_called_once_with(ctxt, 'setup_networks_on_host',
                                          host=host, teardown=teardown,
                                          instance_id=instance.id)

    def test_lease_fixed_ip(self):
        self._test_network_api('lease_fixed_ip', rpc_method='cast',
                host='fake_host', address='fake_addr')

    def test_release_fixed_ip(self):
        self._test_network_api('release_fixed_ip', rpc_method='cast',
                host='fake_host', address='fake_addr', mac='fake_mac',
                version='1.14')

    def test_release_fixed_ip_no_mac_support(self):
        # Tests that the mac kwarg is not passed when we can't send version
        # 1.14 to the network manager.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        address = '192.168.65.158'
        host = 'fake-host'
        mac = '00:0c:29:2c:b2:64'
        rpcapi = network_rpcapi.NetworkAPI()
        cast_mock = mock.Mock()
        cctxt_mock = mock.Mock(cast=cast_mock)
        with test.nested(
            mock.patch.object(rpcapi.client, 'can_send_version',
                              return_value=False),
            mock.patch.object(rpcapi.client, 'prepare',
                              return_value=cctxt_mock)
        ) as (
            can_send_mock, prepare_mock
        ):
            rpcapi.release_fixed_ip(ctxt, address, host, mac)
        # assert our mocks were called as expected
        can_send_mock.assert_called_once_with('1.14')
        prepare_mock.assert_called_once_with(server=host, version='1.0')
        cast_mock.assert_called_once_with(ctxt, 'release_fixed_ip',
                                          address=address)

    def test_set_network_host(self):
        network = fake_network.fake_network_obj(context.get_admin_context())
        self._test_network_api('set_network_host', rpc_method='call',
                               network_ref=network, version='1.15')

    def test_set_network_host_network_object_to_primitive(self):
        # Tests that the network object is converted to a primitive if it
        # can't send version 1.15.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        network = fake_network.fake_network_obj(ctxt)
        network_dict = objects_base.obj_to_primitive(network)
        rpcapi = network_rpcapi.NetworkAPI()
        call_mock = mock.Mock()
        cctxt_mock = mock.Mock(call=call_mock)
        with test.nested(
            mock.patch.object(rpcapi.client, 'can_send_version',
                              return_value=False),
            mock.patch.object(rpcapi.client, 'prepare',
                              return_value=cctxt_mock)
        ) as (
            can_send_mock, prepare_mock
        ):
            rpcapi.set_network_host(ctxt, network)
        # assert our mocks were called as expected
        can_send_mock.assert_called_once_with('1.15')
        prepare_mock.assert_called_once_with(version='1.0')
        call_mock.assert_called_once_with(ctxt, 'set_network_host',
                                          network_ref=network_dict)

    def test_rpc_setup_network_on_host(self):
        self._test_network_api('rpc_setup_network_on_host', rpc_method='call',
                network_id='fake_id', teardown=False, host='fake_host')

    def test_rpc_allocate_fixed_ip(self):
        self._test_network_api('_rpc_allocate_fixed_ip', rpc_method='call',
                instance_id='fake_id', network_id='fake_id', address='addr',
                vpn=True, host='fake_host')

    def test_deallocate_fixed_ip(self):
        instance = fake_instance.fake_db_instance()
        self._test_network_api('deallocate_fixed_ip', rpc_method='call',
                address='fake_addr', host='fake_host', instance=instance,
                version='1.12')

    def test_update_dns(self):
        self._test_network_api('update_dns', rpc_method='cast', fanout=True,
                network_ids='fake_id', version='1.3')

    def test__associate_floating_ip(self):
        self._test_network_api('_associate_floating_ip', rpc_method='call',
                floating_address='fake_addr', fixed_address='fixed_address',
                interface='fake_interface', host='fake_host',
                instance_uuid='fake_uuid', version='1.6')

    def test__disassociate_floating_ip(self):
        self._test_network_api('_disassociate_floating_ip', rpc_method='call',
                address='fake_addr', interface='fake_interface',
                host='fake_host', instance_uuid='fake_uuid', version='1.6')

    def test_migrate_instance_start(self):
self._test_network_api('migrate_instance_start', rpc_method='call', instance_uuid='fake_instance_uuid', rxtx_factor='fake_factor', project_id='fake_project', source_compute='fake_src_compute', dest_compute='fake_dest_compute', floating_addresses='fake_floating_addresses', host=self.DefaultArg(None), version='1.2') def test_migrate_instance_start_multi_host(self): self._test_network_api('migrate_instance_start', rpc_method='call', instance_uuid='fake_instance_uuid', rxtx_factor='fake_factor', project_id='fake_project', source_compute='fake_src_compute', dest_compute='fake_dest_compute', floating_addresses='fake_floating_addresses', host='fake_host', version='1.2') def test_migrate_instance_finish(self): self._test_network_api('migrate_instance_finish', rpc_method='call', instance_uuid='fake_instance_uuid', rxtx_factor='fake_factor', project_id='fake_project', source_compute='fake_src_compute', dest_compute='fake_dest_compute', floating_addresses='fake_floating_addresses', host=self.DefaultArg(None), version='1.2') def test_migrate_instance_finish_multi_host(self): self._test_network_api('migrate_instance_finish', rpc_method='call', instance_uuid='fake_instance_uuid', rxtx_factor='fake_factor', project_id='fake_project', source_compute='fake_src_compute', dest_compute='fake_dest_compute', floating_addresses='fake_floating_addresses', host='fake_host', version='1.2') nova-13.0.0/nova/tests/unit/network/test_neutronv2.py0000664000567000056710000060014412701410011024017 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import collections import copy import uuid from keystoneauth1.fixture import V2Token from keystoneauth1 import loading as ks_loading import mock from mox3 import mox from neutronclient.common import exceptions from neutronclient.v2_0 import client from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from oslo_utils import timeutils import requests_mock import six from six.moves import range from nova.compute import flavors from nova import context from nova import exception from nova.network import model from nova.network.neutronv2 import api as neutronapi from nova.network.neutronv2 import constants from nova import objects from nova.pci import manager as pci_manager from nova.pci import whitelist as pci_whitelist from nova import policy from nova import test from nova.tests.unit import fake_instance CONF = cfg.CONF # NOTE: Neutron client raises Exception which is discouraged by HACKING. # We set this variable here and use it for assertions below to avoid # the hacking checks until we can make neutron client throw a custom # exception class instead. 
NEUTRON_CLIENT_EXCEPTION = Exception

# Minimal instance_info_cache DB row returned by the stubbed
# instance_info_cache_update calls in the tests below.
fake_info_cache = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': False,
    'instance_uuid': 'fake-uuid',
    'network_info': '[]',
    }


class MyComparator(mox.Comparator):
    """Recursive mox comparator for dict/list/tuple arguments.

    Unlike plain equality, nested dicts are compared key-by-key and
    list/tuple members by containment, so expectation dicts built in the
    tests match the (differently ordered) structures the code under test
    passes to the neutron client.
    """

    def __init__(self, lhs):
        self.lhs = lhs

    def _com_dict(self, lhs, rhs):
        # Dicts match when they have the same size and every lhs entry
        # recursively matches the corresponding rhs entry.
        if len(lhs) != len(rhs):
            return False
        for key, value in six.iteritems(lhs):
            if key not in rhs:
                return False
            rhs_value = rhs[key]
            if not self._com(value, rhs_value):
                return False
        return True

    def _com_list(self, lhs, rhs):
        # NOTE: containment check, so ordering is ignored; duplicate
        # elements are not counted distinctly.
        if len(lhs) != len(rhs):
            return False
        for lhs_value in lhs:
            if lhs_value not in rhs:
                return False
        return True

    def _com(self, lhs, rhs):
        if lhs is None:
            return rhs is None
        if isinstance(lhs, dict):
            if not isinstance(rhs, dict):
                return False
            return self._com_dict(lhs, rhs)
        if isinstance(lhs, list):
            if not isinstance(rhs, list):
                return False
            return self._com_list(lhs, rhs)
        if isinstance(lhs, tuple):
            if not isinstance(rhs, tuple):
                return False
            return self._com_list(lhs, rhs)
        return lhs == rhs

    def equals(self, rhs):
        return self._com(self.lhs, rhs)

    def __repr__(self):
        return str(self.lhs)


class TestNeutronClient(test.NoDBTestCase):
    """Tests for neutronapi.get_client() client construction/auth."""

    def setUp(self):
        super(TestNeutronClient, self).setUp()
        neutronapi.reset_state()

    def test_withtoken(self):
        self.flags(url='http://anyhost/', group='neutron')
        self.flags(timeout=30, group='neutron')
        my_context = context.RequestContext('userid',
                                            'my_tenantid',
                                            auth_token='token')
        cl = neutronapi.get_client(my_context)
        self.assertEqual(CONF.neutron.url, cl.httpclient.endpoint_override)
        self.assertEqual(my_context.auth_token, cl.httpclient.auth.auth_token)
        self.assertEqual(CONF.neutron.timeout, cl.httpclient.session.timeout)

    def test_withouttoken(self):
        my_context = context.RequestContext('userid', 'my_tenantid')
        self.assertRaises(exceptions.Unauthorized,
                          neutronapi.get_client,
                          my_context)

    def test_withtoken_context_is_admin(self):
        self.flags(url='http://anyhost/', group='neutron')
        self.flags(timeout=30, group='neutron')
        my_context = context.RequestContext('userid', 'my_tenantid',
                                            auth_token='token',
                                            is_admin=True)
        cl = neutronapi.get_client(my_context)
        self.assertEqual(CONF.neutron.url, cl.httpclient.endpoint_override)
        self.assertEqual(my_context.auth_token, cl.httpclient.auth.auth_token)
        self.assertEqual(CONF.neutron.timeout, cl.httpclient.session.timeout)

    def test_withouttoken_keystone_connection_error(self):
        self.flags(url='http://anyhost/', group='neutron')
        my_context = context.RequestContext('userid', 'my_tenantid')
        self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
                          neutronapi.get_client,
                          my_context)

    @mock.patch('nova.network.neutronv2.api._ADMIN_AUTH')
    @mock.patch.object(client.Client, "list_networks", new=mock.Mock())
    def test_reuse_admin_token(self, m):
        # The admin client auth plugin should be re-used across get_client
        # calls; each request fetches the next token from the plugin.
        self.flags(url='http://anyhost/', group='neutron')
        my_context = context.RequestContext('userid', 'my_tenantid',
                                            auth_token='token')
        tokens = ['new_token2', 'new_token1']

        def token_vals(*args, **kwargs):
            return tokens.pop()

        m.get_token.side_effect = token_vals

        client1 = neutronapi.get_client(my_context, True)
        client1.list_networks(retrieve_all=False)
        self.assertEqual('new_token1', client1.httpclient.auth.get_token(None))

        client1 = neutronapi.get_client(my_context, True)
        client1.list_networks(retrieve_all=False)
        self.assertEqual('new_token2', client1.httpclient.auth.get_token(None))


class TestNeutronv2Base(test.TestCase):
    """Shared fixtures/mox plumbing for the neutronv2 API tests."""

    def setUp(self):
        super(TestNeutronv2Base, self).setUp()
        self.context = context.RequestContext('userid', 'my_tenantid')
        setattr(self.context,
                'auth_token',
                'bff4a5a6b9eb4ea2a6efec6eefb77936')
        self.tenant_id = '9d049e4b60b64716978ab415e6fbd5c0'
        self.instance = {'project_id': self.tenant_id,
                         'uuid': str(uuid.uuid4()),
                         'display_name': 'test_instance',
                         'hostname': 'test-instance',
                         'availability_zone': 'nova',
                         'host': 'some_host',
                         'info_cache': {'network_info': []},
                         'security_groups': []}
        self.instance2 = {'project_id': self.tenant_id,
                          'uuid': str(uuid.uuid4()),
                          'display_name': 'test_instance2',
                          'availability_zone': 'nova',
                          'info_cache': {'network_info': []},
                          'security_groups': []}
        # Fixture networks; self.nets below is 1-based-indexed by the
        # net_idx argument of _stub_allocate_for_instance().
        self.nets1 = [{'id': 'my_netid1',
                       'name': 'my_netname1',
                       'subnets': ['mysubnid1'],
                       'tenant_id': 'my_tenantid'}]
        self.nets2 = []
        self.nets2.append(self.nets1[0])
        self.nets2.append({'id': 'my_netid2',
                           'name': 'my_netname2',
                           'subnets': ['mysubnid2'],
                           'tenant_id': 'my_tenantid'})
        self.nets3 = self.nets2 + [{'id': 'my_netid3',
                                    'name': 'my_netname3',
                                    'tenant_id': 'my_tenantid'}]
        self.nets4 = [{'id': 'his_netid4',
                       'name': 'his_netname4',
                       'tenant_id': 'his_tenantid'}]
        # A network request with external networks
        self.nets5 = self.nets1 + [{'id': 'the-external-one',
                                    'name': 'out-of-this-world',
                                    'router:external': True,
                                    'tenant_id': 'should-be-an-admin'}]
        # A network request with a duplicate
        self.nets6 = []
        self.nets6.append(self.nets1[0])
        self.nets6.append(self.nets1[0])
        # A network request with a combo
        self.nets7 = []
        self.nets7.append(self.nets2[1])
        self.nets7.append(self.nets1[0])
        self.nets7.append(self.nets2[1])
        self.nets7.append(self.nets1[0])
        # A network request with only external network
        self.nets8 = [self.nets5[1]]
        # An empty network
        self.nets9 = []
        # A network that is both shared and external
        self.nets10 = [{'id': 'net_id', 'name': 'net_name',
                        'router:external': True, 'shared': True}]
        # A network with non-blank dns_domain to test _update_port_dns_name
        self.nets11 = [{'id': 'my_netid1',
                        'name': 'my_netname1',
                        'subnets': ['mysubnid1'],
                        'tenant_id': 'my_tenantid',
                        'dns_domain': 'my-domain.org.'}]

        self.nets = [self.nets1, self.nets2, self.nets3, self.nets4,
                     self.nets5, self.nets6, self.nets7, self.nets8,
                     self.nets9, self.nets10, self.nets11]

        # Port / floating-ip / subnet fixtures used as canned neutron
        # list_* responses in the tests.
        self.port_address = '10.0.1.2'
        self.port_data1 = [{'network_id': 'my_netid1',
                            'device_id': self.instance2['uuid'],
                            'tenant_id': self.tenant_id,
                            'device_owner': 'compute:nova',
                            'id': 'my_portid1',
                            'binding:vnic_type': model.VNIC_TYPE_NORMAL,
                            'status': 'DOWN',
                            'admin_state_up': True,
                            'fixed_ips': [{'ip_address': self.port_address,
                                           'subnet_id': 'my_subid1'}],
                            'mac_address': 'my_mac1', }]
        self.float_data1 = [{'port_id': 'my_portid1',
                             'fixed_ip_address': self.port_address,
                             'floating_ip_address': '172.0.1.2'}]
        self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9',
                                                'subnet_id': 'my_subid1'}],
                                 'status': 'ACTIVE',
                                 'admin_state_up': True}]
        self.port_address2 = '10.0.2.2'
        self.port_data2 = []
        self.port_data2.append(self.port_data1[0])
        self.port_data2.append({'network_id': 'my_netid2',
                                'device_id': self.instance['uuid'],
                                'tenant_id': self.tenant_id,
                                'admin_state_up': True,
                                'status': 'ACTIVE',
                                'device_owner': 'compute:nova',
                                'id': 'my_portid2',
                                'binding:vnic_type': model.VNIC_TYPE_NORMAL,
                                'fixed_ips':
                                    [{'ip_address': self.port_address2,
                                      'subnet_id': 'my_subid2'}],
                                'mac_address': 'my_mac2', })
        self.float_data2 = []
        self.float_data2.append(self.float_data1[0])
        self.float_data2.append({'port_id': 'my_portid2',
                                 'fixed_ip_address': '10.0.2.2',
                                 'floating_ip_address': '172.0.2.2'})
        self.port_data3 = [{'network_id': 'my_netid1',
                            'device_id': 'device_id3',
                            'tenant_id': self.tenant_id,
                            'status': 'DOWN',
                            'admin_state_up': True,
                            'device_owner': 'compute:nova',
                            'id': 'my_portid3',
                            'binding:vnic_type': model.VNIC_TYPE_NORMAL,
                            'fixed_ips': [],  # no fixed ip
                            'mac_address': 'my_mac3', }]
        self.subnet_data1 = [{'id': 'my_subid1',
                              'cidr': '10.0.1.0/24',
                              'network_id': 'my_netid1',
                              'gateway_ip': '10.0.1.1',
                              'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
        self.subnet_data2 = []
        self.subnet_data_n = [{'id': 'my_subid1',
                               'cidr': '10.0.1.0/24',
                               'network_id': 'my_netid1',
                               'gateway_ip': '10.0.1.1',
                               'dns_nameservers': ['8.8.1.1', '8.8.1.2']},
                              {'id': 'my_subid2',
                               'cidr': '20.0.1.0/24',
                               'network_id': 'my_netid2',
                               'gateway_ip': '20.0.1.1',
                               'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
        self.subnet_data2.append({'id': 'my_subid2',
                                  'cidr': '10.0.2.0/24',
                                  'network_id': 'my_netid2',
                                  'gateway_ip': '10.0.2.1',
                                  'dns_nameservers': ['8.8.2.1', '8.8.2.2']})

        self.fip_pool = {'id': '4fdbfd74-eaf8-4884-90d9-00bd6f10c2d3',
                         'name': 'ext_net',
                         'router:external': True,
                         'tenant_id': 'admin_tenantid'}
        self.fip_pool_nova = {'id': '435e20c3-d9f1-4f1b-bee5-4611a1dd07db',
                              'name': 'nova',
                              'router:external': True,
                              'tenant_id': 'admin_tenantid'}
        self.fip_unassociated = {'tenant_id': 'my_tenantid',
                                 'id': 'fip_id1',
                                 'floating_ip_address': '172.24.4.227',
                                 'floating_network_id': self.fip_pool['id'],
                                 'port_id': None,
                                 'fixed_ip_address': None,
                                 'router_id': None}
        fixed_ip_address = self.port_data2[1]['fixed_ips'][0]['ip_address']
        self.fip_associated = {'tenant_id': 'my_tenantid',
                               'id': 'fip_id2',
                               'floating_ip_address': '172.24.4.228',
                               'floating_network_id': self.fip_pool['id'],
                               'port_id': self.port_data2[1]['id'],
                               'fixed_ip_address': fixed_ip_address,
                               'router_id': 'router_id1'}
        self._returned_nw_info = []
        # All tests talk to a single moxed neutron client.
        self.mox.StubOutWithMock(neutronapi, 'get_client')
        self.moxed_client = self.mox.CreateMock(client.Client)
        self.addCleanup(CONF.reset)
        self.addCleanup(self.mox.VerifyAll)
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)

    def _fake_instance_object(self, instance):
        # Build an Instance object from the plain fixture dict.
        return fake_instance.fake_instance_obj(self.context, **instance)

    def _fake_instance_info_cache(self, nw_info, instance_uuid=None):
        # Build an info_cache dict whose network_info is hydrated from
        # the given nw_info structure.
        info_cache = {}
        if instance_uuid is None:
            info_cache['instance_uuid'] = str(uuid.uuid4())
        else:
            info_cache['instance_uuid'] = instance_uuid
        info_cache['deleted'] = False
        info_cache['created_at'] = timeutils.utcnow()
        info_cache['deleted_at'] = timeutils.utcnow()
        info_cache['updated_at'] = timeutils.utcnow()
        info_cache['network_info'] = model.NetworkInfo.hydrate(six.text_type(
            jsonutils.dumps(nw_info)))
        return info_cache

    def _fake_instance_object_with_info_cache(self, instance):
        expected_attrs = ['info_cache']
        instance = objects.Instance._from_db_object(self.context,
            objects.Instance(),
            fake_instance.fake_db_instance(**instance),
            expected_attrs=expected_attrs)
        return instance

    def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
        self.instance = self._fake_instance_object(self.instance)
        self.instance2 =
            self._fake_instance_object(self.instance2)
        api = neutronapi.API()
        self.mox.StubOutWithMock(api, 'get_instance_nw_info')
        # Decode the kwargs that control which neutron extensions the
        # stubbed API is supposed to advertise.
        has_portbinding = False
        has_extra_dhcp_opts = False
        dhcp_options = kwargs.get('dhcp_options')
        if dhcp_options is not None:
            has_extra_dhcp_opts = True
        has_dns_extension = False
        if kwargs.get('dns_extension'):
            has_dns_extension = True
            api.extensions[constants.DNS_INTEGRATION] = 1
        if kwargs.get('portbinding'):
            has_portbinding = True
            api.extensions[constants.PORTBINDING_EXT] = 1
            self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
            neutronapi.get_client(mox.IgnoreArg()).AndReturn(
                self.moxed_client)
            neutronapi.get_client(
                mox.IgnoreArg(), admin=True).AndReturn(
                self.moxed_client)
            api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
                neutron=self.moxed_client)
            self.mox.StubOutWithMock(api, '_has_port_binding_extension')
            api._has_port_binding_extension(mox.IgnoreArg(),
                neutron=self.moxed_client,
                refresh_cache=True).AndReturn(has_portbinding)
        elif has_dns_extension:
            self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
            api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
                neutron=self.moxed_client)
        else:
            self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
            api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
                neutron=self.moxed_client)
            self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
        # Net idx is 1-based for compatibility with existing unit tests
        nets = self.nets[net_idx - 1]
        ports = {}
        fixed_ips = {}
        macs = kwargs.get('macs')
        if macs:
            macs = set(macs)
        req_net_ids = []
        ordered_networks = []
        # Record the show_port expectations for any pre-created ports in
        # the requested networks; otherwise request every fixture network.
        if 'requested_networks' in kwargs:
            for request in kwargs['requested_networks']:
                if request.port_id:
                    if request.port_id == 'my_portid3':
                        self.moxed_client.show_port(request.port_id
                            ).AndReturn(
                                {'port': {'id': 'my_portid3',
                                          'network_id': 'my_netid1',
                                          'tenant_id': self.tenant_id,
                                          'mac_address': 'my_mac1',
                                          'device_id': kwargs.get('_device')
                                                       and
                                                       self.instance2.uuid
                                                       or ''}})
                        ports['my_netid1'] = [self.port_data1[0],
                                              self.port_data3[0]]
                        ports[request.port_id] = self.port_data3[0]
                        request.network_id = 'my_netid1'
                        if macs is not None:
                            macs.discard('my_mac1')
                    elif request.port_id == 'invalid_id':
                        PortNotFound = exceptions.PortNotFoundClient(
                            status_code=404)
                        self.moxed_client.show_port(request.port_id
                            ).AndRaise(PortNotFound)
                    else:
                        self.moxed_client.show_port(request.port_id).AndReturn(
                            {'port': {'id': 'my_portid1',
                                      'network_id': 'my_netid1',
                                      'tenant_id': self.tenant_id,
                                      'mac_address': 'my_mac1',
                                      'device_id': kwargs.get('_device') and
                                                   self.instance2.uuid or '',
                                      'dns_name': kwargs.get('_dns_name') or
                                                  ''}})
                        ports[request.port_id] = self.port_data1[0]
                        request.network_id = 'my_netid1'
                        if macs is not None:
                            macs.discard('my_mac1')
                else:
                    fixed_ips[request.network_id] = request.address
                req_net_ids.append(request.network_id)
                ordered_networks.append(request)
        else:
            for n in nets:
                ordered_networks.append(
                    objects.NetworkRequest(network_id=n['id']))
        # '_break' lets callers stop expectation recording at well-known
        # points so error paths can be exercised.
        if kwargs.get('_break') == 'pre_list_networks':
            self.mox.ReplayAll()
            return api
        # search all req_net_ids as in api.py
        search_ids = req_net_ids
        if search_ids:
            mox_list_params = {'id': mox.SameElementsAs(search_ids)}
            self.moxed_client.list_networks(
                **mox_list_params).AndReturn({'networks': nets})
        else:
            mox_list_params = {'tenant_id': self.instance.project_id,
                               'shared': False}
            self.moxed_client.list_networks(
                **mox_list_params).AndReturn({'networks': nets})
            mox_list_params = {'shared': True}
            self.moxed_client.list_networks(
                **mox_list_params).AndReturn({'networks': []})
        if kwargs.get('_break') == 'post_list_networks':
            self.mox.ReplayAll()
            return api
        if (('requested_networks' not in kwargs or
             kwargs['requested_networks'].as_tuples() == [(None, None, None)])
            and len(nets) > 1):
                self.mox.ReplayAll()
                return api
        preexisting_port_ids = []
        ports_in_requested_net_order = []
        nets_in_requested_net_order = []
        for request in ordered_networks:
            port_req_body = {
                'port': {
                    'device_id': self.instance.uuid,
                    'device_owner': 'compute:nova',
                },
            }
            # Network lookup for available network_id
            network = None
            for net in nets:
                if net['id'] == request.network_id:
                    network = net
                    break
            # if net_id did not pass validate_networks() and not available
            # here then skip it safely not continuing with a None Network
            else:
                continue
            if has_portbinding:
                port_req_body['port']['binding:host_id'] = (
                    self.instance.get('host'))
            if has_dns_extension and not network.get('dns_domain'):
                port_req_body['port']['dns_name'] = self.instance.hostname
            if not has_portbinding and not has_dns_extension:
                api._populate_neutron_extension_values(mox.IgnoreArg(),
                    self.instance, mox.IgnoreArg(),
                    mox.IgnoreArg(), network=network,
                    neutron=self.moxed_client,
                    bind_host_id=None).AndReturn(None)
            elif has_portbinding:
                # since _populate_neutron_extension_values() will call
                # _has_port_binding_extension()
                api._has_port_binding_extension(mox.IgnoreArg(),
                    neutron=self.moxed_client).\
                    AndReturn(has_portbinding)
            else:
                api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
                    neutron=self.moxed_client)
            if request.port_id:
                port = ports[request.port_id]
                self.moxed_client.update_port(request.port_id,
                                              MyComparator(port_req_body)
                                              ).AndReturn(
                                                  {'port': port})
                ports_in_requested_net_order.append(request.port_id)
                preexisting_port_ids.append(request.port_id)
            else:
                request.address = fixed_ips.get(request.network_id)
                if request.address:
                    port_req_body['port']['fixed_ips'] = [
                        {'ip_address': str(request.address)}]
                port_req_body['port']['network_id'] = request.network_id
                port_req_body['port']['admin_state_up'] = True
                port_req_body['port']['tenant_id'] = \
                    self.instance.project_id
                if macs:
                    port_req_body['port']['mac_address'] = macs.pop()
                if has_portbinding:
                    port_req_body['port']['binding:host_id'] = (
                        self.instance.get('host'))
                res_port = {'port': {'id': 'fake'}}
                if has_extra_dhcp_opts:
                    port_req_body['port']['extra_dhcp_opts'] = dhcp_options
                if kwargs.get('_break') == 'mac' + request.network_id:
                    self.mox.ReplayAll()
                    return api
                self.moxed_client.create_port(
                    MyComparator(port_req_body)).AndReturn(res_port)
                ports_in_requested_net_order.append(res_port['port']['id'])
                if has_portbinding and has_dns_extension:
                    api._has_port_binding_extension(mox.IgnoreArg()).\
                        AndReturn(has_portbinding)
                    # nets11 has a dns_domain, triggering a second
                    # update_port to set the port's dns_name.
                    if net_idx == 11:
                        port_req_body_dns = {
                            'port': {
                                'dns_name': self.instance.hostname
                            }
                        }
                        res_port_dns = {
                            'port': {
                                'id': ports_in_requested_net_order[-1]
                            }
                        }
                        self.moxed_client.update_port(
                            ports_in_requested_net_order[-1],
                            MyComparator(port_req_body_dns)
                            ).AndReturn(res_port_dns)
            nets_in_requested_net_order.append(network)

        api.get_instance_nw_info(mox.IgnoreArg(),
                                 self.instance,
                                 networks=nets_in_requested_net_order,
                                 port_ids=ports_in_requested_net_order,
                                 admin_client=None,
                                 preexisting_port_ids=preexisting_port_ids,
                                 update_cells=True
                                 ).AndReturn(self._returned_nw_info)
        self.mox.ReplayAll()
        return api

    def _verify_nw_info(self, nw_inf, index=0):
        # Check the nw_info entry at `index` against the 1-based fixture
        # naming scheme (my_portidN / my_macN / 10.0.N.0/24 ...).
        id_suffix = index + 1
        self.assertEqual('10.0.%s.2' % id_suffix,
                         nw_inf.fixed_ips()[index]['address'])
        self.assertEqual('172.0.%s.2' % id_suffix,
                         nw_inf.fixed_ips()[index].floating_ip_addresses()[0])
        self.assertEqual('my_netname%s' % id_suffix,
                         nw_inf[index]['network']['label'])
        self.assertEqual('my_portid%s' % id_suffix, nw_inf[index]['id'])
        self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address'])
        self.assertEqual('10.0.%s.0/24' % id_suffix,
                         nw_inf[index]['network']['subnets'][0]['cidr'])
        ip_addr = model.IP(address='8.8.%s.1' % id_suffix,
                           version=4, type='dns')
        self.assertIn(ip_addr, nw_inf[index]['network']['subnets'][0]['dns'])

    def _get_instance_nw_info(self, number):
        # Drive get_instance_nw_info() for `number` ports/networks and
        # verify each resulting entry.
        api = neutronapi.API()
        self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
        api.db.instance_info_cache_update(mox.IgnoreArg(),
                                          self.instance['uuid'],
                                          mox.IgnoreArg()).AndReturn(
                                              fake_info_cache)
        port_data = number == 1 and self.port_data1 or self.port_data2
        net_info_cache = []
        for port in port_data:
            net_info_cache.append({"network": {"id": port['network_id']},
                                   "id": port['id']})

        self.moxed_client.list_ports(
tenant_id=self.instance['project_id'], device_id=self.instance['uuid']).AndReturn( {'ports': port_data}) net_ids = [port['network_id'] for port in port_data] nets = number == 1 and self.nets1 or self.nets2 self.moxed_client.list_networks( id=net_ids).AndReturn({'networks': nets}) for i in range(1, number + 1): float_data = number == 1 and self.float_data1 or self.float_data2 for ip in port_data[i - 1]['fixed_ips']: float_data = [x for x in float_data if x['fixed_ip_address'] == ip['ip_address']] self.moxed_client.list_floatingips( fixed_ip_address=ip['ip_address'], port_id=port_data[i - 1]['id']).AndReturn( {'floatingips': float_data}) subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2 self.moxed_client.list_subnets( id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn( {'subnets': subnet_data}) self.moxed_client.list_ports( network_id=subnet_data[0]['network_id'], device_owner='network:dhcp').AndReturn( {'ports': []}) self.instance['info_cache'] = self._fake_instance_info_cache( net_info_cache, self.instance['uuid']) self.mox.StubOutWithMock(api.db, 'instance_info_cache_get') api.db.instance_info_cache_get(mox.IgnoreArg(), self.instance['uuid']).AndReturn( self.instance['info_cache']) self.mox.ReplayAll() instance = self._fake_instance_object_with_info_cache(self.instance) nw_inf = api.get_instance_nw_info(self.context, instance) for i in range(0, number): self._verify_nw_info(nw_inf, i) def _allocate_for_instance(self, net_idx=1, **kwargs): api = self._stub_allocate_for_instance(net_idx, **kwargs) return api.allocate_for_instance(self.context, self.instance, **kwargs) class TestNeutronv2(TestNeutronv2Base): def setUp(self): super(TestNeutronv2, self).setUp() neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) def test_get_instance_nw_info_1(self): # Test to get one port in one network and subnet. 
        neutronapi.get_client(mox.IgnoreArg(),
                              admin=True).MultipleTimes().AndReturn(
            self.moxed_client)
        self._get_instance_nw_info(1)

    def test_get_instance_nw_info_2(self):
        # Test to get one port in each of two networks and subnets.
        neutronapi.get_client(mox.IgnoreArg(),
                              admin=True).MultipleTimes().AndReturn(
            self.moxed_client)
        self._get_instance_nw_info(2)

    def test_get_instance_nw_info_with_nets_add_interface(self):
        # This tests that adding an interface to an instance does not
        # remove the first instance from the instance.
        network_model = model.Network(id='network_id',
                                      bridge='br-int',
                                      injected='injected',
                                      label='fake_network',
                                      tenant_id='fake_tenant')
        network_cache = {'info_cache': {
            'network_info': [{'id': self.port_data2[0]['id'],
                              'address': 'mac_address',
                              'network': network_model,
                              'type': 'ovs',
                              'ovs_interfaceid': 'ovs_interfaceid',
                              'devname': 'devname'}]}}

        self._fake_get_instance_nw_info_helper(network_cache,
                                               self.port_data2,
                                               self.nets2,
                                               [self.port_data2[1]['id']])

    def test_get_instance_nw_info_remove_ports_from_neutron(self):
        # This tests that when a port is removed in neutron it
        # is also removed from the nova.
        network_model = model.Network(id=self.port_data2[0]['network_id'],
                                      bridge='br-int',
                                      injected='injected',
                                      label='fake_network',
                                      tenant_id='fake_tenant')
        network_cache = {'info_cache': {
            'network_info': [{'id': 'network_id',
                              'address': 'mac_address',
                              'network': network_model,
                              'type': 'ovs',
                              'ovs_interfaceid': 'ovs_interfaceid',
                              'devname': 'devname'}]}}

        self._fake_get_instance_nw_info_helper(network_cache,
                                               self.port_data2,
                                               None,
                                               None)

    def test_get_instance_nw_info_ignores_neutron_ports(self):
        # Tests that only ports in the network_cache are updated
        # and ports returned from neutron that match the same
        # instance_id/device_id are ignored.
        port_data2 = copy.copy(self.port_data2)
        # set device_id on the ports to be the same.
        port_data2[1]['device_id'] = port_data2[0]['device_id']
        network_model = model.Network(id='network_id',
                                      bridge='br-int',
                                      injected='injected',
                                      label='fake_network',
                                      tenant_id='fake_tenant')
        network_cache = {'info_cache': {
            'network_info': [{'id': 'network_id',
                              'address': 'mac_address',
                              'network': network_model,
                              'type': 'ovs',
                              'ovs_interfaceid': 'ovs_interfaceid',
                              'devname': 'devname'}]}}

        self._fake_get_instance_nw_info_helper(network_cache,
                                               port_data2,
                                               None,
                                               None)

    def test_get_instance_nw_info_ignores_neutron_ports_empty_cache(self):
        # Tests that ports returned from neutron that match the same
        # instance_id/device_id are ignored when the instance info cache is
        # empty.
        port_data2 = copy.copy(self.port_data2)
        # set device_id on the ports to be the same.
        port_data2[1]['device_id'] = port_data2[0]['device_id']
        network_cache = {'info_cache': {'network_info': []}}

        self._fake_get_instance_nw_info_helper(network_cache,
                                               port_data2,
                                               None,
                                               None)

    def _fake_get_instance_nw_info_helper(self, network_cache,
                                          current_neutron_ports,
                                          networks=None, port_ids=None):
        """Helper function to test get_instance_nw_info.

        :param network_cache - data already in the nova network cache.
        :param current_neutron_ports - updated list of ports from neutron.
        :param networks - networks of ports being added to instance.
        :param port_ids - new ports being added to instance.
        """

        # keep a copy of the original ports/networks to pass to
        # get_instance_nw_info() as the code below changes them.
        original_port_ids = copy.copy(port_ids)
        original_networks = copy.copy(networks)

        api = neutronapi.API()
        self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
        api.db.instance_info_cache_update(
            mox.IgnoreArg(),
            self.instance['uuid'], mox.IgnoreArg()).AndReturn(fake_info_cache)
        neutronapi.get_client(mox.IgnoreArg(),
                              admin=True).MultipleTimes().AndReturn(
            self.moxed_client)
        self.moxed_client.list_ports(
            tenant_id=self.instance['project_id'],
            device_id=self.instance['uuid']).AndReturn(
                {'ports': current_neutron_ports})

        ifaces = network_cache['info_cache']['network_info']

        # Mirror the network lookups get_instance_nw_info() will perform
        # depending on whether explicit port_ids/networks were passed.
        if port_ids is None:
            port_ids = [iface['id'] for iface in ifaces]
            net_ids = [iface['network']['id'] for iface in ifaces]
            nets = [{'id': iface['network']['id'],
                     'name': iface['network']['label'],
                     'tenant_id': iface['network']['meta']['tenant_id']}
                    for iface in ifaces]
        if networks is None:
            if ifaces:
                self.moxed_client.list_networks(
                    id=net_ids).AndReturn({'networks': nets})
            else:
                non_shared_nets = [
                    {'id': iface['network']['id'],
                     'name': iface['network']['label'],
                     'tenant_id': iface['network']['meta']['tenant_id']}
                    for iface in ifaces if not iface['shared']]
                shared_nets = [
                    {'id': iface['network']['id'],
                     'name': iface['network']['label'],
                     'tenant_id': iface['network']['meta']['tenant_id']}
                    for iface in ifaces if iface['shared']]
                self.moxed_client.list_networks(
                    shared=False,
                    tenant_id=self.instance['project_id']
                ).AndReturn({'networks': non_shared_nets})
                self.moxed_client.list_networks(
                    shared=True).AndReturn({'networks': shared_nets})
        else:
            networks = networks + [
                dict(id=iface['network']['id'],
                     name=iface['network']['label'],
                     tenant_id=iface['network']['meta']['tenant_id'])
                for iface in ifaces]
            port_ids = [iface['id'] for iface in ifaces] + port_ids

        index = 0

        current_neutron_port_map = {}
        for current_neutron_port in current_neutron_ports:
            current_neutron_port_map[current_neutron_port['id']] = (
                current_neutron_port)
        for port_id in port_ids:
            current_neutron_port = current_neutron_port_map.get(port_id)
            if current_neutron_port:
                for ip in current_neutron_port['fixed_ips']:
                    self.moxed_client.list_floatingips(
                        fixed_ip_address=ip['ip_address'],
                        port_id=current_neutron_port['id']).AndReturn(
                            {'floatingips': [self.float_data2[index]]})
                    self.moxed_client.list_subnets(
                        id=mox.SameElementsAs([ip['subnet_id']])
                        ).AndReturn(
                            {'subnets': [self.subnet_data_n[index]]})
                    self.moxed_client.list_ports(
                        network_id=current_neutron_port['network_id'],
                        device_owner='network:dhcp').AndReturn(
                            {'ports': self.dhcp_port_data1})
                index += 1

        self.instance['info_cache'] = self._fake_instance_info_cache(
            network_cache['info_cache']['network_info'],
            self.instance['uuid'])
        self.mox.StubOutWithMock(api.db, 'instance_info_cache_get')
        api.db.instance_info_cache_get(
            mox.IgnoreArg(),
            self.instance['uuid']).MultipleTimes().AndReturn(
                self.instance['info_cache'])
        self.mox.ReplayAll()

        instance = self._fake_instance_object_with_info_cache(self.instance)

        nw_infs = api.get_instance_nw_info(self.context,
                                           instance,
                                           networks=original_networks,
                                           port_ids=original_port_ids)

        self.assertEqual(index, len(nw_infs))
        # ensure that nic ordering is preserved
        for iface_index in range(index):
            self.assertEqual(port_ids[iface_index],
                             nw_infs[iface_index]['id'])

    def test_get_instance_nw_info_without_subnet(self):
        # Test get instance_nw_info for a port without subnet.
        api = neutronapi.API()
        self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
        api.db.instance_info_cache_update(
            mox.IgnoreArg(),
            self.instance['uuid'], mox.IgnoreArg()).AndReturn(fake_info_cache)
        # port_data3 has an empty fixed_ips list, so no floating-ip or
        # subnet lookups are expected.
        self.moxed_client.list_ports(
            tenant_id=self.instance['project_id'],
            device_id=self.instance['uuid']).AndReturn(
                {'ports': self.port_data3})
        self.moxed_client.list_networks(
            id=[self.port_data1[0]['network_id']]).AndReturn(
                {'networks': self.nets1})
        neutronapi.get_client(mox.IgnoreArg(),
                              admin=True).MultipleTimes().AndReturn(
            self.moxed_client)

        net_info_cache = []
        for port in self.port_data3:
            net_info_cache.append({"network": {"id": port['network_id']},
                                   "id": port['id']})
        self.instance['info_cache'] = self._fake_instance_info_cache(
            net_info_cache, self.instance['uuid'])
        self.mox.StubOutWithMock(api.db, 'instance_info_cache_get')
        api.db.instance_info_cache_get(
            mox.IgnoreArg(),
            self.instance['uuid']).AndReturn(self.instance['info_cache'])

        self.mox.ReplayAll()

        instance = self._fake_instance_object_with_info_cache(self.instance)
        nw_inf = api.get_instance_nw_info(self.context, instance)

        id_suffix = 3
        self.assertEqual(0, len(nw_inf.fixed_ips()))
        self.assertEqual('my_netname1', nw_inf[0]['network']['label'])
        self.assertEqual('my_portid%s' % id_suffix, nw_inf[0]['id'])
        self.assertEqual('my_mac%s' % id_suffix, nw_inf[0]['address'])
        self.assertEqual(0, len(nw_inf[0]['network']['subnets']))

    def test_refresh_neutron_extensions_cache(self):
        api = neutronapi.API()

        # Note: Don't want the default get_client from setUp()
        self.mox.ResetAll()
        neutronapi.get_client(mox.IgnoreArg()).AndReturn(
            self.moxed_client)
        self.moxed_client.list_extensions().AndReturn(
            {'extensions': [{'name': constants.QOS_QUEUE}]})
        self.mox.ReplayAll()
        api._refresh_neutron_extensions_cache(mox.IgnoreArg())
        self.assertEqual(
            {constants.QOS_QUEUE: {'name': constants.QOS_QUEUE}},
            api.extensions)

    def test_populate_neutron_extension_values_rxtx_factor(self):
        api = neutronapi.API()

        # Note: Don't want the default get_client from setUp()
        self.mox.ResetAll()
        neutronapi.get_client(mox.IgnoreArg()).AndReturn(
            self.moxed_client)
        self.moxed_client.list_extensions().AndReturn(
            {'extensions': [{'name': constants.QOS_QUEUE}]})
        self.mox.ReplayAll()
        flavor = flavors.get_default_flavor()
        flavor['rxtx_factor'] = 1
        instance = objects.Instance(system_metadata={})
        instance.flavor = flavor
        port_req_body = {'port': {}}
        api._populate_neutron_extension_values(self.context, instance,
                                               None, port_req_body)
        self.assertEqual(1, port_req_body['port']['rxtx_factor'])

    def test_allocate_for_instance_1(self):
        # Allocate one port in one network env.
        self._allocate_for_instance(1)

    def test_allocate_for_instance_2(self):
        # Allocate one port in two networks env.
        api = self._stub_allocate_for_instance(net_idx=2)
        self.assertRaises(exception.NetworkAmbiguous,
                          api.allocate_for_instance,
                          self.context, self.instance)

    def test_allocate_for_instance_accepts_macs_kwargs_None(self):
        # The macs kwarg should be accepted as None.
        self._allocate_for_instance(1, macs=None)

    def test_allocate_for_instance_accepts_macs_kwargs_set(self):
        # The macs kwarg should be accepted, as a set, the
        # _allocate_for_instance helper checks that the mac is used to create a
        # port.
        self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))

    def test_allocate_for_instance_accepts_only_portid(self):
        # Make sure allocate_for_instance works when only a portid is provided
        self._returned_nw_info = self.port_data1
        result = self._allocate_for_instance(
            requested_networks=objects.NetworkRequestList(
                objects=[objects.NetworkRequest(port_id='my_portid1')]))
        self.assertEqual(self.port_data1, result)

    @mock.patch('nova.network.neutronv2.api.API._unbind_ports')
    def test_allocate_for_instance_not_enough_macs_via_ports(self,
                                                             mock_unbind):
        # using a hypervisor MAC via a pre-created port will stop it being
        # used to dynamically create a port on a network. We put the network
        # first in requested_networks so that if the code were to not pre-check
        # requested ports, it would incorrectly assign the mac and not fail.
        requested_networks = objects.NetworkRequestList(
            objects = [
                objects.NetworkRequest(network_id=self.nets2[1]['id']),
                objects.NetworkRequest(port_id='my_portid1')])
        api = self._stub_allocate_for_instance(
            net_idx=2, requested_networks=requested_networks,
            macs=set(['my_mac1']),
            _break='mac' + self.nets2[1]['id'])
        self.assertRaises(exception.PortNotFree,
                          api.allocate_for_instance, self.context,
                          self.instance,
                          requested_networks=requested_networks,
                          macs=set(['my_mac1']))
        mock_unbind.assert_called_once_with(self.context, [],
                                            self.moxed_client, mock.ANY)

    @mock.patch('nova.network.neutronv2.api.API._unbind_ports')
    def test_allocate_for_instance_not_enough_macs(self, mock_unbind):
        # If not enough MAC addresses are available to allocate to networks, an
        # error should be raised.
        # We could pass in macs=set(), but that wouldn't tell us that
        # allocate_for_instance tracks used macs properly, so we pass in one
        # mac, and ask for two networks.
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
                     objects.NetworkRequest(network_id=self.nets2[0]['id'])])
        api = self._stub_allocate_for_instance(
            net_idx=2, requested_networks=requested_networks,
            macs=set(['my_mac2']),
            _break='mac' + self.nets2[0]['id'])
        with mock.patch.object(api, '_delete_ports'):
            self.assertRaises(exception.PortNotFree,
                              api.allocate_for_instance, self.context,
                              self.instance,
                              requested_networks=requested_networks,
                              macs=set(['my_mac2']))
        mock_unbind.assert_called_once_with(self.context, [],
                                            self.moxed_client, mock.ANY)

    def test_allocate_for_instance_two_macs_two_networks(self):
        # If two MACs are available and two networks requested, two new ports
        # get made and no exceptions raised.
requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']), objects.NetworkRequest(network_id=self.nets2[0]['id'])]) self._allocate_for_instance( net_idx=2, requested_networks=requested_networks, macs=set(['my_mac2', 'my_mac1'])) def test_allocate_for_instance_mac_conflicting_requested_port(self): # specify only first and last network requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) api = self._stub_allocate_for_instance( net_idx=1, requested_networks=requested_networks, macs=set(['unknown:mac']), _break='pre_list_networks') self.assertRaises(exception.PortNotUsable, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks, macs=set(['unknown:mac'])) def test_allocate_for_instance_without_requested_networks(self): api = self._stub_allocate_for_instance(net_idx=3) self.assertRaises(exception.NetworkAmbiguous, api.allocate_for_instance, self.context, self.instance) def test_allocate_for_instance_with_requested_non_available_network(self): """verify that a non available network is ignored. self.nets2 (net_idx=2) is composed of self.nets3[0] and self.nets3[1] Do not create a port on a non available network self.nets3[2]. 
""" requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets3[0], self.nets3[2], self.nets3[1])]) self._allocate_for_instance(net_idx=2, requested_networks=requested_networks) def test_allocate_for_instance_with_requested_networks(self): # specify only first and last network requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets3[1], self.nets3[0], self.nets3[2])]) self._allocate_for_instance(net_idx=3, requested_networks=requested_networks) def test_allocate_for_instance_with_invalid_network_id(self): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='invalid_id')]) api = self._stub_allocate_for_instance(net_idx=9, requested_networks=requested_networks, _break='post_list_networks') self.assertRaises(exception.NetworkNotFound, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_with_requested_networks_with_fixedip(self): # specify only first and last network requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=self.nets1[0]['id'], address='10.0.1.0')]) self._allocate_for_instance(net_idx=1, requested_networks=requested_networks) def test_allocate_for_instance_with_requested_networks_with_port(self): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) self._allocate_for_instance(net_idx=1, requested_networks=requested_networks) def test_allocate_for_instance_no_networks(self): """verify the exception thrown when there are no networks defined.""" self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) api = neutronapi.API() self.moxed_client.list_extensions().AndReturn({'extensions': []}) self.moxed_client.list_networks( tenant_id=self.instance.project_id, shared=False).AndReturn( {'networks': 
model.NetworkInfo([])}) self.moxed_client.list_networks(shared=True).AndReturn( {'networks': model.NetworkInfo([])}) self.mox.ReplayAll() nwinfo = api.allocate_for_instance(self.context, self.instance) self.assertEqual(0, len(nwinfo)) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') @mock.patch('nova.network.neutronv2.api.API._unbind_ports') def test_allocate_for_instance_ex1(self, mock_unbind, mock_preexisting): """verify we will delete created ports if we fail to allocate all net resources. Mox to raise exception when creating a second port. In this case, the code should delete the first created port. """ self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) mock_preexisting.return_value = [] api = neutronapi.API() self.mox.StubOutWithMock(api, '_populate_neutron_extension_values') self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), neutron=self.moxed_client, refresh_cache=True).AndReturn(False) requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets2[0], self.nets2[1])]) self.moxed_client.list_networks( id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2}) index = 0 for network in self.nets2: binding_port_req_body = { 'port': { 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', }, } port_req_body = { 'port': { 'network_id': network['id'], 'admin_state_up': True, 'tenant_id': self.instance.project_id, }, } port_req_body['port'].update(binding_port_req_body['port']) port = {'id': 'portid_' + network['id']} api._populate_neutron_extension_values(self.context, self.instance, None, binding_port_req_body, network=network, neutron=self.moxed_client, bind_host_id=None).AndReturn(None) if index == 0: self.moxed_client.create_port( MyComparator(port_req_body)).AndReturn({'port': port}) else: NeutronOverQuota = exceptions.OverQuotaClient() self.moxed_client.create_port( 
MyComparator(port_req_body)).AndRaise(NeutronOverQuota) index += 1 self.moxed_client.delete_port('portid_' + self.nets2[0]['id']) self.mox.ReplayAll() self.assertRaises(exception.PortLimitExceeded, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) mock_unbind.assert_called_once_with(self.context, [], self.moxed_client, mock.ANY) def test_allocate_for_instance_ex2(self): """verify we have no port to delete if we fail to allocate the first net resource. Mox to raise exception when creating the first port. In this case, the code should not delete any ports. """ self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) api = neutronapi.API() self.mox.StubOutWithMock(api, '_populate_neutron_extension_values') self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), neutron=self.moxed_client, refresh_cache=True).AndReturn(False) requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets2[0], self.nets2[1])]) self.moxed_client.list_networks( id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2}) binding_port_req_body = { 'port': { 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', }, } port_req_body = { 'port': { 'network_id': self.nets2[0]['id'], 'admin_state_up': True, 'device_id': self.instance.uuid, 'tenant_id': self.instance.project_id, }, } api._populate_neutron_extension_values(self.context, self.instance, None, binding_port_req_body, network=self.nets2[0], neutron=self.moxed_client, bind_host_id=None).AndReturn(None) self.moxed_client.create_port( MyComparator(port_req_body)).AndRaise( Exception("fail to create port")) self.mox.ReplayAll() self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_no_port_or_network(self): class 
BailOutEarly(Exception): pass self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) api = neutronapi.API() self.moxed_client.list_extensions().AndReturn({'extensions': []}) self.mox.StubOutWithMock(api, '_get_available_networks') # Make sure we get an empty list and then bail out of the rest # of the function api._get_available_networks(self.context, self.instance.project_id, [], neutron=self.moxed_client).\ AndRaise(BailOutEarly) self.mox.ReplayAll() requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest()]) self.assertRaises(BailOutEarly, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_second_time(self): # Make sure that allocate_for_instance only returns ports that it # allocated during _that_ run. new_port = {'id': 'fake'} self._returned_nw_info = self.port_data1 + [new_port] nw_info = self._allocate_for_instance() self.assertEqual([new_port], nw_info) def test_allocate_for_instance_port_in_use(self): # If a port is already in use, an exception should be raised. requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) api = self._stub_allocate_for_instance( requested_networks=requested_networks, _break='pre_list_networks', _device=True) self.assertRaises(exception.PortInUse, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_port_not_found(self): # If a port is not found, an exception should be raised. 
requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='invalid_id')]) api = self._stub_allocate_for_instance( requested_networks=requested_networks, _break='pre_list_networks') self.assertRaises(exception.PortNotFound, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_port_invalid_tenantid(self): self.tenant_id = 'invalid_id' requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) api = self._stub_allocate_for_instance( requested_networks=requested_networks, _break='pre_list_networks') self.assertRaises(exception.PortNotUsable, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_with_externalnet_forbidden(self): """Only one network is available, it's external, and the client is unauthorized to use it. """ self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) self.moxed_client.list_extensions().AndReturn({'extensions': []}) # no networks in the tenant self.moxed_client.list_networks( tenant_id=self.instance.project_id, shared=False).AndReturn( {'networks': model.NetworkInfo([])}) # external network is shared self.moxed_client.list_networks(shared=True).AndReturn( {'networks': self.nets8}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises(exception.ExternalNetworkAttachForbidden, api.allocate_for_instance, self.context, self.instance) def test_allocate_for_instance_with_externalnet_multiple(self): """Multiple networks are available, one the client is authorized to use, and an external one the client is unauthorized to use. 
""" self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) self.moxed_client.list_extensions().AndReturn({'extensions': []}) # network found in the tenant self.moxed_client.list_networks( tenant_id=self.instance.project_id, shared=False).AndReturn( {'networks': self.nets1}) # external network is shared self.moxed_client.list_networks(shared=True).AndReturn( {'networks': self.nets8}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises( exception.NetworkAmbiguous, api.allocate_for_instance, self.context, self.instance) def test_allocate_for_instance_with_externalnet_admin_ctx(self): """Only one network is available, it's external, and the client is authorized. """ admin_ctx = context.RequestContext('userid', 'my_tenantid', is_admin=True) api = self._stub_allocate_for_instance(net_idx=8) api.allocate_for_instance(admin_ctx, self.instance) def test_allocate_for_instance_with_external_shared_net(self): """Only one network is available, it's external and shared.""" ctx = context.RequestContext('userid', 'my_tenantid') api = self._stub_allocate_for_instance(net_idx=10) api.allocate_for_instance(ctx, self.instance) def _deallocate_for_instance(self, number, requested_networks=None): # TODO(mriedem): Remove this conversion when all neutronv2 APIs are # converted to handling instance objects. 
self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) api = neutronapi.API() port_data = number == 1 and self.port_data1 or self.port_data2 ports = {port['id'] for port in port_data} ret_data = copy.deepcopy(port_data) if requested_networks: if isinstance(requested_networks, objects.NetworkRequestList): # NOTE(danms): Temporary and transitional with mock.patch('nova.utils.is_neutron', return_value=True): requested_networks = requested_networks.as_tuples() for net, fip, port, request_id in requested_networks: ret_data.append({'network_id': net, 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', 'id': port, 'status': 'DOWN', 'admin_state_up': True, 'fixed_ips': [], 'mac_address': 'fake_mac', }) self.moxed_client.list_ports( device_id=self.instance.uuid).AndReturn( {'ports': ret_data}) self.moxed_client.list_extensions().AndReturn({'extensions': []}) if requested_networks: for net, fip, port, request_id in requested_networks: self.moxed_client.update_port(port) for port in ports: self.moxed_client.delete_port(port) self.mox.StubOutWithMock(api.db, 'instance_info_cache_update') api.db.instance_info_cache_update(self.context, self.instance.uuid, {'network_info': '[]'}).AndReturn( fake_info_cache) self.mox.ReplayAll() api = neutronapi.API() api.deallocate_for_instance(self.context, self.instance, requested_networks=requested_networks) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') def test_deallocate_for_instance_1_with_requested(self, mock_preexisting): mock_preexisting.return_value = [] requested = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='fake-net', address='1.2.3.4', port_id='fake-port')]) # Test to deallocate in one port env. 
self._deallocate_for_instance(1, requested_networks=requested) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') def test_deallocate_for_instance_2_with_requested(self, mock_preexisting): mock_preexisting.return_value = [] requested = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='fake-net', address='1.2.3.4', port_id='fake-port')]) # Test to deallocate in one port env. self._deallocate_for_instance(2, requested_networks=requested) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') def test_deallocate_for_instance_1(self, mock_preexisting): mock_preexisting.return_value = [] # Test to deallocate in one port env. self._deallocate_for_instance(1) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') def test_deallocate_for_instance_2(self, mock_preexisting): mock_preexisting.return_value = [] # Test to deallocate in two ports env. self._deallocate_for_instance(2) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') def test_deallocate_for_instance_port_not_found(self, mock_preexisting): # TODO(mriedem): Remove this conversion when all neutronv2 APIs are # converted to handling instance objects. 
self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) mock_preexisting.return_value = [] port_data = self.port_data1 self.moxed_client.list_ports( device_id=self.instance.uuid).AndReturn( {'ports': port_data}) self.moxed_client.list_extensions().AndReturn({'extensions': []}) NeutronNotFound = exceptions.NeutronClientException(status_code=404) for port in reversed(port_data): self.moxed_client.delete_port(port['id']).AndRaise( NeutronNotFound) self.mox.ReplayAll() api = neutronapi.API() api.deallocate_for_instance(self.context, self.instance) def _test_deallocate_port_for_instance(self, number): port_data = number == 1 and self.port_data1 or self.port_data2 nets = number == 1 and self.nets1 or self.nets2 self.moxed_client.delete_port(port_data[0]['id']) net_info_cache = [] for port in port_data: net_info_cache.append({"network": {"id": port['network_id']}, "id": port['id']}) self.instance['info_cache'] = self._fake_instance_info_cache( net_info_cache, self.instance['uuid']) api = neutronapi.API() neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn( self.moxed_client) self.moxed_client.list_ports( tenant_id=self.instance['project_id'], device_id=self.instance['uuid']).AndReturn( {'ports': port_data[1:]}) neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) net_ids = [port['network_id'] for port in port_data] self.moxed_client.list_networks(id=net_ids).AndReturn( {'networks': nets}) float_data = number == 1 and self.float_data1 or self.float_data2 for data in port_data[1:]: for ip in data['fixed_ips']: self.moxed_client.list_floatingips( fixed_ip_address=ip['ip_address'], port_id=data['id']).AndReturn( {'floatingips': float_data[1:]}) for port in port_data[1:]: self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({}) self.mox.StubOutWithMock(api.db, 'instance_info_cache_get') api.db.instance_info_cache_get(mox.IgnoreArg(), self.instance['uuid']).AndReturn( self.instance['info_cache']) 
self.mox.ReplayAll() instance = self._fake_instance_object_with_info_cache(self.instance) nwinfo = api.deallocate_port_for_instance(self.context, instance, port_data[0]['id']) self.assertEqual(len(port_data[1:]), len(nwinfo)) if len(port_data) > 1: self.assertEqual('my_netid2', nwinfo[0]['network']['id']) def test_deallocate_port_for_instance_1(self): # Test to deallocate the first and only port self._test_deallocate_port_for_instance(1) def test_deallocate_port_for_instance_2(self): # Test to deallocate the first port of two self._test_deallocate_port_for_instance(2) def test_list_ports(self): search_opts = {'parm': 'value'} self.moxed_client.list_ports(**search_opts) self.mox.ReplayAll() neutronapi.API().list_ports(self.context, **search_opts) def test_show_port(self): self.moxed_client.show_port('foo').AndReturn( {'port': self.port_data1[0]}) self.mox.ReplayAll() neutronapi.API().show_port(self.context, 'foo') def test_validate_networks(self): requested_networks = [('my_netid1', None, None, None), ('my_netid2', None, None, None)] ids = ['my_netid1', 'my_netid2'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets2}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 50}}) self.moxed_client.list_ports( tenant_id='my_tenantid', fields=['id']).AndReturn( {'ports': []}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_validate_networks_without_port_quota_on_network_side(self): requested_networks = [('my_netid1', None, None, None), ('my_netid2', None, None, None)] ids = ['my_netid1', 'my_netid2'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets2}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {}}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_validate_networks_ex_1(self): requested_networks 
= [('my_netid1', None, None, None)] self.moxed_client.list_networks( id=mox.SameElementsAs(['my_netid1'])).AndReturn( {'networks': self.nets1}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 50}}) self.moxed_client.list_ports( tenant_id='my_tenantid', fields=['id']).AndReturn( {'ports': []}) self.mox.ReplayAll() api = neutronapi.API() try: api.validate_networks(self.context, requested_networks, 1) except exception.NetworkNotFound as ex: self.assertIn("my_netid2", six.text_type(ex)) def test_validate_networks_ex_2(self): requested_networks = [('my_netid1', None, None, None), ('my_netid2', None, None, None), ('my_netid3', None, None, None)] ids = ['my_netid1', 'my_netid2', 'my_netid3'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets1}) self.mox.ReplayAll() api = neutronapi.API() try: api.validate_networks(self.context, requested_networks, 1) except exception.NetworkNotFound as ex: self.assertIn("my_netid2", six.text_type(ex)) self.assertIn("my_netid3", six.text_type(ex)) def test_validate_networks_duplicate_enable(self): # Verify that no duplicateNetworks exception is thrown when duplicate # network ids are passed to validate_networks. 
requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid1'), objects.NetworkRequest(network_id='my_netid1')]) ids = ['my_netid1', 'my_netid1'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets1}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 50}}) self.moxed_client.list_ports( tenant_id='my_tenantid', fields=['id']).AndReturn( {'ports': []}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_allocate_for_instance_with_requested_networks_duplicates(self): # specify a duplicate network to allocate to instance requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets6[0], self.nets6[1])]) self._allocate_for_instance(net_idx=6, requested_networks=requested_networks) def test_allocate_for_instance_requested_networks_duplicates_port(self): # specify first port and last port that are in same network requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port['id']) for port in (self.port_data1[0], self.port_data3[0])]) self._allocate_for_instance(net_idx=6, requested_networks=requested_networks) def test_allocate_for_instance_requested_networks_duplicates_combo(self): # specify a combo net_idx=7 : net2, port in net1, net2, port in net1 requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid2'), objects.NetworkRequest(port_id=self.port_data1[0]['id']), objects.NetworkRequest(network_id='my_netid2'), objects.NetworkRequest(port_id=self.port_data3[0]['id'])]) self._allocate_for_instance(net_idx=7, requested_networks=requested_networks) def test_validate_networks_not_specified(self): requested_networks = objects.NetworkRequestList(objects=[]) self.moxed_client.list_networks( tenant_id=self.context.project_id, shared=False).AndReturn( 
{'networks': self.nets1}) self.moxed_client.list_networks( shared=True).AndReturn( {'networks': self.nets2}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises(exception.NetworkAmbiguous, api.validate_networks, self.context, requested_networks, 1) def test_validate_networks_port_not_found(self): # Verify that the correct exception is thrown when a non existent # port is passed to validate_networks. requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest( network_id='my_netid1', port_id='3123-ad34-bc43-32332ca33e')]) PortNotFound = exceptions.PortNotFoundClient() self.moxed_client.show_port(requested_networks[0].port_id).AndRaise( PortNotFound) self.mox.ReplayAll() # Expected call from setUp. neutronapi.get_client(None) api = neutronapi.API() self.assertRaises(exception.PortNotFound, api.validate_networks, self.context, requested_networks, 1) def test_validate_networks_port_show_raises_non404(self): # Verify that the correct exception is thrown when a non existent # port is passed to validate_networks. fake_port_id = '3123-ad34-bc43-32332ca33e' requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest( network_id='my_netid1', port_id=fake_port_id)]) NeutronNotFound = exceptions.NeutronClientException(status_code=0) self.moxed_client.show_port(requested_networks[0].port_id).AndRaise( NeutronNotFound) self.mox.ReplayAll() # Expected call from setUp. neutronapi.get_client(None) api = neutronapi.API() exc = self.assertRaises(exception.NovaException, api.validate_networks, self.context, requested_networks, 1) expected_exception_message = ('Failed to access port %(port_id)s: ' 'An unknown exception occurred.' 
% {'port_id': fake_port_id}) self.assertEqual(expected_exception_message, str(exc)) def test_validate_networks_port_in_use(self): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=self.port_data3[0]['id'])]) self.moxed_client.show_port(self.port_data3[0]['id']).\ AndReturn({'port': self.port_data3[0]}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises(exception.PortInUse, api.validate_networks, self.context, requested_networks, 1) def test_validate_networks_port_no_subnet_id(self): port_a = self.port_data3[0] port_a['device_id'] = None port_a['device_owner'] = None requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port_a['id'])]) self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises(exception.PortRequiresFixedIP, api.validate_networks, self.context, requested_networks, 1) def test_validate_networks_no_subnet_id(self): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='his_netid4')]) ids = ['his_netid4'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets4}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises(exception.NetworkRequiresSubnet, api.validate_networks, self.context, requested_networks, 1) def test_validate_networks_ports_in_same_network_enable(self): # Verify that duplicateNetworks exception is not thrown when ports # on same duplicate network are passed to validate_networks. 
        # Both ports are on the same network and unattached (device_id and
        # device_owner cleared), so validating them for one instance passes.
        port_a = self.port_data3[0]
        port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
                               'subnet_id': 'subnet_id'}
        port_b = self.port_data1[0]
        self.assertEqual(port_a['network_id'], port_b['network_id'])
        for port in [port_a, port_b]:
            port['device_id'] = None
            port['device_owner'] = None

        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=port_a['id']),
                     objects.NetworkRequest(port_id=port_b['id'])])
        self.moxed_client.show_port(port_a['id']).AndReturn(
            {'port': port_a})
        self.moxed_client.show_port(port_b['id']).AndReturn(
            {'port': port_b})

        self.mox.ReplayAll()

        api = neutronapi.API()
        api.validate_networks(self.context, requested_networks, 1)

    def test_validate_networks_ports_not_in_same_network(self):
        # Requested ports on two different networks are still valid for a
        # single instance; validate_networks should not raise.
        port_a = self.port_data3[0]
        port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
                               'subnet_id': 'subnet_id'}
        port_b = self.port_data2[1]
        self.assertNotEqual(port_a['network_id'], port_b['network_id'])
        for port in [port_a, port_b]:
            port['device_id'] = None
            port['device_owner'] = None

        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=port_a['id']),
                     objects.NetworkRequest(port_id=port_b['id'])])
        self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
        self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
        self.mox.ReplayAll()

        api = neutronapi.API()
        api.validate_networks(self.context, requested_networks, 1)

    def test_validate_networks_no_quota(self):
        # Test validation for a request for one instance needing
        # two ports, where the quota is 2 and 2 ports are in use
        # => instances which can be created = 0
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='my_netid1'),
                     objects.NetworkRequest(network_id='my_netid2')])
        ids = ['my_netid1', 'my_netid2']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets2})
        self.moxed_client.show_quota(
            tenant_id='my_tenantid').AndReturn(
                {'quota': {'port': 2}})
        self.moxed_client.list_ports(
            tenant_id='my_tenantid', fields=['id']).AndReturn(
                {'ports': self.port_data2})
        self.mox.ReplayAll()
        api = neutronapi.API()
        max_count = api.validate_networks(self.context,
                                          requested_networks, 1)
        self.assertEqual(0, max_count)

    def test_validate_networks_with_ports_and_networks(self):
        # Test validation for a request for one instance needing
        # one port allocated via nova with another port being passed in.
        port_b = self.port_data2[1]
        port_b['device_id'] = None
        port_b['device_owner'] = None
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='my_netid1'),
                     objects.NetworkRequest(port_id=port_b['id'])])
        self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
        ids = ['my_netid1']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets1})
        self.moxed_client.show_quota(
            tenant_id='my_tenantid').AndReturn(
                {'quota': {'port': 5}})
        self.moxed_client.list_ports(
            tenant_id='my_tenantid', fields=['id']).AndReturn(
                {'ports': self.port_data2})
        self.mox.ReplayAll()
        api = neutronapi.API()
        max_count = api.validate_networks(self.context,
                                          requested_networks, 1)
        self.assertEqual(1, max_count)

    def test_validate_networks_one_port_and_no_networks(self):
        # Test that show quota is not called if no networks are
        # passed in and only ports.
        port_b = self.port_data2[1]
        port_b['device_id'] = None
        port_b['device_owner'] = None
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=port_b['id'])])
        self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
        self.mox.ReplayAll()
        api = neutronapi.API()
        max_count = api.validate_networks(self.context,
                                          requested_networks, 1)
        self.assertEqual(1, max_count)

    def test_validate_networks_some_quota(self):
        # Test validation for a request for two instance needing
        # two ports each, where the quota is 5 and 2 ports are in use
        # => instances which can be created = 1
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='my_netid1'),
                     objects.NetworkRequest(network_id='my_netid2')])
        ids = ['my_netid1', 'my_netid2']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets2})
        self.moxed_client.show_quota(
            tenant_id='my_tenantid').AndReturn(
                {'quota': {'port': 5}})
        self.moxed_client.list_ports(
            tenant_id='my_tenantid', fields=['id']).AndReturn(
                {'ports': self.port_data2})
        self.mox.ReplayAll()
        api = neutronapi.API()
        max_count = api.validate_networks(self.context,
                                          requested_networks, 2)
        self.assertEqual(1, max_count)

    def test_validate_networks_unlimited_quota(self):
        # Test validation for a request for two instance needing
        # two ports each, where the quota is -1 (unlimited)
        # => instances which can be created = 1
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='my_netid1'),
                     objects.NetworkRequest(network_id='my_netid2')])
        ids = ['my_netid1', 'my_netid2']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets2})
        # NOTE: with an unlimited quota no list_ports call is expected --
        # the port-count check is short-circuited.
        self.moxed_client.show_quota(
            tenant_id='my_tenantid').AndReturn(
                {'quota': {'port': -1}})
        self.mox.ReplayAll()
        api = neutronapi.API()
        max_count = api.validate_networks(self.context,
                                          requested_networks, 2)
        self.assertEqual(2, max_count)

    def test_validate_networks_no_quota_but_ports_supplied(self):
        # When every requested network comes with a pre-created port,
        # no quota lookup is needed and the full requested count is allowed.
        port_a = self.port_data3[0]
        port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
                               'subnet_id': 'subnet_id'}
        port_b = self.port_data2[1]
        self.assertNotEqual(port_a['network_id'], port_b['network_id'])
        for port in [port_a, port_b]:
            port['device_id'] = None
            port['device_owner'] = None

        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=port_a['id']),
                     objects.NetworkRequest(port_id=port_b['id'])])
        self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
        self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})

        self.mox.ReplayAll()

        api = neutronapi.API()
        max_count = api.validate_networks(self.context,
                                          requested_networks, 1)
        self.assertEqual(1, max_count)

    def _mock_list_ports(self, port_data=None):
        # Helper: record a list_ports expectation filtered by fixed IP and
        # enter replay mode; returns the address used in the filter.
        if port_data is None:
            port_data = self.port_data2
        address = self.port_address
        self.moxed_client.list_ports(
            fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn(
                {'ports': port_data})
        self.mox.ReplayAll()
        return address

    def test_get_fixed_ip_by_address_fails_for_no_ports(self):
        address = self._mock_list_ports(port_data=[])
        api = neutronapi.API()
        self.assertRaises(exception.FixedIpNotFoundForAddress,
                          api.get_fixed_ip_by_address,
                          self.context, address)

    def test_get_fixed_ip_by_address_succeeds_for_1_port(self):
        address = self._mock_list_ports(port_data=self.port_data1)
        api = neutronapi.API()
        result = api.get_fixed_ip_by_address(self.context, address)
        self.assertEqual(self.instance2['uuid'], result['instance_uuid'])

    def test_get_fixed_ip_by_address_fails_for_more_than_1_port(self):
        address = self._mock_list_ports()
        api = neutronapi.API()
        self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances,
                          api.get_fixed_ip_by_address,
                          self.context, address)

    def _get_available_networks(self, prv_nets, pub_nets,
                                req_ids=None, context=None):
        # Helper: mock the neutron list_networks calls that
        # _get_available_networks makes (one call when explicit network ids
        # are requested, otherwise one tenant-scoped plus one shared-scoped
        # call) and assert the merged result.
        api = neutronapi.API()
        nets = prv_nets + pub_nets
        if req_ids:
            mox_list_params = {'id': req_ids}
            self.moxed_client.list_networks(
                **mox_list_params).AndReturn({'networks': nets})
        else:
            mox_list_params = {'tenant_id': self.instance['project_id'],
                               'shared': False}
            self.moxed_client.list_networks(
                **mox_list_params).AndReturn({'networks': prv_nets})
            mox_list_params = {'shared': True}
            self.moxed_client.list_networks(
                **mox_list_params).AndReturn({'networks': pub_nets})

        self.mox.ReplayAll()
        rets = api._get_available_networks(
            context if context else self.context,
            self.instance['project_id'],
            req_ids)
        self.assertEqual(nets, rets)

    def test_get_available_networks_all_private(self):
        self._get_available_networks(prv_nets=self.nets2, pub_nets=[])

    def test_get_available_networks_all_public(self):
        self._get_available_networks(prv_nets=[], pub_nets=self.nets2)

    def test_get_available_networks_private_and_public(self):
        self._get_available_networks(prv_nets=self.nets1,
                                     pub_nets=self.nets4)

    def test_get_available_networks_with_network_ids(self):
        prv_nets = [self.nets3[0]]
        pub_nets = [self.nets3[-1]]
        # specify only first and last network
        req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])]
        self._get_available_networks(prv_nets, pub_nets, req_ids)

    def test_get_available_networks_with_custom_policy(self):
        rules = {'network:attach_external_network': ''}
        policy.set_rules(oslo_policy.Rules.from_dict(rules))
        req_ids = [net['id'] for net in self.nets5]
        self._get_available_networks(self.nets5, pub_nets=[],
                                     req_ids=req_ids)

    def test_get_floating_ip_pools(self):
        api = neutronapi.API()
        search_opts = {'router:external': True}
        self.moxed_client.list_networks(**search_opts).\
            AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
        self.mox.ReplayAll()
        pools = api.get_floating_ip_pools(self.context)
        expected = [self.fip_pool['name'], self.fip_pool_nova['name']]
        self.assertEqual(expected, pools)

    def _get_expected_fip_model(self, fip_data, idx=0):
        # Helper: build the dict the API is expected to return for a
        # floating IP; 'instance' is populated only when the fip has a
        # port_id (i.e. is associated).
        expected = {'id': fip_data['id'],
                    'address': fip_data['floating_ip_address'],
                    'pool': self.fip_pool['name'],
                    'project_id': fip_data['tenant_id'],
                    'fixed_ip_id': fip_data['port_id'],
                    'fixed_ip':
                        {'address': fip_data['fixed_ip_address']},
                    'instance': ({'uuid': self.port_data2[idx]['device_id']}
                                 if fip_data['port_id']
                                 else None)}
        if expected['instance'] is not None:
            expected['fixed_ip']['instance_uuid'] = \
                expected['instance']['uuid']
        return expected

    def _test_get_floating_ip(self, fip_data, idx=0, by_address=False):
        # Helper: exercise get_floating_ip / get_floating_ip_by_address with
        # the matching neutron lookups mocked, and compare against
        # _get_expected_fip_model.
        api = neutronapi.API()
        fip_id = fip_data['id']
        net_id = fip_data['floating_network_id']
        address = fip_data['floating_ip_address']
        if by_address:
            self.moxed_client.list_floatingips(floating_ip_address=address).\
                AndReturn({'floatingips': [fip_data]})
        else:
            self.moxed_client.show_floatingip(fip_id).\
                AndReturn({'floatingip': fip_data})
        self.moxed_client.show_network(net_id).\
            AndReturn({'network': self.fip_pool})
        if fip_data['port_id']:
            self.moxed_client.show_port(fip_data['port_id']).\
                AndReturn({'port': self.port_data2[idx]})
        self.mox.ReplayAll()

        expected = self._get_expected_fip_model(fip_data, idx)

        if by_address:
            fip = api.get_floating_ip_by_address(self.context, address)
        else:
            fip = api.get_floating_ip(self.context, fip_id)
        self.assertEqual(expected, fip)

    def test_get_floating_ip_unassociated(self):
        self._test_get_floating_ip(self.fip_unassociated, idx=0)

    def test_get_floating_ip_associated(self):
        self._test_get_floating_ip(self.fip_associated, idx=1)

    def test_get_floating_ip_by_address(self):
        self._test_get_floating_ip(self.fip_unassociated, idx=0,
                                   by_address=True)

    def test_get_floating_ip_by_address_associated(self):
        self._test_get_floating_ip(self.fip_associated, idx=1,
                                   by_address=True)

    def test_get_floating_ip_by_address_not_found(self):
        api = neutronapi.API()
        address = self.fip_unassociated['floating_ip_address']
        self.moxed_client.list_floatingips(floating_ip_address=address).\
            AndReturn({'floatingips': []})
        self.mox.ReplayAll()
        self.assertRaises(exception.FloatingIpNotFoundForAddress,
                          api.get_floating_ip_by_address,
                          self.context, address)

    def test_get_floating_ip_by_id_not_found(self):
        # A 404 from neutron is translated to nova's FloatingIpNotFound.
        api = neutronapi.API()
        NeutronNotFound = exceptions.NeutronClientException(status_code=404)
        floating_ip_id = self.fip_unassociated['id']
        self.moxed_client.show_floatingip(floating_ip_id).\
            AndRaise(NeutronNotFound)
        self.mox.ReplayAll()
        self.assertRaises(exception.FloatingIpNotFound,
                          api.get_floating_ip,
                          self.context, floating_ip_id)

    def test_get_floating_ip_raises_non404(self):
        # Any neutron client error other than 404 propagates unchanged.
        api = neutronapi.API()
        NeutronNotFound = exceptions.NeutronClientException(status_code=0)
        floating_ip_id = self.fip_unassociated['id']
        self.moxed_client.show_floatingip(floating_ip_id).\
            AndRaise(NeutronNotFound)
        self.mox.ReplayAll()
        self.assertRaises(exceptions.NeutronClientException,
                          api.get_floating_ip,
                          self.context, floating_ip_id)

    def test_get_floating_ip_by_address_multiple_found(self):
        api = neutronapi.API()
        address = self.fip_unassociated['floating_ip_address']
        self.moxed_client.list_floatingips(floating_ip_address=address).\
            AndReturn({'floatingips': [self.fip_unassociated] * 2})
        self.mox.ReplayAll()
        self.assertRaises(exception.FloatingIpMultipleFoundForAddress,
                          api.get_floating_ip_by_address,
                          self.context, address)

    def test_get_floating_ips_by_project(self):
        api = neutronapi.API()
        project_id = self.context.project_id
        self.moxed_client.list_floatingips(tenant_id=project_id).\
            AndReturn({'floatingips': [self.fip_unassociated,
                                       self.fip_associated]})
        search_opts = {'router:external': True}
        self.moxed_client.list_networks(**search_opts).\
            AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
        self.moxed_client.list_ports(tenant_id=project_id).\
            AndReturn({'ports': self.port_data2})
        self.mox.ReplayAll()

        expected = [self._get_expected_fip_model(self.fip_unassociated),
                    self._get_expected_fip_model(self.fip_associated, idx=1)]
        fips = api.get_floating_ips_by_project(self.context)
        self.assertEqual(expected, fips)

    def _test_get_instance_id_by_floating_address(self, fip_data,
                                                  associated=False):
        # Helper: an associated fip resolves through its port to the
        # port's device_id; an unassociated fip resolves to None.
        api = neutronapi.API()
        address = fip_data['floating_ip_address']
        self.moxed_client.list_floatingips(floating_ip_address=address).\
            AndReturn({'floatingips': [fip_data]})
        if associated:
            self.moxed_client.show_port(fip_data['port_id']).\
                AndReturn({'port': self.port_data2[1]})
        self.mox.ReplayAll()

        if associated:
            expected = self.port_data2[1]['device_id']
        else:
            expected = None
        fip = api.get_instance_id_by_floating_address(self.context, address)
        self.assertEqual(expected, fip)

    def test_get_instance_id_by_floating_address(self):
        self._test_get_instance_id_by_floating_address(self.fip_unassociated)

    def test_get_instance_id_by_floating_address_associated(self):
        self._test_get_instance_id_by_floating_address(self.fip_associated,
                                                       associated=True)

    def test_allocate_floating_ip(self):
        api = neutronapi.API()
        pool_name = self.fip_pool['name']
        pool_id = self.fip_pool['id']
        search_opts = {'router:external': True,
                       'fields': 'id',
                       'name': pool_name}
        self.moxed_client.list_networks(**search_opts).\
            AndReturn({'networks': [self.fip_pool]})
        self.moxed_client.create_floatingip(
            {'floatingip': {'floating_network_id': pool_id}}).\
            AndReturn({'floatingip': self.fip_unassociated})
        self.mox.ReplayAll()
        fip = api.allocate_floating_ip(self.context, 'ext_net')
        self.assertEqual(self.fip_unassociated['floating_ip_address'], fip)

    def test_allocate_floating_ip_addr_gen_fail(self):
        # IpAddressGenerationFailureClient maps to NoMoreFloatingIps.
        api = neutronapi.API()
        pool_name = self.fip_pool['name']
        pool_id = self.fip_pool['id']
        search_opts = {'router:external': True,
                       'fields': 'id',
                       'name': pool_name}
        self.moxed_client.list_networks(**search_opts).\
            AndReturn({'networks': [self.fip_pool]})
        self.moxed_client.create_floatingip(
            {'floatingip': {'floating_network_id': pool_id}}).\
            AndRaise(exceptions.IpAddressGenerationFailureClient)
        self.mox.ReplayAll()
        self.assertRaises(exception.NoMoreFloatingIps,
                          api.allocate_floating_ip, self.context, 'ext_net')

    def test_allocate_floating_ip_exhausted_fail(self):
        # ExternalIpAddressExhaustedClient also maps to NoMoreFloatingIps.
        api = neutronapi.API()
        pool_name = self.fip_pool['name']
        pool_id = self.fip_pool['id']
        search_opts = {'router:external': True,
                       'fields': 'id',
                       'name': pool_name}
        self.moxed_client.list_networks(**search_opts).\
            AndReturn({'networks': [self.fip_pool]})
        self.moxed_client.create_floatingip(
            {'floatingip': {'floating_network_id': pool_id}}).\
            AndRaise(exceptions.ExternalIpAddressExhaustedClient)
        self.mox.ReplayAll()
        self.assertRaises(exception.NoMoreFloatingIps,
                          api.allocate_floating_ip, self.context, 'ext_net')

    def test_allocate_floating_ip_with_pool_id(self):
        # The pool may be given as a network id instead of a name; the
        # network lookup then filters on 'id' rather than 'name'.
        api = neutronapi.API()
        pool_id = self.fip_pool['id']
        search_opts = {'router:external': True,
                       'fields': 'id',
                       'id': pool_id}
        self.moxed_client.list_networks(**search_opts).\
            AndReturn({'networks': [self.fip_pool]})
        self.moxed_client.create_floatingip(
            {'floatingip': {'floating_network_id': pool_id}}).\
            AndReturn({'floatingip': self.fip_unassociated})
        self.mox.ReplayAll()
        fip = api.allocate_floating_ip(self.context, pool_id)
        self.assertEqual(self.fip_unassociated['floating_ip_address'], fip)

    def test_allocate_floating_ip_with_default_pool(self):
        # With no pool argument the configured default pool is used.
        api = neutronapi.API()
        pool_name = self.fip_pool_nova['name']
        pool_id = self.fip_pool_nova['id']
        search_opts = {'router:external': True,
                       'fields': 'id',
                       'name': pool_name}
        self.moxed_client.list_networks(**search_opts).\
            AndReturn({'networks': [self.fip_pool_nova]})
        self.moxed_client.create_floatingip(
            {'floatingip': {'floating_network_id': pool_id}}).\
            AndReturn({'floatingip': self.fip_unassociated})
        self.mox.ReplayAll()
        fip = api.allocate_floating_ip(self.context)
        self.assertEqual(self.fip_unassociated['floating_ip_address'], fip)

    def test_release_floating_ip(self):
        api = neutronapi.API()
        address = self.fip_unassociated['floating_ip_address']
        fip_id = self.fip_unassociated['id']

        self.moxed_client.list_floatingips(floating_ip_address=address).\
            AndReturn({'floatingips': [self.fip_unassociated]})
        self.moxed_client.delete_floatingip(fip_id)
        self.mox.ReplayAll()
        api.release_floating_ip(self.context, address)

    def test_disassociate_and_release_floating_ip(self):
        api = neutronapi.API()
        address = self.fip_unassociated['floating_ip_address']
        fip_id = self.fip_unassociated['id']
        floating_ip = {'address': address}

        self.moxed_client.list_floatingips(floating_ip_address=address).\
            AndReturn({'floatingips': [self.fip_unassociated]})
        self.moxed_client.delete_floatingip(fip_id)
        self.mox.ReplayAll()
        api.disassociate_and_release_floating_ip(self.context, None,
                                                 floating_ip)

    def test_release_floating_ip_associated(self):
        # Releasing a floating IP that is still associated with a port
        # must fail with FloatingIpAssociated.
        api = neutronapi.API()
        address = self.fip_associated['floating_ip_address']

        self.moxed_client.list_floatingips(floating_ip_address=address).\
            AndReturn({'floatingips': [self.fip_associated]})
        self.mox.ReplayAll()
        self.assertRaises(exception.FloatingIpAssociated,
                          api.release_floating_ip, self.context, address)

    def _setup_mock_for_refresh_cache(self, api, instances):
        # Helper: stub out the info-cache refresh path
        # (_get_instance_nw_info + db cache update) for each instance,
        # in the order given -- mox verifies call order.
        nw_info = model.NetworkInfo()
        self.mox.StubOutWithMock(api, '_get_instance_nw_info')
        self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
        for instance in instances:
            api._get_instance_nw_info(mox.IgnoreArg(), instance).\
                AndReturn(nw_info)
            api.db.instance_info_cache_update(mox.IgnoreArg(),
                                              instance['uuid'],
                                              mox.IgnoreArg()).AndReturn(
                                                  fake_info_cache)

    def test_associate_floating_ip(self):
        api = neutronapi.API()
        address = self.fip_unassociated['floating_ip_address']
        fixed_address = self.port_address2
        fip_id = self.fip_unassociated['id']
        instance = self._fake_instance_object(self.instance)

        search_opts = {'device_owner': 'compute:nova',
                       'device_id': instance.uuid}
        self.moxed_client.list_ports(**search_opts).\
            AndReturn({'ports': [self.port_data2[1]]})
        self.moxed_client.list_floatingips(floating_ip_address=address).\
            AndReturn({'floatingips': [self.fip_unassociated]})
        self.moxed_client.update_floatingip(
            fip_id, {'floatingip': {'port_id': self.fip_associated['port_id'],
                                    'fixed_ip_address': fixed_address}})
        self._setup_mock_for_refresh_cache(api, [instance])

        self.mox.ReplayAll()
        api.associate_floating_ip(self.context, instance,
                                  address, fixed_address)
    @mock.patch('nova.objects.Instance.get_by_uuid')
    def test_reassociate_floating_ip(self, mock_get):
        # Moving a floating IP that is already attached to one instance onto
        # another must refresh the info cache of BOTH instances (old first,
        # then new -- _setup_mock_for_refresh_cache enforces the order).
        api = neutronapi.API()
        address = self.fip_associated['floating_ip_address']
        new_fixed_address = self.port_address
        fip_id = self.fip_associated['id']

        search_opts = {'device_owner': 'compute:nova',
                       'device_id': self.instance2['uuid']}
        self.moxed_client.list_ports(**search_opts).\
            AndReturn({'ports': [self.port_data2[0]]})
        self.moxed_client.list_floatingips(floating_ip_address=address).\
            AndReturn({'floatingips': [self.fip_associated]})
        self.moxed_client.update_floatingip(
            fip_id, {'floatingip': {'port_id': 'my_portid1',
                                    'fixed_ip_address': new_fixed_address}})
        self.moxed_client.show_port(self.fip_associated['port_id']).\
            AndReturn({'port': self.port_data2[1]})

        mock_get.return_value = fake_instance.fake_instance_obj(
            self.context, **self.instance)
        instance2 = self._fake_instance_object(self.instance2)
        self._setup_mock_for_refresh_cache(api, [mock_get.return_value,
                                                 instance2])

        self.mox.ReplayAll()
        api.associate_floating_ip(self.context, instance2,
                                  address, new_fixed_address)

    def test_associate_floating_ip_not_found_fixed_ip(self):
        # The requested fixed address is not on any of the instance's ports,
        # so the association fails with FixedIpNotFoundForAddress.
        instance = self._fake_instance_object(self.instance)
        api = neutronapi.API()
        address = self.fip_associated['floating_ip_address']
        fixed_address = self.fip_associated['fixed_ip_address']

        search_opts = {'device_owner': 'compute:nova',
                       'device_id': self.instance['uuid']}
        self.moxed_client.list_ports(**search_opts).\
            AndReturn({'ports': [self.port_data2[0]]})

        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpNotFoundForAddress,
                          api.associate_floating_ip, self.context,
                          instance, address, fixed_address)

    def test_disassociate_floating_ip(self):
        # Disassociation clears the fip's port_id and refreshes the
        # instance's info cache.
        instance = self._fake_instance_object(self.instance)
        api = neutronapi.API()
        address = self.fip_associated['floating_ip_address']
        fip_id = self.fip_associated['id']

        self.moxed_client.list_floatingips(floating_ip_address=address).\
            AndReturn({'floatingips': [self.fip_associated]})
        self.moxed_client.update_floatingip(
            fip_id, {'floatingip': {'port_id': None}})
        self._setup_mock_for_refresh_cache(api, [instance])

        self.mox.ReplayAll()
        api.disassociate_floating_ip(self.context, instance, address)

    def test_add_fixed_ip_to_instance(self):
        # Adding a fixed IP appends a second fixed_ips entry (same subnet)
        # to the instance's existing port via update_port.
        instance = self._fake_instance_object(self.instance)
        api = neutronapi.API()
        self._setup_mock_for_refresh_cache(api, [instance])
        network_id = 'my_netid1'
        search_opts = {'network_id': network_id}
        self.moxed_client.list_subnets(
            **search_opts).AndReturn({'subnets': self.subnet_data_n})

        search_opts = {'device_id': instance.uuid,
                       'device_owner': 'compute:nova',
                       'network_id': network_id}
        self.moxed_client.list_ports(
            **search_opts).AndReturn({'ports': self.port_data1})
        port_req_body = {
            'port': {
                'fixed_ips': [{'subnet_id': 'my_subid1'},
                              {'subnet_id': 'my_subid1'}],
            },
        }
        port = self.port_data1[0]
        port['fixed_ips'] = [{'subnet_id': 'my_subid1'}]
        self.moxed_client.update_port('my_portid1',
            MyComparator(port_req_body)).AndReturn({'port': port})

        self.mox.ReplayAll()
        api.add_fixed_ip_to_instance(self.context, instance, network_id)

    def test_remove_fixed_ip_from_instance(self):
        # Removing a fixed IP updates the matching port with an empty
        # fixed_ips list.
        instance = self._fake_instance_object(self.instance)
        api = neutronapi.API()
        self._setup_mock_for_refresh_cache(api, [instance])
        address = '10.0.0.3'
        zone = 'compute:%s' % self.instance['availability_zone']
        search_opts = {'device_id': self.instance['uuid'],
                       'device_owner': zone,
                       'fixed_ips': 'ip_address=%s' % address}
        self.moxed_client.list_ports(
            **search_opts).AndReturn({'ports': self.port_data1})
        port_req_body = {
            'port': {
                'fixed_ips': [],
            },
        }
        port = self.port_data1[0]
        port['fixed_ips'] = []
        self.moxed_client.update_port('my_portid1',
            MyComparator(port_req_body)).AndReturn({'port': port})

        self.mox.ReplayAll()
        api.remove_fixed_ip_from_instance(self.context, instance, address)

    def test_list_floating_ips_without_l3_support(self):
        # If neutron has no L3 extension, the floating-ip lookup raises
        # NotFound and the helper returns an empty list instead of failing.
        api = neutronapi.API()
        NeutronNotFound = exceptions.NotFound()
        self.moxed_client.list_floatingips(
            fixed_ip_address='1.1.1.1', port_id=1).AndRaise(NeutronNotFound)
        self.mox.ReplayAll()
        neutronapi.get_client('fake')
        floatingips = api._get_floating_ips_by_fixed_and_port(
            self.moxed_client, '1.1.1.1', 1)
        self.assertEqual([], floatingips)

    def test_nw_info_get_ips(self):
        # _nw_info_get_ips returns the port's fixed IPs, each annotated
        # with its floating IPs.
        fake_port = {
            'fixed_ips': [
                {'ip_address': '1.1.1.1'}],
            'id': 'port-id',
            }
        api = neutronapi.API()
        self.mox.StubOutWithMock(api,
                                 '_get_floating_ips_by_fixed_and_port')
        api._get_floating_ips_by_fixed_and_port(
            self.moxed_client, '1.1.1.1', 'port-id').AndReturn(
                [{'floating_ip_address': '10.0.0.1'}])
        self.mox.ReplayAll()
        neutronapi.get_client('fake')
        result = api._nw_info_get_ips(self.moxed_client, fake_port)
        self.assertEqual(1, len(result))
        self.assertEqual('1.1.1.1', result[0]['address'])
        self.assertEqual('10.0.0.1', result[0]['floating_ips'][0]['address'])

    def test_nw_info_get_subnets(self):
        # Only fixed IPs that fall inside a subnet's CIDR are attached to
        # that subnet (2.2.2.2 is outside 1.0.0.0/8 and is dropped).
        fake_port = {
            'fixed_ips': [
                {'ip_address': '1.1.1.1'},
                {'ip_address': '2.2.2.2'}],
            'id': 'port-id',
            }
        fake_subnet = model.Subnet(cidr='1.0.0.0/8')
        fake_ips = [model.IP(x['ip_address']) for x in fake_port['fixed_ips']]
        api = neutronapi.API()
        self.mox.StubOutWithMock(api, '_get_subnets_from_port')
        api._get_subnets_from_port(self.context, fake_port).AndReturn(
            [fake_subnet])
        self.mox.ReplayAll()
        neutronapi.get_client('fake')
        subnets = api._nw_info_get_subnets(self.context, fake_port, fake_ips)
        self.assertEqual(1, len(subnets))
        self.assertEqual(1, len(subnets[0]['ips']))
        self.assertEqual('1.1.1.1', subnets[0]['ips'][0]['address'])

    def _test_nw_info_build_network(self, vif_type):
        # Helper: build a network model for a single port with the given
        # vif_type and verify the fields common to all vif types; returns
        # (net, iid) so callers can check type-specific fields.
        fake_port = {
            'fixed_ips': [{'ip_address': '1.1.1.1'}],
            'id': 'port-id',
            'network_id': 'net-id',
            'binding:vif_type': vif_type,
            }
        fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
        fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant',
                      'mtu': 9000}]
        api = neutronapi.API()
        self.mox.ReplayAll()
        neutronapi.get_client('fake')
        net, iid = api._nw_info_build_network(fake_port, fake_nets,
                                              fake_subnets)
        self.assertEqual(fake_subnets, net['subnets'])
        self.assertEqual('net-id', net['id'])
        self.assertEqual('foo', net['label'])
        self.assertEqual('tenant', net.get_meta('tenant_id'))
        self.assertEqual(9000, net.get_meta('mtu'))
        self.assertEqual(CONF.flat_injected, net.get_meta('injected'))
        return net, iid

    def test_nw_info_build_network_ovs(self):
        net, iid = self._test_nw_info_build_network(model.VIF_TYPE_OVS)
        self.assertEqual(CONF.neutron.ovs_bridge, net['bridge'])
        self.assertNotIn('should_create_bridge', net)
        self.assertEqual('port-id', iid)

    def test_nw_info_build_network_dvs(self):
        net, iid = self._test_nw_info_build_network(model.VIF_TYPE_DVS)
        self.assertEqual('net-id', net['bridge'])
        self.assertNotIn('should_create_bridge', net)
        self.assertNotIn('ovs_interfaceid', net)
        self.assertIsNone(iid)

    def test_nw_info_build_network_bridge(self):
        net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE)
        self.assertEqual('brqnet-id', net['bridge'])
        self.assertTrue(net['should_create_bridge'])
        self.assertIsNone(iid)

    def test_nw_info_build_network_tap(self):
        net, iid = self._test_nw_info_build_network(model.VIF_TYPE_TAP)
        self.assertIsNone(net['bridge'])
        self.assertNotIn('should_create_bridge', net)
        self.assertIsNone(iid)

    def test_nw_info_build_network_other(self):
        net, iid = self._test_nw_info_build_network(None)
        self.assertIsNone(net['bridge'])
        self.assertNotIn('should_create_bridge', net)
        self.assertIsNone(iid)

    def test_nw_info_build_no_match(self):
        # The port's network_id is absent from fake_nets; the model is
        # still built from the port's own data (id, tenant_id).
        fake_port = {
            'fixed_ips': [{'ip_address': '1.1.1.1'}],
            'id': 'port-id',
            'network_id': 'net-id1',
            'tenant_id': 'tenant',
            'binding:vif_type': model.VIF_TYPE_OVS,
            }
        fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
        fake_nets = [{'id': 'net-id2', 'name': 'foo', 'tenant_id': 'tenant'}]
        api = neutronapi.API()
        self.mox.ReplayAll()
        neutronapi.get_client('fake')
        net, iid = api._nw_info_build_network(fake_port, fake_nets,
                                              fake_subnets)
        self.assertEqual(fake_subnets, net['subnets'])
        self.assertEqual('net-id1', net['id'])
        self.assertEqual('tenant', net['meta']['tenant_id'])

    def test_nw_info_build_network_vhostuser(self):
        # A vhost-user port with the OVS plug detail behaves like an OVS
        # port: it gets the configured OVS bridge and the port id as iid.
        fake_port = {
            'fixed_ips': [{'ip_address': '1.1.1.1'}],
            'id': 'port-id',
            'network_id': 'net-id',
            'binding:vif_type': model.VIF_TYPE_VHOSTUSER,
            'binding:vif_details': {
                    model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True
                }
            }
        fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
        fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
        api = neutronapi.API()
        self.mox.ReplayAll()
        neutronapi.get_client('fake')
        net, iid = api._nw_info_build_network(fake_port, fake_nets,
                                              fake_subnets)
        self.assertEqual(fake_subnets, net['subnets'])
        self.assertEqual('net-id', net['id'])
        self.assertEqual('foo', net['label'])
        self.assertEqual('tenant', net.get_meta('tenant_id'))
        self.assertEqual(CONF.flat_injected, net.get_meta('injected'))
        self.assertEqual(CONF.neutron.ovs_bridge, net['bridge'])
        self.assertNotIn('should_create_bridge', net)
        self.assertEqual('port-id', iid)

    def _test_nw_info_build_custom_bridge(self, vif_type, extra_details=None):
        # Helper: a bridge name supplied in binding:vif_details overrides
        # the default bridge for the given vif type.
        fake_port = {
            'fixed_ips': [{'ip_address': '1.1.1.1'}],
            'id': 'port-id',
            'network_id': 'net-id',
            'binding:vif_type': vif_type,
            'binding:vif_details': {
                model.VIF_DETAILS_BRIDGE_NAME: 'custom-bridge',
            }
            }
        if extra_details:
            fake_port['binding:vif_details'].update(extra_details)
        fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
        fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
        api = neutronapi.API()
        self.mox.ReplayAll()
        neutronapi.get_client('fake')
        net, iid = api._nw_info_build_network(fake_port, fake_nets,
                                              fake_subnets)
        self.assertNotEqual(CONF.neutron.ovs_bridge, net['bridge'])
        self.assertEqual('custom-bridge', net['bridge'])

    def test_nw_info_build_custom_ovs_bridge(self):
        self._test_nw_info_build_custom_bridge(model.VIF_TYPE_OVS)

    def test_nw_info_build_custom_ovs_bridge_vhostuser(self):
        self._test_nw_info_build_custom_bridge(model.VIF_TYPE_VHOSTUSER,
                {model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True})

    def test_nw_info_build_custom_lb_bridge(self):
        self._test_nw_info_build_custom_bridge(model.VIF_TYPE_BRIDGE)

    def test_build_network_info_model(self):
        # End-to-end check of _build_network_info_model: ports are returned
        # in the requested order, ignored when their network is unknown
        # (port6), marked active per admin_state_up/status, and flagged
        # preserve_on_delete when pre-existing (explicit arg 'port3' plus
        # 'port5' from _get_preexisting_port_ids).
        api = neutronapi.API()

        fake_inst = objects.Instance()
        fake_inst.project_id = 'fake'
        fake_inst.uuid = 'uuid'
        fake_inst.info_cache = objects.InstanceInfoCache()
        fake_inst.info_cache.network_info = model.NetworkInfo()
        fake_ports = [
            # admin_state_up=True and status='ACTIVE' thus vif.active=True
            {'id': 'port1',
             'network_id': 'net-id',
             'admin_state_up': True,
             'status': 'ACTIVE',
             'fixed_ips': [{'ip_address': '1.1.1.1'}],
             'mac_address': 'de:ad:be:ef:00:01',
             'binding:vif_type': model.VIF_TYPE_BRIDGE,
             'binding:vnic_type': model.VNIC_TYPE_NORMAL,
             'binding:vif_details': {},
             },
            # admin_state_up=False and status='DOWN' thus vif.active=True
            {'id': 'port2',
             'network_id': 'net-id',
             'admin_state_up': False,
             'status': 'DOWN',
             'fixed_ips': [{'ip_address': '1.1.1.1'}],
             'mac_address': 'de:ad:be:ef:00:02',
             'binding:vif_type': model.VIF_TYPE_BRIDGE,
             'binding:vnic_type': model.VNIC_TYPE_NORMAL,
             'binding:vif_details': {},
             },
            # admin_state_up=True and status='DOWN' thus vif.active=False
            {'id': 'port0',
             'network_id': 'net-id',
             'admin_state_up': True,
             'status': 'DOWN',
             'fixed_ips': [{'ip_address': '1.1.1.1'}],
             'mac_address': 'de:ad:be:ef:00:03',
             'binding:vif_type': model.VIF_TYPE_BRIDGE,
             'binding:vnic_type': model.VNIC_TYPE_NORMAL,
             'binding:vif_details': {},
             },
            # admin_state_up=True and status='ACTIVE' thus vif.active=True
            {'id': 'port3',
             'network_id': 'net-id',
             'admin_state_up': True,
             'status': 'ACTIVE',
             'fixed_ips': [{'ip_address': '1.1.1.1'}],
             'mac_address': 'de:ad:be:ef:00:04',
             'binding:vif_type': model.VIF_TYPE_HW_VEB,
             'binding:vnic_type': model.VNIC_TYPE_DIRECT,
             'binding:profile': {'pci_vendor_info': '1137:0047',
                                 'pci_slot': '0000:0a:00.1',
                                 'physical_network': 'phynet1'},
             'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
             },
            # admin_state_up=True and status='ACTIVE' thus vif.active=True
            {'id': 'port4',
             'network_id': 'net-id',
             'admin_state_up': True,
             'status': 'ACTIVE',
             'fixed_ips': [{'ip_address': '1.1.1.1'}],
             'mac_address': 'de:ad:be:ef:00:05',
             'binding:vif_type': model.VIF_TYPE_802_QBH,
             'binding:vnic_type': model.VNIC_TYPE_MACVTAP,
             'binding:profile': {'pci_vendor_info': '1137:0047',
                                 'pci_slot': '0000:0a:00.2',
                                 'physical_network': 'phynet1'},
             'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
             },
            # admin_state_up=True and status='ACTIVE' thus vif.active=True
            # This port has no binding:vnic_type to verify default is assumed
            {'id': 'port5',
             'network_id': 'net-id',
             'admin_state_up': True,
             'status': 'ACTIVE',
             'fixed_ips': [{'ip_address': '1.1.1.1'}],
             'mac_address': 'de:ad:be:ef:00:06',
             'binding:vif_type': model.VIF_TYPE_BRIDGE,
             # No binding:vnic_type
             'binding:vif_details': {},
             },
            # This does not match the networks we provide below,
            # so it should be ignored (and is here to verify that)
            {'id': 'port6',
             'network_id': 'other-net-id',
             'admin_state_up': True,
             'status': 'DOWN',
             'binding:vnic_type': model.VNIC_TYPE_NORMAL,
             },
            ]
        fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
        fake_nets = [
            {'id': 'net-id',
             'name': 'foo',
             'tenant_id': 'fake',
             }
            ]
        neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
            ).AndReturn(self.moxed_client)
        self.moxed_client.list_ports(
            tenant_id='fake', device_id='uuid').AndReturn(
                {'ports': fake_ports})

        self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
        self.mox.StubOutWithMock(api, '_get_subnets_from_port')
        requested_ports = [fake_ports[2], fake_ports[0], fake_ports[1],
                           fake_ports[3], fake_ports[4], fake_ports[5]]
        for requested_port in requested_ports:
            api._get_floating_ips_by_fixed_and_port(
                self.moxed_client, '1.1.1.1', requested_port['id']).AndReturn(
                    [{'floating_ip_address': '10.0.0.1'}])
        for requested_port in requested_ports:
            api._get_subnets_from_port(self.context, requested_port
                                       ).AndReturn(fake_subnets)

        self.mox.StubOutWithMock(api, '_get_preexisting_port_ids')
        api._get_preexisting_port_ids(fake_inst).AndReturn(['port5'])
        self.mox.ReplayAll()
        neutronapi.get_client('fake')
        fake_inst.info_cache = objects.InstanceInfoCache.new(
            self.context, 'fake-uuid')
        fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([])
        nw_infos = api._build_network_info_model(
            self.context, fake_inst,
            fake_nets,
            [fake_ports[2]['id'],
             fake_ports[0]['id'],
             fake_ports[1]['id'],
             fake_ports[3]['id'],
             fake_ports[4]['id'],
             fake_ports[5]['id']],
            preexisting_port_ids=['port3'])

        self.assertEqual(6, len(nw_infos))
        index = 0
        for nw_info in nw_infos:
            self.assertEqual(requested_ports[index]['mac_address'],
                             nw_info['address'])
            self.assertEqual('tapport' + str(index), nw_info['devname'])
            self.assertIsNone(nw_info['ovs_interfaceid'])
            self.assertEqual(requested_ports[index]['binding:vif_type'],
                             nw_info['type'])
            if nw_info['type'] == model.VIF_TYPE_BRIDGE:
                self.assertEqual('brqnet-id', nw_info['network']['bridge'])
            self.assertEqual(requested_ports[index].get('binding:vnic_type',
                                 model.VNIC_TYPE_NORMAL),
                             nw_info['vnic_type'])
            self.assertEqual(requested_ports[index].get('binding:vif_details'),
                             nw_info.get('details'))
            self.assertEqual(requested_ports[index].get('binding:profile'),
                             nw_info.get('profile'))
            index += 1

        self.assertFalse(nw_infos[0]['active'])
        self.assertTrue(nw_infos[1]['active'])
        self.assertTrue(nw_infos[2]['active'])
        self.assertTrue(nw_infos[3]['active'])
        self.assertTrue(nw_infos[4]['active'])
        self.assertTrue(nw_infos[5]['active'])

        self.assertEqual('port0', nw_infos[0]['id'])
        self.assertEqual('port1', nw_infos[1]['id'])
        self.assertEqual('port2', nw_infos[2]['id'])
        self.assertEqual('port3', nw_infos[3]['id'])
        self.assertEqual('port4', nw_infos[4]['id'])
        self.assertEqual('port5', nw_infos[5]['id'])

        self.assertFalse(nw_infos[0]['preserve_on_delete'])
        self.assertFalse(nw_infos[1]['preserve_on_delete'])
        self.assertFalse(nw_infos[2]['preserve_on_delete'])
        self.assertTrue(nw_infos[3]['preserve_on_delete'])
        self.assertFalse(nw_infos[4]['preserve_on_delete'])
        self.assertTrue(nw_infos[5]['preserve_on_delete'])
    @mock.patch('nova.network.neutronv2.api.API._nw_info_get_subnets')
    @mock.patch('nova.network.neutronv2.api.API._nw_info_get_ips')
    @mock.patch('nova.network.neutronv2.api.API._nw_info_build_network')
    @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
    @mock.patch('nova.network.neutronv2.api.API._gather_port_ids_and_networks')
    def test_build_network_info_model_empty(
            self, mock_gather_port_ids_and_networks,
            mock_get_preexisting_port_ids,
            mock_nw_info_build_network,
            mock_nw_info_get_ips,
            mock_nw_info_get_subnets):
        # An empty instance info network cache should not be populated from
        # ports found in Neutron.
        api = neutronapi.API()
        fake_inst = objects.Instance()
        fake_inst.project_id = 'fake'
        fake_inst.uuid = 'uuid'
        fake_inst.info_cache = objects.InstanceInfoCache()
        fake_inst.info_cache.network_info = model.NetworkInfo()
        fake_ports = [
            # admin_state_up=True and status='ACTIVE' thus vif.active=True
            {'id': 'port1',
             'network_id': 'net-id',
             'admin_state_up': True,
             'status': 'ACTIVE',
             'fixed_ips': [{'ip_address': '1.1.1.1'}],
             'mac_address': 'de:ad:be:ef:00:01',
             'binding:vif_type': model.VIF_TYPE_BRIDGE,
             'binding:vnic_type': model.VNIC_TYPE_NORMAL,
             'binding:vif_details': {},
             },
            ]
        fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
        # Record the expected admin-client lookups (mox replayed below).
        neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
            ).AndReturn(self.moxed_client)
        self.moxed_client.list_ports(
            tenant_id='fake', device_id='uuid').AndReturn(
                {'ports': fake_ports})
        mock_gather_port_ids_and_networks.return_value = ([], [])
        mock_get_preexisting_port_ids.return_value = []
        mock_nw_info_build_network.return_value = (None, None)
        mock_nw_info_get_ips.return_value = []
        mock_nw_info_get_subnets.return_value = fake_subnets
        self.mox.ReplayAll()
        neutronapi.get_client('fake')
        nw_infos = api._build_network_info_model(
            self.context, fake_inst)
        self.assertEqual(0, len(nw_infos))

    # Host routes on a subnet must surface as route/gateway entries in the
    # returned subnet model.
    def test_get_subnets_from_port(self):
        api = neutronapi.API()
        port_data = copy.copy(self.port_data1[0])
        subnet_data1 = copy.copy(self.subnet_data1)
        subnet_data1[0]['host_routes'] = [
            {'destination': '192.168.0.0/24', 'nexthop': '1.0.0.10'}
        ]
        self.moxed_client.list_subnets(
            id=[port_data['fixed_ips'][0]['subnet_id']]
        ).AndReturn({'subnets': subnet_data1})
        self.moxed_client.list_ports(
            network_id=subnet_data1[0]['network_id'],
            device_owner='network:dhcp').AndReturn({'ports': []})
        self.mox.ReplayAll()
        subnets = api._get_subnets_from_port(self.context, port_data)
        self.assertEqual(1, len(subnets))
        self.assertEqual(1, len(subnets[0]['routes']))
        self.assertEqual(subnet_data1[0]['host_routes'][0]['destination'],
                         subnets[0]['routes'][0]['cidr'])
        self.assertEqual(subnet_data1[0]['host_routes'][0]['nexthop'],
                         subnets[0]['routes'][0]['gateway']['address'])

    # get_all() with no networks in Neutron returns an empty NetworkList.
    def test_get_all_empty_list_networks(self):
        api = neutronapi.API()
        self.moxed_client.list_networks().AndReturn({'networks': []})
        self.mox.ReplayAll()
        networks = api.get_all(self.context)
        self.assertIsInstance(networks, objects.NetworkList)
        self.assertEqual(0, len(networks))

    # A port with an explicit vnic_type returns both the type and the
    # provider physical network name.
    @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
    def test_get_port_vnic_info_1(self, mock_get_client):
        api = neutronapi.API()
        self.mox.ResetAll()
        test_port = {
            'port': {'id': 'my_port_id1',
                     'network_id': 'net-id',
                     'binding:vnic_type': model.VNIC_TYPE_DIRECT,
                     },
            }
        test_net = {'network': {'provider:physical_network': 'phynet1'}}

        mock_client = mock_get_client()
        mock_client.show_port.return_value = test_port
        mock_client.show_network.return_value = test_net
        vnic_type, phynet_name = api._get_port_vnic_info(
            self.context, mock_client, test_port['port']['id'])

        mock_client.show_port.assert_called_once_with(test_port['port']['id'],
            fields=['binding:vnic_type', 'network_id'])
        mock_client.show_network.assert_called_once_with(
            test_port['port']['network_id'],
            fields='provider:physical_network')
        self.assertEqual(model.VNIC_TYPE_DIRECT, vnic_type)
        self.assertEqual('phynet1', phynet_name)

    # Helper: a port without (or with a 'normal') vnic_type resolves to
    # VNIC_TYPE_NORMAL and no physical network.
    def _test_get_port_vnic_info(self, mock_get_client,
                                 binding_vnic_type=None):
        api = neutronapi.API()
        self.mox.ResetAll()
        test_port = {
            'port': {'id': 'my_port_id2',
                     'network_id': 'net-id',
                     },
            }
        if binding_vnic_type:
            test_port['port']['binding:vnic_type'] = binding_vnic_type

        mock_get_client.reset_mock()
        mock_client = mock_get_client()
        mock_client.show_port.return_value = test_port
        vnic_type, phynet_name = api._get_port_vnic_info(
            self.context, mock_client, test_port['port']['id'])

        mock_client.show_port.assert_called_once_with(test_port['port']['id'],
            fields=['binding:vnic_type', 'network_id'])
        self.assertEqual(model.VNIC_TYPE_NORMAL, vnic_type)
        self.assertFalse(phynet_name)

    @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
    def test_get_port_vnic_info_2(self, mock_get_client):
        self._test_get_port_vnic_info(mock_get_client,
                                      binding_vnic_type=model.VNIC_TYPE_NORMAL)

    @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
    def test_get_port_vnic_info_3(self, mock_get_client):
        self._test_get_port_vnic_info(mock_get_client)

    # Only SR-IOV-capable ports (direct/macvtap) should produce PCI
    # requests; the 'normal' port and the plain network request do not.
    @mock.patch.object(neutronapi.API, "_get_port_vnic_info")
    @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
    def test_create_pci_requests_for_sriov_ports(self, mock_get_client,
                                                 mock_get_port_vnic_info):
        api = neutronapi.API()
        self.mox.ResetAll()
        requested_networks = objects.NetworkRequestList(
            objects = [
                objects.NetworkRequest(port_id='my_portid1'),
                objects.NetworkRequest(network_id='net1'),
                objects.NetworkRequest(port_id='my_portid2'),
                objects.NetworkRequest(port_id='my_portid3'),
                objects.NetworkRequest(port_id='my_portid4')])
        pci_requests = objects.InstancePCIRequests(requests=[])
        mock_get_port_vnic_info.side_effect = [
            (model.VNIC_TYPE_DIRECT, 'phynet1'),
            (model.VNIC_TYPE_NORMAL, ''),
            (model.VNIC_TYPE_MACVTAP, 'phynet1'),
            (model.VNIC_TYPE_MACVTAP, 'phynet2')
        ]
        api.create_pci_requests_for_sriov_ports(
            None, pci_requests, requested_networks)
        self.assertEqual(3, len(pci_requests.requests))
        has_pci_request_id = [net.pci_request_id is not None for net in
                              requested_networks.objects]
        expected_results = [True, False, False, True, True]
        self.assertEqual(expected_results, has_pci_request_id)


class TestNeutronv2WithMock(test.TestCase):
    """Used to test Neutron V2 API with mock."""

    def setUp(self):
        super(TestNeutronv2WithMock, self).setUp()
        self.api = neutronapi.API()
        self.context = context.RequestContext(
            'fake-user', 'fake-project',
            auth_token='bff4a5a6b9eb4ea2a6efec6eefb77936')

    # Refreshing an instance's network info must serialize on a
    # per-instance cache lock.
    @mock.patch('oslo_concurrency.lockutils.lock')
    def test_get_instance_nw_info_locks_per_instance(self, mock_lock):
        instance = objects.Instance(uuid=uuid.uuid4())
        api = neutronapi.API()
        mock_lock.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          api.get_instance_nw_info, 'context', instance)
        mock_lock.assert_called_once_with('refresh_cache-%s' % instance.uuid)

    @mock.patch('nova.network.neutronv2.api.LOG')
    def test_get_instance_nw_info_verify_duplicates_ignored(self, mock_log):
        """test that the returned networks & port_ids from
        _gather_port_ids_and_networks doesn't contain any duplicates

        The test fakes an instance with two ports connected to two networks.
        The _gather_port_ids_and_networks method will be called with the
        instance and a list of port ids of which one port id is configured
        already to the instance (== duplicate #1) and a list of networks that
        already contains a network to which an instance port is connected (==
        duplicate #2).

        All-in-all, we expect the resulting port ids list to contain 3 items
        (["instance_port_1", "port_1", "port_2"]) and the resulting networks
        list to contain 3 items (["net_1", "net_2", "instance_network_1"])
        while the warning message for duplicate items was executed twice
        (due to "duplicate #1" & "duplicate #2")
        """
        networks = [model.Network(id="net_1"),
                    model.Network(id="net_2")]
        port_ids = ["port_1", "port_2"]

        instance_networks = [{"id": "instance_network_1",
                              "name": "fake_network",
                              "tenant_id": "fake_tenant_id"}]
        instance_port_ids = ["instance_port_1"]

        network_info = model.NetworkInfo(
            [{'id': port_ids[0],
              'network': networks[0]},
             {'id': instance_port_ids[0],
              'network': model.Network(
                  id=instance_networks[0]["id"],
                  label=instance_networks[0]["name"],
                  meta={"tenant_id": instance_networks[0]["tenant_id"]})}]
        )

        instance_uuid = uuid.uuid4()
        instance = objects.Instance(uuid=instance_uuid,
                                    info_cache=objects.InstanceInfoCache(
                                        context=self.context,
                                        instance_uuid=instance_uuid,
                                        network_info=network_info))

        new_networks, new_port_ids = self.api._gather_port_ids_and_networks(
            self.context, instance, networks, port_ids)

        self.assertEqual(new_networks, networks + instance_networks)
        self.assertEqual(new_port_ids, instance_port_ids + port_ids)
        self.assertEqual(2, mock_log.warning.call_count)

    # get_instance_nw_info() must both fetch fresh info and push it into
    # the instance info cache (without updating cells).
    @mock.patch('oslo_concurrency.lockutils.lock')
    @mock.patch.object(neutronapi.API, '_get_instance_nw_info')
    @mock.patch('nova.network.base_api.update_instance_cache_with_nw_info')
    def test_get_instance_nw_info(self, mock_update, mock_get, mock_lock):
        fake_result = mock.sentinel.get_nw_info_result
        mock_get.return_value = fake_result
        instance = fake_instance.fake_instance_obj(self.context)
        result = self.api.get_instance_nw_info(self.context, instance)
        mock_get.assert_called_once_with(self.context, instance)
        mock_update.assert_called_once_with(self.api, self.context, instance,
                                            nw_info=fake_result,
                                            update_cells=False)
        self.assertEqual(fake_result, result)

    # Helper: run validate_networks() with fixed IPs that are not in use.
    # list_port_values pairs expected list_ports kwargs with their replies.
    def _test_validate_networks_fixed_ip_no_dup(self, nets, requested_networks,
                                                ids, list_port_values):

        def _fake_list_ports(**search_opts):
            for args, return_value in list_port_values:
                if args == search_opts:
                    return return_value
            self.fail('Unexpected call to list_ports %s' % search_opts)

        with test.nested(
            mock.patch.object(client.Client, 'list_ports',
                              side_effect=_fake_list_ports),
            mock.patch.object(client.Client, 'list_networks',
                              return_value={'networks': nets}),
            mock.patch.object(client.Client, 'show_quota',
                              return_value={'quota': {'port': 50}})) as (
                list_ports_mock, list_networks_mock, show_quota_mock):

            self.api.validate_networks(self.context, requested_networks, 1)

            self.assertEqual(len(list_port_values),
                             len(list_ports_mock.call_args_list))
            list_networks_mock.assert_called_once_with(id=ids)
            show_quota_mock.assert_called_once_with(tenant_id='fake-project')

    def test_validate_networks_over_limit_quota(self):
        """Test validates that a relevant exception is being raised when
           there are more ports defined, than there is a quota for it.
        """
        requested_networks = [('my_netid1', '10.0.1.2', None, None),
                              ('my_netid2', '10.0.1.3', None, None)]

        list_port_values = [({'network_id': 'my_netid1',
                              'fixed_ips': 'ip_address=10.0.1.2',
                              'fields': 'device_id'},
                             {'ports': []}),
                            ({'network_id': 'my_netid2',
                              'fixed_ips': 'ip_address=10.0.1.3',
                              'fields': 'device_id'},
                             {'ports': []}),
                            ({'tenant_id': 'fake-project', 'fields': ['id']},
                             {'ports': [1, 2, 3, 4, 5]})]

        nets = [{'subnets': '1'}, {'subnets': '2'}]

        def _fake_list_ports(**search_opts):
            for args, return_value in list_port_values:
                if args == search_opts:
                    return return_value

        with test.nested(
            mock.patch.object(self.api, '_get_available_networks',
                              return_value=nets),
            mock.patch.object(client.Client, 'list_ports',
                              side_effect=_fake_list_ports),
            mock.patch.object(client.Client, 'show_quota',
                              return_value={'quota': {'port': 1}})):

            exc = self.assertRaises(exception.PortLimitExceeded,
                                    self.api.validate_networks,
                                    self.context, requested_networks, 1)
            expected_exception_msg = ('The number of defined ports: '
                                      '%(ports)d is over the limit: '
                                      '%(quota)d' %
                                      {'ports': 5,
                                       'quota': 1})
            self.assertEqual(expected_exception_msg, str(exc))

    def test_validate_networks_fixed_ip_no_dup1(self):
        # Test validation for a request for a network with a
        # fixed ip that is not already in use because no fixed ips in use

        nets1 = [{'id': 'my_netid1',
                  'name': 'my_netname1',
                  'subnets': ['mysubnid1'],
                  'tenant_id': 'fake-project'}]

        requested_networks = [('my_netid1', '10.0.1.2', None, None)]
        ids = ['my_netid1']
        list_port_values = [({'network_id': 'my_netid1',
                              'fixed_ips': 'ip_address=10.0.1.2',
                              'fields': 'device_id'},
                             {'ports': []}),
                            ({'tenant_id': 'fake-project', 'fields': ['id']},
                             {'ports': []})]
        self._test_validate_networks_fixed_ip_no_dup(nets1,
                                                     requested_networks,
                                                     ids,
                                                     list_port_values)

    def test_validate_networks_fixed_ip_no_dup2(self):
        # Test validation for a request for a network with a
        # fixed ip that is not already in use because not used on this net id

        nets2 = [{'id': 'my_netid1',
                  'name': 'my_netname1',
                  'subnets': ['mysubnid1'],
                  'tenant_id': 'fake-project'},
                 {'id': 'my_netid2',
                  'name': 'my_netname2',
                  'subnets': ['mysubnid2'],
                  'tenant_id': 'fake-project'}]

        requested_networks = [('my_netid1', '10.0.1.2', None, None),
                              ('my_netid2', '10.0.1.3', None, None)]
        ids = ['my_netid1', 'my_netid2']
        list_port_values = [({'network_id': 'my_netid1',
                              'fixed_ips': 'ip_address=10.0.1.2',
                              'fields': 'device_id'},
                             {'ports': []}),
                            ({'network_id': 'my_netid2',
                              'fixed_ips': 'ip_address=10.0.1.3',
                              'fields': 'device_id'},
                             {'ports': []}),
                            ({'tenant_id': 'fake-project', 'fields': ['id']},
                             {'ports': []})]
        self._test_validate_networks_fixed_ip_no_dup(nets2,
                                                     requested_networks,
                                                     ids,
                                                     list_port_values)

    def test_validate_networks_fixed_ip_dup(self):
        # Test validation for a request for a network with a
        # fixed ip that is already in use

        requested_networks = [('my_netid1', '10.0.1.2', None, None)]
        list_port_mock_params = {'network_id': 'my_netid1',
                                 'fixed_ips': 'ip_address=10.0.1.2',
                                 'fields': 'device_id'}
        list_port_mock_return = {'ports': [({'device_id': 'my_deviceid'})]}

        with mock.patch.object(client.Client, 'list_ports',
                               return_value=list_port_mock_return) as (
            list_ports_mock):

            self.assertRaises(exception.FixedIpAlreadyInUse,
                              self.api.validate_networks,
                              self.context, requested_networks, 1)

            list_ports_mock.assert_called_once_with(**list_port_mock_params)

    def test_allocate_floating_ip_exceed_limit(self):
        # Verify that the correct exception is thrown when quota exceed
        pool_name = 'dummy'
        api = neutronapi.API()
        with test.nested(
            mock.patch.object(client.Client, 'create_floatingip'),
            mock.patch.object(api,
                '_get_floating_ip_pool_id_by_name_or_id')) as (
            create_mock, get_mock):
            create_mock.side_effect = exceptions.OverQuotaClient()

            self.assertRaises(exception.FloatingIpLimitExceeded,
                          api.allocate_floating_ip,
                          self.context, pool_name)

    # A BadRequest from Neutron (external network lacks an IPv4 subnet)
    # is translated to FloatingIpBadRequest.
    def test_allocate_floating_ip_no_ipv4_subnet(self):
        api = neutronapi.API()
        net_id = uuid.uuid4()
        error_msg = ('Bad floatingip request: Network %s does not contain '
                     'any IPv4 subnet' % net_id)
        with test.nested(
            mock.patch.object(client.Client, 'create_floatingip'),
            mock.patch.object(api,
                '_get_floating_ip_pool_id_by_name_or_id')) as (
            create_mock, get_mock):
            create_mock.side_effect = exceptions.BadRequest(error_msg)

            self.assertRaises(exception.FloatingIpBadRequest,
                              api.allocate_floating_ip, self.context,
                              'ext_net')

    # IpAddressGenerationFailureClient from Neutron maps to NoMoreFixedIps.
    def test_create_port_for_instance_no_more_ip(self):
        instance = fake_instance.fake_instance_obj(self.context)
        net = {'id': 'my_netid1',
               'name': 'my_netname1',
               'subnets': ['mysubnid1'],
               'tenant_id': instance['project_id']}

        with mock.patch.object(client.Client, 'create_port',
            side_effect=exceptions.IpAddressGenerationFailureClient()) as (
            create_port_mock):
            zone = 'compute:%s' % instance['availability_zone']
            port_req_body = {'port': {'device_id': instance['uuid'],
                                      'device_owner': zone}}
            self.assertRaises(exception.NoMoreFixedIps,
                              self.api._create_port,
                              neutronapi.get_client(self.context),
                              instance, net['id'], port_req_body)
            create_port_mock.assert_called_once_with(port_req_body)

    # MacAddressInUseClient from Neutron maps to PortInUse.
    @mock.patch.object(client.Client, 'create_port',
                       side_effect=exceptions.MacAddressInUseClient())
    def test_create_port_for_instance_mac_address_in_use(self,
                                                         create_port_mock):
        # Create fake data.
        instance = fake_instance.fake_instance_obj(self.context)
        net = {'id': 'my_netid1',
               'name': 'my_netname1',
               'subnets': ['mysubnid1'],
               'tenant_id': instance['project_id']}
        zone = 'compute:%s' % instance['availability_zone']
        port_req_body = {'port': {'device_id': instance['uuid'],
                                  'device_owner': zone,
                                  'mac_address': 'XX:XX:XX:XX:XX:XX'}}
        available_macs = set(['XX:XX:XX:XX:XX:XX'])
        # Run the code.
        self.assertRaises(exception.PortInUse,
                          self.api._create_port,
                          neutronapi.get_client(self.context),
                          instance, net['id'], port_req_body,
                          available_macs=available_macs)
        # Assert the calls.
        create_port_mock.assert_called_once_with(port_req_body)

    # IpAddressInUseClient from Neutron maps to FixedIpAlreadyInUse.
    @mock.patch.object(client.Client, 'create_port',
                       side_effect=exceptions.IpAddressInUseClient())
    def test_create_port_for_fixed_ip_in_use(self, create_port_mock):
        # Create fake data.
        instance = fake_instance.fake_instance_obj(self.context)
        net = {'id': 'my_netid1',
               'name': 'my_netname1',
               'subnets': ['mysubnid1'],
               'tenant_id': instance['project_id']}
        zone = 'compute:%s' % instance['availability_zone']
        port_req_body = {'port': {'device_id': instance['uuid'],
                                  'device_owner': zone,
                                  'mac_address': 'XX:XX:XX:XX:XX:XX'}}
        fake_ip = '1.1.1.1'
        # Run the code.
        self.assertRaises(exception.FixedIpAlreadyInUse,
                          self.api._create_port,
                          neutronapi.get_client(self.context),
                          instance, net['id'], port_req_body,
                          fixed_ip=fake_ip)
        # Assert the calls.
        create_port_mock.assert_called_once_with(port_req_body)

    # InvalidIpForNetworkClient maps to InvalidInput with a message naming
    # the offending IP and network.
    @mock.patch.object(client.Client, 'create_port',
                       side_effect=exceptions.InvalidIpForNetworkClient())
    def test_create_port_with_invalid_ip_for_network(self, create_port_mock):
        # Create fake data.
        instance = fake_instance.fake_instance_obj(self.context)
        net = {'id': 'my_netid1',
               'name': 'my_netname1',
               'subnets': ['mysubnid1'],
               'tenant_id': instance['project_id']}
        zone = 'compute:%s' % instance['availability_zone']
        port_req_body = {'port': {'device_id': instance['uuid'],
                                  'device_owner': zone,
                                  'mac_address': 'XX:XX:XX:XX:XX:XX'}}
        fake_ip = '1.1.1.1'
        # Run the code.
        exc = self.assertRaises(exception.InvalidInput,
                                self.api._create_port,
                                neutronapi.get_client(self.context),
                                instance, net['id'], port_req_body,
                                fixed_ip=fake_ip)
        # Assert the exception message
        expected_exception_msg = ('Invalid input received: Fixed IP %(ip)s is '
                                  'not a valid ip address for network '
                                  '%(net_id)s.' % {'ip': fake_ip,
                                                   'net_id': net['id']})
        self.assertEqual(expected_exception_msg, str(exc))
        # Assert the calls.
        create_port_mock.assert_called_once_with(port_req_body)

    def test_get_network_detail_not_found(self):
        api = neutronapi.API()
        expected_exc = exceptions.NetworkNotFoundClient()
        network_uuid = '02cacbca-7d48-4a2c-8011-43eecf8a9786'
        with mock.patch.object(client.Client, 'show_network',
                               side_effect=expected_exc) as (
            fake_show_network):
            self.assertRaises(exception.NetworkNotFound,
                              api.get,
                              self.context,
                              network_uuid)
            fake_show_network.assert_called_once_with(network_uuid)

    @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
    @mock.patch('nova.network.neutronv2.api.'
                'API._refresh_neutron_extensions_cache')
    def test_deallocate_for_instance_uses_delete_helper(self,
                                                        mock_refresh,
                                                        mock_preexisting):
        # setup fake data
        instance = fake_instance.fake_instance_obj(self.context)
        mock_preexisting.return_value = []
        port_data = {'ports': [{'id': str(uuid.uuid4())}]}
        ports = set([port['id'] for port in port_data.get('ports')])
        api = neutronapi.API()
        # setup mocks
        mock_client = mock.Mock()
        mock_client.list_ports.return_value = port_data
        with test.nested(
            mock.patch.object(neutronapi, 'get_client',
                              return_value=mock_client),
            mock.patch.object(api, '_delete_ports')
        ) as (
            mock_get_client, mock_delete
        ):
            # run the code
            api.deallocate_for_instance(self.context, instance)
            # assert the calls
            mock_client.list_ports.assert_called_once_with(
                device_id=instance.uuid)
            mock_delete.assert_called_once_with(
                mock_client, instance, ports, raise_if_fail=True)

    # Helper: first delete_port raises; with raise_if_fail the failure
    # propagates, otherwise it is swallowed and deletion continues.
    def _test_delete_ports(self, expect_raise):
        results = [exceptions.NeutronClientException, None]
        mock_client = mock.Mock()
        with mock.patch.object(mock_client, 'delete_port',
                               side_effect=results):
            api = neutronapi.API()
            api._delete_ports(mock_client, {'uuid': 'foo'},
                              ['port1', 'port2'],
                              raise_if_fail=expect_raise)

    def test_delete_ports_raise(self):
        self.assertRaises(exceptions.NeutronClientException,
                          self._test_delete_ports, True)

    def test_delete_ports_no_raise(self):
        self._test_delete_ports(False)
    # A 404 (port already gone) is never an error when deleting ports.
    def test_delete_ports_never_raise_404(self):
        mock_client = mock.Mock()
        mock_client.delete_port.side_effect = exceptions.PortNotFoundClient
        api = neutronapi.API()
        api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1'],
                          raise_if_fail=True)
        mock_client.delete_port.assert_called_once_with('port1')

    @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
    def test_deallocate_port_for_instance_fails(self, mock_preexisting):
        mock_preexisting.return_value = []
        mock_client = mock.Mock()
        api = neutronapi.API()
        with test.nested(
            mock.patch.object(neutronapi, 'get_client',
                              return_value=mock_client),
            mock.patch.object(api, '_delete_ports',
                              side_effect=exceptions.Unauthorized),
            mock.patch.object(api, 'get_instance_nw_info')
        ) as (
            get_client, delete_ports, get_nw_info
        ):
            self.assertRaises(exceptions.Unauthorized,
                              api.deallocate_port_for_instance,
                              self.context, instance={'uuid': 'fake'},
                              port_id='fake')
        # make sure that we didn't try to reload nw info
        self.assertFalse(get_nw_info.called)

    # Helper: neutronclient errors from show_port map to the expected
    # Nova exception types.
    @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
    def _test_show_port_exceptions(self, client_exc, expected_nova_exc,
                                   get_client_mock):
        show_port_mock = mock.Mock(side_effect=client_exc)
        get_client_mock.return_value.show_port = show_port_mock
        self.assertRaises(expected_nova_exc, self.api.show_port,
                          self.context, 'fake_port_id')

    def test_show_port_not_found(self):
        self._test_show_port_exceptions(exceptions.PortNotFoundClient,
                                        exception.PortNotFound)

    def test_show_port_forbidden(self):
        self._test_show_port_exceptions(exceptions.Unauthorized,
                                        exception.Forbidden)

    def test_show_port_unknown_exception(self):
        self._test_show_port_exceptions(exceptions.NeutronClientException,
                                        exception.NovaException)

    def test_get_network(self):
        api = neutronapi.API()
        with mock.patch.object(client.Client, 'show_network') as mock_show:
            mock_show.return_value = {
                'network': {'id': 'fake-uuid', 'name': 'fake-network'}
            }
            net_obj = api.get(self.context, 'fake-uuid')
            self.assertEqual('fake-network', net_obj.label)
            self.assertEqual('fake-network', net_obj.name)
            self.assertEqual('fake-uuid', net_obj.uuid)

    def test_get_all_networks(self):
        api = neutronapi.API()
        with mock.patch.object(client.Client, 'list_networks') as mock_list:
            mock_list.return_value = {
                'networks': [
                    {'id': 'fake-uuid1', 'name': 'fake-network1'},
                    {'id': 'fake-uuid2', 'name': 'fake-network2'},
                    ]}
            net_objs = api.get_all(self.context)
            self.assertIsInstance(net_objs, objects.NetworkList)
            self.assertEqual(2, len(net_objs))
            self.assertEqual(('fake-uuid1', 'fake-network1'),
                             (net_objs[0].uuid, net_objs[0].name))
            self.assertEqual(('fake-uuid2', 'fake-network2'),
                             (net_objs[1].uuid, net_objs[1].name))

    @mock.patch.object(neutronapi.API, "_refresh_neutron_extensions_cache")
    @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
    def test_update_instance_vnic_index(self, mock_get_client,
                                        mock_refresh_extensions):
        api = neutronapi.API()
        api.extensions = set([constants.VNIC_INDEX_EXT])
        mock_client = mock_get_client()
        mock_client.update_port.return_value = 'port'

        instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
                    'uuid': str(uuid.uuid4()),
                    'display_name': 'test_instance',
                    'availability_zone': 'nova',
                    'host': 'some_host'}
        instance = objects.Instance(**instance)
        vif = {'id': 'fake-port-id'}
        api.update_instance_vnic_index(self.context, instance, vif, 7)
        port_req_body = {'port': {'vnic_index': 7}}
        mock_client.update_port.assert_called_once_with('fake-port-id',
                                                        port_req_body)

    @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
    def test_update_port_bindings_for_instance_same_host(self,
                                                         get_client_mock):
        instance = fake_instance.fake_instance_obj(self.context)
        self.api._has_port_binding_extension = mock.Mock(return_value=True)

        # We test two ports, one with the same host as the host passed in and
        # one where binding:host_id isn't set, so we update that port.
        fake_ports = {'ports': [
                        {'id': 'fake-port-1',
                         'binding:host_id': instance.host},
                        {'id': 'fake-port-2'}]}
        list_ports_mock = mock.Mock(return_value=fake_ports)
        get_client_mock.return_value.list_ports = list_ports_mock

        update_port_mock = mock.Mock()
        get_client_mock.return_value.update_port = update_port_mock

        self.api._update_port_binding_for_instance(self.context, instance,
                                                   instance.host)
        # Assert that update_port was only called on the port without a host.
        update_port_mock.assert_called_once_with(
            'fake-port-2', {'port': {'binding:host_id': instance.host}})

    # Only VIFs flagged preserve_on_delete count as pre-existing ports.
    @mock.patch('nova.network.neutronv2.api.compute_utils')
    def test_get_preexisting_port_ids(self, mocked_comp_utils):
        mocked_comp_utils.get_nw_info_for_instance.return_value = [model.VIF(
            id='1', preserve_on_delete=False), model.VIF(
            id='2', preserve_on_delete=True), model.VIF(
            id='3', preserve_on_delete=True)]
        result = self.api._get_preexisting_port_ids(None)
        self.assertEqual(['2', '3'], result, "Invalid preexisting ports")

    # Helper: _unbind_ports only requests an admin client when the port
    # binding extension is available.
    def _test_unbind_ports_get_client(self, mock_neutron,
                                      mock_has_ext, has_ext=False):
        mock_ctx = mock.Mock(is_admin=False)
        mock_has_ext.return_value = has_ext
        ports = ["1", "2", "3"]

        self.api._unbind_ports(mock_ctx, ports, mock_neutron)

        get_client_calls = []
        get_client_calls.append(mock.call(mock_ctx)
                                if not has_ext else
                                mock.call(mock_ctx, admin=True))

        if has_ext:
            self.assertEqual(1, mock_neutron.call_count)
            mock_neutron.assert_has_calls(get_client_calls, True)
        else:
            self.assertEqual(0, mock_neutron.call_count)

    @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_unbind_ports_get_client_binding_extension(self,
                                                       mock_neutron,
                                                       mock_has_ext):
        self._test_unbind_ports_get_client(mock_neutron, mock_has_ext, True)

    @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_unbind_ports_get_client(self, mock_neutron, mock_has_ext):
        self._test_unbind_ports_get_client(mock_neutron, mock_has_ext)

    # Helper: _unbind_ports clears device_id/device_owner on every port,
    # plus the binding fields when the extension is present.
    def _test_unbind_ports(self, mock_neutron, mock_has_ext, has_ext=False):
        mock_client = mock.Mock()
        mock_update_port = mock.Mock()
        mock_client.update_port = mock_update_port
        mock_ctx = mock.Mock(is_admin=False)
        mock_has_ext.return_value = has_ext
        mock_neutron.return_value = mock_client
        ports = ["1", "2", "3"]

        api = neutronapi.API()
        api._unbind_ports(mock_ctx, ports, mock_client)

        body = {'port': {'device_id': '', 'device_owner': ''}}
        if has_ext:
            body['port']['binding:host_id'] = None
            body['port']['binding:profile'] = {}
        update_port_calls = []
        for p in ports:
            update_port_calls.append(mock.call(p, body))

        self.assertEqual(3, mock_update_port.call_count)
        mock_update_port.assert_has_calls(update_port_calls)

    @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_unbind_ports_binding_ext(self, mock_neutron, mock_has_ext):
        self._test_unbind_ports(mock_neutron, mock_has_ext, True)

    @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_unbind_ports(self, mock_neutron, mock_has_ext):
        self._test_unbind_ports(mock_neutron, mock_has_ext, False)

    @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
    def test_unbind_ports_no_port_ids(self, mock_has_ext):
        # Tests that None entries in the ports list are filtered out.
        mock_client = mock.Mock()
        mock_update_port = mock.Mock()
        mock_client.update_port = mock_update_port
        mock_ctx = mock.Mock(is_admin=False)
        mock_has_ext.return_value = True

        api = neutronapi.API()
        api._unbind_ports(mock_ctx, [None], mock_client, mock_client)
        self.assertFalse(mock_update_port.called)
    # If binding one of several requested ports fails, the already-bound
    # ports must be unbound again.
    @mock.patch('nova.network.neutronv2.api.API.get_instance_nw_info')
    @mock.patch('nova.network.neutronv2.api.excutils')
    @mock.patch('nova.network.neutronv2.api.API._delete_ports')
    @mock.patch('nova.network.neutronv2.api.API.'
                '_check_external_network_attach')
    @mock.patch('nova.network.neutronv2.api.LOG')
    @mock.patch('nova.network.neutronv2.api.API._unbind_ports')
    @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
    @mock.patch('nova.network.neutronv2.api.API.'
                '_populate_neutron_extension_values')
    @mock.patch('nova.network.neutronv2.api.API._get_available_networks')
    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_allocate_for_instance_unbind(self, mock_ntrn,
                                          mock_avail_nets,
                                          mock_ext_vals,
                                          mock_has_pbe,
                                          mock_unbind,
                                          mock_log,
                                          mock_cena,
                                          mock_del_ports,
                                          mock_exeu,
                                          mock_giwn):
        mock_nc = mock.Mock()

        def show_port(port_id):
            return {'port': {'network_id': 'net-1',
                             'id': port_id,
                             'tenant_id': 'proj-1'}}

        mock_nc.show_port = show_port

        mock_ntrn.return_value = mock_nc
        # Third update_port call (for 'fail-port') blows up.
        mock_nc.update_port.side_effect = [True, True, Exception]
        mock_inst = mock.Mock(project_id="proj-1",
                              availability_zone='zone-1',
                              uuid='inst-1')
        mock_has_pbe.return_value = False
        nw_req = objects.NetworkRequestList(
            objects = [objects.NetworkRequest(port_id='fake-port1'),
                       objects.NetworkRequest(port_id='fake-port2'),
                       objects.NetworkRequest(port_id='fail-port')])
        mock_avail_nets.return_value = [{'id': 'net-1'}]

        self.api.allocate_for_instance(mock.sentinel.ctx,
                                       mock_inst,
                                       requested_networks=nw_req)

        mock_unbind.assert_called_once_with(mock.sentinel.ctx,
                                            ['fake-port1', 'fake-port2'],
                                            mock.ANY,
                                            mock.ANY)

    @mock.patch('nova.network.neutronv2.api.API._process_requested_networks')
    @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
    @mock.patch('nova.network.neutronv2.api.API._get_available_networks')
    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_allocate_port_for_instance_no_networks(self,
                                                    mock_getclient,
                                                    mock_avail_nets,
                                                    mock_has_pbe,
                                                    mock_process_request_net):
        """Tests that if no networks are requested and no networks are
        available, we fail with InterfaceAttachFailedNoNetwork.
        """
        instance = fake_instance.fake_instance_obj(self.context)
        mock_has_pbe.return_value = False
        mock_process_request_net.return_value = ({}, [], [], None)
        mock_avail_nets.return_value = []
        api = neutronapi.API()
        ex = self.assertRaises(exception.InterfaceAttachFailedNoNetwork,
                               api.allocate_port_for_instance,
                               self.context, instance, port_id=None)
        self.assertEqual(
            "No specific network was requested and none are available for "
            "project 'fake-project'.", six.text_type(ex))

    # Pre-existing (preserve_on_delete) ports are unbound but not deleted;
    # nova-created ports are deleted.
    @mock.patch('nova.objects.network_request.utils')
    @mock.patch('nova.network.neutronv2.api.LOG')
    @mock.patch('nova.network.neutronv2.api.base_api')
    @mock.patch('nova.network.neutronv2.api.API._delete_ports')
    @mock.patch('nova.network.neutronv2.api.API._unbind_ports')
    @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_preexisting_deallocate_for_instance(self, mock_ntrn,
                                                 mock_gppids,
                                                 mock_unbind,
                                                 mock_deletep,
                                                 mock_baseapi,
                                                 mock_log,
                                                 req_utils):
        req_utils.is_neutron.return_value = True
        mock_inst = mock.Mock(project_id="proj-1",
                              availability_zone='zone-1',
                              uuid='inst-1')
        mock_nc = mock.Mock()
        mock_ntrn.return_value = mock_nc
        mock_nc.list_ports.return_value = {'ports': [
            {'id': 'port-1'}, {'id': 'port-2'}, {'id': 'port-3'}
        ]}
        nw_req = objects.NetworkRequestList(
            objects = [objects.NetworkRequest(network_id='net-1',
                                              address='192.168.0.3',
                                              port_id='port-1',
                                              pci_request_id='pci-1')])
        mock_gppids.return_value = ['port-3']

        self.api.deallocate_for_instance(mock.sentinel.ctx, mock_inst,
                                         requested_networks=nw_req)

        mock_unbind.assert_called_once_with(mock.sentinel.ctx,
                                            set(['port-1', 'port-3']),
                                            mock.ANY)
        mock_deletep.assert_called_once_with(mock_nc,
                                             mock_inst,
                                             set(['port-2']),
                                             raise_if_fail=True)

    @mock.patch('nova.network.neutronv2.api.API.get_instance_nw_info')
    @mock.patch('nova.network.neutronv2.api.API._unbind_ports')
    @mock.patch('nova.network.neutronv2.api.compute_utils')
    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_preexisting_deallocate_port_for_instance(self,
                                                      mock_ntrn,
                                                      mock_comp_utils,
                                                      mock_unbind,
                                                      mock_netinfo):
        mock_comp_utils.get_nw_info_for_instance.return_value = [model.VIF(
            id='1', preserve_on_delete=False), model.VIF(
            id='2', preserve_on_delete=True), model.VIF(
            id='3', preserve_on_delete=True)]
        mock_inst = mock.Mock(project_id="proj-1",
                              availability_zone='zone-1',
                              uuid='inst-1')
        mock_client = mock.Mock()
        mock_ntrn.return_value = mock_client
        self.api.deallocate_port_for_instance(mock.sentinel.ctx,
                                              mock_inst, '2')
        # Pre-existing port '2' is only unbound, never deleted.
        mock_unbind.assert_called_once_with(mock.sentinel.ctx, ['2'],
                                            mock_client)

    # A nova-created port whose binding fails must be deleted again.
    @mock.patch('nova.network.neutronv2.api.API.'
                '_check_external_network_attach')
    @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
    @mock.patch('nova.network.neutronv2.api.API.'
                '_populate_neutron_extension_values')
    @mock.patch('nova.network.neutronv2.api.API._get_available_networks')
    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_port_binding_failed_created_port(self, mock_ntrn,
                                              mock_avail_nets,
                                              mock_ext_vals,
                                              mock_has_pbe,
                                              mock_cena):
        mock_has_pbe.return_value = True
        mock_nc = mock.Mock()
        mock_ntrn.return_value = mock_nc
        mock_inst = mock.Mock(project_id="proj-1",
                              availability_zone='zone-1',
                              uuid='inst-1')
        mock_avail_nets.return_value = [{'id': 'net-1'}]
        mock_nc.create_port.return_value = {'port': {'id': 'fake_id',
                                            'tenant_id': mock_inst.project_id,
                                            'binding:vif_type':
                                                'binding_failed'}}

        self.assertRaises(exception.PortBindingFailed,
                          self.api.allocate_for_instance,
                          mock.sentinel.ctx,
                          mock_inst)
        mock_nc.delete_port.assert_called_once_with('fake_id')

    # A user-supplied port already in binding_failed state raises
    # PortBindingFailed (no port to clean up).
    @mock.patch('nova.network.neutronv2.api.API._show_port')
    @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension')
    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_port_binding_failed_with_request(self, mock_ntrn,
                                              mock_has_pbe,
                                              mock_show_port):
        mock_has_pbe.return_value = True
        mock_nc = mock.Mock()
        mock_ntrn.return_value = mock_nc
        mock_inst = mock.Mock(project_id="proj-1",
                              availability_zone='zone-1',
                              uuid='inst-1')
        mock_show_port.return_value = {
                            'tenant_id': mock_inst.project_id,
                            'binding:vif_type': 'binding_failed'}
        nw_req = objects.NetworkRequestList(
            objects = [objects.NetworkRequest(port_id='fake_id')])

        self.assertRaises(exception.PortBindingFailed,
                          self.api.allocate_for_instance,
                          mock.sentinel.ctx, mock_inst,
                          requested_networks=nw_req)

    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_get_floating_ip_by_address_not_found_neutron_not_found(self,
                                                                mock_ntrn):
        mock_nc = mock.Mock()
        mock_ntrn.return_value = mock_nc
        mock_nc.list_floatingips.side_effect = exceptions.NotFound()
        address = '172.24.4.227'
        self.assertRaises(exception.FloatingIpNotFoundForAddress,
                          self.api.get_floating_ip_by_address,
                          self.context, address)

    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_get_floating_ip_by_address_not_found_neutron_raises_non404(self,
                                                                mock_ntrn):
        mock_nc = mock.Mock()
        mock_ntrn.return_value = mock_nc
        mock_nc.list_floatingips.side_effect = exceptions.InternalServerError()
        address = '172.24.4.227'
        self.assertRaises(exceptions.InternalServerError,
                          self.api.get_floating_ip_by_address,
                          self.context, address)

    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_get_floating_ips_by_project_not_found(self, mock_ntrn):
        mock_nc = mock.Mock()
        mock_ntrn.return_value = mock_nc
        mock_nc.list_floatingips.side_effect = exceptions.NotFound()
        fips = self.api.get_floating_ips_by_project(self.context)
        self.assertEqual([], fips)

    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_get_floating_ips_by_project_not_found_legacy(self, mock_ntrn):
        # FIXME(danms): Remove this test along with the code path it tests
        # when bug 1513879 is fixed.
        mock_nc = mock.Mock()
        mock_ntrn.return_value = mock_nc
        # neutronclient doesn't raise NotFound in this scenario, it raises a
        # NeutronClientException with status_code=404
        notfound = exceptions.NeutronClientException(status_code=404)
        mock_nc.list_floatingips.side_effect = notfound
        fips = self.api.get_floating_ips_by_project(self.context)
        self.assertEqual([], fips)

    @mock.patch('nova.network.neutronv2.api.get_client')
    def test_get_floating_ips_by_project_raises_non404(self, mock_ntrn):
        mock_nc = mock.Mock()
        mock_ntrn.return_value = mock_nc
        mock_nc.list_floatingips.side_effect = exceptions.InternalServerError()
        self.assertRaises(exceptions.InternalServerError,
                          self.api.get_floating_ips_by_project,
                          self.context)


class TestNeutronv2ModuleMethods(test.NoDBTestCase):

    def test_gather_port_ids_and_networks_wrong_params(self):
        api = neutronapi.API()

        # Test with networks not None and port_ids is None
        self.assertRaises(exception.NovaException,
                          api._gather_port_ids_and_networks,
                          'fake_context', 'fake_instance',
                          [{'network': {'name': 'foo'}}], None)

        # Test with networks is None and port_ids not None
        self.assertRaises(exception.NovaException,
                          api._gather_port_ids_and_networks,
                          'fake_context', 'fake_instance',
                          None, ['list', 'of', 'port_ids'])

    def test_ensure_requested_network_ordering_no_preference_ids(self):
        l = [1, 2, 3]

        neutronapi._ensure_requested_network_ordering(
            lambda x: x,
            l,
            None)

    def test_ensure_requested_network_ordering_no_preference_hashes(self):
        l = [{'id': 3}, {'id': 1}, {'id': 2}]

        neutronapi._ensure_requested_network_ordering(
            lambda x: x['id'],
            l,
            None)

        # No preference given: original order is preserved.
        self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}])

    def test_ensure_requested_network_ordering_with_preference(self):
        l = [{'id': 3}, {'id': 1}, {'id': 2}]

        neutronapi._ensure_requested_network_ordering(
            lambda x: x['id'],
            l,
            [1, 2, 3])

        # Reordered in place to match the preference list.
        self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}])


class TestNeutronv2Portbinding(TestNeutronv2Base):

    def test_allocate_for_instance_portbinding(self):
self._allocate_for_instance(1, portbinding=True, bind_host_id=self.instance.get('host')) def test_populate_neutron_extension_values_binding(self): api = neutronapi.API() neutronapi.get_client(mox.IgnoreArg()).AndReturn( self.moxed_client) self.moxed_client.list_extensions().AndReturn( {'extensions': [{'name': constants.PORTBINDING_EXT}]}) self.mox.ReplayAll() host_id = 'my_host_id' instance = {'host': host_id} port_req_body = {'port': {}} api._populate_neutron_extension_values(self.context, instance, None, port_req_body, bind_host_id=host_id) self.assertEqual(host_id, port_req_body['port']['binding:host_id']) self.assertFalse(port_req_body['port'].get('binding:profile')) @mock.patch.object(pci_whitelist, 'get_pci_device_devspec') @mock.patch.object(pci_manager, 'get_instance_pci_devs') def test_populate_neutron_extension_values_binding_sriov(self, mock_get_instance_pci_devs, mock_get_pci_device_devspec): api = neutronapi.API() host_id = 'my_host_id' instance = {'host': host_id} port_req_body = {'port': {}} pci_req_id = 'my_req_id' pci_dev = {'vendor_id': '1377', 'product_id': '0047', 'address': '0000:0a:00.1', } PciDevice = collections.namedtuple('PciDevice', ['vendor_id', 'product_id', 'address']) mydev = PciDevice(**pci_dev) profile = {'pci_vendor_info': '1377:0047', 'pci_slot': '0000:0a:00.1', 'physical_network': 'phynet1', } mock_get_instance_pci_devs.return_value = [mydev] devspec = mock.Mock() devspec.get_tags.return_value = {'physical_network': 'phynet1'} mock_get_pci_device_devspec.return_value = devspec api._populate_neutron_binding_profile(instance, pci_req_id, port_req_body) self.assertEqual(profile, port_req_body['port']['binding:profile']) def _test_update_port_binding_false(self, func_name, *args): api = neutronapi.API() func = getattr(api, func_name) self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), refresh_cache=True).AndReturn(False) self.mox.ReplayAll() func(*args) def 
_test_update_port_binding_true(self, expected_bind_host, func_name, *args): api = neutronapi.API() func = getattr(api, func_name) self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), refresh_cache=True).AndReturn(True) neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn( self.moxed_client) search_opts = {'device_id': self.instance['uuid'], 'tenant_id': self.instance['project_id']} ports = {'ports': [{'id': 'test1'}]} self.moxed_client.list_ports(**search_opts).AndReturn(ports) port_req_body = {'port': {'binding:host_id': expected_bind_host}} self.moxed_client.update_port('test1', port_req_body).AndReturn(None) self.mox.ReplayAll() func(*args) def _test_update_port_true_exception(self, expected_bind_host, func_name, *args): api = neutronapi.API() func = getattr(api, func_name) self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), refresh_cache=True).AndReturn(True) neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn( self.moxed_client) search_opts = {'device_id': self.instance['uuid'], 'tenant_id': self.instance['project_id']} ports = {'ports': [{'id': 'test1'}]} self.moxed_client.list_ports(**search_opts).AndReturn(ports) port_req_body = {'port': {'binding:host_id': expected_bind_host}} self.moxed_client.update_port('test1', port_req_body).AndRaise( Exception("fail to update port")) self.mox.ReplayAll() self.assertRaises(NEUTRON_CLIENT_EXCEPTION, func, *args) def test_migrate_instance_finish_binding_false(self): self._test_update_port_binding_false('migrate_instance_finish', self.context, None, {'dest_compute': 'fake'}) def test_migrate_instance_finish_binding_true(self): migration = {'source_compute': self.instance.get('host'), 'dest_compute': 'dest_host'} instance = self._fake_instance_object(self.instance) self._test_update_port_binding_true('dest_host', 'migrate_instance_finish', self.context, instance, migration) def 
test_migrate_instance_finish_binding_true_exception(self): migration = {'source_compute': self.instance.get('host'), 'dest_compute': 'dest_host'} instance = self._fake_instance_object(self.instance) self._test_update_port_true_exception('dest_host', 'migrate_instance_finish', self.context, instance, migration) def test_setup_instance_network_on_host_false(self): self._test_update_port_binding_false( 'setup_instance_network_on_host', self.context, None, 'fake_host') def test_setup_instance_network_on_host_true(self): instance = self._fake_instance_object(self.instance) self._test_update_port_binding_true('fake_host', 'setup_instance_network_on_host', self.context, instance, 'fake_host') def test_setup_instance_network_on_host_exception(self): instance = self._fake_instance_object(self.instance) self._test_update_port_true_exception( 'fake_host', 'setup_instance_network_on_host', self.context, instance, 'fake_host') def test_associate_not_implemented(self): api = neutronapi.API() self.assertRaises(NotImplementedError, api.associate, self.context, 'id') class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base): def setUp(self): super(TestNeutronv2ExtraDhcpOpts, self).setUp() neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) def test_allocate_for_instance_1_with_extra_dhcp_opts_turned_off(self): self._allocate_for_instance(1, extra_dhcp_opts=False) def test_allocate_for_instance_extradhcpopts(self): dhcp_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] self._allocate_for_instance(1, dhcp_options=dhcp_opts) class TestNeutronv2NeutronHostnameDNS(TestNeutronv2Base): def setUp(self): super(TestNeutronv2NeutronHostnameDNS, self).setUp() neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) def test_allocate_for_instance_create_port(self): # The port's dns_name attribute 
should be set by the port create # request in allocate_for_instance self._allocate_for_instance(1, dns_extension=True) def test_allocate_for_instance_with_requested_port(self): # The port's dns_name attribute should be set by the port update # request in allocate_for_instance requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) self._allocate_for_instance(net_idx=1, dns_extension=True, requested_networks=requested_networks) def test_allocate_for_instance_port_dns_name_preset_equal_hostname(self): # The port's dns_name attribute should be set by the port update # request in allocate_for_instance. The port's dns_name was preset by # the user with a value equal to the instance's hostname requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) self._allocate_for_instance(net_idx=1, dns_extension=True, requested_networks=requested_networks, _dns_name='test-instance') def test_allocate_for_instance_port_dns_name_preset_noteq_hostname(self): # If a pre-existing port has dns_name set, an exception should be # raised if dns_name is not equal to the instance's hostname requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) api = self._stub_allocate_for_instance( requested_networks=requested_networks, dns_extension=True, _break='pre_list_networks', _dns_name='my-instance') self.assertRaises(exception.PortNotUsableDNS, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) class TestNeutronv2NeutronHostnameDNSPortbinding(TestNeutronv2Base): def test_allocate_for_instance_create_port(self): # The port's dns_name attribute should be set by the port create # request in allocate_for_instance self._allocate_for_instance(1, portbinding=True, dns_extension=True, bind_host_id=self.instance.get('host')) def test_allocate_for_instance_with_requested_port(self): # The port's dns_name attribute should 
be set by the port update # request in allocate_for_instance requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) self._allocate_for_instance(net_idx=1, dns_extension=True, portbinding=True, bind_host_id=self.instance.get('host'), requested_networks=requested_networks) def test_allocate_for_instance_create_port_with_dns_domain(self): # The port's dns_name attribute should be set by the port update # request in _update_port_dns_name. This should happen only when the # port binding extension is enabled and the port's network has a # non-blank dns_domain attribute self._allocate_for_instance(11, portbinding=True, dns_extension=True, bind_host_id=self.instance.get('host')) def test_allocate_for_instance_with_requested_port_with_dns_domain(self): # The port's dns_name attribute should be set by the port update # request in _update_port_dns_name. This should happen only when the # port binding extension is enabled and the port's network has a # non-blank dns_domain attribute requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) self._allocate_for_instance(net_idx=11, dns_extension=True, portbinding=True, bind_host_id=self.instance.get('host'), requested_networks=requested_networks) class TestNeutronClientForAdminScenarios(test.NoDBTestCase): def setUp(self): super(TestNeutronClientForAdminScenarios, self).setUp() # NOTE(morganfainberg): The real configuration fixture here is used # instead o the already existing fixtures to ensure that the new # config options are automatically deregistered at the end of the # test run. Without the use of this fixture, the config options # from the plugin(s) would persist for all subsequent tests from when # these are run (due to glonal conf object) and not be fully # representative of a "clean" slate at the start of a test. 
self.config_fixture = self.useFixture(config_fixture.Config()) oslo_opts = ks_loading.get_auth_plugin_conf_options('v2password') self.config_fixture.register_opts(oslo_opts, 'neutron') @requests_mock.mock() def _test_get_client_for_admin(self, req_mock, use_id=False, admin_context=False): token_value = uuid.uuid4().hex auth_url = 'http://anyhost/auth' token_resp = V2Token(token_id=token_value) req_mock.post(auth_url + '/tokens', json=token_resp) self.flags(url='http://anyhost/', group='neutron') self.flags(auth_type='v2password', group='neutron') self.flags(auth_url=auth_url, group='neutron') self.flags(timeout=30, group='neutron') if use_id: self.flags(tenant_id='tenant_id', group='neutron') self.flags(user_id='user_id', group='neutron') if admin_context: my_context = context.get_admin_context() else: my_context = context.RequestContext('userid', 'my_tenantid', auth_token='token') # clean global neutronapi.reset_state() if admin_context: # Note that the context does not contain a token but is # an admin context which will force an elevation to admin # credentials. context_client = neutronapi.get_client(my_context) else: # Note that the context is not elevated, but the True is passed in # which will force an elevation to admin credentials even though # the context has an auth_token. 
context_client = neutronapi.get_client(my_context, True) admin_auth = neutronapi._ADMIN_AUTH self.assertEqual(CONF.neutron.auth_url, admin_auth.auth_url) self.assertEqual(CONF.neutron.password, admin_auth.password) if use_id: self.assertEqual(CONF.neutron.tenant_id, admin_auth.tenant_id) self.assertEqual(CONF.neutron.user_id, admin_auth.user_id) self.assertIsNone(admin_auth.tenant_name) self.assertIsNone(admin_auth.username) else: self.assertEqual(CONF.neutron.username, admin_auth.username) self.assertIsNone(admin_auth.tenant_id) self.assertIsNone(admin_auth.user_id) self.assertEqual(CONF.neutron.timeout, neutronapi._SESSION.timeout) self.assertEqual( token_value, context_client.httpclient.auth.get_token(neutronapi._SESSION)) self.assertEqual( CONF.neutron.url, context_client.httpclient.get_endpoint()) def test_get_client_for_admin(self): self._test_get_client_for_admin() def test_get_client_for_admin_with_id(self): self._test_get_client_for_admin(use_id=True) def test_get_client_for_admin_context(self): self._test_get_client_for_admin(admin_context=True) def test_get_client_for_admin_context_with_id(self): self._test_get_client_for_admin(use_id=True, admin_context=True) nova-13.0.0/nova/tests/unit/network/security_group/0000775000567000056710000000000012701410205023527 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/network/security_group/__init__.py0000664000567000056710000000000012701407773025646 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/network/security_group/test_neutron_driver.py0000664000567000056710000004635412701407773030241 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from mox3 import mox from neutronclient.common import exceptions as n_exc from neutronclient.v2_0 import client from six.moves import range from nova import context from nova import exception from nova.network.neutronv2 import api as neutronapi from nova.network.security_group import neutron_driver from nova.network.security_group import openstack_driver from nova import objects from nova import test from nova.tests import uuidsentinel as uuids class TestNeutronDriver(test.NoDBTestCase): def setUp(self): super(TestNeutronDriver, self).setUp() self.mox.StubOutWithMock(neutronapi, 'get_client') self.moxed_client = self.mox.CreateMock(client.Client) neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) self.context = context.RequestContext('userid', 'my_tenantid') setattr(self.context, 'auth_token', 'bff4a5a6b9eb4ea2a6efec6eefb77936') def test_list_with_project(self): project_id = '0af70a4d22cf4652824ddc1f2435dd85' security_groups_list = {'security_groups': []} self.moxed_client.list_security_groups(tenant_id=project_id).AndReturn( security_groups_list) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() sg_api.list(self.context, project=project_id) def test_list_with_all_tenants_and_admin_context(self): project_id = '0af70a4d22cf4652824ddc1f2435dd85' search_opts = {'all_tenants': 1} security_groups_list = {'security_groups': []} admin_context = context.RequestContext('user1', project_id, True) self.mox.ReplayAll() with mock.patch.object( self.moxed_client, 'list_security_groups', 
return_value=security_groups_list) as mock_list_secgroup: sg_api = neutron_driver.SecurityGroupAPI() sg_api.list(admin_context, project=project_id, search_opts=search_opts) mock_list_secgroup.assert_called_once_with() def test_list_without_all_tenants_and_admin_context(self): project_id = '0af70a4d22cf4652824ddc1f2435dd85' security_groups_list = {'security_groups': []} admin_context = context.RequestContext('user1', project_id, True) self.mox.ReplayAll() with mock.patch.object( self.moxed_client, 'list_security_groups', return_value=security_groups_list) as mock_list_secgroup: sg_api = neutron_driver.SecurityGroupAPI() sg_api.list(admin_context, project=project_id) mock_list_secgroup.assert_called_once_with(tenant_id=project_id) def test_list_with_all_tenants_sec_name_and_admin_context(self): project_id = '0af70a4d22cf4652824ddc1f2435dd85' search_opts = {'all_tenants': 1} security_group_names = ['secgroup_ssh'] security_groups_list = {'security_groups': []} admin_context = context.RequestContext('user1', project_id, True) self.mox.ReplayAll() with mock.patch.object( self.moxed_client, 'list_security_groups', return_value=security_groups_list) as mock_list_secgroup: sg_api = neutron_driver.SecurityGroupAPI() sg_api.list(admin_context, project=project_id, names=security_group_names, search_opts=search_opts) mock_list_secgroup.assert_called_once_with( name=security_group_names, tenant_id=project_id) def test_list_with_all_tenants_sec_name_ids_and_admin_context(self): project_id = '0af70a4d22cf4652824ddc1f2435dd85' search_opts = {'all_tenants': 1} security_group_names = ['secgroup_ssh'] security_group_ids = ['id1'] security_groups_list = {'security_groups': []} admin_context = context.RequestContext('user1', project_id, True) self.mox.ReplayAll() with mock.patch.object( self.moxed_client, 'list_security_groups', return_value=security_groups_list) as mock_list_secgroup: sg_api = neutron_driver.SecurityGroupAPI() sg_api.list(admin_context, project=project_id, 
names=security_group_names, ids=security_group_ids, search_opts=search_opts) mock_list_secgroup.assert_called_once_with( name=security_group_names, id=security_group_ids, tenant_id=project_id) def test_list_with_all_tenants_not_admin(self): search_opts = {'all_tenants': 1} security_groups_list = {'security_groups': []} self.mox.ReplayAll() with mock.patch.object( self.moxed_client, 'list_security_groups', return_value=security_groups_list) as mock_list_secgroup: sg_api = neutron_driver.SecurityGroupAPI() sg_api.list(self.context, project=self.context.tenant, search_opts=search_opts) mock_list_secgroup.assert_called_once_with( tenant_id=self.context.tenant) def test_get_with_name_duplicated(self): sg_name = 'web_server' expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5' list_security_groups = {'security_groups': [{'name': sg_name, 'id': expected_sg_id, 'tenant_id': self.context.tenant, 'description': 'server', 'rules': []} ]} self.moxed_client.list_security_groups(name=sg_name, fields='id', tenant_id=self.context.tenant).AndReturn(list_security_groups) expected_sg = {'security_group': {'name': sg_name, 'id': expected_sg_id, 'tenant_id': self.context.tenant, 'description': 'server', 'rules': []}} self.moxed_client.show_security_group(expected_sg_id).AndReturn( expected_sg) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() observed_sg = sg_api.get(self.context, name=sg_name) expected_sg['security_group']['project_id'] = self.context.tenant del expected_sg['security_group']['tenant_id'] self.assertEqual(expected_sg['security_group'], observed_sg) def test_get_with_invalid_name(self): sg_name = 'invalid_name' expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5' list_security_groups = {'security_groups': [{'name': sg_name, 'id': expected_sg_id, 'tenant_id': self.context.tenant, 'description': 'server', 'rules': []} ]} self.moxed_client.list_security_groups(name=sg_name, fields='id', tenant_id=self.context.tenant).AndReturn(list_security_groups) 
self.moxed_client.show_security_group(expected_sg_id).AndRaise( TypeError) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() self.assertRaises(exception.SecurityGroupNotFound, sg_api.get, self.context, name=sg_name) def test_create_security_group_with_bad_request(self): name = 'test-security-group' description = None body = {'security_group': {'name': name, 'description': description}} message = "Invalid input. Reason: 'None' is not a valid string." self.moxed_client.create_security_group( body).AndRaise(n_exc.BadRequest(message=message)) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() self.assertRaises(exception.Invalid, sg_api.create_security_group, self.context, name, description) def test_create_security_group_exceed_quota(self): name = 'test-security-group' description = 'test-security-group' body = {'security_group': {'name': name, 'description': description}} message = "Quota exceeded for resources: ['security_group']" self.moxed_client.create_security_group( body).AndRaise(n_exc.NeutronClientException(status_code=409, message=message)) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() self.assertRaises(exception.SecurityGroupLimitExceeded, sg_api.create_security_group, self.context, name, description) def test_create_security_group_rules_exceed_quota(self): vals = {'protocol': 'tcp', 'cidr': '0.0.0.0/0', 'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47', 'group_id': None, 'from_port': 1025, 'to_port': 1025} body = {'security_group_rules': [{'remote_group_id': None, 'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4', 'port_range_max': 1025, 'port_range_min': 1025, 'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47', 'remote_ip_prefix': '0.0.0.0/0'}]} name = 'test-security-group' message = "Quota exceeded for resources: ['security_group_rule']" self.moxed_client.create_security_group_rule( body).AndRaise(n_exc.NeutronClientException(status_code=409, message=message)) self.mox.ReplayAll() 
sg_api = neutron_driver.SecurityGroupAPI() self.assertRaises(exception.SecurityGroupLimitExceeded, sg_api.add_rules, self.context, None, name, [vals]) def test_create_security_group_rules_bad_request(self): vals = {'protocol': 'icmp', 'cidr': '0.0.0.0/0', 'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47', 'group_id': None, 'to_port': 255} body = {'security_group_rules': [{'remote_group_id': None, 'direction': 'ingress', 'protocol': 'icmp', 'ethertype': 'IPv4', 'port_range_max': 255, 'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47', 'remote_ip_prefix': '0.0.0.0/0'}]} name = 'test-security-group' message = "ICMP code (port-range-max) 255 is provided but ICMP type" \ " (port-range-min) is missing" self.moxed_client.create_security_group_rule( body).AndRaise(n_exc.NeutronClientException(status_code=400, message=message)) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() self.assertRaises(exception.Invalid, sg_api.add_rules, self.context, None, name, [vals]) def test_list_security_group_with_no_port_range_and_not_tcp_udp_icmp(self): sg1 = {'description': 'default', 'id': '07f1362f-34f6-4136-819a-2dcde112269e', 'name': 'default', 'tenant_id': 'c166d9316f814891bcb66b96c4c891d6', 'security_group_rules': [{'direction': 'ingress', 'ethertype': 'IPv4', 'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb', 'port_range_max': None, 'port_range_min': None, 'protocol': '51', 'remote_group_id': None, 'remote_ip_prefix': None, 'security_group_id': '07f1362f-34f6-4136-819a-2dcde112269e', 'tenant_id': 'c166d9316f814891bcb66b96c4c891d6'}]} self.moxed_client.list_security_groups().AndReturn( {'security_groups': [sg1]}) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() result = sg_api.list(self.context) expected = [{'rules': [{'from_port': -1, 'protocol': '51', 'to_port': -1, 'parent_group_id': '07f1362f-34f6-4136-819a-2dcde112269e', 'cidr': '0.0.0.0/0', 'group_id': None, 'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb'}], 'project_id': 
'c166d9316f814891bcb66b96c4c891d6', 'id': '07f1362f-34f6-4136-819a-2dcde112269e', 'name': 'default', 'description': 'default'}] self.assertEqual(expected, result) def test_instances_security_group_bindings(self): server_id = 'c5a20e8d-c4b0-47cf-9dca-ebe4f758acb1' port1_id = '4c505aec-09aa-47bc-bcc0-940477e84dc0' port2_id = 'b3b31a53-6e29-479f-ae5c-00b7b71a6d44' sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4' sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584' servers = [{'id': server_id}] ports = [{'id': port1_id, 'device_id': server_id, 'security_groups': [sg1_id]}, {'id': port2_id, 'device_id': server_id, 'security_groups': [sg2_id]}] port_list = {'ports': ports} sg1 = {'id': sg1_id, 'name': 'wol'} sg2 = {'id': sg2_id, 'name': 'eor'} security_groups_list = {'security_groups': [sg1, sg2]} sg_bindings = {server_id: [{'name': 'wol'}, {'name': 'eor'}]} self.moxed_client.list_ports(device_id=[server_id]).AndReturn( port_list) self.moxed_client.list_security_groups( id=mox.SameElementsAs([sg2_id, sg1_id])).AndReturn( security_groups_list) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() result = sg_api.get_instances_security_groups_bindings( self.context, servers) self.assertEqual(sg_bindings, result) def _test_instances_security_group_bindings_scale(self, num_servers): max_query = 150 sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4' sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584' sg1 = {'id': sg1_id, 'name': 'wol'} sg2 = {'id': sg2_id, 'name': 'eor'} security_groups_list = {'security_groups': [sg1, sg2]} servers = [] device_ids = [] ports = [] sg_bindings = {} for i in range(0, num_servers): server_id = "server-%d" % i port_id = "port-%d" % i servers.append({'id': server_id}) device_ids.append(server_id) ports.append({'id': port_id, 'device_id': server_id, 'security_groups': [sg1_id, sg2_id]}) sg_bindings[server_id] = [{'name': 'wol'}, {'name': 'eor'}] for x in range(0, num_servers, max_query): self.moxed_client.list_ports( device_id=device_ids[x:x + 
max_query]).\ AndReturn({'ports': ports[x:x + max_query]}) self.moxed_client.list_security_groups( id=mox.SameElementsAs([sg2_id, sg1_id])).AndReturn( security_groups_list) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() result = sg_api.get_instances_security_groups_bindings( self.context, servers) self.assertEqual(sg_bindings, result) def test_instances_security_group_bindings_less_than_max(self): self._test_instances_security_group_bindings_scale(100) def test_instances_security_group_bindings_max(self): self._test_instances_security_group_bindings_scale(150) def test_instances_security_group_bindings_more_then_max(self): self._test_instances_security_group_bindings_scale(300) def test_instances_security_group_bindings_with_hidden_sg(self): servers = [{'id': 'server_1'}] ports = [{'id': '1', 'device_id': 'dev_1', 'security_groups': ['1']}, {'id': '2', 'device_id': 'dev_1', 'security_groups': ['2']}] port_list = {'ports': ports} sg1 = {'id': '1', 'name': 'wol'} # User doesn't have access to sg2 security_groups_list = {'security_groups': [sg1]} sg_bindings = {'dev_1': [{'name': 'wol'}]} self.moxed_client.list_ports(device_id=['server_1']).AndReturn( port_list) self.moxed_client.\ list_security_groups(id=mox.SameElementsAs(['1', '2'])).AndReturn( security_groups_list) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() result = sg_api.get_instances_security_groups_bindings( self.context, servers) self.assertEqual(sg_bindings, result) def test_instance_empty_security_groups(self): port_list = {'ports': [{'id': 1, 'device_id': uuids.instance, 'security_groups': []}]} self.moxed_client.list_ports( device_id=[uuids.instance]).AndReturn(port_list) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() result = sg_api.get_instance_security_groups( self.context, objects.Instance(uuid=uuids.instance)) self.assertEqual([], result) class TestNeutronDriverWithoutMock(test.NoDBTestCase): def test_validate_property(self): sg_api = 
neutron_driver.SecurityGroupAPI() sg_api.validate_property('foo', 'name', None) sg_api.validate_property('', 'name', None) self.assertRaises(exception.Invalid, sg_api.validate_property, 'a' * 256, 'name', None) self.assertRaises(exception.Invalid, sg_api.validate_property, None, 'name', None) def test_populate_security_groups(self): sg_api = neutron_driver.SecurityGroupAPI() r = sg_api.populate_security_groups('ignore') self.assertIsInstance(r, objects.SecurityGroupList) self.assertEqual(0, len(r)) class TestGetter(test.NoDBTestCase): @mock.patch('nova.network.security_group.openstack_driver.' '_get_openstack_security_group_driver') def test_caches(self, mock_get): getter = openstack_driver.get_openstack_security_group_driver openstack_driver.DRIVER_CACHE = {} getter(False) getter(False) getter(True) getter(False) self.assertEqual(2, len(mock_get.call_args_list)) self.assertEqual({True: mock_get.return_value, False: mock_get.return_value}, openstack_driver.DRIVER_CACHE) nova-13.0.0/nova/tests/unit/network/test_config.py0000664000567000056710000000604312701407773023345 0ustar jenkinsjenkins00000000000000# Copyright 2016 HPE, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import nova.network
import nova.network.security_group.neutron_driver
import nova.network.security_group.openstack_driver as sgapi
import nova.test


class FileATicket(object):
    """Stand-in driver class used to exercise the custom-class config paths."""

    def __init__(self, **kwargs):
        pass


class NetworkAPIConfigTest(nova.test.NoDBTestCase):
    """Test the transition from legacy to use_neutron config options."""

    def test_default(self):
        # With no flags set we fall back to nova-network.
        self.assertIsInstance(nova.network.API(), nova.network.api.API)

    def test_use_neutron(self):
        self.flags(use_neutron=True)
        self.assertIsInstance(nova.network.API(),
                              nova.network.neutronv2.api.API)

    def test_dont_use_neutron(self):
        self.flags(use_neutron=False)
        self.assertIsInstance(nova.network.API(), nova.network.api.API)

    def test_legacy_use_neutron(self):
        """use neutron even if config is false because of legacy option."""
        self.flags(use_neutron=False)
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        self.assertIsInstance(nova.network.API(),
                              nova.network.neutronv2.api.API)

    def test_legacy_custom_class(self):
        """use neutron even if config is false because of legacy option."""
        self.flags(network_api_class=
                   'nova.tests.unit.network.test_config.FileATicket')
        self.assertIsInstance(nova.network.API(), FileATicket)


class SecurityGroupAPIConfigTest(nova.test.NoDBTestCase):
    """Check which security-group driver each config combination selects."""

    def test_use_neutron(self):
        self.flags(use_neutron=True)
        driver = sgapi.get_openstack_security_group_driver()
        self.assertIsInstance(
            driver,
            nova.network.security_group.neutron_driver.SecurityGroupAPI)

    def test_sg_nova(self):
        self.flags(security_group_api='nova')
        driver = sgapi.get_openstack_security_group_driver()
        self.assertIsInstance(driver, nova.compute.api.SecurityGroupAPI)

    def test_sg_neutron(self):
        self.flags(security_group_api='neutron')
        driver = sgapi.get_openstack_security_group_driver()
        self.assertIsInstance(
            driver,
            nova.network.security_group.neutron_driver.SecurityGroupAPI)

    def test_sg_custom(self):
        self.flags(security_group_api=
                   'nova.tests.unit.network.test_config.FileATicket')
        driver = sgapi.get_openstack_security_group_driver()
        self.assertIsInstance(driver, FileATicket)


# --- next archive member: nova/tests/unit/network/test_l3.py ---

from nova.network import l3
from nova import test


class L3DriverTestCase(test.NoDBTestCase):
    """Both concrete L3 drivers must match the L3Driver public API."""

    def test_linuxnetl3_driver_signatures(self):
        self.assertPublicAPISignatures(l3.L3Driver, l3.LinuxNetL3)

    def test_nulll3_driver_signatures(self):
        self.assertPublicAPISignatures(l3.L3Driver, l3.NullL3)
"""
Test the base class for the servicegroup API
"""

import mock

from nova import servicegroup
from nova import test


class ServiceGroupApiTestCase(test.NoDBTestCase):
    def setUp(self):
        super(ServiceGroupApiTestCase, self).setUp()
        self.flags(servicegroup_driver='db')
        self.servicegroup_api = servicegroup.API()
        self.driver = self.servicegroup_api._driver

    def test_join(self):
        """join() is delegated straight to the configured driver."""
        member = {'host': "fake-host", "topic": "compute"}
        self.driver.join = mock.MagicMock(return_value=None)
        joined = self.servicegroup_api.join(member, "group")
        self.assertIsNone(joined)
        self.driver.join.assert_called_with(member, "group", None)

    def test_service_is_up(self):
        """forced_down overrides whatever the driver says about liveness."""
        member = {"host": "fake-host",
                  "topic": "compute",
                  "forced_down": False}
        driver = self.servicegroup_api._driver
        # While not forced down, the driver's answer is returned verbatim.
        for expected in (True, False):
            driver.is_up = mock.MagicMock(return_value=expected)
            self.assertIs(self.servicegroup_api.service_is_up(member),
                          expected)
            driver.is_up.assert_called_with(member)
        # Once forced down, the service is reported down unconditionally.
        member["forced_down"] = True
        for _ in (True, False):
            self.assertIs(self.servicegroup_api.service_is_up(member), False)
import mock

from nova import servicegroup
from nova import test


class MemcachedServiceGroupTestCase(test.NoDBTestCase):
    @mock.patch('nova.cache_utils.get_memcached_client')
    def setUp(self, mgc_mock):
        super(MemcachedServiceGroupTestCase, self).setUp()
        # Replace the memcached client so no real server is contacted.
        self.mc_client = mock.MagicMock()
        mgc_mock.return_value = self.mc_client
        self.flags(memcached_servers='ignored',
                   servicegroup_driver='mc')
        self.servicegroup_api = servicegroup.API()

    def test_is_up(self):
        """A service is up iff its '<topic>:<host>' memcached key exists."""
        service_ref = {'host': 'fake-host', 'topic': 'compute'}
        # Missing key -> considered down.
        self.mc_client.get.return_value = None
        self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
        self.mc_client.get.assert_called_once_with('compute:fake-host')
        self.mc_client.reset_mock()
        # Present key -> considered up.
        self.mc_client.get.return_value = True
        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        self.mc_client.get.assert_called_once_with('compute:fake-host')

    def test_join(self):
        """join() schedules the periodic _report_state heartbeat."""
        service = mock.MagicMock(report_interval=1)
        self.servicegroup_api.join('fake-host', 'fake-topic', service)
        heartbeat = self.servicegroup_api._driver._report_state
        service.tg.add_timer.assert_called_once_with(1, heartbeat, 5, service)

    def test_report_state(self):
        """A heartbeat refreshes the service's memcached key."""
        service = mock.MagicMock(
            model_disconnected=False,
            service_ref={'host': 'fake-host', 'topic': 'compute'})
        self.servicegroup_api._driver._report_state(service)
        self.mc_client.set.assert_called_once_with('compute:fake-host',
                                                   mock.ANY)
import mock
from oslo_db import exception as db_exception
import oslo_messaging as messaging
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils

from nova import objects
from nova import servicegroup
from nova import test


class DBServiceGroupTestCase(test.NoDBTestCase):
    def setUp(self):
        super(DBServiceGroupTestCase, self).setUp()
        self.down_time = 15
        self.flags(service_down_time=self.down_time,
                   servicegroup_driver='db')
        self.servicegroup_api = servicegroup.API()

    def test_is_up(self):
        """Liveness flips to down once service_down_time has elapsed."""
        now = timeutils.utcnow()
        service = objects.Service(
            host='fake-host',
            topic='compute',
            binary='nova-compute',
            created_at=now,
            updated_at=now,
            last_seen_up=now,
            forced_down=False,
        )
        time_fixture = self.useFixture(utils_fixture.TimeFixture(now))

        # Exactly at the last report time: up.
        self.assertTrue(self.servicegroup_api.service_is_up(service))
        # At the very edge of the down window: still up.
        time_fixture.advance_time_seconds(self.down_time)
        self.assertTrue(self.servicegroup_api.service_is_up(service))
        # One second past the window: down.
        time_fixture.advance_time_seconds(1)
        self.assertFalse(self.servicegroup_api.service_is_up(service))

        # "last_seen_up" says down, "updated_at" says up.
        # This can happen if we do a service disable/enable while it's down.
        service.updated_at = timeutils.utcnow()
        self.assertFalse(self.servicegroup_api.service_is_up(service))

    def test_join(self):
        """join() schedules the periodic _report_state heartbeat."""
        service = mock.MagicMock(report_interval=1)
        self.servicegroup_api.join('fake-host', 'fake-topic', service)
        heartbeat = self.servicegroup_api._driver._report_state
        service.tg.add_timer.assert_called_once_with(1, heartbeat, 5, service)

    @mock.patch.object(objects.Service, 'save')
    def test_report_state(self, upd_mock):
        """A heartbeat bumps report_count and persists the service row."""
        service_ref = objects.Service(host='fake-host', topic='compute',
                                      report_count=10)
        service = mock.MagicMock(model_disconnected=False,
                                 service_ref=service_ref)
        self.servicegroup_api._driver._report_state(service)
        upd_mock.assert_called_once_with()
        self.assertEqual(11, service_ref.report_count)
        self.assertFalse(service.model_disconnected)

    @mock.patch.object(objects.Service, 'save')
    def _test_report_state_error(self, exc_cls, upd_mock):
        # Helper: make Service.save raise exc_cls and verify the driver
        # swallows it (the call below fails the test if it propagates).
        upd_mock.side_effect = exc_cls("service save failed")
        service_ref = objects.Service(host='fake-host', topic='compute',
                                      report_count=10)
        service = mock.MagicMock(model_disconnected=False,
                                 service_ref=service_ref)
        self.servicegroup_api._driver._report_state(service)
        self.assertTrue(service.model_disconnected)

    def test_report_state_remote_error_handling(self):
        # Error handling when using the remote conductor.
        self.flags(use_local=False, group='conductor')
        self._test_report_state_error(messaging.RemoteError)

    def test_report_state_remote_error_handling_timeout(self):
        # Error handling when using the remote conductor.
        self.flags(use_local=False, group='conductor')
        self._test_report_state_error(messaging.MessagingTimeout)

    def test_report_state_remote_unexpected_error(self):
        # Unexpected errors must be handled as well.
        self.flags(use_local=False, group='conductor')
        self._test_report_state_error(RuntimeError)

    def test_report_state_local_error_handling(self):
        # With a local conductor the db driver must handle DB errors;
        # DBError is the oslo.db base class (e.g. for DBConnectionError).
        self.flags(use_local=True, group='conductor')
        self._test_report_state_error(db_exception.DBError)

    def test_report_state_local_unexpected_error(self):
        # Unexpected errors must be handled as well.
        self.flags(use_local=True, group='conductor')
        self._test_report_state_error(RuntimeError)
import mock

from nova import objects
from nova.objects import host_mapping
from nova.tests.unit.objects import test_cell_mapping
from nova.tests.unit.objects import test_objects


def get_db_mapping(mapped_cell=None, **updates):
    # Build a fake host_mappings DB row; when no cell row is supplied a
    # default one with id=42 is generated, and the cell_id FK column is
    # always kept consistent with the embedded cell_mapping row.
    db_mapping = {
        'id': 1,
        'cell_id': None,
        'host': 'fake-host',
        'created_at': None,
        'updated_at': None,
        }
    if mapped_cell:
        db_mapping["cell_mapping"] = mapped_cell
    else:
        db_mapping["cell_mapping"] = test_cell_mapping.get_db_mapping(id=42)
    db_mapping['cell_id'] = db_mapping["cell_mapping"]["id"]
    db_mapping.update(updates)
    return db_mapping


class _TestHostMappingObject(object):
    """Shared HostMapping tests, mixed into local and remote variants."""

    def _check_cell_map_value(self, db_val, cell_obj):
        # compare_obj comparator: the object's cell mapping must point at
        # the DB row's cell_id.
        self.assertEqual(db_val, cell_obj.id)

    @mock.patch.object(host_mapping.HostMapping,
                       '_get_by_host_from_db')
    def test_get_by_host(self, host_from_db):
        """get_by_host hydrates cell_mapping eagerly from the joined row."""
        fake_cell = test_cell_mapping.get_db_mapping(id=1)
        db_mapping = get_db_mapping(mapped_cell=fake_cell)
        host_from_db.return_value = db_mapping
        mapping_obj = objects.HostMapping().get_by_host(
            self.context, db_mapping['host'])
        host_from_db.assert_called_once_with(self.context,
                                             db_mapping['host'])
        with mock.patch.object(
                host_mapping.HostMapping, '_get_cell_mapping') as mock_load:
            self.compare_obj(mapping_obj, db_mapping,
                             subs={'cell_mapping': 'cell_id'},
                             comparators={
                                 'cell_mapping': self._check_cell_map_value})
            # Check that lazy loading isn't happening
            self.assertFalse(mock_load.called)

    def test_from_db_object_no_cell_map(self):
        """Test when db object does not have cell_mapping"""
        fake_cell = test_cell_mapping.get_db_mapping(id=1)
        db_mapping = get_db_mapping(mapped_cell=fake_cell)
        # If db object has no cell_mapping, lazy loading should occur
        db_mapping.pop("cell_mapping")
        fake_cell_obj = objects.CellMapping(self.context, **fake_cell)
        mapping_obj = objects.HostMapping()._from_db_object(
            self.context, objects.HostMapping(), db_mapping)
        with mock.patch.object(
                host_mapping.HostMapping, '_get_cell_mapping') as mock_load:
            mock_load.return_value = fake_cell_obj
            self.compare_obj(mapping_obj, db_mapping,
                             subs={'cell_mapping': 'cell_id'},
                             comparators={
                                 'cell_mapping': self._check_cell_map_value})
            # Check that cell_mapping is lazy loaded
            mock_load.assert_called_once_with()

    @mock.patch.object(host_mapping.HostMapping, '_create_in_db')
    def test_create(self, create_in_db):
        """create() writes only host and the cell FK to the DB layer."""
        fake_cell = test_cell_mapping.get_db_mapping(id=1)
        db_mapping = get_db_mapping(mapped_cell=fake_cell)
        db_mapping.pop("cell_mapping")
        host = db_mapping['host']
        create_in_db.return_value = db_mapping
        fake_cell_obj = objects.CellMapping(self.context, **fake_cell)
        mapping_obj = objects.HostMapping(self.context)
        mapping_obj.host = host
        mapping_obj.cell_mapping = fake_cell_obj
        mapping_obj.create()
        create_in_db.assert_called_once_with(self.context,
                                             {'host': host,
                                              'cell_id': fake_cell["id"]})
        self.compare_obj(mapping_obj, db_mapping,
                         subs={'cell_mapping': 'cell_id'},
                         comparators={
                             'cell_mapping': self._check_cell_map_value})

    @mock.patch.object(host_mapping.HostMapping, '_save_in_db')
    def test_save(self, save_in_db):
        """save() pushes an updated cell FK for the existing host row."""
        db_mapping = get_db_mapping()
        # This isn't needed here
        db_mapping.pop("cell_mapping")
        host = db_mapping['host']
        mapping_obj = objects.HostMapping(self.context)
        mapping_obj.host = host
        new_fake_cell = test_cell_mapping.get_db_mapping(id=10)
        fake_cell_obj = objects.CellMapping(self.context, **new_fake_cell)
        mapping_obj.cell_mapping = fake_cell_obj
        db_mapping.update({"cell_id": new_fake_cell["id"]})
        save_in_db.return_value = db_mapping
        mapping_obj.save()
        save_in_db.assert_called_once_with(self.context,
                                           db_mapping['host'],
                                           {'cell_id': new_fake_cell["id"],
                                            'host': host})
        self.compare_obj(mapping_obj, db_mapping,
                         subs={'cell_mapping': 'cell_id'},
                         comparators={
                             'cell_mapping': self._check_cell_map_value})

    @mock.patch.object(host_mapping.HostMapping, '_destroy_in_db')
    def test_destroy(self, destroy_in_db):
        # destroy() is keyed by host name only.
        mapping_obj = objects.HostMapping(self.context)
        mapping_obj.host = "fake-host2"
        mapping_obj.destroy()
        destroy_in_db.assert_called_once_with(self.context, "fake-host2")


class TestHostMappingObject(test_objects._LocalTest,
                            _TestHostMappingObject):
    pass


class TestRemoteHostMappingObject(test_objects._RemoteTest,
                                  _TestHostMappingObject):
    pass
mock_kp_create.assert_called_once_with(self.context, {'name': 'foo-keypair', 'public_key': 'keydata'}) @mock.patch('nova.db.key_pair_create') def test_recreate_fails(self, mock_kp_create): mock_kp_create.return_value = fake_keypair keypair_obj = keypair.KeyPair(context=self.context) keypair_obj.name = 'foo-keypair' keypair_obj.public_key = 'keydata' keypair_obj.create() self.assertRaises(exception.ObjectActionError, keypair_obj.create) mock_kp_create.assert_called_once_with(self.context, {'name': 'foo-keypair', 'public_key': 'keydata'}) @mock.patch('nova.db.key_pair_destroy') def test_destroy(self, mock_kp_destroy): keypair_obj = keypair.KeyPair(context=self.context) keypair_obj.id = 123 keypair_obj.user_id = 'fake-user' keypair_obj.name = 'foo-keypair' keypair_obj.destroy() mock_kp_destroy.assert_called_once_with( self.context, 'fake-user', 'foo-keypair') @mock.patch('nova.db.key_pair_destroy') def test_destroy_by_name(self, mock_kp_destroy): keypair.KeyPair.destroy_by_name(self.context, 'fake-user', 'foo-keypair') mock_kp_destroy.assert_called_once_with( self.context, 'fake-user', 'foo-keypair') @mock.patch('nova.db.key_pair_get_all_by_user') @mock.patch('nova.db.key_pair_count_by_user') def test_get_by_user(self, mock_kp_count, mock_kp_get): mock_kp_get.return_value = [fake_keypair] mock_kp_count.return_value = 1 keypairs = keypair.KeyPairList.get_by_user(self.context, 'fake-user') self.assertEqual(1, len(keypairs)) self.compare_obj(keypairs[0], fake_keypair) self.assertEqual(1, keypair.KeyPairList.get_count_by_user(self.context, 'fake-user')) mock_kp_get.assert_called_once_with(self.context, 'fake-user') mock_kp_count.assert_called_once_with(self.context, 'fake-user') def test_obj_make_compatible(self): keypair_obj = keypair.KeyPair(context=self.context) fake_keypair_copy = dict(fake_keypair) keypair_obj.obj_make_compatible(fake_keypair_copy, '1.1') self.assertNotIn('type', fake_keypair_copy) class TestMigrationObject(test_objects._LocalTest, 
import mock
from oslo_utils import timeutils

from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import migration
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_objects
from nova.tests import uuidsentinel

NOW = timeutils.utcnow().replace(microsecond=0)


def fake_db_migration(**updates):
    # Fake migrations DB row; callers override fields via keyword args.
    db_instance = {
        'created_at': NOW,
        'updated_at': None,
        'deleted_at': None,
        'deleted': False,
        'id': 123,
        'source_compute': 'compute-source',
        'dest_compute': 'compute-dest',
        'source_node': 'node-source',
        'dest_node': 'node-dest',
        'dest_host': 'host-dest',
        'old_instance_type_id': 42,
        'new_instance_type_id': 84,
        'instance_uuid': 'fake-uuid',
        'status': 'migrating',
        'migration_type': 'resize',
        'hidden': False,
        'memory_total': 123456,
        'memory_processed': 12345,
        'memory_remaining': 120000,
        'disk_total': 234567,
        'disk_processed': 23456,
        'disk_remaining': 230000,
        }
    if updates:
        db_instance.update(updates)
    return db_instance


class _TestMigrationObject(object):
    """Shared Migration object tests, mixed into local and remote variants.

    Several tests use mox record/replay: the db stubs must be recorded in
    exactly the order the code under test calls them.
    """

    def test_get_by_id(self):
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        self.mox.StubOutWithMock(db, 'migration_get')
        db.migration_get(ctxt, fake_migration['id']).AndReturn(fake_migration)
        self.mox.ReplayAll()
        mig = migration.Migration.get_by_id(ctxt, fake_migration['id'])
        self.compare_obj(mig, fake_migration)

    def test_get_by_instance_and_status(self):
        # NOTE(review): fake_migration['id'] is passed where an instance
        # uuid is expected; the stub is recorded with the same value so the
        # test is self-consistent -- confirm intent before changing.
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        self.mox.StubOutWithMock(db, 'migration_get_by_instance_and_status')
        db.migration_get_by_instance_and_status(ctxt,
                                                fake_migration['id'],
                                                'migrating'
                                                ).AndReturn(fake_migration)
        self.mox.ReplayAll()
        mig = migration.Migration.get_by_instance_and_status(
            ctxt, fake_migration['id'], 'migrating')
        self.compare_obj(mig, fake_migration)

    @mock.patch('nova.db.migration_get_in_progress_by_instance')
    def test_get_in_progress_by_instance(self, m_get_mig):
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        db_migrations = [fake_migration, dict(fake_migration, id=456)]
        m_get_mig.return_value = db_migrations
        migrations = migration.MigrationList.get_in_progress_by_instance(
            ctxt, fake_migration['instance_uuid'])
        self.assertEqual(2, len(migrations))
        for index, db_migration in enumerate(db_migrations):
            self.compare_obj(migrations[index], db_migration)

    def test_create(self):
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        self.mox.StubOutWithMock(db, 'migration_create')
        db.migration_create(ctxt, {'source_compute': 'foo',
                                   'migration_type': 'resize'}
                            ).AndReturn(fake_migration)
        self.mox.ReplayAll()
        mig = migration.Migration(context=ctxt)
        mig.source_compute = 'foo'
        mig.migration_type = 'resize'
        mig.create()
        # The object is refreshed from the row the DB layer returned.
        self.assertEqual(fake_migration['dest_compute'], mig.dest_compute)

    def test_recreate_fails(self):
        # A second create() on an already-created object must be rejected.
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        self.mox.StubOutWithMock(db, 'migration_create')
        db.migration_create(ctxt, {'source_compute': 'foo',
                                   'migration_type': 'resize'}
                            ).AndReturn(fake_migration)
        self.mox.ReplayAll()
        mig = migration.Migration(context=ctxt)
        mig.source_compute = 'foo'
        mig.migration_type = 'resize'
        mig.create()
        self.assertRaises(exception.ObjectActionError, mig.create)

    def test_create_fails_migration_type(self):
        # migration_type cannot be inferred when the flavor ids are set but
        # no type was given, so create() must fail before touching the DB.
        ctxt = context.get_admin_context()
        self.mox.StubOutWithMock(db, 'migration_create')
        self.mox.ReplayAll()
        mig = migration.Migration(context=ctxt,
                                  old_instance_type_id=42,
                                  new_instance_type_id=84)
        mig.source_compute = 'foo'
        self.assertRaises(exception.ObjectActionError, mig.create)

    def test_save(self):
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        self.mox.StubOutWithMock(db, 'migration_update')
        db.migration_update(ctxt, 123, {'source_compute': 'foo'}
                            ).AndReturn(fake_migration)
        self.mox.ReplayAll()
        mig = migration.Migration(context=ctxt)
        mig.id = 123
        mig.source_compute = 'foo'
        mig.save()
        self.assertEqual(fake_migration['dest_compute'], mig.dest_compute)

    def test_instance(self):
        # The lazy 'instance' property loads via instance_get_by_uuid.
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        fake_inst = fake_instance.fake_db_instance()
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(ctxt, fake_migration['instance_uuid'],
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst)
        mig = migration.Migration._from_db_object(ctxt,
                                                  migration.Migration(),
                                                  fake_migration)
        mig._context = ctxt
        self.mox.ReplayAll()
        self.assertEqual(mig.instance.host, fake_inst['host'])

    def test_instance_setter(self):
        # Assigning instance populates the cache so no DB lookup happens.
        migration = objects.Migration(instance_uuid=uuidsentinel.instance)
        inst = objects.Instance(uuid=uuidsentinel.instance)
        with mock.patch('nova.objects.Instance.get_by_uuid') as mock_get:
            migration.instance = inst
            migration.instance
            self.assertFalse(mock_get.called)
        self.assertEqual(inst, migration._cached_instance)
        self.assertEqual(inst, migration.instance)

    def test_get_unconfirmed_by_dest_compute(self):
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        db_migrations = [fake_migration, dict(fake_migration, id=456)]
        self.mox.StubOutWithMock(
            db, 'migration_get_unconfirmed_by_dest_compute')
        db.migration_get_unconfirmed_by_dest_compute(
            ctxt, 'window', 'foo').AndReturn(db_migrations)
        self.mox.ReplayAll()
        migrations = (
            migration.MigrationList.get_unconfirmed_by_dest_compute(
                ctxt, 'window', 'foo', use_slave=False))
        self.assertEqual(2, len(migrations))
        for index, db_migration in enumerate(db_migrations):
            self.compare_obj(migrations[index], db_migration)

    def test_get_in_progress_by_host_and_node(self):
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        db_migrations = [fake_migration, dict(fake_migration, id=456)]
        self.mox.StubOutWithMock(
            db, 'migration_get_in_progress_by_host_and_node')
        db.migration_get_in_progress_by_host_and_node(
            ctxt, 'host', 'node').AndReturn(db_migrations)
        self.mox.ReplayAll()
        migrations = (
            migration.MigrationList.get_in_progress_by_host_and_node(
                ctxt, 'host', 'node'))
        self.assertEqual(2, len(migrations))
        for index, db_migration in enumerate(db_migrations):
            self.compare_obj(migrations[index], db_migration)

    def test_get_by_filters(self):
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        db_migrations = [fake_migration, dict(fake_migration, id=456)]
        self.mox.StubOutWithMock(
            db, 'migration_get_all_by_filters')
        filters = {'foo': 'bar'}
        db.migration_get_all_by_filters(ctxt,
                                        filters).AndReturn(db_migrations)
        self.mox.ReplayAll()
        migrations = migration.MigrationList.get_by_filters(ctxt, filters)
        self.assertEqual(2, len(migrations))
        for index, db_migration in enumerate(db_migrations):
            self.compare_obj(migrations[index], db_migration)

    def test_migrate_old_resize_record(self):
        # Rows written before migration_type existed: differing flavor ids
        # are backfilled as a 'resize'.
        db_migration = dict(fake_db_migration(), migration_type=None)
        with mock.patch('nova.db.migration_get') as fake_get:
            fake_get.return_value = db_migration
            mig = objects.Migration.get_by_id(context.get_admin_context(), 1)
        self.assertTrue(mig.obj_attr_is_set('migration_type'))
        self.assertEqual('resize', mig.migration_type)

    def test_migrate_old_migration_record(self):
        # Rows written before migration_type existed: identical flavor ids
        # are backfilled as a 'migration'.
        db_migration = dict(
            fake_db_migration(), migration_type=None,
            old_instance_type_id=1, new_instance_type_id=1)
        with mock.patch('nova.db.migration_get') as fake_get:
            fake_get.return_value = db_migration
            mig = objects.Migration.get_by_id(context.get_admin_context(), 1)
        self.assertTrue(mig.obj_attr_is_set('migration_type'))
        self.assertEqual('migration', mig.migration_type)

    def test_migrate_unset_type_resize(self):
        # Same inference as above, but for in-memory objects with no type set.
        mig = objects.Migration(old_instance_type_id=1,
                                new_instance_type_id=2)
        self.assertEqual('resize', mig.migration_type)
        self.assertTrue(mig.obj_attr_is_set('migration_type'))

    def test_migrate_unset_type_migration(self):
        mig = objects.Migration(old_instance_type_id=1,
                                new_instance_type_id=1)
        self.assertEqual('migration', mig.migration_type)
        self.assertTrue(mig.obj_attr_is_set('migration_type'))

    @mock.patch('nova.db.migration_get_by_id_and_instance')
    def test_get_by_id_and_instance(self, fake_get):
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        fake_get.return_value = fake_migration
        migration = objects.Migration.get_by_id_and_instance(ctxt, '1', '1')
        self.compare_obj(migration, fake_migration)


class TestMigrationObject(test_objects._LocalTest,
                          _TestMigrationObject):
    pass


class TestRemoteMigrationObject(test_objects._RemoteTest,
                                _TestMigrationObject):
    pass
import datetime import iso8601 import mock from oslo_utils import timeutils from nova import objects from nova.tests.unit.objects import test_objects from nova import utils NOW = timeutils.utcnow().replace(microsecond=0) fake_task_log = { 'created_at': NOW, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'id': 1, 'task_name': 'fake-name', 'state': 'fake-state', 'host': 'fake-host', 'period_beginning': NOW - datetime.timedelta(seconds=10), 'period_ending': NOW, 'message': 'fake-message', 'task_items': 1, 'errors': 0, } class _TestTaskLog(object): @mock.patch('nova.db.task_log_get', return_value=fake_task_log) def test_get(self, mock_get): task_log = objects.TaskLog.get(self.context, fake_task_log['task_name'], fake_task_log['period_beginning'], fake_task_log['period_ending'], fake_task_log['host'], state=fake_task_log['state']) mock_get.assert_called_once_with( self.context, fake_task_log['task_name'], utils.strtime(fake_task_log['period_beginning']), utils.strtime(fake_task_log['period_ending']), fake_task_log['host'], state=fake_task_log['state']) self.compare_obj(task_log, fake_task_log) @mock.patch('nova.db.task_log_begin_task') def test_begin_task(self, mock_begin_task): task_log = objects.TaskLog(self.context) task_log.task_name = fake_task_log['task_name'] task_log.period_beginning = fake_task_log['period_beginning'] task_log.period_ending = fake_task_log['period_ending'] task_log.host = fake_task_log['host'] task_log.task_items = fake_task_log['task_items'] task_log.message = fake_task_log['message'] task_log.begin_task() mock_begin_task.assert_called_once_with( self.context, fake_task_log['task_name'], fake_task_log['period_beginning'].replace( tzinfo=iso8601.iso8601.Utc()), fake_task_log['period_ending'].replace( tzinfo=iso8601.iso8601.Utc()), fake_task_log['host'], task_items=fake_task_log['task_items'], message=fake_task_log['message']) @mock.patch('nova.db.task_log_end_task') def test_end_task(self, mock_end_task): task_log = 
objects.TaskLog(self.context) task_log.task_name = fake_task_log['task_name'] task_log.period_beginning = fake_task_log['period_beginning'] task_log.period_ending = fake_task_log['period_ending'] task_log.host = fake_task_log['host'] task_log.errors = fake_task_log['errors'] task_log.message = fake_task_log['message'] task_log.end_task() mock_end_task.assert_called_once_with( self.context, fake_task_log['task_name'], fake_task_log['period_beginning'].replace( tzinfo=iso8601.iso8601.Utc()), fake_task_log['period_ending'].replace( tzinfo=iso8601.iso8601.Utc()), fake_task_log['host'], errors=fake_task_log['errors'], message=fake_task_log['message']) class TestTaskLog(test_objects._LocalTest, _TestTaskLog): pass class TestRemoteTaskLog(test_objects._RemoteTest, _TestTaskLog): pass class _TestTaskLogList(object): @mock.patch('nova.db.task_log_get_all') def test_get_all(self, mock_get_all): fake_task_logs = [dict(fake_task_log, id=1), dict(fake_task_log, id=2)] mock_get_all.return_value = fake_task_logs task_logs = objects.TaskLogList.get_all( self.context, fake_task_log['task_name'], fake_task_log['period_beginning'], fake_task_log['period_ending'], host=fake_task_log['host'], state=fake_task_log['state']) mock_get_all.assert_called_once_with( self.context, fake_task_log['task_name'], utils.strtime(fake_task_log['period_beginning']), utils.strtime(fake_task_log['period_ending']), host=fake_task_log['host'], state=fake_task_log['state']) for index, task_log in enumerate(task_logs): self.compare_obj(task_log, fake_task_logs[index]) class TestTaskLogList(test_objects._LocalTest, _TestTaskLogList): pass class TestRemoteTaskLogList(test_objects._RemoteTest, _TestTaskLogList): pass nova-13.0.0/nova/tests/unit/objects/test_cell_mapping.py0000664000567000056710000001004512701407773024467 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import uuidutils from nova import exception from nova import objects from nova.objects import cell_mapping from nova.tests.unit.objects import test_objects def get_db_mapping(**updates): db_mapping = { 'id': 1, 'uuid': uuidutils.generate_uuid(), 'name': 'cell1', 'transport_url': 'rabbit://', 'database_connection': 'sqlite:///', 'created_at': None, 'updated_at': None, } db_mapping.update(updates) return db_mapping class _TestCellMappingObject(object): @mock.patch.object(cell_mapping.CellMapping, '_get_by_uuid_from_db') def test_get_by_uuid(self, uuid_from_db): db_mapping = get_db_mapping() uuid_from_db.return_value = db_mapping mapping_obj = objects.CellMapping().get_by_uuid(self.context, db_mapping['uuid']) uuid_from_db.assert_called_once_with(self.context, db_mapping['uuid']) self.compare_obj(mapping_obj, db_mapping) @mock.patch.object(cell_mapping.CellMapping, '_get_by_uuid_from_db', side_effect=exception.CellMappingNotFound(uuid='fake')) def test_get_by_uuid_invalid(self, uuid_from_db): db_mapping = get_db_mapping() self.assertRaises(exception.CellMappingNotFound, objects.CellMapping().get_by_uuid, self.context, db_mapping['uuid']) uuid_from_db.assert_called_once_with(self.context, db_mapping['uuid']) @mock.patch.object(cell_mapping.CellMapping, '_create_in_db') def test_create(self, create_in_db): uuid = uuidutils.generate_uuid() db_mapping = get_db_mapping(uuid=uuid, name='test', database_connection='mysql+pymysql:///') create_in_db.return_value = db_mapping mapping_obj = objects.CellMapping(self.context) mapping_obj.uuid = uuid 
mapping_obj.name = 'test' mapping_obj.database_connection = 'mysql+pymysql:///' mapping_obj.create() create_in_db.assert_called_once_with(self.context, {'uuid': uuid, 'name': 'test', 'database_connection': 'mysql+pymysql:///'}) self.compare_obj(mapping_obj, db_mapping) @mock.patch.object(cell_mapping.CellMapping, '_save_in_db') def test_save(self, save_in_db): uuid = uuidutils.generate_uuid() db_mapping = get_db_mapping(database_connection='mysql+pymysql:///') save_in_db.return_value = db_mapping mapping_obj = objects.CellMapping(self.context) mapping_obj.uuid = uuid mapping_obj.database_connection = 'mysql+pymysql:///' mapping_obj.save() save_in_db.assert_called_once_with(self.context, uuid, {'uuid': uuid, 'database_connection': 'mysql+pymysql:///'}) self.compare_obj(mapping_obj, db_mapping) @mock.patch.object(cell_mapping.CellMapping, '_destroy_in_db') def test_destroy(self, destroy_in_db): uuid = uuidutils.generate_uuid() mapping_obj = objects.CellMapping(self.context) mapping_obj.uuid = uuid mapping_obj.destroy() destroy_in_db.assert_called_once_with(self.context, uuid) class TestCellMappingObject(test_objects._LocalTest, _TestCellMappingObject): pass class TestRemoteCellMappingObject(test_objects._RemoteTest, _TestCellMappingObject): pass nova-13.0.0/nova/tests/unit/objects/test_request_spec.py0000664000567000056710000005703412701407773024550 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_serialization import jsonutils from oslo_utils import uuidutils from nova import context from nova import exception from nova import objects from nova.objects import base from nova.objects import request_spec from nova.tests.unit import fake_flavor from nova.tests.unit import fake_instance from nova.tests.unit import fake_request_spec from nova.tests.unit.objects import test_objects class _TestRequestSpecObject(object): def test_image_meta_from_image_as_object(self): # Just isolating the test for the from_dict() method image_meta = objects.ImageMeta(name='foo') spec = objects.RequestSpec() spec._image_meta_from_image(image_meta) self.assertEqual(image_meta, spec.image) @mock.patch.object(objects.ImageMeta, 'from_dict') def test_image_meta_from_image_as_dict(self, from_dict): # Just isolating the test for the from_dict() method image_meta = objects.ImageMeta(name='foo') from_dict.return_value = image_meta spec = objects.RequestSpec() spec._image_meta_from_image({'name': 'foo'}) self.assertEqual(image_meta, spec.image) def test_image_meta_from_image_as_none(self): # just add a dumb check to have a full coverage spec = objects.RequestSpec() spec._image_meta_from_image(None) self.assertIsNone(spec.image) @mock.patch.object(base, 'obj_to_primitive') def test_to_legacy_image(self, obj_to_primitive): spec = objects.RequestSpec(image=objects.ImageMeta()) fake_dict = mock.Mock() obj_to_primitive.return_value = fake_dict self.assertEqual(fake_dict, spec._to_legacy_image()) obj_to_primitive.assert_called_once_with(spec.image) @mock.patch.object(base, 'obj_to_primitive') def test_to_legacy_image_with_none(self, obj_to_primitive): spec = objects.RequestSpec(image=None) self.assertEqual({}, spec._to_legacy_image()) self.assertFalse(obj_to_primitive.called) def test_from_instance_as_object(self): instance = objects.Instance() instance.uuid = uuidutils.generate_uuid() instance.numa_topology = None instance.pci_requests = None instance.project_id = '1' 
instance.availability_zone = 'nova' spec = objects.RequestSpec() spec._from_instance(instance) instance_fields = ['numa_topology', 'pci_requests', 'uuid', 'project_id', 'availability_zone'] for field in instance_fields: if field == 'uuid': self.assertEqual(getattr(instance, field), getattr(spec, 'instance_uuid')) else: self.assertEqual(getattr(instance, field), getattr(spec, field)) def test_from_instance_as_dict(self): instance = dict(uuid=uuidutils.generate_uuid(), numa_topology=None, pci_requests=None, project_id='1', availability_zone='nova') spec = objects.RequestSpec() spec._from_instance(instance) instance_fields = ['numa_topology', 'pci_requests', 'uuid', 'project_id', 'availability_zone'] for field in instance_fields: if field == 'uuid': self.assertEqual(instance.get(field), getattr(spec, 'instance_uuid')) else: self.assertEqual(instance.get(field), getattr(spec, field)) @mock.patch.object(objects.InstancePCIRequests, 'from_request_spec_instance_props') def test_from_instance_with_pci_requests(self, pci_from_spec): fake_pci_requests = objects.InstancePCIRequests() pci_from_spec.return_value = fake_pci_requests instance = dict( uuid=uuidutils.generate_uuid(), root_gb=10, ephemeral_gb=0, memory_mb=10, vcpus=1, numa_topology=None, project_id='1', availability_zone='nova', pci_requests={ 'instance_uuid': 'fakeid', 'requests': [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]}) spec = objects.RequestSpec() spec._from_instance(instance) pci_from_spec.assert_called_once_with(instance['pci_requests']) self.assertEqual(fake_pci_requests, spec.pci_requests) def test_from_instance_with_numa_stuff(self): instance = dict( uuid=uuidutils.generate_uuid(), root_gb=10, ephemeral_gb=0, memory_mb=10, vcpus=1, project_id='1', availability_zone='nova', pci_requests=None, numa_topology={'cells': [{'id': 1, 'cpuset': ['1'], 'memory': 8192, 'pagesize': None, 'cpu_topology': None, 'cpu_pinning_raw': None}]}) spec = objects.RequestSpec() spec._from_instance(instance) 
self.assertIsInstance(spec.numa_topology, objects.InstanceNUMATopology) cells = spec.numa_topology.cells self.assertEqual(1, len(cells)) self.assertIsInstance(cells[0], objects.InstanceNUMACell) def test_from_flavor_as_object(self): flavor = objects.Flavor() spec = objects.RequestSpec() spec._from_flavor(flavor) self.assertEqual(flavor, spec.flavor) def test_from_flavor_as_dict(self): flavor_dict = dict(id=1) ctxt = context.RequestContext('fake', 'fake') spec = objects.RequestSpec(ctxt) spec._from_flavor(flavor_dict) self.assertIsInstance(spec.flavor, objects.Flavor) self.assertEqual({'id': 1}, spec.flavor.obj_get_changes()) def test_to_legacy_instance(self): spec = objects.RequestSpec() spec.flavor = objects.Flavor(root_gb=10, ephemeral_gb=0, memory_mb=10, vcpus=1) spec.numa_topology = None spec.pci_requests = None spec.project_id = '1' spec.availability_zone = 'nova' instance = spec._to_legacy_instance() self.assertEqual({'root_gb': 10, 'ephemeral_gb': 0, 'memory_mb': 10, 'vcpus': 1, 'numa_topology': None, 'pci_requests': None, 'project_id': '1', 'availability_zone': 'nova'}, instance) def test_to_legacy_instance_with_unset_values(self): spec = objects.RequestSpec() self.assertEqual({}, spec._to_legacy_instance()) def test_from_retry(self): retry_dict = {'num_attempts': 1, 'hosts': [['fake1', 'node1']]} ctxt = context.RequestContext('fake', 'fake') spec = objects.RequestSpec(ctxt) spec._from_retry(retry_dict) self.assertIsInstance(spec.retry, objects.SchedulerRetries) self.assertEqual(1, spec.retry.num_attempts) self.assertIsInstance(spec.retry.hosts, objects.ComputeNodeList) self.assertEqual(1, len(spec.retry.hosts)) self.assertEqual('fake1', spec.retry.hosts[0].host) self.assertEqual('node1', spec.retry.hosts[0].hypervisor_hostname) def test_from_retry_missing_values(self): retry_dict = {} ctxt = context.RequestContext('fake', 'fake') spec = objects.RequestSpec(ctxt) spec._from_retry(retry_dict) self.assertIsNone(spec.retry) def test_populate_group_info(self): 
filt_props = {} filt_props['group_updated'] = True filt_props['group_policies'] = set(['affinity']) filt_props['group_hosts'] = set(['fake1']) filt_props['group_members'] = set(['fake-instance1']) spec = objects.RequestSpec() spec._populate_group_info(filt_props) self.assertIsInstance(spec.instance_group, objects.InstanceGroup) self.assertEqual(['affinity'], spec.instance_group.policies) self.assertEqual(['fake1'], spec.instance_group.hosts) self.assertEqual(['fake-instance1'], spec.instance_group.members) def test_populate_group_info_missing_values(self): filt_props = {} spec = objects.RequestSpec() spec._populate_group_info(filt_props) self.assertIsNone(spec.instance_group) def test_from_limits(self): limits_dict = {'numa_topology': None, 'vcpu': 1.0, 'disk_gb': 1.0, 'memory_mb': 1.0} spec = objects.RequestSpec() spec._from_limits(limits_dict) self.assertIsInstance(spec.limits, objects.SchedulerLimits) self.assertIsNone(spec.limits.numa_topology) self.assertEqual(1, spec.limits.vcpu) self.assertEqual(1, spec.limits.disk_gb) self.assertEqual(1, spec.limits.memory_mb) def test_from_limits_missing_values(self): limits_dict = {} spec = objects.RequestSpec() spec._from_limits(limits_dict) self.assertIsInstance(spec.limits, objects.SchedulerLimits) self.assertIsNone(spec.limits.numa_topology) self.assertIsNone(spec.limits.vcpu) self.assertIsNone(spec.limits.disk_gb) self.assertIsNone(spec.limits.memory_mb) def test_from_hints(self): hints_dict = {'foo_str': '1', 'bar_list': ['2']} spec = objects.RequestSpec() spec._from_hints(hints_dict) expected = {'foo_str': ['1'], 'bar_list': ['2']} self.assertEqual(expected, spec.scheduler_hints) def test_from_hints_with_no_hints(self): spec = objects.RequestSpec() spec._from_hints(None) self.assertIsNone(spec.scheduler_hints) @mock.patch.object(objects.SchedulerLimits, 'from_dict') def test_from_primitives(self, mock_limits): spec_dict = {'instance_type': objects.Flavor(), 'instance_properties': objects.Instance( 
uuid=uuidutils.generate_uuid(), numa_topology=None, pci_requests=None, project_id=1, availability_zone='nova')} filt_props = {} # We seriously don't care about the return values, we just want to make # sure that all the fields are set mock_limits.return_value = None ctxt = context.RequestContext('fake', 'fake') spec = objects.RequestSpec.from_primitives(ctxt, spec_dict, filt_props) mock_limits.assert_called_once_with({}) # Make sure that all fields are set using that helper method for field in [f for f in spec.obj_fields if f != 'id']: self.assertTrue(spec.obj_attr_is_set(field), 'Field: %s is not set' % field) # just making sure that the context is set by the method self.assertEqual(ctxt, spec._context) def test_from_components(self): ctxt = context.RequestContext('fake-user', 'fake-project') instance = fake_instance.fake_instance_obj(ctxt) image = {'id': 'fake-image-id', 'properties': {'mappings': []}, 'status': 'fake-status', 'location': 'far-away'} flavor = fake_flavor.fake_flavor_obj(ctxt) filter_properties = {} instance_group = None spec = objects.RequestSpec.from_components(ctxt, instance, image, flavor, instance.numa_topology, instance.pci_requests, filter_properties, instance_group, instance.availability_zone) # Make sure that all fields are set using that helper method for field in [f for f in spec.obj_fields if f != 'id']: self.assertEqual(True, spec.obj_attr_is_set(field), 'Field: %s is not set' % field) # just making sure that the context is set by the method self.assertEqual(ctxt, spec._context) @mock.patch('nova.objects.RequestSpec._populate_group_info') def test_from_components_with_instance_group(self, mock_pgi): # This test makes sure that we don't overwrite instance group passed # to from_components ctxt = context.RequestContext('fake-user', 'fake-project') instance = fake_instance.fake_instance_obj(ctxt) image = {'id': 'fake-image-id', 'properties': {'mappings': []}, 'status': 'fake-status', 'location': 'far-away'} flavor = 
fake_flavor.fake_flavor_obj(ctxt) filter_properties = {'fake': 'property'} instance_group = objects.InstanceGroup() objects.RequestSpec.from_components(ctxt, instance, image, flavor, instance.numa_topology, instance.pci_requests, filter_properties, instance_group, instance.availability_zone) self.assertFalse(mock_pgi.called) @mock.patch('nova.objects.RequestSpec._populate_group_info') def test_from_components_without_instance_group(self, mock_pgi): # This test makes sure that we populate instance group if not # present ctxt = context.RequestContext('fake-user', 'fake-project') instance = fake_instance.fake_instance_obj(ctxt) image = {'id': 'fake-image-id', 'properties': {'mappings': []}, 'status': 'fake-status', 'location': 'far-away'} flavor = fake_flavor.fake_flavor_obj(ctxt) filter_properties = {'fake': 'property'} objects.RequestSpec.from_components(ctxt, instance, image, flavor, instance.numa_topology, instance.pci_requests, filter_properties, None, instance.availability_zone) mock_pgi.assert_called_once_with(filter_properties) def test_get_scheduler_hint(self): spec_obj = objects.RequestSpec(scheduler_hints={'foo_single': ['1'], 'foo_mul': ['1', '2']}) self.assertEqual('1', spec_obj.get_scheduler_hint('foo_single')) self.assertEqual(['1', '2'], spec_obj.get_scheduler_hint('foo_mul')) self.assertIsNone(spec_obj.get_scheduler_hint('oops')) self.assertEqual('bar', spec_obj.get_scheduler_hint('oops', default='bar')) def test_get_scheduler_hint_with_no_hints(self): spec_obj = objects.RequestSpec() self.assertEqual('bar', spec_obj.get_scheduler_hint('oops', default='bar')) @mock.patch.object(objects.RequestSpec, '_to_legacy_instance') @mock.patch.object(base, 'obj_to_primitive') def test_to_legacy_request_spec_dict(self, image_to_primitive, spec_to_legacy_instance): fake_image_dict = mock.Mock() image_to_primitive.return_value = fake_image_dict fake_instance = {'root_gb': 1.0, 'ephemeral_gb': 1.0, 'memory_mb': 1.0, 'vcpus': 1, 'numa_topology': None, 'pci_requests': 
None, 'project_id': '1', 'availability_zone': 'nova', 'uuid': '1'} spec_to_legacy_instance.return_value = fake_instance fake_flavor = objects.Flavor(root_gb=10, ephemeral_gb=0, memory_mb=512, vcpus=1) spec = objects.RequestSpec(num_instances=1, image=objects.ImageMeta(), # instance properties numa_topology=None, pci_requests=None, project_id=1, availability_zone='nova', instance_uuid='1', flavor=fake_flavor) spec_dict = spec.to_legacy_request_spec_dict() expected = {'num_instances': 1, 'image': fake_image_dict, 'instance_properties': fake_instance, 'instance_type': fake_flavor} self.assertEqual(expected, spec_dict) def test_to_legacy_request_spec_dict_with_unset_values(self): spec = objects.RequestSpec() self.assertEqual({'num_instances': 1, 'image': {}, 'instance_properties': {}, 'instance_type': {}}, spec.to_legacy_request_spec_dict()) def test_to_legacy_filter_properties_dict(self): fake_numa_limits = objects.NUMATopologyLimits() fake_computes_obj = objects.ComputeNodeList( objects=[objects.ComputeNode(host='fake1', hypervisor_hostname='node1')]) spec = objects.RequestSpec( ignore_hosts=['ignoredhost'], force_hosts=['fakehost'], force_nodes=['fakenode'], retry=objects.SchedulerRetries(num_attempts=1, hosts=fake_computes_obj), limits=objects.SchedulerLimits(numa_topology=fake_numa_limits, vcpu=1.0, disk_gb=10.0, memory_mb=8192.0), instance_group=objects.InstanceGroup(hosts=['fake1'], policies=['affinity']), scheduler_hints={'foo': ['bar']}) expected = {'ignore_hosts': ['ignoredhost'], 'force_hosts': ['fakehost'], 'force_nodes': ['fakenode'], 'retry': {'num_attempts': 1, 'hosts': [['fake1', 'node1']]}, 'limits': {'numa_topology': fake_numa_limits, 'vcpu': 1.0, 'disk_gb': 10.0, 'memory_mb': 8192.0}, 'group_updated': True, 'group_hosts': set(['fake1']), 'group_policies': set(['affinity']), 'scheduler_hints': {'foo': 'bar'}} self.assertEqual(expected, spec.to_legacy_filter_properties_dict()) def test_to_legacy_filter_properties_dict_with_nullable_values(self): spec = 
objects.RequestSpec(force_hosts=None, force_nodes=None, retry=None, limits=None, instance_group=None, scheduler_hints=None) self.assertEqual({}, spec.to_legacy_filter_properties_dict()) def test_to_legacy_filter_properties_dict_with_unset_values(self): spec = objects.RequestSpec() self.assertEqual({}, spec.to_legacy_filter_properties_dict()) @mock.patch.object(request_spec.RequestSpec, '_get_by_instance_uuid_from_db') def test_get_by_instance_uuid(self, get_by_uuid): fake_spec = fake_request_spec.fake_db_spec() get_by_uuid.return_value = fake_spec req_obj = request_spec.RequestSpec.get_by_instance_uuid(self.context, fake_spec['instance_uuid']) self.assertEqual(1, req_obj.num_instances) self.assertEqual(['host2', 'host4'], req_obj.ignore_hosts) self.assertEqual('fake', req_obj.project_id) self.assertEqual({'hint': ['over-there']}, req_obj.scheduler_hints) self.assertEqual(['host1', 'host3'], req_obj.force_hosts) self.assertIsNone(req_obj.availability_zone) self.assertEqual(['node1', 'node2'], req_obj.force_nodes) self.assertIsInstance(req_obj.image, objects.ImageMeta) self.assertIsInstance(req_obj.numa_topology, objects.InstanceNUMATopology) self.assertIsInstance(req_obj.pci_requests, objects.InstancePCIRequests) self.assertIsInstance(req_obj.flavor, objects.Flavor) self.assertIsInstance(req_obj.retry, objects.SchedulerRetries) self.assertIsInstance(req_obj.limits, objects.SchedulerLimits) self.assertIsInstance(req_obj.instance_group, objects.InstanceGroup) def _check_update_primitive(self, req_obj, changes): self.assertEqual(req_obj.instance_uuid, changes['instance_uuid']) serialized_obj = objects.RequestSpec.obj_from_primitive( jsonutils.loads(changes['spec'])) # primitive fields for field in ['instance_uuid', 'num_instances', 'ignore_hosts', 'project_id', 'scheduler_hints', 'force_hosts', 'availability_zone', 'force_nodes']: self.assertEqual(getattr(req_obj, field), getattr(serialized_obj, field)) # object fields for field in ['image', 'numa_topology', 
'pci_requests', 'flavor', 'retry', 'limits', 'instance_group']: self.assertDictEqual( getattr(req_obj, field).obj_to_primitive(), getattr(serialized_obj, field).obj_to_primitive()) def test_create(self): req_obj = fake_request_spec.fake_spec_obj(remove_id=True) def _test_create_args(self2, context, changes): self._check_update_primitive(req_obj, changes) # DB creation would have set an id changes['id'] = 42 return changes with mock.patch.object(request_spec.RequestSpec, '_create_in_db', _test_create_args): req_obj.create() def test_create_id_set(self): req_obj = request_spec.RequestSpec(self.context) req_obj.id = 3 self.assertRaises(exception.ObjectActionError, req_obj.create) def test_save(self): req_obj = fake_request_spec.fake_spec_obj() def _test_save_args(self2, context, instance_uuid, changes): self._check_update_primitive(req_obj, changes) # DB creation would have set an id changes['id'] = 42 return changes with mock.patch.object(request_spec.RequestSpec, '_save_in_db', _test_save_args): req_obj.save() def test_reset_forced_destinations(self): req_obj = fake_request_spec.fake_spec_obj() # Making sure the fake object has forced hosts and nodes self.assertIsNotNone(req_obj.force_hosts) self.assertIsNotNone(req_obj.force_nodes) with mock.patch.object(req_obj, 'obj_reset_changes') as mock_reset: req_obj.reset_forced_destinations() self.assertIsNone(req_obj.force_hosts) self.assertIsNone(req_obj.force_nodes) mock_reset.assert_called_once_with(['force_hosts', 'force_nodes']) class TestRequestSpecObject(test_objects._LocalTest, _TestRequestSpecObject): pass class TestRemoteRequestSpecObject(test_objects._RemoteTest, _TestRequestSpecObject): pass nova-13.0.0/nova/tests/unit/objects/test_objects.py0000664000567000056710000015651612701410011023457 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import contextlib import copy import datetime import inspect import os import pprint import fixtures import mock from oslo_log import log from oslo_utils import timeutils from oslo_utils import versionutils from oslo_versionedobjects import base as ovo_base from oslo_versionedobjects import exception as ovo_exc from oslo_versionedobjects import fixture import six from nova import context from nova import exception from nova import objects from nova.objects import base from nova.objects import fields from nova.objects import notification from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.unit import fake_notifier from nova import utils LOG = log.getLogger(__name__) class MyOwnedObject(base.NovaPersistentObject, base.NovaObject): VERSION = '1.0' fields = {'baz': fields.IntegerField()} class MyObj(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): VERSION = '1.6' fields = {'foo': fields.IntegerField(default=1), 'bar': fields.StringField(), 'missing': fields.StringField(), 'readonly': fields.IntegerField(read_only=True), 'rel_object': fields.ObjectField('MyOwnedObject', nullable=True), 'rel_objects': fields.ListOfObjectsField('MyOwnedObject', nullable=True), 'mutable_default': fields.ListOfStringsField(default=[]), } @staticmethod def _from_db_object(context, obj, db_obj): self = MyObj() self.foo = db_obj['foo'] self.bar = db_obj['bar'] self.missing = db_obj['missing'] self.readonly = 1 self._context = context return self def obj_load_attr(self, attrname): setattr(self, attrname, 'loaded!') 
@base.remotable_classmethod def query(cls, context): obj = cls(context=context, foo=1, bar='bar') obj.obj_reset_changes() return obj @base.remotable def marco(self): return 'polo' @base.remotable def _update_test(self): self.bar = 'updated' @base.remotable def save(self): self.obj_reset_changes() @base.remotable def refresh(self): self.foo = 321 self.bar = 'refreshed' self.obj_reset_changes() @base.remotable def modify_save_modify(self): self.bar = 'meow' self.save() self.foo = 42 self.rel_object = MyOwnedObject(baz=42) def obj_make_compatible(self, primitive, target_version): super(MyObj, self).obj_make_compatible(primitive, target_version) # NOTE(danms): Simulate an older version that had a different # format for the 'bar' attribute if target_version == '1.1' and 'bar' in primitive: primitive['bar'] = 'old%s' % primitive['bar'] class RandomMixInWithNoFields(object): """Used to test object inheritance using a mixin that has no fields.""" pass @base.NovaObjectRegistry.register_if(False) class TestSubclassedObject(RandomMixInWithNoFields, MyObj): fields = {'new_field': fields.StringField()} class TestObjToPrimitive(test.NoDBTestCase): def test_obj_to_primitive_list(self): @base.NovaObjectRegistry.register_if(False) class MyObjElement(base.NovaObject): fields = {'foo': fields.IntegerField()} def __init__(self, foo): super(MyObjElement, self).__init__() self.foo = foo @base.NovaObjectRegistry.register_if(False) class MyList(base.ObjectListBase, base.NovaObject): fields = {'objects': fields.ListOfObjectsField('MyObjElement')} mylist = MyList() mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)] self.assertEqual([1, 2, 3], [x['foo'] for x in base.obj_to_primitive(mylist)]) def test_obj_to_primitive_dict(self): base.NovaObjectRegistry.register(MyObj) myobj = MyObj(foo=1, bar='foo') self.assertEqual({'foo': 1, 'bar': 'foo'}, base.obj_to_primitive(myobj)) def test_obj_to_primitive_recursive(self): base.NovaObjectRegistry.register(MyObj) class 
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
                comparators=None):
    """Check every field of a NovaObject against a dict-like DB row.

    TZ-aware datetimes on the object are normalized to naive values
    before comparison so they can be matched against DB-stored times.

    :param:test: The TestCase doing the comparison
    :param:obj: The NovaObject to examine
    :param:db_obj: The dict-like database object to use as reference
    :param:subs: A dict of objkey=dbkey field substitutions
    :param:allow_missing: A list of fields that may not be in db_obj
    :param:comparators: Map of comparator functions to use for certain
                        fields
    """
    subs = {} if subs is None else subs
    allow_missing = [] if allow_missing is None else allow_missing
    comparators = {} if comparators is None else comparators

    for field in obj.fields:
        # Skip fields the caller explicitly allows to be unset.
        if field in allow_missing and not obj.obj_attr_is_set(field):
            continue
        actual = getattr(obj, field)
        expected = db_obj[subs.get(field, field)]
        if isinstance(actual, datetime.datetime):
            # DB values are naive; drop tzinfo before comparing.
            actual = actual.replace(tzinfo=None)
        if field in comparators:
            # Custom comparator gets (db value, object value), in
            # that order, matching the historical calling convention.
            comparators[field](expected, actual)
        else:
            test.assertEqual(expected, actual)
""" self.assertEqual(expected, str(obj_val)) class _LocalTest(_BaseTestCase): def setUp(self): super(_LocalTest, self).setUp() # Just in case self.useFixture(nova_fixtures.IndirectionAPIFixture(None)) @contextlib.contextmanager def things_temporarily_local(): # Temporarily go non-remote so the conductor handles # this request directly _api = base.NovaObject.indirection_api base.NovaObject.indirection_api = None yield base.NovaObject.indirection_api = _api # FIXME(danms): We shouldn't be overriding any of this, but need to # for the moment because of the mocks in the base fixture that don't # hit our registry subclass. class FakeIndirectionHack(fixture.FakeIndirectionAPI): def object_action(self, context, objinst, objmethod, args, kwargs): objinst = self._ser.deserialize_entity( context, self._ser.serialize_entity( context, objinst)) objmethod = six.text_type(objmethod) args = self._ser.deserialize_entity( None, self._ser.serialize_entity(None, args)) kwargs = self._ser.deserialize_entity( None, self._ser.serialize_entity(None, kwargs)) original = objinst.obj_clone() with mock.patch('nova.objects.base.NovaObject.' 'indirection_api', new=None): result = getattr(objinst, objmethod)(*args, **kwargs) updates = self._get_changes(original, objinst) updates['obj_what_changed'] = objinst.obj_what_changed() return updates, result def object_class_action(self, context, objname, objmethod, objver, args, kwargs): objname = six.text_type(objname) objmethod = six.text_type(objmethod) objver = six.text_type(objver) args = self._ser.deserialize_entity( None, self._ser.serialize_entity(None, args)) kwargs = self._ser.deserialize_entity( None, self._ser.serialize_entity(None, kwargs)) cls = base.NovaObject.obj_class_from_name(objname, objver) with mock.patch('nova.objects.base.NovaObject.' 
    def object_class_action_versions(self, context, objname, objmethod,
                                     object_versions, args, kwargs):
        """Emulate a remote object *class* method call locally.

        Mirrors the conductor's object_class_action_versions: inputs
        are coerced to their wire form first, the method is invoked with
        indirection disabled (so it cannot recurse back into this fake
        RPC layer), and an object result is round-tripped through a
        primitive at the version the caller asked for.
        """
        # RPC transports strings as unicode; mimic that coercion here.
        objname = six.text_type(objname)
        objmethod = six.text_type(objmethod)
        object_versions = {six.text_type(o): six.text_type(v)
                           for o, v in object_versions.items()}
        # Round-trip args/kwargs through the serializer, as real RPC would.
        args, kwargs = self._canonicalize_args(context, args, kwargs)
        objver = object_versions[objname]
        cls = base.NovaObject.obj_class_from_name(objname, objver)
        # Run with indirection off so the call executes locally instead
        # of re-entering this fake indirection API.
        with mock.patch('nova.objects.base.NovaObject.'
                        'indirection_api', new=None):
            result = getattr(cls, objmethod)(context, *args, **kwargs)
        # NovaObject results are serialized back at the requested
        # version; plain (non-object) values are returned untouched.
        return (base.NovaObject.obj_from_primitive(
            result.obj_to_primitive(target_version=objver),
            context=context)
            if isinstance(result, base.NovaObject) else result)
    def test_hydration(self):
        """obj_from_primitive delegates to _obj_from_primitive.

        Verifies the classmethod entry point forwards the (context,
        version, primitive) triple untouched and that the resulting
        object carries the primitive's data.
        """
        primitive = {'nova_object.name': 'MyObj',
                     'nova_object.namespace': 'nova',
                     'nova_object.version': '1.5',
                     'nova_object.data': {'foo': 1}}
        real_method = MyObj._obj_from_primitive

        # Pass-through wrapper: the real deserialization still runs
        # while the mock records the call arguments.
        def _obj_from_primitive(*args):
            return real_method(*args)

        with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
            ofp.side_effect = _obj_from_primitive
            obj = MyObj.obj_from_primitive(primitive)
            # No context was supplied, so None must be forwarded.
            ofp.assert_called_once_with(None, '1.5', primitive)
        self.assertEqual(obj.foo, 1)
In reality, when using the # serializer, we'll get that snipped off (tested # elsewhere) self.assertEqual('1.5.1', obj.VERSION) def test_dehydration(self): expected = {'nova_object.name': 'MyObj', 'nova_object.namespace': 'nova', 'nova_object.version': '1.6', 'nova_object.data': {'foo': 1}} obj = MyObj(foo=1) obj.obj_reset_changes() self.assertEqual(obj.obj_to_primitive(), expected) def test_object_property(self): obj = MyObj(foo=1) self.assertEqual(obj.foo, 1) def test_object_property_type_error(self): obj = MyObj() def fail(): obj.foo = 'a' self.assertRaises(ValueError, fail) def test_load(self): obj = MyObj() self.assertEqual(obj.bar, 'loaded!') def test_load_in_base(self): @base.NovaObjectRegistry.register_if(False) class Foo(base.NovaObject): fields = {'foobar': fields.IntegerField()} obj = Foo() with self.assertRaisesRegex(NotImplementedError, ".*foobar.*"): obj.foobar def test_loaded_in_primitive(self): obj = MyObj(foo=1) obj.obj_reset_changes() self.assertEqual(obj.bar, 'loaded!') expected = {'nova_object.name': 'MyObj', 'nova_object.namespace': 'nova', 'nova_object.version': '1.6', 'nova_object.changes': ['bar'], 'nova_object.data': {'foo': 1, 'bar': 'loaded!'}} self.assertEqual(obj.obj_to_primitive(), expected) def test_changes_in_primitive(self): obj = MyObj(foo=123) self.assertEqual(obj.obj_what_changed(), set(['foo'])) primitive = obj.obj_to_primitive() self.assertIn('nova_object.changes', primitive) obj2 = MyObj.obj_from_primitive(primitive) self.assertEqual(obj2.obj_what_changed(), set(['foo'])) obj2.obj_reset_changes() self.assertEqual(obj2.obj_what_changed(), set()) def test_orphaned_object(self): obj = MyObj.query(self.context) obj._context = None self.assertRaises(ovo_exc.OrphanedObjectError, obj._update_test) def test_changed_1(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(obj.obj_what_changed(), set(['foo'])) obj._update_test() self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar'])) self.assertEqual(obj.foo, 123) 
def test_changed_2(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(obj.obj_what_changed(), set(['foo'])) obj.save() self.assertEqual(obj.obj_what_changed(), set([])) self.assertEqual(obj.foo, 123) def test_changed_3(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(obj.obj_what_changed(), set(['foo'])) obj.refresh() self.assertEqual(obj.obj_what_changed(), set([])) self.assertEqual(obj.foo, 321) self.assertEqual(obj.bar, 'refreshed') def test_changed_4(self): obj = MyObj.query(self.context) obj.bar = 'something' self.assertEqual(obj.obj_what_changed(), set(['bar'])) obj.modify_save_modify() self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object'])) self.assertEqual(obj.foo, 42) self.assertEqual(obj.bar, 'meow') self.assertIsInstance(obj.rel_object, MyOwnedObject) def test_changed_with_sub_object(self): @base.NovaObjectRegistry.register_if(False) class ParentObject(base.NovaObject): fields = {'foo': fields.IntegerField(), 'bar': fields.ObjectField('MyObj'), } obj = ParentObject() self.assertEqual(set(), obj.obj_what_changed()) obj.foo = 1 self.assertEqual(set(['foo']), obj.obj_what_changed()) bar = MyObj() obj.bar = bar self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) obj.obj_reset_changes() self.assertEqual(set(), obj.obj_what_changed()) bar.foo = 1 self.assertEqual(set(['bar']), obj.obj_what_changed()) def test_static_result(self): obj = MyObj.query(self.context) self.assertEqual(obj.bar, 'bar') result = obj.marco() self.assertEqual(result, 'polo') def test_updates(self): obj = MyObj.query(self.context) self.assertEqual(obj.foo, 1) obj._update_test() self.assertEqual(obj.bar, 'updated') def test_base_attributes(self): dt = datetime.datetime(1955, 11, 5) obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None, deleted=False) expected = {'nova_object.name': 'MyObj', 'nova_object.namespace': 'nova', 'nova_object.version': '1.6', 'nova_object.changes': ['deleted', 'created_at', 'deleted_at', 
'updated_at'], 'nova_object.data': {'created_at': utils.isotime(dt), 'updated_at': utils.isotime(dt), 'deleted_at': None, 'deleted': False, } } actual = obj.obj_to_primitive() self.assertJsonEqual(actual, expected) def test_contains(self): obj = MyObj() self.assertNotIn('foo', obj) obj.foo = 1 self.assertIn('foo', obj) self.assertNotIn('does_not_exist', obj) def test_obj_attr_is_set(self): obj = MyObj(foo=1) self.assertTrue(obj.obj_attr_is_set('foo')) self.assertFalse(obj.obj_attr_is_set('bar')) self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang') def test_obj_reset_changes_recursive(self): obj = MyObj(rel_object=MyOwnedObject(baz=123), rel_objects=[MyOwnedObject(baz=456)]) self.assertEqual(set(['rel_object', 'rel_objects']), obj.obj_what_changed()) obj.obj_reset_changes() self.assertEqual(set(['rel_object']), obj.obj_what_changed()) self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed()) self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed()) obj.obj_reset_changes(recursive=True, fields=['foo']) self.assertEqual(set(['rel_object']), obj.obj_what_changed()) self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed()) self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed()) obj.obj_reset_changes(recursive=True) self.assertEqual(set([]), obj.rel_object.obj_what_changed()) self.assertEqual(set([]), obj.obj_what_changed()) def test_get(self): obj = MyObj(foo=1) # Foo has value, should not get the default self.assertEqual(obj.get('foo', 2), 1) # Foo has value, should return the value without error self.assertEqual(obj.get('foo'), 1) # Bar is not loaded, so we should get the default self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded') # Bar without a default should lazy-load self.assertEqual(obj.get('bar'), 'loaded!') # Bar now has a default, but loaded value should be returned self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!') # Invalid attribute should raise AttributeError 
self.assertRaises(AttributeError, obj.get, 'nothing') # ...even with a default self.assertRaises(AttributeError, obj.get, 'nothing', 3) def test_object_inheritance(self): base_fields = base.NovaPersistentObject.fields.keys() myobj_fields = (['foo', 'bar', 'missing', 'readonly', 'rel_object', 'rel_objects', 'mutable_default'] + list(base_fields)) myobj3_fields = ['new_field'] self.assertTrue(issubclass(TestSubclassedObject, MyObj)) self.assertEqual(len(myobj_fields), len(MyObj.fields)) self.assertEqual(set(myobj_fields), set(MyObj.fields.keys())) self.assertEqual(len(myobj_fields) + len(myobj3_fields), len(TestSubclassedObject.fields)) self.assertEqual(set(myobj_fields) | set(myobj3_fields), set(TestSubclassedObject.fields.keys())) def test_obj_as_admin(self): obj = MyObj(context=self.context) def fake(*args, **kwargs): self.assertTrue(obj._context.is_admin) with mock.patch.object(obj, 'obj_reset_changes') as mock_fn: mock_fn.side_effect = fake with obj.obj_as_admin(): obj.save() self.assertTrue(mock_fn.called) self.assertFalse(obj._context.is_admin) def test_obj_as_admin_orphaned(self): def testme(): obj = MyObj() with obj.obj_as_admin(): pass self.assertRaises(exception.OrphanedObjectError, testme) def test_obj_alternate_context(self): obj = MyObj(context=self.context) with obj.obj_alternate_context(mock.sentinel.alt_ctx): self.assertEqual(mock.sentinel.alt_ctx, obj._context) self.assertEqual(self.context, obj._context) def test_get_changes(self): obj = MyObj() self.assertEqual({}, obj.obj_get_changes()) obj.foo = 123 self.assertEqual({'foo': 123}, obj.obj_get_changes()) obj.bar = 'test' self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) def test_obj_fields(self): @base.NovaObjectRegistry.register_if(False) class TestObj(base.NovaObject): fields = {'foo': fields.IntegerField()} obj_extra_fields = ['bar'] @property def bar(self): return 'this is bar' obj = TestObj() 
self.assertEqual(['foo', 'bar'], obj.obj_fields) def test_obj_constructor(self): obj = MyObj(context=self.context, foo=123, bar='abc') self.assertEqual(123, obj.foo) self.assertEqual('abc', obj.bar) self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) def test_obj_read_only(self): obj = MyObj(context=self.context, foo=123, bar='abc') obj.readonly = 1 self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr, obj, 'readonly', 2) def test_obj_mutable_default(self): obj = MyObj(context=self.context, foo=123, bar='abc') obj.mutable_default = None obj.mutable_default.append('s1') self.assertEqual(obj.mutable_default, ['s1']) obj1 = MyObj(context=self.context, foo=123, bar='abc') obj1.mutable_default = None obj1.mutable_default.append('s2') self.assertEqual(obj1.mutable_default, ['s2']) def test_obj_mutable_default_set_default(self): obj1 = MyObj(context=self.context, foo=123, bar='abc') obj1.obj_set_defaults('mutable_default') self.assertEqual(obj1.mutable_default, []) obj1.mutable_default.append('s1') self.assertEqual(obj1.mutable_default, ['s1']) obj2 = MyObj(context=self.context, foo=123, bar='abc') obj2.obj_set_defaults('mutable_default') self.assertEqual(obj2.mutable_default, []) obj2.mutable_default.append('s2') self.assertEqual(obj2.mutable_default, ['s2']) def test_obj_repr(self): obj = MyObj(foo=123) self.assertEqual('MyObj(bar=,created_at=,deleted=,' 'deleted_at=,foo=123,missing=,' 'mutable_default=,readonly=,rel_object=,' 'rel_objects=,updated_at=)', repr(obj)) def test_obj_make_obj_compatible(self): subobj = MyOwnedObject(baz=1) subobj.VERSION = '1.2' obj = MyObj(rel_object=subobj) obj.obj_relationships = { 'rel_object': [('1.5', '1.1'), ('1.7', '1.2')], } orig_primitive = obj.obj_to_primitive()['nova_object.data'] with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: primitive = copy.deepcopy(orig_primitive) obj._obj_make_obj_compatible(primitive, '1.8', 'rel_object') self.assertFalse(mock_compat.called) with mock.patch.object(subobj, 
'obj_make_compatible') as mock_compat: primitive = copy.deepcopy(orig_primitive) obj._obj_make_obj_compatible(primitive, '1.7', 'rel_object') mock_compat.assert_called_once_with( primitive['rel_object']['nova_object.data'], '1.2') with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: primitive = copy.deepcopy(orig_primitive) obj._obj_make_obj_compatible(primitive, '1.6', 'rel_object') mock_compat.assert_called_once_with( primitive['rel_object']['nova_object.data'], '1.1') self.assertEqual('1.1', primitive['rel_object']['nova_object.version']) with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: primitive = copy.deepcopy(orig_primitive) obj._obj_make_obj_compatible(primitive, '1.5', 'rel_object') mock_compat.assert_called_once_with( primitive['rel_object']['nova_object.data'], '1.1') self.assertEqual('1.1', primitive['rel_object']['nova_object.version']) with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: primitive = copy.deepcopy(orig_primitive) obj._obj_make_obj_compatible(primitive, '1.4', 'rel_object') self.assertFalse(mock_compat.called) self.assertNotIn('rel_object', primitive) def test_obj_make_compatible_hits_sub_objects(self): subobj = MyOwnedObject(baz=1) obj = MyObj(foo=123, rel_object=subobj) obj.obj_relationships = {'rel_object': [('1.0', '1.0')]} with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat: obj.obj_make_compatible({'rel_object': 'foo'}, '1.10') mock_compat.assert_called_once_with({'rel_object': 'foo'}, '1.10', 'rel_object') def test_obj_make_compatible_skips_unset_sub_objects(self): obj = MyObj(foo=123) obj.obj_relationships = {'rel_object': [('1.0', '1.0')]} with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat: obj.obj_make_compatible({'rel_object': 'foo'}, '1.10') self.assertFalse(mock_compat.called) def test_obj_make_compatible_doesnt_skip_falsey_sub_objects(self): @base.NovaObjectRegistry.register_if(False) class MyList(base.ObjectListBase, 
base.NovaObject): VERSION = '1.2' fields = {'objects': fields.ListOfObjectsField('MyObjElement')} obj_relationships = { 'objects': [('1.1', '1.1'), ('1.2', '1.2')], } mylist = MyList(objects=[]) @base.NovaObjectRegistry.register_if(False) class MyOwner(base.NovaObject): VERSION = '1.2' fields = {'mylist': fields.ObjectField('MyList')} obj_relationships = { 'mylist': [('1.1', '1.1')], } myowner = MyOwner(mylist=mylist) primitive = myowner.obj_to_primitive('1.1') self.assertIn('mylist', primitive['nova_object.data']) def test_obj_make_compatible_handles_list_of_objects(self): subobj = MyOwnedObject(baz=1) obj = MyObj(rel_objects=[subobj]) obj.obj_relationships = {'rel_objects': [('1.0', '1.123')]} def fake_make_compat(primitive, version): self.assertEqual('1.123', version) self.assertIn('baz', primitive) with mock.patch.object(subobj, 'obj_make_compatible') as mock_mc: mock_mc.side_effect = fake_make_compat obj.obj_to_primitive('1.0') self.assertTrue(mock_mc.called) def test_delattr(self): obj = MyObj(bar='foo') del obj.bar # Should appear unset now self.assertFalse(obj.obj_attr_is_set('bar')) # Make sure post-delete, references trigger lazy loads self.assertEqual('loaded!', getattr(obj, 'bar')) def test_delattr_unset(self): obj = MyObj() self.assertRaises(AttributeError, delattr, obj, 'bar') class TestObject(_LocalTest, _TestObject): def test_set_defaults(self): obj = MyObj() obj.obj_set_defaults('foo') self.assertTrue(obj.obj_attr_is_set('foo')) self.assertEqual(1, obj.foo) def test_set_defaults_no_default(self): obj = MyObj() self.assertRaises(ovo_exc.ObjectActionError, obj.obj_set_defaults, 'bar') def test_set_all_defaults(self): obj = MyObj() obj.obj_set_defaults() self.assertEqual(set(['deleted', 'foo', 'mutable_default']), obj.obj_what_changed()) self.assertEqual(1, obj.foo) def test_set_defaults_not_overwrite(self): # NOTE(danms): deleted defaults to False, so verify that it does # not get reset by obj_set_defaults() obj = MyObj(deleted=True) 
obj.obj_set_defaults() self.assertEqual(1, obj.foo) self.assertTrue(obj.deleted) class TestObjectSerializer(_BaseTestCase): def test_serialize_entity_primitive(self): ser = base.NovaObjectSerializer() for thing in (1, 'foo', [1, 2], {'foo': 'bar'}): self.assertEqual(thing, ser.serialize_entity(None, thing)) def test_deserialize_entity_primitive(self): ser = base.NovaObjectSerializer() for thing in (1, 'foo', [1, 2], {'foo': 'bar'}): self.assertEqual(thing, ser.deserialize_entity(None, thing)) def test_serialize_set_to_list(self): ser = base.NovaObjectSerializer() self.assertEqual([1, 2], ser.serialize_entity(None, set([1, 2]))) def _test_deserialize_entity_newer(self, obj_version, backported_to, my_version='1.6'): ser = base.NovaObjectSerializer() ser._conductor = mock.Mock() ser._conductor.object_backport_versions.return_value = 'backported' class MyTestObj(MyObj): VERSION = my_version base.NovaObjectRegistry.register(MyTestObj) obj = MyTestObj() obj.VERSION = obj_version primitive = obj.obj_to_primitive() result = ser.deserialize_entity(self.context, primitive) if backported_to is None: self.assertFalse(ser._conductor.object_backport_versions.called) else: self.assertEqual('backported', result) versions = ovo_base.obj_tree_get_versions('MyTestObj') ser._conductor.object_backport_versions.assert_called_with( self.context, primitive, versions) def test_deserialize_entity_newer_version_backports(self): self._test_deserialize_entity_newer('1.25', '1.6') def test_deserialize_entity_newer_revision_does_not_backport_zero(self): self._test_deserialize_entity_newer('1.6.0', None) def test_deserialize_entity_newer_revision_does_not_backport(self): self._test_deserialize_entity_newer('1.6.1', None) def test_deserialize_entity_newer_version_passes_revision(self): self._test_deserialize_entity_newer('1.7', '1.6.1', '1.6.1') def test_deserialize_dot_z_with_extra_stuff(self): primitive = {'nova_object.name': 'MyObj', 'nova_object.namespace': 'nova', 'nova_object.version': 
'1.6.1', 'nova_object.data': { 'foo': 1, 'unexpected_thing': 'foobar'}} ser = base.NovaObjectSerializer() obj = ser.deserialize_entity(self.context, primitive) self.assertEqual(1, obj.foo) self.assertFalse(hasattr(obj, 'unexpected_thing')) # NOTE(danms): The serializer is where the logic lives that # avoids backports for cases where only a .z difference in # the received object version is detected. As a result, we # end up with a version of what we expected, effectively the # .0 of the object. self.assertEqual('1.6', obj.VERSION) @mock.patch('oslo_versionedobjects.base.obj_tree_get_versions') def test_object_tree_backport(self, mock_get_versions): # Test the full client backport path all the way from the serializer # to the conductor and back. self.start_service('conductor', manager='nova.conductor.manager.ConductorManager') # NOTE(danms): Actually register a complex set of objects, # two versions of the same parent object which contain a # child sub object. @base.NovaObjectRegistry.register class Child(base.NovaObject): VERSION = '1.10' @base.NovaObjectRegistry.register class Parent(base.NovaObject): VERSION = '1.0' fields = { 'child': fields.ObjectField('Child'), } @base.NovaObjectRegistry.register # noqa class Parent(base.NovaObject): VERSION = '1.1' fields = { 'child': fields.ObjectField('Child'), } # NOTE(danms): Since we're on the same node as conductor, # return a fake version manifest so that we confirm that it # actually honors what the client asked for and not just what # it sees in the local machine state. mock_get_versions.return_value = { 'Parent': '1.0', 'Child': '1.5', } call_context = {} real_ofp = base.NovaObject.obj_from_primitive def fake_obj_from_primitive(*a, **k): # NOTE(danms): We need the first call to this to report an # incompatible object version, but subsequent calls must # succeed. 
Since we're testing the backport path all the # way through conductor and RPC, we can't fully break this # method, we just need it to fail once to trigger the # backport. if 'run' in call_context: return real_ofp(*a, **k) else: call_context['run'] = True raise ovo_exc.IncompatibleObjectVersion('foo') child = Child() parent = Parent(child=child) prim = parent.obj_to_primitive() ser = base.NovaObjectSerializer() with mock.patch('nova.objects.base.NovaObject.' 'obj_from_primitive') as mock_ofp: mock_ofp.side_effect = fake_obj_from_primitive result = ser.deserialize_entity(self.context, prim) # Our newest version (and what we passed back) of Parent # is 1.1, make sure that the manifest version is honored self.assertEqual('1.0', result.VERSION) # Our newest version (and what we passed back) of Child # is 1.10, make sure that the manifest version is honored self.assertEqual('1.5', result.child.VERSION) def test_object_serialization(self): ser = base.NovaObjectSerializer() obj = MyObj() primitive = ser.serialize_entity(self.context, obj) self.assertIn('nova_object.name', primitive) obj2 = ser.deserialize_entity(self.context, primitive) self.assertIsInstance(obj2, MyObj) self.assertEqual(self.context, obj2._context) def test_object_serialization_iterables(self): ser = base.NovaObjectSerializer() obj = MyObj() for iterable in (list, tuple, set): thing = iterable([obj]) primitive = ser.serialize_entity(self.context, thing) self.assertEqual(1, len(primitive)) for item in primitive: self.assertNotIsInstance(item, base.NovaObject) thing2 = ser.deserialize_entity(self.context, primitive) self.assertEqual(1, len(thing2)) for item in thing2: self.assertIsInstance(item, MyObj) # dict case thing = {'key': obj} primitive = ser.serialize_entity(self.context, thing) self.assertEqual(1, len(primitive)) for item in six.itervalues(primitive): self.assertNotIsInstance(item, base.NovaObject) thing2 = ser.deserialize_entity(self.context, primitive) self.assertEqual(1, len(thing2)) for item 
class TestArgsSerializer(test.NoDBTestCase):
    """Tests for the @base.serialize_args decorator.

    The decorator must convert datetime arguments (both positional and
    keyword) to their string form before the wrapped method runs, while
    leaving non-datetime values untouched.
    """

    def setUp(self):
        super(TestArgsSerializer, self).setUp()
        # A reference time plus its expected serialized string form.
        self.now = timeutils.utcnow()
        self.str_now = utils.strtime(self.now)
        # Non-ASCII payload to prove unicode values pass through intact.
        self.unicode_str = u'\xF0\x9F\x92\xA9'

    @base.serialize_args
    def _test_serialize_args(self, *args, **kwargs):
        # By the time we get here, serialize_args has already rewritten
        # the datetime arguments; everything else must be unchanged.
        expected_args = ('untouched', self.str_now, self.str_now)
        for index, val in enumerate(args):
            self.assertEqual(expected_args[index], val)
        expected_kwargs = {'a': 'untouched', 'b': self.str_now,
                           'c': self.str_now,
                           'exc_val': self.unicode_str}
        for key, val in six.iteritems(kwargs):
            self.assertEqual(expected_kwargs[key], val)

    def test_serialize_args(self):
        # Mix datetimes with plain values in both positional and
        # keyword positions.
        self._test_serialize_args('untouched', self.now, self.now,
                                  a='untouched', b=self.now, c=self.now,
                                  exc_val=self.unicode_str)
in the affected # objects object_data = { 'Agent': '1.0-c0c092abaceb6f51efe5d82175f15eba', 'AgentList': '1.0-5a7380d02c3aaf2a32fc8115ae7ca98c', 'Aggregate': '1.2-fe9d8c93feb37919753e9e44fe6818a7', 'AggregateList': '1.2-fb6e19f3c3a3186b04eceb98b5dadbfa', 'BandwidthUsage': '1.2-c6e4c779c7f40f2407e3d70022e3cd1c', 'BandwidthUsageList': '1.2-5fe7475ada6fe62413cbfcc06ec70746', 'BlockDeviceMapping': '1.16-f0c172e902bc62f1cac05b17d7be7688', 'BlockDeviceMappingList': '1.17-1e568eecb91d06d4112db9fd656de235', 'BuildRequest': '1.0-e4ca475cabb07f73d8176f661afe8c55', 'CellMapping': '1.0-7f1a7e85a22bbb7559fc730ab658b9bd', 'ComputeNode': '1.16-2436e5b836fa0306a3c4e6d9e5ddacec', 'ComputeNodeList': '1.14-3b6f4f5ade621c40e70cb116db237844', 'DNSDomain': '1.0-7b0b2dab778454b6a7b6c66afe163a1a', 'DNSDomainList': '1.0-4ee0d9efdfd681fed822da88376e04d2', 'EC2Ids': '1.0-474ee1094c7ec16f8ce657595d8c49d9', 'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f', 'EC2SnapshotMapping': '1.0-47e7ddabe1af966dce0cfd0ed6cd7cd1', 'EC2VolumeMapping': '1.0-5b713751d6f97bad620f3378a521020d', 'EventType': '1.0-21dc35de314fc5fc0a7965211c0c00f7', 'FixedIP': '1.14-53e1c10b539f1a82fe83b1af4720efae', 'FixedIPList': '1.14-87a39361c8f08f059004d6b15103cdfd', 'Flavor': '1.1-b6bb7a730a79d720344accefafacf7ee', 'FlavorList': '1.1-52b5928600e7ca973aa4fc1e46f3934c', 'FloatingIP': '1.10-52a67d52d85eb8b3f324a5b7935a335b', 'FloatingIPList': '1.11-7f2ba670714e1b7bab462ab3290f7159', 'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac', 'HyperVLiveMigrateData': '1.0-0b868dd6228a09c3f3e47016dddf6a1c', 'HVSpec': '1.2-db672e73304da86139086d003f3977e7', 'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d', 'ImageMetaProps': '1.12-6a132dee47931447bf86c03c7006d96c', 'Instance': '2.1-416fdd0dfc33dfa12ff2cfdd8cc32e17', 'InstanceAction': '1.1-f9f293e526b66fca0d05c3b3a2d13914', 'InstanceActionEvent': '1.1-e56a64fa4710e43ef7af2ad9d6028b33', 'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be', 'InstanceActionList': 
'1.0-4a53826625cc280e15fae64a575e0879', 'InstanceExternalEvent': '1.1-6e446ceaae5f475ead255946dd443417', 'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38', 'InstanceFaultList': '1.1-f8ec07cbe3b60f5f07a8b7a06311ac0d', 'InstanceGroup': '1.10-1a0c8c7447dc7ecb9da53849430c4a5f', 'InstanceGroupList': '1.7-be18078220513316abd0ae1b2d916873', 'InstanceInfoCache': '1.5-cd8b96fefe0fc8d4d337243ba0bf0e1e', 'InstanceList': '2.0-6c8ba6147cca3082b1e4643f795068bf', 'InstanceMapping': '1.0-94bff38981ef9ce37c9fccf309b94f58', 'InstanceMappingList': '1.0-9e982e3de1613b9ada85e35f69b23d47', 'InstanceNUMACell': '1.3-6991a20992c5faa57fae71a45b40241b', 'InstanceNUMATopology': '1.2-d944a7d6c21e1c773ffdf09c6d025954', 'InstancePCIRequest': '1.1-b1d75ebc716cb12906d9d513890092bf', 'InstancePCIRequests': '1.1-65e38083177726d806684cb1cc0136d2', 'Inventory': '1.0-f4160797d47a533a58700e9ddcc9c5e2', 'InventoryList': '1.0-de53f0fd078c27cc1d43400f4e8bcef8', 'LibvirtLiveMigrateBDMInfo': '1.0-252aabb723ca79d5469fa56f64b57811', 'LibvirtLiveMigrateData': '1.1-4ecf40aae7fee7bb37fc3b2123e760de', 'KeyPair': '1.3-bfaa2a8b148cdf11e0c72435d9dd097a', 'KeyPairList': '1.2-58b94f96e776bedaf1e192ddb2a24c4e', 'Migration': '1.4-17979b9f2ae7f28d97043a220b2a8350', 'MigrationContext': '1.0-d8c2f10069e410f639c49082b5932c92', 'MigrationList': '1.3-55595bfc1a299a5962614d0821a3567e', 'MonitorMetric': '1.1-53b1db7c4ae2c531db79761e7acc52ba', 'MonitorMetricList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'NotificationPublisher': '1.0-bbbc1402fb0e443a3eb227cc52b61545', 'NUMACell': '1.2-74fc993ac5c83005e76e34e8487f1c05', 'NUMAPagesTopology': '1.0-c71d86317283266dc8364c149155e48e', 'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220', 'NUMATopologyLimits': '1.0-9463e0edd40f64765ae518a539b9dfd2', 'Network': '1.2-a977ab383aa462a479b2fae8211a5dde', 'NetworkList': '1.2-69eca910d8fa035dfecd8ba10877ee59', 'NetworkRequest': '1.1-7a3e4ca2ce1e7b62d8400488f2f2b756', 'NetworkRequestList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 
'PciDevice': '1.5-0d5abe5c91645b8469eb2a93fc53f932', 'PciDeviceList': '1.3-52ff14355491c8c580bdc0ba34c26210', 'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000', 'PciDevicePoolList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'Quotas': '1.2-1fe4cd50593aaf5d36a6dc5ab3f98fb3', 'QuotasNoOp': '1.2-e041ddeb7dc8188ca71706f78aad41c1', 'RequestSpec': '1.5-576a249869c161e17b7cd6d55f9d85f3', 'ResourceProvider': '1.0-57a9a344b0faed9cf6d6811835b6deb6', 'S3ImageMapping': '1.0-7dd7366a890d82660ed121de9092276e', 'SchedulerLimits': '1.0-249c4bd8e62a9b327b7026b7f19cc641', 'SchedulerRetries': '1.1-3c9c8b16143ebbb6ad7030e999d14cc0', 'SecurityGroup': '1.1-0e1b9ba42fe85c13c1437f8b74bdb976', 'SecurityGroupList': '1.0-dc8bbea01ba09a2edb6e5233eae85cbc', 'SecurityGroupRule': '1.1-ae1da17b79970012e8536f88cb3c6b29', 'SecurityGroupRuleList': '1.2-0005c47fcd0fb78dd6d7fd32a1409f5b', 'Service': '1.19-8914320cbeb4ec29f252d72ce55d07e1', 'ServiceList': '1.18-6c52cb616621c1af2415dcc11faf5c1a', 'ServiceStatusNotification': '1.0-a73147b93b520ff0061865849d3dfa56', 'ServiceStatusPayload': '1.0-a5e7b4fd6cc5581be45b31ff1f3a3f7f', 'TaskLog': '1.0-78b0534366f29aa3eebb01860fbe18fe', 'TaskLogList': '1.0-cc8cce1af8a283b9d28b55fcd682e777', 'Tag': '1.1-8b8d7d5b48887651a0e01241672e2963', 'TagList': '1.1-55231bdb671ecf7641d6a2e9109b5d8e', 'VirtCPUFeature': '1.0-3310718d8c72309259a6e39bdefe83ee', 'VirtCPUModel': '1.0-6a5cc9f322729fc70ddc6733bacd57d3', 'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563', 'VirtualInterface': '1.0-19921e38cba320f355d56ecbf8f29587', 'VirtualInterfaceList': '1.0-9750e2074437b3077e46359102779fc6', 'VolumeUsage': '1.0-6c8190c46ce1469bb3286a1f21c2e475', 'XenapiLiveMigrateData': '1.0-5f982bec68f066e194cd9ce53a24ac4c', } class TestObjectVersions(test.NoDBTestCase): def test_versions(self): checker = fixture.ObjectVersionChecker( base.NovaObjectRegistry.obj_classes()) fingerprints = checker.get_hashes(extra_data_func=get_extra_data) if os.getenv('GENERATE_HASHES'): 
open('object_hashes.txt', 'w').write( pprint.pformat(fingerprints)) raise test.TestingException( 'Generated hashes in object_hashes.txt') expected, actual = checker.test_hashes(object_data) self.assertEqual(expected, actual, 'Some objects have changed; please make sure the ' 'versions have been bumped, and then update their ' 'hashes here.') def test_notification_payload_version_depends_on_the_schema(self): @base.NovaObjectRegistry.register_if(False) class TestNotificationPayload(notification.NotificationPayloadBase): VERSION = '1.0' SCHEMA = { 'field_1': ('source_field', 'field_1'), 'field_2': ('source_field', 'field_2'), } fields = { 'extra_field': fields.StringField(), # filled by ctor 'field_1': fields.StringField(), # filled by the schema 'field_2': fields.IntegerField(), # filled by the schema } checker = fixture.ObjectVersionChecker( {'TestNotificationPayload': (TestNotificationPayload,)}) old_hash = checker.get_hashes(extra_data_func=get_extra_data) TestNotificationPayload.SCHEMA['field_3'] = ('source_field', 'field_3') new_hash = checker.get_hashes(extra_data_func=get_extra_data) self.assertNotEqual(old_hash, new_hash) def test_obj_make_compatible(self): # Iterate all object classes and verify that we can run # obj_make_compatible with every older version than current. # This doesn't actually test the data conversions, but it at least # makes sure the method doesn't blow up on something basic like # expecting the wrong version format. 
obj_classes = base.NovaObjectRegistry.obj_classes() for obj_name in obj_classes: versions = ovo_base.obj_tree_get_versions(obj_name) obj_class = obj_classes[obj_name][0] version = versionutils.convert_version_to_tuple(obj_class.VERSION) for n in range(version[1]): test_version = '%d.%d' % (version[0], n) LOG.info('testing obj: %s version: %s' % (obj_name, test_version)) obj_class().obj_to_primitive(target_version=test_version, version_manifest=versions) def test_list_obj_make_compatible(self): @base.NovaObjectRegistry.register_if(False) class TestObj(base.NovaObject): VERSION = '1.4' fields = {'foo': fields.IntegerField()} @base.NovaObjectRegistry.register_if(False) class TestListObj(base.ObjectListBase, base.NovaObject): VERSION = '1.5' fields = {'objects': fields.ListOfObjectsField('TestObj')} obj_relationships = { 'objects': [('1.0', '1.1'), ('1.1', '1.2'), ('1.3', '1.3'), ('1.5', '1.4')] } my_list = TestListObj() my_obj = TestObj(foo=1) my_list.objects = [my_obj] primitive = my_list.obj_to_primitive(target_version='1.5') primitive_data = primitive['nova_object.data'] obj_primitive = my_obj.obj_to_primitive(target_version='1.4') obj_primitive_data = obj_primitive['nova_object.data'] with mock.patch.object(TestObj, 'obj_make_compatible') as comp: my_list.obj_make_compatible(primitive_data, '1.1') comp.assert_called_with(obj_primitive_data, '1.2') def test_list_obj_make_compatible_when_no_objects(self): # Test to make sure obj_make_compatible works with no 'objects' # If a List object ever has a version that did not contain the # 'objects' key, we need to make sure converting back to that version # doesn't cause backporting problems. 
@base.NovaObjectRegistry.register_if(False) class TestObj(base.NovaObject): VERSION = '1.1' fields = {'foo': fields.IntegerField()} @base.NovaObjectRegistry.register_if(False) class TestListObj(base.ObjectListBase, base.NovaObject): VERSION = '1.1' fields = {'objects': fields.ListOfObjectsField('TestObj')} # pretend that version 1.0 didn't have 'objects' obj_relationships = { 'objects': [('1.1', '1.1')] } my_list = TestListObj() my_list.objects = [TestObj(foo=1)] primitive = my_list.obj_to_primitive(target_version='1.1') primitive_data = primitive['nova_object.data'] my_list.obj_make_compatible(primitive_data, target_version='1.0') self.assertNotIn('objects', primitive_data, "List was backported to before 'objects' existed." " 'objects' should not be in the primitive.") class TestObjEqualPrims(_BaseTestCase): def test_object_equal(self): obj1 = MyObj(foo=1, bar='goodbye') obj1.obj_reset_changes() obj2 = MyObj(foo=1, bar='goodbye') obj2.obj_reset_changes() obj2.bar = 'goodbye' # obj2 will be marked with field 'three' updated self.assertTrue(base.obj_equal_prims(obj1, obj2), "Objects that differ only because one a is marked " "as updated should be equal") def test_object_not_equal(self): obj1 = MyObj(foo=1, bar='goodbye') obj1.obj_reset_changes() obj2 = MyObj(foo=1, bar='hello') obj2.obj_reset_changes() self.assertFalse(base.obj_equal_prims(obj1, obj2), "Objects that differ in any field " "should not be equal") def test_object_ignore_equal(self): obj1 = MyObj(foo=1, bar='goodbye') obj1.obj_reset_changes() obj2 = MyObj(foo=1, bar='hello') obj2.obj_reset_changes() self.assertTrue(base.obj_equal_prims(obj1, obj2, ['bar']), "Objects that only differ in an ignored field " "should be equal") class TestObjMethodOverrides(test.NoDBTestCase): def test_obj_reset_changes(self): args = inspect.getargspec(base.NovaObject.obj_reset_changes) obj_classes = base.NovaObjectRegistry.obj_classes() for obj_name in obj_classes: obj_class = obj_classes[obj_name][0] self.assertEqual(args, 
inspect.getargspec(obj_class.obj_reset_changes)) def get_extra_data(obj_class): extra_data = tuple() # Get the SCHEMA items to add to the fingerprint # if we are looking at a notification if issubclass(obj_class, notification.NotificationPayloadBase): schema_data = collections.OrderedDict( sorted(obj_class.SCHEMA.items())) extra_data += (schema_data,) return extra_data nova-13.0.0/nova/tests/unit/objects/test_compute_node.py0000664000567000056710000005755012701407773024532 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock import netaddr from oslo_serialization import jsonutils from oslo_utils import timeutils from oslo_versionedobjects import base as ovo_base from oslo_versionedobjects import exception as ovo_exc from nova import db from nova import exception from nova import objects from nova.objects import base from nova.objects import compute_node from nova.objects import hv_spec from nova.objects import service from nova.tests.unit import fake_pci_device_pools from nova.tests.unit.objects import test_objects from nova.tests import uuidsentinel NOW = timeutils.utcnow().replace(microsecond=0) fake_stats = {'num_foo': '10'} fake_stats_db_format = jsonutils.dumps(fake_stats) # host_ip is coerced from a string to an IPAddress # but needs to be converted to a string for the database format fake_host_ip = '127.0.0.1' fake_numa_topology = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], pinned_cpus=set([]), siblings=[]), objects.NUMACell(id=1, cpuset=set([3, 4]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], pinned_cpus=set([]), siblings=[])]) fake_numa_topology_db_format = fake_numa_topology._to_json() fake_supported_instances = [('x86_64', 'kvm', 'hvm')] fake_hv_spec = hv_spec.HVSpec(arch=fake_supported_instances[0][0], hv_type=fake_supported_instances[0][1], vm_mode=fake_supported_instances[0][2]) fake_supported_hv_specs = [fake_hv_spec] # for backward compatibility, each supported instance object # is stored as a list in the database fake_supported_hv_specs_db_format = jsonutils.dumps([fake_hv_spec.to_list()]) fake_pci = jsonutils.dumps(fake_pci_device_pools.fake_pool_list_primitive) fake_compute_node = { 'created_at': NOW, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 123, 'uuid': uuidsentinel.fake_compute_node, 'service_id': None, 'host': 'fake', 'vcpus': 4, 'memory_mb': 4096, 'local_gb': 1024, 'vcpus_used': 2, 'memory_mb_used': 2048, 'local_gb_used': 512, 
'hypervisor_type': 'Hyper-Dan-VM-ware', 'hypervisor_version': 1001, 'hypervisor_hostname': 'vm.danplanet.com', 'free_ram_mb': 1024, 'free_disk_gb': 256, 'current_workload': 100, 'running_vms': 2013, 'cpu_info': 'Schmintel i786', 'disk_available_least': 256, 'metrics': '', 'stats': fake_stats_db_format, 'host_ip': fake_host_ip, 'numa_topology': fake_numa_topology_db_format, 'supported_instances': fake_supported_hv_specs_db_format, 'pci_stats': fake_pci, 'cpu_allocation_ratio': 16.0, 'ram_allocation_ratio': 1.5, 'disk_allocation_ratio': 1.0, } # FIXME(sbauza) : For compatibility checking, to be removed once we are sure # that all computes are running latest DB version with host field in it. fake_old_compute_node = fake_compute_node.copy() del fake_old_compute_node['host'] # resources are passed from the virt drivers and copied into the compute_node fake_resources = { 'vcpus': 2, 'memory_mb': 1024, 'local_gb': 10, 'cpu_info': 'fake-info', 'vcpus_used': 1, 'memory_mb_used': 512, 'local_gb_used': 4, 'numa_topology': fake_numa_topology_db_format, 'hypervisor_type': 'fake-type', 'hypervisor_version': 1, 'hypervisor_hostname': 'fake-host', 'disk_available_least': 256, 'host_ip': fake_host_ip, 'supported_instances': fake_supported_instances } fake_compute_with_resources = objects.ComputeNode( vcpus=fake_resources['vcpus'], memory_mb=fake_resources['memory_mb'], local_gb=fake_resources['local_gb'], cpu_info=fake_resources['cpu_info'], vcpus_used=fake_resources['vcpus_used'], memory_mb_used=fake_resources['memory_mb_used'], local_gb_used =fake_resources['local_gb_used'], numa_topology=fake_resources['numa_topology'], hypervisor_type=fake_resources['hypervisor_type'], hypervisor_version=fake_resources['hypervisor_version'], hypervisor_hostname=fake_resources['hypervisor_hostname'], disk_available_least=fake_resources['disk_available_least'], host_ip=netaddr.IPAddress(fake_resources['host_ip']), supported_hv_specs=fake_supported_hv_specs, ) class _TestComputeNodeObject(object): 
def supported_hv_specs_comparator(self, expected, obj_val): obj_val = [inst.to_list() for inst in obj_val] self.assertJsonEqual(expected, obj_val) def pci_device_pools_comparator(self, expected, obj_val): if obj_val is not None: obj_val = obj_val.obj_to_primitive() self.assertJsonEqual(expected, obj_val) else: self.assertEqual(expected, obj_val) def comparators(self): return {'stats': self.assertJsonEqual, 'host_ip': self.str_comparator, 'supported_hv_specs': self.supported_hv_specs_comparator, 'pci_device_pools': self.pci_device_pools_comparator, } def subs(self): return {'supported_hv_specs': 'supported_instances', 'pci_device_pools': 'pci_stats'} def test_get_by_id(self): self.mox.StubOutWithMock(db, 'compute_node_get') db.compute_node_get(self.context, 123).AndReturn(fake_compute_node) self.mox.ReplayAll() compute = compute_node.ComputeNode.get_by_id(self.context, 123) self.compare_obj(compute, fake_compute_node, subs=self.subs(), comparators=self.comparators()) self.assertNotIn('uuid', compute.obj_what_changed()) @mock.patch.object(objects.Service, 'get_by_id') @mock.patch.object(db, 'compute_node_get') def test_get_by_id_with_host_field_not_in_db(self, mock_cn_get, mock_obj_svc_get): fake_compute_node_with_svc_id = fake_compute_node.copy() fake_compute_node_with_svc_id['service_id'] = 123 fake_compute_node_with_no_host = fake_compute_node_with_svc_id.copy() host = fake_compute_node_with_no_host.pop('host') fake_service = service.Service(id=123) fake_service.host = host mock_cn_get.return_value = fake_compute_node_with_no_host mock_obj_svc_get.return_value = fake_service compute = compute_node.ComputeNode.get_by_id(self.context, 123) self.compare_obj(compute, fake_compute_node_with_svc_id, subs=self.subs(), comparators=self.comparators()) def test_get_by_service_id(self): self.mox.StubOutWithMock(db, 'compute_nodes_get_by_service_id') db.compute_nodes_get_by_service_id(self.context, 456).AndReturn( [fake_compute_node]) self.mox.ReplayAll() compute = 
compute_node.ComputeNode.get_by_service_id(self.context, 456) self.compare_obj(compute, fake_compute_node, subs=self.subs(), comparators=self.comparators()) @mock.patch.object(db, 'compute_node_get_by_host_and_nodename') def test_get_by_host_and_nodename(self, cn_get_by_h_and_n): cn_get_by_h_and_n.return_value = fake_compute_node compute = compute_node.ComputeNode.get_by_host_and_nodename( self.context, 'fake', 'vm.danplanet.com') self.compare_obj(compute, fake_compute_node, subs=self.subs(), comparators=self.comparators()) @mock.patch('nova.db.compute_node_get_all_by_host') def test_get_first_node_by_host_for_old_compat( self, cn_get_all_by_host): another_node = fake_compute_node.copy() another_node['hypervisor_hostname'] = 'neverland' cn_get_all_by_host.return_value = [fake_compute_node, another_node] compute = ( compute_node.ComputeNode.get_first_node_by_host_for_old_compat( self.context, 'fake') ) self.compare_obj(compute, fake_compute_node, subs=self.subs(), comparators=self.comparators()) @mock.patch('nova.objects.ComputeNodeList.get_all_by_host') def test_get_first_node_by_host_for_old_compat_not_found( self, cn_get_all_by_host): cn_get_all_by_host.side_effect = exception.ComputeHostNotFound( host='fake') self.assertRaises( exception.ComputeHostNotFound, compute_node.ComputeNode.get_first_node_by_host_for_old_compat, self.context, 'fake') def test_create(self): self.mox.StubOutWithMock(db, 'compute_node_create') db.compute_node_create( self.context, { 'service_id': 456, 'stats': fake_stats_db_format, 'host_ip': fake_host_ip, 'supported_instances': fake_supported_hv_specs_db_format, 'uuid': uuidsentinel.fake_compute_node, }).AndReturn(fake_compute_node) self.mox.ReplayAll() compute = compute_node.ComputeNode(context=self.context) compute.service_id = 456 compute.uuid = uuidsentinel.fake_compute_node compute.stats = fake_stats # NOTE (pmurray): host_ip is coerced to an IPAddress compute.host_ip = fake_host_ip compute.supported_hv_specs = 
fake_supported_hv_specs with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_gu: compute.create() self.assertFalse(mock_gu.called) self.compare_obj(compute, fake_compute_node, subs=self.subs(), comparators=self.comparators()) @mock.patch('nova.db.compute_node_create') @mock.patch('oslo_utils.uuidutils.generate_uuid') def test_create_allocates_uuid(self, mock_gu, mock_create): mock_create.return_value = fake_compute_node mock_gu.return_value = fake_compute_node['uuid'] obj = objects.ComputeNode(context=self.context) obj.create() mock_gu.assert_called_once_with() mock_create.assert_called_once_with( self.context, {'uuid': fake_compute_node['uuid']}) def test_recreate_fails(self): self.mox.StubOutWithMock(db, 'compute_node_create') db.compute_node_create( self.context, {'service_id': 456, 'uuid': uuidsentinel.fake_compute_node}).AndReturn( fake_compute_node) self.mox.ReplayAll() compute = compute_node.ComputeNode(context=self.context) compute.service_id = 456 compute.uuid = uuidsentinel.fake_compute_node compute.create() self.assertRaises(exception.ObjectActionError, compute.create) def test_save(self): self.mox.StubOutWithMock(db, 'compute_node_update') db.compute_node_update( self.context, 123, { 'vcpus_used': 3, 'stats': fake_stats_db_format, 'host_ip': fake_host_ip, 'supported_instances': fake_supported_hv_specs_db_format, 'uuid': uuidsentinel.fake_compute_node, }).AndReturn(fake_compute_node) self.mox.ReplayAll() compute = compute_node.ComputeNode(context=self.context) compute.id = 123 compute.vcpus_used = 3 compute.stats = fake_stats compute.uuid = uuidsentinel.fake_compute_node # NOTE (pmurray): host_ip is coerced to an IPAddress compute.host_ip = fake_host_ip compute.supported_hv_specs = fake_supported_hv_specs compute.save() self.compare_obj(compute, fake_compute_node, subs=self.subs(), comparators=self.comparators()) def test_query_allocates_uuid(self): fake = dict(fake_compute_node) fake.pop('uuid') db.compute_node_create(self.context, fake) with 
mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_gu: mock_gu.return_value = uuidsentinel.fake_compute_node obj = objects.ComputeNode.get_by_id(self.context, fake['id']) mock_gu.assert_called_once_with() self.assertEqual(uuidsentinel.fake_compute_node, obj.uuid) self.assertNotIn('uuid', obj.obj_get_changes()) with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_gu: obj = objects.ComputeNode.get_by_id(self.context, fake['id']) self.assertEqual(uuidsentinel.fake_compute_node, obj.uuid) self.assertFalse(mock_gu.called) def test_save_pci_device_pools_empty(self): fake_pci = jsonutils.dumps( objects.PciDevicePoolList(objects=[]).obj_to_primitive()) compute_dict = fake_compute_node.copy() compute_dict['pci_stats'] = fake_pci with mock.patch.object( db, 'compute_node_update', return_value=compute_dict) as mock_compute_node_update: compute = compute_node.ComputeNode(context=self.context) compute.id = 123 compute.pci_device_pools = objects.PciDevicePoolList(objects=[]) compute.save() self.compare_obj(compute, compute_dict, subs=self.subs(), comparators=self.comparators()) mock_compute_node_update.assert_called_once_with( self.context, 123, {'pci_stats': fake_pci}) def test_save_pci_device_pools_null(self): compute_dict = fake_compute_node.copy() compute_dict['pci_stats'] = None with mock.patch.object( db, 'compute_node_update', return_value=compute_dict) as mock_compute_node_update: compute = compute_node.ComputeNode(context=self.context) compute.id = 123 compute.pci_device_pools = None compute.save() self.compare_obj(compute, compute_dict, subs=self.subs(), comparators=self.comparators()) mock_compute_node_update.assert_called_once_with( self.context, 123, {'pci_stats': None}) @mock.patch.object(db, 'compute_node_create', return_value=fake_compute_node) def test_set_id_failure(self, db_mock): compute = compute_node.ComputeNode(context=self.context) compute.create() self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr, compute, 'id', 124) def 
test_destroy(self): self.mox.StubOutWithMock(db, 'compute_node_delete') db.compute_node_delete(self.context, 123) self.mox.ReplayAll() compute = compute_node.ComputeNode(context=self.context) compute.id = 123 compute.destroy() def test_get_all(self): self.mox.StubOutWithMock(db, 'compute_node_get_all') db.compute_node_get_all(self.context).AndReturn([fake_compute_node]) self.mox.ReplayAll() computes = compute_node.ComputeNodeList.get_all(self.context) self.assertEqual(1, len(computes)) self.compare_obj(computes[0], fake_compute_node, subs=self.subs(), comparators=self.comparators()) def test_get_by_hypervisor(self): self.mox.StubOutWithMock(db, 'compute_node_search_by_hypervisor') db.compute_node_search_by_hypervisor(self.context, 'hyper').AndReturn( [fake_compute_node]) self.mox.ReplayAll() computes = compute_node.ComputeNodeList.get_by_hypervisor(self.context, 'hyper') self.assertEqual(1, len(computes)) self.compare_obj(computes[0], fake_compute_node, subs=self.subs(), comparators=self.comparators()) @mock.patch('nova.db.compute_nodes_get_by_service_id') def test__get_by_service(self, cn_get_by_svc_id): cn_get_by_svc_id.return_value = [fake_compute_node] computes = compute_node.ComputeNodeList._get_by_service(self.context, 123) self.assertEqual(1, len(computes)) self.compare_obj(computes[0], fake_compute_node, subs=self.subs(), comparators=self.comparators()) @mock.patch('nova.db.compute_node_get_all_by_host') def test_get_all_by_host(self, cn_get_all_by_host): cn_get_all_by_host.return_value = [fake_compute_node] computes = compute_node.ComputeNodeList.get_all_by_host(self.context, 'fake') self.assertEqual(1, len(computes)) self.compare_obj(computes[0], fake_compute_node, subs=self.subs(), comparators=self.comparators()) def test_compat_numa_topology(self): compute = compute_node.ComputeNode() versions = ovo_base.obj_tree_get_versions('ComputeNode') primitive = compute.obj_to_primitive(target_version='1.4', version_manifest=versions) 
self.assertNotIn('numa_topology', primitive) def test_compat_supported_hv_specs(self): compute = compute_node.ComputeNode() compute.supported_hv_specs = fake_supported_hv_specs versions = ovo_base.obj_tree_get_versions('ComputeNode') primitive = compute.obj_to_primitive(target_version='1.5', version_manifest=versions) self.assertNotIn('supported_hv_specs', primitive) def test_compat_host(self): compute = compute_node.ComputeNode() primitive = compute.obj_to_primitive(target_version='1.6') self.assertNotIn('host', primitive) def test_compat_pci_device_pools(self): compute = compute_node.ComputeNode() compute.pci_device_pools = fake_pci_device_pools.fake_pool_list versions = ovo_base.obj_tree_get_versions('ComputeNode') primitive = compute.obj_to_primitive(target_version='1.8', version_manifest=versions) self.assertNotIn('pci_device_pools', primitive) @mock.patch('nova.objects.Service.get_by_compute_host') def test_compat_service_id(self, mock_get): mock_get.return_value = objects.Service(id=1) compute = objects.ComputeNode(host='fake-host', service_id=None) primitive = compute.obj_to_primitive(target_version='1.12') self.assertEqual(1, primitive['nova_object.data']['service_id']) @mock.patch('nova.objects.Service.get_by_compute_host') def test_compat_service_id_compute_host_not_found(self, mock_get): mock_get.side_effect = exception.ComputeHostNotFound(host='fake-host') compute = objects.ComputeNode(host='fake-host', service_id=None) primitive = compute.obj_to_primitive(target_version='1.12') self.assertEqual(-1, primitive['nova_object.data']['service_id']) def test_update_from_virt_driver(self): # copy in case the update has a side effect resources = copy.deepcopy(fake_resources) compute = compute_node.ComputeNode() compute.update_from_virt_driver(resources) expected = fake_compute_with_resources self.assertTrue(base.obj_equal_prims(expected, compute)) def test_update_from_virt_driver_missing_field(self): # NOTE(pmurray): update_from_virt_driver does not require # 
all fields to be present in resources. Validation of the # resources data structure would be done in a different method. resources = copy.deepcopy(fake_resources) del resources['vcpus'] compute = compute_node.ComputeNode() compute.update_from_virt_driver(resources) expected = fake_compute_with_resources.obj_clone() del expected.vcpus self.assertTrue(base.obj_equal_prims(expected, compute)) def test_update_from_virt_driver_extra_field(self): # copy in case the update has a side effect resources = copy.deepcopy(fake_resources) resources['extra_field'] = 'nonsense' compute = compute_node.ComputeNode() compute.update_from_virt_driver(resources) expected = fake_compute_with_resources self.assertTrue(base.obj_equal_prims(expected, compute)) def test_update_from_virt_driver_bad_value(self): # copy in case the update has a side effect resources = copy.deepcopy(fake_resources) resources['vcpus'] = 'nonsense' compute = compute_node.ComputeNode() self.assertRaises(ValueError, compute.update_from_virt_driver, resources) def test_compat_allocation_ratios(self): compute = compute_node.ComputeNode() primitive = compute.obj_to_primitive(target_version='1.13') self.assertNotIn('cpu_allocation_ratio', primitive) self.assertNotIn('ram_allocation_ratio', primitive) def test_compat_disk_allocation_ratio(self): compute = compute_node.ComputeNode() primitive = compute.obj_to_primitive(target_version='1.15') self.assertNotIn('disk_allocation_ratio', primitive) def test_compat_allocation_ratios_old_compute(self): self.flags(cpu_allocation_ratio=2.0, ram_allocation_ratio=3.0, disk_allocation_ratio=0.9) compute_dict = fake_compute_node.copy() # old computes don't provide allocation ratios to the table compute_dict['cpu_allocation_ratio'] = None compute_dict['ram_allocation_ratio'] = None compute_dict['disk_allocation_ratio'] = None cls = objects.ComputeNode compute = cls._from_db_object(self.context, cls(), compute_dict) self.assertEqual(2.0, compute.cpu_allocation_ratio) 
self.assertEqual(3.0, compute.ram_allocation_ratio) self.assertEqual(0.9, compute.disk_allocation_ratio) def test_compat_allocation_ratios_default_values(self): compute_dict = fake_compute_node.copy() # new computes provide allocation ratios defaulted to 0.0 compute_dict['cpu_allocation_ratio'] = 0.0 compute_dict['ram_allocation_ratio'] = 0.0 compute_dict['disk_allocation_ratio'] = 0.0 cls = objects.ComputeNode compute = cls._from_db_object(self.context, cls(), compute_dict) self.assertEqual(16.0, compute.cpu_allocation_ratio) self.assertEqual(1.5, compute.ram_allocation_ratio) self.assertEqual(1.0, compute.disk_allocation_ratio) def test_compat_allocation_ratios_old_compute_default_values(self): compute_dict = fake_compute_node.copy() # old computes don't provide allocation ratios to the table compute_dict['cpu_allocation_ratio'] = None compute_dict['ram_allocation_ratio'] = None compute_dict['disk_allocation_ratio'] = None cls = objects.ComputeNode compute = cls._from_db_object(self.context, cls(), compute_dict) self.assertEqual(16.0, compute.cpu_allocation_ratio) self.assertEqual(1.5, compute.ram_allocation_ratio) self.assertEqual(1.0, compute.disk_allocation_ratio) class TestComputeNodeObject(test_objects._LocalTest, _TestComputeNodeObject): pass class TestRemoteComputeNodeObject(test_objects._RemoteTest, _TestComputeNodeObject): pass nova-13.0.0/nova/tests/unit/objects/test_fields.py0000664000567000056710000012405412701410011023264 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 import netaddr from oslo_versionedobjects import exception as ovo_exc import six from nova.network import model as network_model from nova.objects import fields from nova import signature_utils from nova import test from nova import utils class FakeFieldType(fields.FieldType): def coerce(self, obj, attr, value): return '*%s*' % value def to_primitive(self, obj, attr, value): return '!%s!' % value def from_primitive(self, obj, attr, value): return value[1:-1] class FakeEnum(fields.Enum): FROG = "frog" PLATYPUS = "platypus" ALLIGATOR = "alligator" ALL = (FROG, PLATYPUS, ALLIGATOR) def __init__(self, **kwargs): super(FakeEnum, self).__init__(valid_values=FakeEnum.ALL, **kwargs) class FakeEnumAlt(fields.Enum): FROG = "frog" PLATYPUS = "platypus" AARDVARK = "aardvark" ALL = (FROG, PLATYPUS, AARDVARK) def __init__(self, **kwargs): super(FakeEnumAlt, self).__init__(valid_values=FakeEnumAlt.ALL, **kwargs) class FakeEnumField(fields.BaseEnumField): AUTO_TYPE = FakeEnum() class FakeEnumAltField(fields.BaseEnumField): AUTO_TYPE = FakeEnumAlt() class TestField(test.NoDBTestCase): def setUp(self): super(TestField, self).setUp() self.field = fields.Field(FakeFieldType()) self.coerce_good_values = [('foo', '*foo*')] self.coerce_bad_values = [] self.to_primitive_values = [('foo', '!foo!')] self.from_primitive_values = [('!foo!', 'foo')] def test_coerce_good_values(self): for in_val, out_val in self.coerce_good_values: self.assertEqual(out_val, self.field.coerce('obj', 'attr', in_val)) def test_coerce_bad_values(self): for in_val in self.coerce_bad_values: self.assertRaises((TypeError, ValueError), self.field.coerce, 'obj', 'attr', in_val) def test_to_primitive(self): for in_val, prim_val in self.to_primitive_values: self.assertEqual(prim_val, self.field.to_primitive('obj', 'attr', in_val)) def test_from_primitive(self): class 
ObjectLikeThing(object): _context = 'context' for prim_val, out_val in self.from_primitive_values: self.assertEqual(out_val, self.field.from_primitive( ObjectLikeThing, 'attr', prim_val)) def test_stringify(self): self.assertEqual('123', self.field.stringify(123)) class TestString(TestField): def setUp(self): super(TestString, self).setUp() self.field = fields.StringField() self.coerce_good_values = [('foo', 'foo'), (1, '1'), (True, 'True')] if six.PY2: self.coerce_good_values.append((int(1), '1')) self.coerce_bad_values = [None] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'123'", self.field.stringify(123)) class TestBaseEnum(TestField): def setUp(self): super(TestBaseEnum, self).setUp() self.field = FakeEnumField() self.coerce_good_values = [('frog', 'frog'), ('platypus', 'platypus'), ('alligator', 'alligator')] self.coerce_bad_values = ['aardvark', 'wookie'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'platypus'", self.field.stringify('platypus')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'aardvark') def test_fingerprint(self): # Notes(yjiang5): make sure changing valid_value will be detected # in test_objects.test_versions field1 = FakeEnumField() field2 = FakeEnumAltField() self.assertNotEqual(str(field1), str(field2)) class TestEnum(TestField): def setUp(self): super(TestEnum, self).setUp() self.field = fields.EnumField( valid_values=['foo', 'bar', 1, 1, True]) self.coerce_good_values = [('foo', 'foo'), (1, '1'), (True, 'True')] if six.PY2: self.coerce_good_values.append((int(1), '1')) self.coerce_bad_values = ['boo', 2, False] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'foo'", 
self.field.stringify('foo')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, '123') def test_fingerprint(self): # Notes(yjiang5): make sure changing valid_value will be detected # in test_objects.test_versions field1 = fields.EnumField(valid_values=['foo', 'bar']) field2 = fields.EnumField(valid_values=['foo', 'bar1']) self.assertNotEqual(str(field1), str(field2)) def test_without_valid_values(self): self.assertRaises(ovo_exc.EnumValidValuesInvalidError, fields.EnumField, 1) def test_with_empty_values(self): self.assertRaises(ovo_exc.EnumRequiresValidValuesError, fields.EnumField, []) class TestArchitecture(TestField): def setUp(self): super(TestArchitecture, self).setUp() self.field = fields.ArchitectureField() self.coerce_good_values = [('x86_64', 'x86_64'), ('amd64', 'x86_64'), ('I686', 'i686'), ('i386', 'i686')] self.coerce_bad_values = ['x86_99'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'aarch64'", self.field.stringify('aarch64')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'ppc42') class TestBlockDeviceDestinationType(TestField): def setUp(self): super(TestBlockDeviceDestinationType, self).setUp() self.field = fields.BlockDeviceDestinationTypeField() self.coerce_good_values = [('local', 'local'), ('volume', 'volume')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'volume'", self.field.stringify('volume')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestBlockDeviceSourceType(TestField): def setUp(self): super(TestBlockDeviceSourceType, self).setUp() self.field = fields.BlockDeviceSourceTypeField() self.coerce_good_values = [('blank', 'blank'), ('image', 'image'), 
('snapshot', 'snapshot'), ('volume', 'volume')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'image'", self.field.stringify('image')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestBlockDeviceType(TestField): def setUp(self): super(TestBlockDeviceType, self).setUp() self.field = fields.BlockDeviceTypeField() self.coerce_good_values = [('cdrom', 'cdrom'), ('disk', 'disk'), ('floppy', 'floppy'), ('fs', 'fs'), ('lun', 'lun')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'disk'", self.field.stringify('disk')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestCPUMode(TestField): def setUp(self): super(TestCPUMode, self).setUp() self.field = fields.CPUModeField() self.coerce_good_values = [('host-model', 'host-model'), ('host-passthrough', 'host-passthrough'), ('custom', 'custom')] self.coerce_bad_values = ['magic'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'custom'", self.field.stringify('custom')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'magic') class TestCPUMatch(TestField): def setUp(self): super(TestCPUMatch, self).setUp() self.field = fields.CPUMatchField() self.coerce_good_values = [('exact', 'exact'), ('strict', 'strict'), ('minimum', 'minimum')] self.coerce_bad_values = ['best'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'exact'", self.field.stringify('exact')) def 
test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'best') class TestCPUFeaturePolicy(TestField): def setUp(self): super(TestCPUFeaturePolicy, self).setUp() self.field = fields.CPUFeaturePolicyField() self.coerce_good_values = [('force', 'force'), ('require', 'require'), ('optional', 'optional'), ('disable', 'disable'), ('forbid', 'forbid')] self.coerce_bad_values = ['disallow'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'forbid'", self.field.stringify('forbid')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'disallow') class TestConfigDrivePolicy(TestField): def setUp(self): super(TestConfigDrivePolicy, self).setUp() self.field = fields.ConfigDrivePolicyField() self.coerce_good_values = [('optional', 'optional'), ('mandatory', 'mandatory')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'optional'", self.field.stringify('optional')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestCPUAllocationPolicy(TestField): def setUp(self): super(TestCPUAllocationPolicy, self).setUp() self.field = fields.CPUAllocationPolicyField() self.coerce_good_values = [('dedicated', 'dedicated'), ('shared', 'shared')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'shared'", self.field.stringify('shared')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestCPUThreadAllocationPolicy(TestField): def setUp(self): super(TestCPUThreadAllocationPolicy, self).setUp() self.field = fields.CPUThreadAllocationPolicyField() 
self.coerce_good_values = [('prefer', 'prefer'), ('isolate', 'isolate'), ('require', 'require')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'prefer'", self.field.stringify('prefer')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestPciDeviceType(TestField): def setUp(self): super(TestPciDeviceType, self).setUp() self.field = fields.PciDeviceTypeField() self.coerce_good_values = [('type-PCI', 'type-PCI'), ('type-PF', 'type-PF'), ('type-VF', 'type-VF')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'type-VF'", self.field.stringify('type-VF')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestDiskBus(TestField): def setUp(self): super(TestDiskBus, self).setUp() self.field = fields.DiskBusField() self.coerce_good_values = [('fdc', 'fdc'), ('ide', 'ide'), ('sata', 'sata'), ('scsi', 'scsi'), ('usb', 'usb'), ('virtio', 'virtio'), ('xen', 'xen'), ('lxc', 'lxc'), ('uml', 'uml')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'ide'", self.field.stringify('ide')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestHVType(TestField): def setUp(self): super(TestHVType, self).setUp() self.field = fields.HVTypeField() self.coerce_good_values = [('baremetal', 'baremetal'), ('bhyve', 'bhyve'), ('fake', 'fake'), ('kvm', 'kvm'), ('xapi', 'xen')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def 
test_stringify(self): self.assertEqual("'xen'", self.field.stringify('xen')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestImageSignatureTypes(TestField): # Ensure that the object definition is updated # in step with the signature_utils module def setUp(self): super(TestImageSignatureTypes, self).setUp() self.hash_field = fields.ImageSignatureHashType() self.key_type_field = fields.ImageSignatureKeyType() def test_hashes(self): for hash_name in list(signature_utils.HASH_METHODS.keys()): self.assertIn(hash_name, self.hash_field.hashes) def test_key_types(self): key_type_dict = signature_utils.SignatureKeyType._REGISTERED_TYPES key_types = list(key_type_dict.keys()) for key_type in key_types: self.assertIn(key_type, self.key_type_field.key_types) class TestOSType(TestField): def setUp(self): super(TestOSType, self).setUp() self.field = fields.OSTypeField() self.coerce_good_values = [('linux', 'linux'), ('windows', 'windows'), ('WINDOWS', 'windows')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'linux'", self.field.stringify('linux')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestResourceClass(TestField): def setUp(self): super(TestResourceClass, self).setUp() self.field = fields.ResourceClassField() self.coerce_good_values = [ ('VCPU', 'VCPU'), ('MEMORY_MB', 'MEMORY_MB'), ('DISK_GB', 'DISK_GB'), ('PCI_DEVICE', 'PCI_DEVICE'), ('SRIOV_NET_VF', 'SRIOV_NET_VF'), ('NUMA_SOCKET', 'NUMA_SOCKET'), ('NUMA_CORE', 'NUMA_CORE'), ('NUMA_THREAD', 'NUMA_THREAD'), ('NUMA_MEMORY_MB', 'NUMA_MEMORY_MB'), ('IPV4_ADDRESS', 'IPV4_ADDRESS'), ] self.expected_indexes = [ ('VCPU', 0), ('MEMORY_MB', 1), ('DISK_GB', 2), ('PCI_DEVICE', 3), ('SRIOV_NET_VF', 4), ('NUMA_SOCKET', 5), ('NUMA_CORE', 6), ('NUMA_THREAD', 7), ('NUMA_MEMORY_MB', 
8), ('IPV4_ADDRESS', 9), ] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'VCPU'", self.field.stringify( fields.ResourceClass.VCPU)) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'cow') def test_index(self): for name, index in self.expected_indexes: self.assertEqual(index, self.field.index(name)) def test_index_invalid(self): self.assertRaises(ValueError, self.field.index, 'cow') def test_from_index(self): for name, index in self.expected_indexes: self.assertEqual(name, self.field.from_index(index)) def test_from_index_invalid(self): self.assertRaises(IndexError, self.field.from_index, 999) class TestRNGModel(TestField): def setUp(self): super(TestRNGModel, self).setUp() self.field = fields.RNGModelField() self.coerce_good_values = [('virtio', 'virtio'), ] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'virtio'", self.field.stringify('virtio')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestSCSIModel(TestField): def setUp(self): super(TestSCSIModel, self).setUp() self.field = fields.SCSIModelField() self.coerce_good_values = [('buslogic', 'buslogic'), ('ibmvscsi', 'ibmvscsi'), ('lsilogic', 'lsilogic'), ('lsisas1068', 'lsisas1068'), ('lsisas1078', 'lsisas1078'), ('virtio-scsi', 'virtio-scsi'), ('vmpvscsi', 'vmpvscsi'), ('lsilogicsas', 'lsisas1068'), ('paravirtual', 'vmpvscsi')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'vmpvscsi'", self.field.stringify('vmpvscsi')) def test_stringify_invalid(self): self.assertRaises(ValueError, 
self.field.stringify, 'acme') class TestVideoModel(TestField): def setUp(self): super(TestVideoModel, self).setUp() self.field = fields.VideoModelField() self.coerce_good_values = [('cirrus', 'cirrus'), ('qxl', 'qxl'), ('vga', 'vga'), ('vmvga', 'vmvga'), ('xen', 'xen')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'cirrus'", self.field.stringify('cirrus')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestVIFModel(TestField): def setUp(self): super(TestVIFModel, self).setUp() self.field = fields.VIFModelField() self.coerce_good_values = [('virtio', 'virtio'), ('ne2k_pci', 'ne2k_pci'), ('pcnet', 'pcnet'), ('rtl8139', 'rtl8139'), ('e1000', 'e1000'), ('e1000e', 'e1000e'), ('netfront', 'netfront'), ('spapr-vlan', 'spapr-vlan'), ('VirtualE1000', 'e1000'), ('VirtualE1000e', 'e1000e'), ('VirtualPCNet32', 'pcnet'), ('VirtualSriovEthernetCard', 'sriov'), ('VirtualVmxnet', 'vmxnet'), ('VirtualVmxnet3', 'vmxnet3'), ] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'e1000'", self.field.stringify('e1000')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestVMMode(TestField): def setUp(self): super(TestVMMode, self).setUp() self.field = fields.VMModeField() self.coerce_good_values = [('hvm', 'hvm'), ('xen', 'xen'), ('uml', 'uml'), ('exe', 'exe'), ('pv', 'xen'), ('hv', 'hvm'), ('baremetal', 'hvm')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'hvm'", self.field.stringify('hvm')) def test_stringify_invalid(self): self.assertRaises(ValueError, 
self.field.stringify, 'acme') class TestWatchdogAction(TestField): def setUp(self): super(TestWatchdogAction, self).setUp() self.field = fields.WatchdogActionField() self.coerce_good_values = [('none', 'none'), ('pause', 'pause'), ('poweroff', 'poweroff'), ('reset', 'reset')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'reset'", self.field.stringify('reset')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestMonitorMetricType(TestField): def setUp(self): super(TestMonitorMetricType, self).setUp() self.field = fields.MonitorMetricTypeField() self.coerce_good_values = [('cpu.frequency', 'cpu.frequency'), ('cpu.user.time', 'cpu.user.time'), ('cpu.kernel.time', 'cpu.kernel.time'), ('cpu.idle.time', 'cpu.idle.time'), ('cpu.iowait.time', 'cpu.iowait.time'), ('cpu.user.percent', 'cpu.user.percent'), ('cpu.kernel.percent', 'cpu.kernel.percent'), ('cpu.idle.percent', 'cpu.idle.percent'), ('cpu.iowait.percent', 'cpu.iowait.percent'), ('cpu.percent', 'cpu.percent')] self.coerce_bad_values = ['cpu.typo'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'cpu.frequency'", self.field.stringify('cpu.frequency')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'cpufrequency') class TestDiskFormat(TestField): def setUp(self): super(TestDiskFormat, self).setUp() self.field = fields.DiskFormatField() self.coerce_good_values = [('qcow2', 'qcow2'), ('raw', 'raw'), ('lvm', 'lvm'), ('rbd', 'rbd'), ('ploop', 'ploop'), ('vhd', 'vhd'), ('vmdk', 'vmdk'), ('vdi', 'vdi'), ('iso', 'iso')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def 
test_stringify(self): self.assertEqual("'rbd'", self.field.stringify('rbd')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'acme') class TestInteger(TestField): def setUp(self): super(TestInteger, self).setUp() self.field = fields.IntegerField() self.coerce_good_values = [(1, 1), ('1', 1)] self.coerce_bad_values = ['foo', None] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] class TestNonNegativeInteger(TestInteger): def setUp(self): super(TestNonNegativeInteger, self).setUp() self.field = fields.Field(fields.NonNegativeInteger()) self.coerce_bad_values.extend(['-2', '4.2']) class TestFloat(TestField): def setUp(self): super(TestFloat, self).setUp() self.field = fields.FloatField() self.coerce_good_values = [(1.1, 1.1), ('1.1', 1.1)] self.coerce_bad_values = ['foo', None] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] class TestNonNegativeFloat(TestFloat): def setUp(self): super(TestNonNegativeFloat, self).setUp() self.field = fields.Field(fields.NonNegativeFloat()) self.coerce_bad_values.extend(['-4.2']) class TestBoolean(TestField): def setUp(self): super(TestBoolean, self).setUp() self.field = fields.BooleanField() self.coerce_good_values = [(True, True), (False, False), (1, True), ('foo', True), (0, False), ('', False)] self.coerce_bad_values = [] self.to_primitive_values = self.coerce_good_values[0:2] self.from_primitive_values = self.coerce_good_values[0:2] class TestDateTime(TestField): def setUp(self): super(TestDateTime, self).setUp() self.dt = datetime.datetime(1955, 11, 5, tzinfo=iso8601.iso8601.Utc()) self.field = fields.DateTimeField() self.coerce_good_values = [(self.dt, self.dt), (utils.isotime(self.dt), self.dt)] self.coerce_bad_values = [1, 'foo'] self.to_primitive_values = [(self.dt, utils.isotime(self.dt))] self.from_primitive_values = [(utils.isotime(self.dt), self.dt)] def 
test_stringify(self): self.assertEqual( '1955-11-05T18:00:00Z', self.field.stringify( datetime.datetime(1955, 11, 5, 18, 0, 0, tzinfo=iso8601.iso8601.Utc()))) class TestIPAddress(TestField): def setUp(self): super(TestIPAddress, self).setUp() self.field = fields.IPAddressField() self.coerce_good_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4')), ('::1', netaddr.IPAddress('::1')), (netaddr.IPAddress('::1'), netaddr.IPAddress('::1'))] self.coerce_bad_values = ['1-2', 'foo'] self.to_primitive_values = [(netaddr.IPAddress('1.2.3.4'), '1.2.3.4'), (netaddr.IPAddress('::1'), '::1')] self.from_primitive_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4')), ('::1', netaddr.IPAddress('::1'))] class TestIPAddressV4(TestField): def setUp(self): super(TestIPAddressV4, self).setUp() self.field = fields.IPV4AddressField() self.coerce_good_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4')), (netaddr.IPAddress('1.2.3.4'), netaddr.IPAddress('1.2.3.4'))] self.coerce_bad_values = ['1-2', 'foo', '::1'] self.to_primitive_values = [(netaddr.IPAddress('1.2.3.4'), '1.2.3.4')] self.from_primitive_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4'))] class TestIPAddressV6(TestField): def setUp(self): super(TestIPAddressV6, self).setUp() self.field = fields.IPV6AddressField() self.coerce_good_values = [('::1', netaddr.IPAddress('::1')), (netaddr.IPAddress('::1'), netaddr.IPAddress('::1'))] self.coerce_bad_values = ['1.2', 'foo', '1.2.3.4'] self.to_primitive_values = [(netaddr.IPAddress('::1'), '::1')] self.from_primitive_values = [('::1', netaddr.IPAddress('::1'))] class TestDict(TestField): def setUp(self): super(TestDict, self).setUp() self.field = fields.Field(fields.Dict(FakeFieldType())) self.coerce_good_values = [({'foo': 'bar'}, {'foo': '*bar*'}), ({'foo': 1}, {'foo': '*1*'})] self.coerce_bad_values = [{1: 'bar'}, 'foo'] self.to_primitive_values = [({'foo': 'bar'}, {'foo': '!bar!'})] self.from_primitive_values = [({'foo': '!bar!'}, {'foo': 'bar'})] def test_stringify(self): 
self.assertEqual("{key=val}", self.field.stringify({'key': 'val'})) class TestDictOfStrings(TestField): def setUp(self): super(TestDictOfStrings, self).setUp() self.field = fields.DictOfStringsField() self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}), ({'foo': 1}, {'foo': '1'})] self.coerce_bad_values = [{1: 'bar'}, {'foo': None}, 'foo'] self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})] self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})] def test_stringify(self): self.assertEqual("{key='val'}", self.field.stringify({'key': 'val'})) class TestDictOfIntegers(TestField): def setUp(self): super(TestDictOfIntegers, self).setUp() self.field = fields.DictOfIntegersField() self.coerce_good_values = [({'foo': '42'}, {'foo': 42}), ({'foo': 4.2}, {'foo': 4})] self.coerce_bad_values = [{1: 'bar'}, {'foo': 'boo'}, 'foo', {'foo': None}] self.to_primitive_values = [({'foo': 42}, {'foo': 42})] self.from_primitive_values = [({'foo': 42}, {'foo': 42})] def test_stringify(self): self.assertEqual("{key=42}", self.field.stringify({'key': 42})) class TestDictOfStringsNone(TestField): def setUp(self): super(TestDictOfStringsNone, self).setUp() self.field = fields.DictOfNullableStringsField() self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}), ({'foo': 1}, {'foo': '1'}), ({'foo': None}, {'foo': None})] self.coerce_bad_values = [{1: 'bar'}, 'foo'] self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})] self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})] def test_stringify(self): self.assertEqual("{k2=None,key='val'}", self.field.stringify({'k2': None, 'key': 'val'})) class TestListOfDictOfNullableStringsField(TestField): def setUp(self): super(TestListOfDictOfNullableStringsField, self).setUp() self.field = fields.ListOfDictOfNullableStringsField() self.coerce_good_values = [([{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}], [{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}]), ([{'f': 1}, {'f1': 'b1'}], [{'f': '1'}, {'f1': 'b1'}]), ([{'foo': None}], 
[{'foo': None}])] self.coerce_bad_values = [[{1: 'a'}], ['ham', 1], ['eggs']] self.to_primitive_values = [([{'f': 'b'}, {'f1': 'b1'}, {'f2': None}], [{'f': 'b'}, {'f1': 'b1'}, {'f2': None}])] self.from_primitive_values = [([{'f': 'b'}, {'f1': 'b1'}, {'f2': None}], [{'f': 'b'}, {'f1': 'b1'}, {'f2': None}])] def test_stringify(self): self.assertEqual("[{f=None,f1='b1'},{f2='b2'}]", self.field.stringify( [{'f': None, 'f1': 'b1'}, {'f2': 'b2'}])) class TestList(TestField): def setUp(self): super(TestList, self).setUp() self.field = fields.Field(fields.List(FakeFieldType())) self.coerce_good_values = [(['foo', 'bar'], ['*foo*', '*bar*'])] self.coerce_bad_values = ['foo'] self.to_primitive_values = [(['foo'], ['!foo!'])] self.from_primitive_values = [(['!foo!'], ['foo'])] def test_stringify(self): self.assertEqual('[123]', self.field.stringify([123])) class TestListOfStrings(TestField): def setUp(self): super(TestListOfStrings, self).setUp() self.field = fields.ListOfStringsField() self.coerce_good_values = [(['foo', 'bar'], ['foo', 'bar'])] self.coerce_bad_values = ['foo'] self.to_primitive_values = [(['foo'], ['foo'])] self.from_primitive_values = [(['foo'], ['foo'])] def test_stringify(self): self.assertEqual("['abc']", self.field.stringify(['abc'])) class TestSet(TestField): def setUp(self): super(TestSet, self).setUp() self.field = fields.Field(fields.Set(FakeFieldType())) self.coerce_good_values = [(set(['foo', 'bar']), set(['*foo*', '*bar*']))] self.coerce_bad_values = [['foo'], {'foo': 'bar'}] self.to_primitive_values = [(set(['foo']), tuple(['!foo!']))] self.from_primitive_values = [(tuple(['!foo!']), set(['foo']))] def test_stringify(self): self.assertEqual('set([123])', self.field.stringify(set([123]))) class TestSetOfIntegers(TestField): def setUp(self): super(TestSetOfIntegers, self).setUp() self.field = fields.SetOfIntegersField() self.coerce_good_values = [(set(['1', 2]), set([1, 2]))] self.coerce_bad_values = [set(['foo'])] self.to_primitive_values = 
[(set([1]), tuple([1]))] self.from_primitive_values = [(tuple([1]), set([1]))] def test_stringify(self): self.assertEqual('set([1,2])', self.field.stringify(set([1, 2]))) class TestListOfSetsOfIntegers(TestField): def setUp(self): super(TestListOfSetsOfIntegers, self).setUp() self.field = fields.ListOfSetsOfIntegersField() self.coerce_good_values = [([set(['1', 2]), set([3, '4'])], [set([1, 2]), set([3, 4])])] self.coerce_bad_values = [[set(['foo'])]] self.to_primitive_values = [([set([1])], [tuple([1])])] self.from_primitive_values = [([tuple([1])], [set([1])])] def test_stringify(self): self.assertEqual('[set([1,2])]', self.field.stringify([set([1, 2])])) class TestNetworkModel(TestField): def setUp(self): super(TestNetworkModel, self).setUp() model = network_model.NetworkInfo() self.field = fields.Field(fields.NetworkModel()) self.coerce_good_values = [(model, model), (model.json(), model)] self.coerce_bad_values = [[], 'foo'] self.to_primitive_values = [(model, model.json())] self.from_primitive_values = [(model.json(), model)] def test_stringify(self): networkinfo = network_model.NetworkInfo() networkinfo.append(network_model.VIF(id=123)) networkinfo.append(network_model.VIF(id=456)) self.assertEqual('NetworkModel(123,456)', self.field.stringify(networkinfo)) class TestIPNetwork(TestField): def setUp(self): super(TestIPNetwork, self).setUp() self.field = fields.Field(fields.IPNetwork()) good = ['192.168.1.0/24', '0.0.0.0/0', '::1/128', '::1/64', '::1/0'] self.coerce_good_values = [(x, netaddr.IPNetwork(x)) for x in good] self.coerce_bad_values = ['192.168.0.0/f', '192.168.0.0/foo', '::1/129', '192.168.0.0/-1'] self.to_primitive_values = [(netaddr.IPNetwork(x), x) for x in good] self.from_primitive_values = [(x, netaddr.IPNetwork(x)) for x in good] class TestIPV4Network(TestField): def setUp(self): super(TestIPV4Network, self).setUp() self.field = fields.Field(fields.IPV4Network()) good = ['192.168.1.0/24', '0.0.0.0/0'] self.coerce_good_values = [(x, 
netaddr.IPNetwork(x)) for x in good] self.coerce_bad_values = ['192.168.0.0/f', '192.168.0.0/foo', '::1/129', '192.168.0.0/-1'] self.to_primitive_values = [(netaddr.IPNetwork(x), x) for x in good] self.from_primitive_values = [(x, netaddr.IPNetwork(x)) for x in good] class TestIPV6Network(TestField): def setUp(self): super(TestIPV6Network, self).setUp() self.field = fields.Field(fields.IPV6Network()) good = ['::1/128', '::1/64', '::1/0'] self.coerce_good_values = [(x, netaddr.IPNetwork(x)) for x in good] self.coerce_bad_values = ['192.168.0.0/f', '192.168.0.0/foo', '::1/129', '192.168.0.0/-1'] self.to_primitive_values = [(netaddr.IPNetwork(x), x) for x in good] self.from_primitive_values = [(x, netaddr.IPNetwork(x)) for x in good] class TestNotificationPriority(TestField): def setUp(self): super(TestNotificationPriority, self).setUp() self.field = fields.NotificationPriorityField() self.coerce_good_values = [('audit', 'audit'), ('critical', 'critical'), ('debug', 'debug'), ('error', 'error'), ('sample', 'sample'), ('warn', 'warn')] self.coerce_bad_values = ['warning'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'warn'", self.field.stringify('warn')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'warning') class TestNotificationPhase(TestField): def setUp(self): super(TestNotificationPhase, self).setUp() self.field = fields.NotificationPhaseField() self.coerce_good_values = [('start', 'start'), ('end', 'end'), ('error', 'error')] self.coerce_bad_values = ['begin'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'error'", self.field.stringify('error')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'begin') class TestNotificationAction(TestField): def setUp(self): 
super(TestNotificationAction, self).setUp() self.field = fields.NotificationActionField() self.coerce_good_values = [('update', 'update')] self.coerce_bad_values = ['magic'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'update'", self.field.stringify('update')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'magic') nova-13.0.0/nova/tests/unit/objects/test_ec2.py0000664000567000056710000002422312701410011022464 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import db from nova import objects from nova.objects import ec2 as ec2_obj from nova.tests.unit.objects import test_objects fake_map = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'id': 1, 'uuid': 'fake-uuid-2', } class _TestEC2InstanceMapping(object): @staticmethod def _compare(test, db, obj): for field, value in db.items(): test.assertEqual(db[field], getattr(obj, field)) def test_create(self): imap = ec2_obj.EC2InstanceMapping(context=self.context) imap.uuid = 'fake-uuid-2' with mock.patch.object(db, 'ec2_instance_create') as create: create.return_value = fake_map imap.create() self.assertEqual(self.context, imap._context) imap._context = None self._compare(self, fake_map, imap) def test_get_by_uuid(self): with mock.patch.object(db, 'ec2_instance_get_by_uuid') as get: get.return_value = fake_map imap = ec2_obj.EC2InstanceMapping.get_by_uuid(self.context, 'fake-uuid-2') self._compare(self, fake_map, imap) def test_get_by_ec2_id(self): with mock.patch.object(db, 'ec2_instance_get_by_id') as get: get.return_value = fake_map imap = ec2_obj.EC2InstanceMapping.get_by_id(self.context, 1) self._compare(self, fake_map, imap) class TestEC2InstanceMapping(test_objects._LocalTest, _TestEC2InstanceMapping): pass class TestRemoteEC2InstanceMapping(test_objects._RemoteTest, _TestEC2InstanceMapping): pass class _TestEC2VolumeMapping(object): @staticmethod def _compare(test, db, obj): for field, value in db.items(): test.assertEqual(db[field], getattr(obj, field)) def test_create(self): vmap = ec2_obj.EC2VolumeMapping(context=self.context) vmap.uuid = 'fake-uuid-2' with mock.patch.object(db, 'ec2_volume_create') as create: create.return_value = fake_map vmap.create() self.assertEqual(self.context, vmap._context) vmap._context = None self._compare(self, fake_map, vmap) def test_get_by_uuid(self): with mock.patch.object(db, 'ec2_volume_get_by_uuid') as get: get.return_value = fake_map vmap = 
ec2_obj.EC2VolumeMapping.get_by_uuid(self.context, 'fake-uuid-2') self._compare(self, fake_map, vmap) def test_get_by_ec2_id(self): with mock.patch.object(db, 'ec2_volume_get_by_id') as get: get.return_value = fake_map vmap = ec2_obj.EC2VolumeMapping.get_by_id(self.context, 1) self._compare(self, fake_map, vmap) class TestEC2VolumeMapping(test_objects._LocalTest, _TestEC2VolumeMapping): pass class TestRemoteEC2VolumeMapping(test_objects._RemoteTest, _TestEC2VolumeMapping): pass class _TestEC2SnapshotMapping(object): @staticmethod def _compare(test, db, obj): for field, value in db.items(): test.assertEqual(db[field], getattr(obj, field)) def test_create(self): smap = ec2_obj.EC2SnapshotMapping(context=self.context) smap.uuid = 'fake-uuid-2' with mock.patch.object(db, 'ec2_snapshot_create') as create: create.return_value = fake_map smap.create() self.assertEqual(self.context, smap._context) smap._context = None self._compare(self, fake_map, smap) def test_get_by_uuid(self): with mock.patch.object(db, 'ec2_snapshot_get_by_uuid') as get: get.return_value = fake_map smap = ec2_obj.EC2SnapshotMapping.get_by_uuid(self.context, 'fake-uuid-2') self._compare(self, fake_map, smap) def test_get_by_ec2_id(self): with mock.patch.object(db, 'ec2_snapshot_get_by_ec2_id') as get: get.return_value = fake_map smap = ec2_obj.EC2SnapshotMapping.get_by_id(self.context, 1) self._compare(self, fake_map, smap) class TestEC2SnapshotMapping(test_objects._LocalTest, _TestEC2SnapshotMapping): pass class TestRemoteEC2SnapshotMapping(test_objects._RemoteTest, _TestEC2SnapshotMapping): pass class _TestS3ImageMapping(object): @staticmethod def _compare(test, db, obj): for field, value in db.items(): test.assertEqual(db[field], obj[field]) def test_create(self): s3imap = ec2_obj.S3ImageMapping(context=self.context) s3imap.uuid = 'fake-uuid-2' with mock.patch.object(db, 's3_image_create') as create: create.return_value = fake_map s3imap.create() self.assertEqual(self.context, s3imap._context) 
s3imap._context = None self._compare(self, fake_map, s3imap) def test_get_by_uuid(self): with mock.patch.object(db, 's3_image_get_by_uuid') as get: get.return_value = fake_map s3imap = ec2_obj.S3ImageMapping.get_by_uuid(self.context, 'fake-uuid-2') self._compare(self, fake_map, s3imap) def test_get_by_s3_id(self): with mock.patch.object(db, 's3_image_get') as get: get.return_value = fake_map s3imap = ec2_obj.S3ImageMapping.get_by_id(self.context, 1) self._compare(self, fake_map, s3imap) class TestS3ImageMapping(test_objects._LocalTest, _TestS3ImageMapping): pass class TestRemoteS3ImageMapping(test_objects._RemoteTest, _TestS3ImageMapping): pass class _TestEC2Ids(object): @mock.patch('nova.api.ec2.ec2utils.image_type') @mock.patch('nova.api.ec2.ec2utils.glance_id_to_ec2_id') @mock.patch('nova.api.ec2.ec2utils.id_to_ec2_inst_id') def test_get_by_instance(self, mock_inst, mock_glance, mock_type): mock_inst.return_value = 'fake-ec2-inst-id' mock_glance.side_effect = ['fake-ec2-ami-id', 'fake-ec2-kernel-id', 'fake-ec2-ramdisk-id'] mock_type.side_effect = [mock.sentinel.ec2_kernel_type, mock.sentinel.ec2_ramdisk_type] inst = objects.Instance(uuid='fake-uuid', image_ref='fake-image-id', kernel_id='fake-kernel-id', ramdisk_id='fake-ramdisk-id') result = ec2_obj.EC2Ids.get_by_instance(self.context, inst) self.assertEqual('fake-ec2-inst-id', result.instance_id) self.assertEqual('fake-ec2-ami-id', result.ami_id) self.assertEqual('fake-ec2-kernel-id', result.kernel_id) self.assertEqual('fake-ec2-ramdisk-id', result.ramdisk_id) @mock.patch('nova.api.ec2.ec2utils.glance_id_to_ec2_id') @mock.patch('nova.api.ec2.ec2utils.id_to_ec2_inst_id') def test_get_by_instance_no_image_ref(self, mock_inst, mock_glance): mock_inst.return_value = 'fake-ec2-inst-id' mock_glance.return_value = None inst = objects.Instance(uuid='fake-uuid', image_ref=None, kernel_id=None, ramdisk_id=None) result = ec2_obj.EC2Ids.get_by_instance(self.context, inst) self.assertEqual('fake-ec2-inst-id', 
result.instance_id) self.assertIsNone(result.ami_id) self.assertIsNone(result.kernel_id) self.assertIsNone(result.ramdisk_id) @mock.patch('nova.api.ec2.ec2utils.image_type') @mock.patch('nova.api.ec2.ec2utils.glance_id_to_ec2_id') @mock.patch('nova.api.ec2.ec2utils.id_to_ec2_inst_id') def test_get_by_instance_no_kernel_id(self, mock_inst, mock_glance, mock_type): mock_inst.return_value = 'fake-ec2-inst-id' mock_glance.side_effect = ['fake-ec2-ami-id', 'fake-ec2-ramdisk-id'] mock_type.return_value = mock.sentinel.ec2_ramdisk_type inst = objects.Instance(uuid='fake-uuid', image_ref='fake-image-id', kernel_id=None, ramdisk_id='fake-ramdisk-id') result = ec2_obj.EC2Ids.get_by_instance(self.context, inst) self.assertEqual('fake-ec2-inst-id', result.instance_id) self.assertEqual('fake-ec2-ami-id', result.ami_id) self.assertIsNone(result.kernel_id) self.assertEqual('fake-ec2-ramdisk-id', result.ramdisk_id) @mock.patch('nova.api.ec2.ec2utils.image_type') @mock.patch('nova.api.ec2.ec2utils.glance_id_to_ec2_id') @mock.patch('nova.api.ec2.ec2utils.id_to_ec2_inst_id') def test_get_by_instance_no_ramdisk_id(self, mock_inst, mock_glance, mock_type): mock_inst.return_value = 'fake-ec2-inst-id' mock_glance.side_effect = ['fake-ec2-ami-id', 'fake-ec2-kernel-id'] mock_type.return_value = mock.sentinel.ec2_kernel_type inst = objects.Instance(uuid='fake-uuid', image_ref='fake-image-id', kernel_id='fake-kernel-id', ramdisk_id=None) result = ec2_obj.EC2Ids.get_by_instance(self.context, inst) self.assertEqual('fake-ec2-inst-id', result.instance_id) self.assertEqual('fake-ec2-ami-id', result.ami_id) self.assertEqual('fake-ec2-kernel-id', result.kernel_id) self.assertIsNone(result.ramdisk_id) class TestEC2Ids(test_objects._LocalTest, _TestEC2Ids): pass class TestRemoteEC2Ids(test_objects._RemoteTest, _TestEC2Ids): pass nova-13.0.0/nova/tests/unit/objects/test_instance.py0000664000567000056710000023365212701407773023654 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import mock from mox3 import mox import netaddr from oslo_db import exception as db_exc from oslo_serialization import jsonutils from oslo_utils import timeutils from nova.cells import rpcapi as cells_rpcapi from nova.compute import flavors from nova import db from nova import exception from nova.network import model as network_model from nova import notifications from nova import objects from nova.objects import base from nova.objects import fields from nova.objects import instance from nova.objects import instance_info_cache from nova.objects import pci_device from nova.objects import security_group from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_instance_fault from nova.tests.unit.objects import test_instance_info_cache from nova.tests.unit.objects import test_instance_numa_topology from nova.tests.unit.objects import test_instance_pci_requests from nova.tests.unit.objects import test_migration_context as test_mig_ctxt from nova.tests.unit.objects import test_objects from nova.tests.unit.objects import test_security_group from nova.tests.unit.objects import test_vcpu_model from nova.tests import uuidsentinel as uuids from nova import utils class _TestInstanceObject(object): @property def fake_instance(self): db_inst = fake_instance.fake_db_instance(id=2, access_ip_v4='1.2.3.4', access_ip_v6='::1') db_inst['uuid'] = uuids.db_instance db_inst['cell_name'] = 
'api!child' db_inst['terminated_at'] = None db_inst['deleted_at'] = None db_inst['created_at'] = None db_inst['updated_at'] = None db_inst['launched_at'] = datetime.datetime(1955, 11, 12, 22, 4, 0) db_inst['deleted'] = False db_inst['security_groups'] = [] db_inst['pci_devices'] = [] db_inst['user_id'] = self.context.user_id db_inst['project_id'] = self.context.project_id db_inst['tags'] = [] db_inst['info_cache'] = dict(test_instance_info_cache.fake_info_cache, instance_uuid=db_inst['uuid']) db_inst['system_metadata'] = { 'image_name': 'os2-warp', 'image_min_ram': 100, 'image_hw_disk_bus': 'ide', 'image_hw_vif_model': 'ne2k_pci', } return db_inst def test_datetime_deserialization(self): red_letter_date = timeutils.parse_isotime( utils.isotime(datetime.datetime(1955, 11, 5))) inst = objects.Instance(uuid=uuids.instance, launched_at=red_letter_date) primitive = inst.obj_to_primitive() expected = {'nova_object.name': 'Instance', 'nova_object.namespace': 'nova', 'nova_object.version': inst.VERSION, 'nova_object.data': {'uuid': uuids.instance, 'launched_at': '1955-11-05T00:00:00Z'}, 'nova_object.changes': ['launched_at', 'uuid']} self.assertJsonEqual(primitive, expected) inst2 = objects.Instance.obj_from_primitive(primitive) self.assertIsInstance(inst2.launched_at, datetime.datetime) self.assertEqual(red_letter_date, inst2.launched_at) def test_ip_deserialization(self): inst = objects.Instance(uuid=uuids.instance, access_ip_v4='1.2.3.4', access_ip_v6='::1') primitive = inst.obj_to_primitive() expected = {'nova_object.name': 'Instance', 'nova_object.namespace': 'nova', 'nova_object.version': inst.VERSION, 'nova_object.data': {'uuid': uuids.instance, 'access_ip_v4': '1.2.3.4', 'access_ip_v6': '::1'}, 'nova_object.changes': ['uuid', 'access_ip_v6', 'access_ip_v4']} self.assertJsonEqual(primitive, expected) inst2 = objects.Instance.obj_from_primitive(primitive) self.assertIsInstance(inst2.access_ip_v4, netaddr.IPAddress) self.assertIsInstance(inst2.access_ip_v6, 
netaddr.IPAddress) self.assertEqual(netaddr.IPAddress('1.2.3.4'), inst2.access_ip_v4) self.assertEqual(netaddr.IPAddress('::1'), inst2.access_ip_v6) def test_get_without_expected(self): self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(self.context, 'uuid', columns_to_join=[] ).AndReturn(self.fake_instance) self.mox.ReplayAll() inst = objects.Instance.get_by_uuid(self.context, 'uuid', expected_attrs=[]) for attr in instance.INSTANCE_OPTIONAL_ATTRS: self.assertFalse(inst.obj_attr_is_set(attr)) def test_get_with_expected(self): self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids') self.mox.StubOutWithMock( db, 'instance_extra_get_by_instance_uuid') exp_cols = instance.INSTANCE_OPTIONAL_ATTRS[:] exp_cols.remove('fault') exp_cols.remove('numa_topology') exp_cols.remove('pci_requests') exp_cols.remove('vcpu_model') exp_cols.remove('ec2_ids') exp_cols.remove('migration_context') exp_cols = list(filter(lambda x: 'flavor' not in x, exp_cols)) exp_cols.extend(['extra', 'extra.numa_topology', 'extra.pci_requests', 'extra.flavor', 'extra.vcpu_model', 'extra.migration_context']) fake_topology = (test_instance_numa_topology. fake_db_topology['numa_topology']) fake_requests = jsonutils.dumps(test_instance_pci_requests. 
fake_pci_requests) fake_flavor = jsonutils.dumps( {'cur': objects.Flavor().obj_to_primitive(), 'old': None, 'new': None}) fake_vcpu_model = jsonutils.dumps( test_vcpu_model.fake_vcpumodel.obj_to_primitive()) fake_mig_context = jsonutils.dumps( test_mig_ctxt.fake_migration_context_obj.obj_to_primitive()) fake_service = {'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 123, 'host': 'fake-host', 'binary': 'nova-fake', 'topic': 'fake-service-topic', 'report_count': 1, 'forced_down': False, 'disabled': False, 'disabled_reason': None, 'last_seen_up': None, 'version': 1, } fake_instance = dict(self.fake_instance, services=[fake_service], extra={ 'numa_topology': fake_topology, 'pci_requests': fake_requests, 'flavor': fake_flavor, 'vcpu_model': fake_vcpu_model, 'migration_context': fake_mig_context, }) db.instance_get_by_uuid( self.context, 'uuid', columns_to_join=exp_cols).AndReturn(fake_instance) fake_faults = test_instance_fault.fake_faults db.instance_fault_get_by_instance_uuids( self.context, [fake_instance['uuid']] ).AndReturn(fake_faults) self.mox.ReplayAll() inst = objects.Instance.get_by_uuid( self.context, 'uuid', expected_attrs=instance.INSTANCE_OPTIONAL_ATTRS) for attr in instance.INSTANCE_OPTIONAL_ATTRS: self.assertTrue(inst.obj_attr_is_set(attr)) self.assertEqual(123, inst.services[0].id) def test_lazy_load_services_on_deleted_instance(self): # We should avoid trying to hit the database to reload the instance # and just set the services attribute to an empty list. 
instance = objects.Instance(self.context, uuid=uuids.instance, deleted=True) self.assertEqual(0, len(instance.services)) def test_get_by_id(self): self.mox.StubOutWithMock(db, 'instance_get') db.instance_get(self.context, 'instid', columns_to_join=['info_cache', 'security_groups'] ).AndReturn(self.fake_instance) self.mox.ReplayAll() inst = objects.Instance.get_by_id(self.context, 'instid') self.assertEqual(self.fake_instance['uuid'], inst.uuid) def test_load(self): self.mox.StubOutWithMock(db, 'instance_get_by_uuid') fake_uuid = self.fake_instance['uuid'] db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(self.fake_instance) fake_inst2 = dict(self.fake_instance, metadata=[{'key': 'foo', 'value': 'bar'}]) db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['metadata'] ).AndReturn(fake_inst2) self.mox.ReplayAll() inst = objects.Instance.get_by_uuid(self.context, fake_uuid) self.assertFalse(hasattr(inst, '_obj_metadata')) meta = inst.metadata self.assertEqual({'foo': 'bar'}, meta) self.assertTrue(hasattr(inst, '_obj_metadata')) # Make sure we don't run load again meta2 = inst.metadata self.assertEqual({'foo': 'bar'}, meta2) def test_load_invalid(self): inst = objects.Instance(context=self.context, uuid=uuids.instance) self.assertRaises(exception.ObjectActionError, inst.obj_load_attr, 'foo') def test_get_remote(self): # isotime doesn't have microseconds and is always UTC self.mox.StubOutWithMock(db, 'instance_get_by_uuid') fake_instance = self.fake_instance db.instance_get_by_uuid(self.context, uuids.instance, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_instance) self.mox.ReplayAll() inst = objects.Instance.get_by_uuid(self.context, uuids.instance) self.assertEqual(fake_instance['id'], inst.id) self.assertEqual(fake_instance['launched_at'], inst.launched_at.replace(tzinfo=None)) self.assertEqual(fake_instance['access_ip_v4'], str(inst.access_ip_v4)) 
self.assertEqual(fake_instance['access_ip_v6'], str(inst.access_ip_v6)) def test_refresh(self): self.mox.StubOutWithMock(db, 'instance_get_by_uuid') fake_uuid = self.fake_instance['uuid'] db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(dict(self.fake_instance, host='orig-host')) db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(dict(self.fake_instance, host='new-host')) self.mox.StubOutWithMock(instance_info_cache.InstanceInfoCache, 'refresh') instance_info_cache.InstanceInfoCache.refresh() self.mox.ReplayAll() inst = objects.Instance.get_by_uuid(self.context, fake_uuid) self.assertEqual('orig-host', inst.host) inst.refresh() self.assertEqual('new-host', inst.host) self.assertEqual(set([]), inst.obj_what_changed()) def test_refresh_does_not_recurse(self): inst = objects.Instance(context=self.context, uuid=uuids.instance, metadata={}) inst_copy = objects.Instance() inst_copy.uuid = inst.uuid self.mox.StubOutWithMock(objects.Instance, 'get_by_uuid') objects.Instance.get_by_uuid(self.context, uuid=inst.uuid, expected_attrs=['metadata'], use_slave=False ).AndReturn(inst_copy) self.mox.ReplayAll() self.assertRaises(exception.OrphanedObjectError, inst.refresh) def _save_test_helper(self, cell_type, save_kwargs): """Common code for testing save() for cells/non-cells.""" if cell_type: self.flags(enable=True, cell_type=cell_type, group='cells') else: self.flags(enable=False, group='cells') old_ref = dict(self.fake_instance, host='oldhost', user_data='old', vm_state='old', task_state='old') fake_uuid = old_ref['uuid'] expected_updates = dict(vm_state='meow', task_state='wuff', user_data='new') new_ref = dict(old_ref, host='newhost', **expected_updates) exp_vm_state = save_kwargs.get('expected_vm_state') exp_task_state = save_kwargs.get('expected_task_state') admin_reset = save_kwargs.get('admin_state_reset', False) if exp_vm_state: 
expected_updates['expected_vm_state'] = exp_vm_state if exp_task_state: if (exp_task_state == 'image_snapshot' and 'instance_version' in save_kwargs and save_kwargs['instance_version'] == '1.9'): expected_updates['expected_task_state'] = [ 'image_snapshot', 'image_snapshot_pending'] else: expected_updates['expected_task_state'] = exp_task_state self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.mox.StubOutWithMock(db, 'instance_info_cache_update') cells_api_mock = self.mox.CreateMock(cells_rpcapi.CellsAPI) self.mox.StubOutWithMock(cells_api_mock, 'instance_update_at_top') self.mox.StubOutWithMock(cells_api_mock, 'instance_update_from_api') self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI', use_mock_anything=True) self.mox.StubOutWithMock(notifications, 'send_update') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(old_ref) db.instance_update_and_get_original( self.context, fake_uuid, expected_updates, columns_to_join=['info_cache', 'security_groups', 'system_metadata', 'extra', 'extra.flavor'] ).AndReturn((old_ref, new_ref)) if cell_type == 'api': cells_rpcapi.CellsAPI().AndReturn(cells_api_mock) cells_api_mock.instance_update_from_api( self.context, mox.IsA(objects.Instance), exp_vm_state, exp_task_state, admin_reset) elif cell_type == 'compute': cells_rpcapi.CellsAPI().AndReturn(cells_api_mock) cells_api_mock.instance_update_at_top(self.context, mox.IsA(objects.Instance)) notifications.send_update(self.context, mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() inst = objects.Instance.get_by_uuid(self.context, old_ref['uuid']) if 'instance_version' in save_kwargs: inst.VERSION = save_kwargs.pop('instance_version') self.assertEqual('old', inst.task_state) self.assertEqual('old', inst.vm_state) self.assertEqual('old', inst.user_data) inst.vm_state = 'meow' inst.task_state = 'wuff' inst.user_data = 'new' 
save_kwargs.pop('context', None) inst.save(**save_kwargs) self.assertEqual('newhost', inst.host) self.assertEqual('meow', inst.vm_state) self.assertEqual('wuff', inst.task_state) self.assertEqual('new', inst.user_data) # NOTE(danms): Ignore flavor migrations for the moment self.assertEqual(set([]), inst.obj_what_changed() - set(['flavor'])) def test_save(self): self._save_test_helper(None, {}) def test_save_in_api_cell(self): self._save_test_helper('api', {}) def test_save_in_compute_cell(self): self._save_test_helper('compute', {}) def test_save_exp_vm_state(self): self._save_test_helper(None, {'expected_vm_state': ['meow']}) def test_save_exp_task_state(self): self._save_test_helper(None, {'expected_task_state': ['meow']}) def test_save_exp_vm_state_api_cell(self): self._save_test_helper('api', {'expected_vm_state': ['meow']}) def test_save_exp_task_state_api_cell(self): self._save_test_helper('api', {'expected_task_state': ['meow']}) def test_save_exp_task_state_api_cell_admin_reset(self): self._save_test_helper('api', {'admin_state_reset': True}) def test_save_rename_sends_notification(self): # Tests that simply changing the 'display_name' on the instance # will send a notification. 
self.flags(enable=False, group='cells') old_ref = dict(self.fake_instance, display_name='hello') fake_uuid = old_ref['uuid'] expected_updates = dict(display_name='goodbye') new_ref = dict(old_ref, **expected_updates) self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.mox.StubOutWithMock(notifications, 'send_update') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(old_ref) db.instance_update_and_get_original( self.context, fake_uuid, expected_updates, columns_to_join=['info_cache', 'security_groups', 'system_metadata', 'extra', 'extra.flavor'] ).AndReturn((old_ref, new_ref)) notifications.send_update(self.context, mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() inst = objects.Instance.get_by_uuid(self.context, old_ref['uuid'], use_slave=False) self.assertEqual('hello', inst.display_name) inst.display_name = 'goodbye' inst.save() self.assertEqual('goodbye', inst.display_name) # NOTE(danms): Ignore flavor migrations for the moment self.assertEqual(set([]), inst.obj_what_changed() - set(['flavor'])) def test_save_related_object_if_none(self): with mock.patch.object(objects.Instance, '_save_pci_requests' ) as save_mock: inst = objects.Instance() inst = objects.Instance._from_db_object(self.context, inst, self.fake_instance) inst.pci_requests = None inst.save() self.assertTrue(save_mock.called) @mock.patch('nova.db.instance_update_and_get_original') @mock.patch.object(instance.Instance, '_from_db_object') def test_save_does_not_refresh_pci_devices(self, mock_fdo, mock_update): # NOTE(danms): This tests that we don't update the pci_devices # field from the contents of the database. This is not because we # don't necessarily want to, but because the way pci_devices is # currently implemented it causes versioning issues. When that is # resolved, this test should go away. 
mock_update.return_value = None, None inst = objects.Instance(context=self.context, id=123) inst.uuid = uuids.test_instance_not_refresh inst.pci_devices = pci_device.PciDeviceList() inst.save() self.assertNotIn('pci_devices', mock_fdo.call_args_list[0][1]['expected_attrs']) @mock.patch('nova.db.instance_extra_update_by_uuid') @mock.patch('nova.db.instance_update_and_get_original') @mock.patch.object(instance.Instance, '_from_db_object') def test_save_updates_numa_topology(self, mock_fdo, mock_update, mock_extra_update): fake_obj_numa_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0]), memory=128), objects.InstanceNUMACell(id=1, cpuset=set([1]), memory=128)]) fake_obj_numa_topology.instance_uuid = uuids.instance jsonified = fake_obj_numa_topology._to_json() mock_update.return_value = None, None inst = objects.Instance( context=self.context, id=123, uuid=uuids.instance) inst.numa_topology = fake_obj_numa_topology inst.save() # NOTE(sdague): the json representation of nova object for # NUMA isn't stable from a string comparison # perspective. There are sets which get converted to lists, # and based on platform differences may show up in different # orders. So we can't have mock do the comparison. Instead # manually compare the final parameter using our json equality # operator which does the right thing here. 
mock_extra_update.assert_called_once_with( self.context, inst.uuid, mock.ANY) called_arg = mock_extra_update.call_args_list[0][0][2]['numa_topology'] self.assertJsonEqual(called_arg, jsonified) mock_extra_update.reset_mock() inst.numa_topology = None inst.save() mock_extra_update.assert_called_once_with( self.context, inst.uuid, {'numa_topology': None}) @mock.patch('nova.db.instance_extra_update_by_uuid') def test_save_vcpu_model(self, mock_update): inst = fake_instance.fake_instance_obj(self.context) inst.vcpu_model = test_vcpu_model.fake_vcpumodel inst.save() self.assertTrue(mock_update.called) self.assertEqual(1, mock_update.call_count) actual_args = mock_update.call_args self.assertEqual(self.context, actual_args[0][0]) self.assertEqual(inst.uuid, actual_args[0][1]) self.assertEqual(['vcpu_model'], list(actual_args[0][2].keys())) self.assertJsonEqual(jsonutils.dumps( test_vcpu_model.fake_vcpumodel.obj_to_primitive()), actual_args[0][2]['vcpu_model']) mock_update.reset_mock() inst.vcpu_model = None inst.save() mock_update.assert_called_once_with( self.context, inst.uuid, {'vcpu_model': None}) @mock.patch('nova.db.instance_extra_update_by_uuid') def test_save_migration_context_model(self, mock_update): inst = fake_instance.fake_instance_obj(self.context) inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj( self.context) inst.save() self.assertTrue(mock_update.called) self.assertEqual(1, mock_update.call_count) actual_args = mock_update.call_args self.assertEqual(self.context, actual_args[0][0]) self.assertEqual(inst.uuid, actual_args[0][1]) self.assertEqual(['migration_context'], list(actual_args[0][2].keys())) self.assertIsInstance( objects.MigrationContext.obj_from_db_obj( actual_args[0][2]['migration_context']), objects.MigrationContext) mock_update.reset_mock() inst.migration_context = None inst.save() mock_update.assert_called_once_with( self.context, inst.uuid, {'migration_context': None}) def 
test_save_flavor_skips_unchanged_flavors(self): inst = objects.Instance(context=self.context, flavor=objects.Flavor()) inst.obj_reset_changes() with mock.patch('nova.db.instance_extra_update_by_uuid') as mock_upd: inst.save() self.assertFalse(mock_upd.called) @mock.patch.object(notifications, 'send_update') @mock.patch.object(cells_rpcapi.CellsAPI, 'instance_update_from_api') @mock.patch.object(cells_rpcapi.CellsAPI, 'instance_update_at_top') @mock.patch.object(db, 'instance_update_and_get_original') def _test_skip_cells_sync_helper(self, mock_db_update, mock_update_at_top, mock_update_from_api, mock_notif_update, cell_type): self.flags(enable=True, cell_type=cell_type, group='cells') inst = fake_instance.fake_instance_obj(self.context, cell_name='fake') inst.vm_state = 'foo' inst.task_state = 'bar' inst.cell_name = 'foo!bar@baz' old_ref = dict(base.obj_to_primitive(inst), vm_state='old', task_state='old') new_ref = dict(old_ref, vm_state='foo', task_state='bar') newer_ref = dict(new_ref, vm_state='bar', task_state='foo') mock_db_update.side_effect = [(old_ref, new_ref), (new_ref, newer_ref)] with inst.skip_cells_sync(): inst.save() mock_update_at_top.assert_has_calls([]) mock_update_from_api.assert_has_calls([]) self.assertFalse(mock_notif_update.called) inst.vm_state = 'bar' inst.task_state = 'foo' def fake_update_from_api(context, instance, expected_vm_state, expected_task_state, admin_state_reset): self.assertEqual('foo!bar@baz', instance.cell_name) # This is re-mocked so that cell_name can be checked above. Since # instance objects have no equality testing assert_called_once_with # doesn't work. 
with mock.patch.object(cells_rpcapi.CellsAPI, 'instance_update_from_api', side_effect=fake_update_from_api) as fake_update_from_api: inst.save() self.assertEqual('foo!bar@baz', inst.cell_name) self.assertTrue(mock_notif_update.called) if cell_type == 'compute': mock_update_at_top.assert_called_once_with(self.context, mock.ANY) # Compare primitives since we can't check instance object equality expected_inst_p = base.obj_to_primitive(inst) actual_inst = mock_update_at_top.call_args[0][1] actual_inst_p = base.obj_to_primitive(actual_inst) self.assertEqual(expected_inst_p, actual_inst_p) self.assertFalse(fake_update_from_api.called) elif cell_type == 'api': self.assertFalse(mock_update_at_top.called) fake_update_from_api.assert_called_once_with(self.context, mock.ANY, None, None, False) expected_calls = [ mock.call(self.context, inst.uuid, {'vm_state': 'foo', 'task_state': 'bar', 'cell_name': 'foo!bar@baz'}, columns_to_join=['system_metadata', 'extra', 'extra.flavor']), mock.call(self.context, inst.uuid, {'vm_state': 'bar', 'task_state': 'foo'}, columns_to_join=['system_metadata'])] mock_db_update.assert_has_calls(expected_calls) def test_skip_cells_api(self): self._test_skip_cells_sync_helper(cell_type='api') def test_skip_cells_compute(self): self._test_skip_cells_sync_helper(cell_type='compute') def test_get_deleted(self): fake_inst = dict(self.fake_instance, id=123, deleted=123) fake_uuid = fake_inst['uuid'] self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst) self.mox.ReplayAll() inst = objects.Instance.get_by_uuid(self.context, fake_uuid) # NOTE(danms): Make sure it's actually a bool self.assertTrue(inst.deleted) def test_get_not_cleaned(self): fake_inst = dict(self.fake_instance, id=123, cleaned=None) fake_uuid = fake_inst['uuid'] self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(self.context, fake_uuid, 
columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst) self.mox.ReplayAll() inst = objects.Instance.get_by_uuid(self.context, fake_uuid) # NOTE(mikal): Make sure it's actually a bool self.assertFalse(inst.cleaned) def test_get_cleaned(self): fake_inst = dict(self.fake_instance, id=123, cleaned=1) fake_uuid = fake_inst['uuid'] self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst) self.mox.ReplayAll() inst = objects.Instance.get_by_uuid(self.context, fake_uuid) # NOTE(mikal): Make sure it's actually a bool self.assertTrue(inst.cleaned) def test_with_info_cache(self): fake_inst = dict(self.fake_instance) fake_uuid = fake_inst['uuid'] nwinfo1 = network_model.NetworkInfo.hydrate([{'address': 'foo'}]) nwinfo2 = network_model.NetworkInfo.hydrate([{'address': 'bar'}]) nwinfo1_json = nwinfo1.json() nwinfo2_json = nwinfo2.json() fake_info_cache = test_instance_info_cache.fake_info_cache fake_inst['info_cache'] = dict( fake_info_cache, network_info=nwinfo1_json, instance_uuid=fake_uuid) self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.mox.StubOutWithMock(db, 'instance_info_cache_update') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst) db.instance_info_cache_update(self.context, fake_uuid, {'network_info': nwinfo2_json}).AndReturn(fake_info_cache) self.mox.ReplayAll() inst = objects.Instance.get_by_uuid(self.context, fake_uuid) self.assertEqual(nwinfo1, inst.info_cache.network_info) self.assertEqual(fake_uuid, inst.info_cache.instance_uuid) inst.info_cache.network_info = nwinfo2 inst.save() def test_with_info_cache_none(self): fake_inst = dict(self.fake_instance, info_cache=None) fake_uuid = fake_inst['uuid'] self.mox.StubOutWithMock(db, 'instance_get_by_uuid') 
        # (continuation of test_with_info_cache_none) The stubbed DB read
        # returns a row whose info_cache is None; the object should mirror
        # that rather than fabricating an empty InstanceInfoCache.
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache']
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
                                            ['info_cache'])
        self.assertIsNone(inst.info_cache)

    def test_with_security_groups(self):
        # Loading hydrates security_groups into SecurityGroup objects and
        # saving a modified group only updates that group in the DB.
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        fake_inst['security_groups'] = [
            {'id': 1, 'name': 'secgroup1', 'description': 'fake-desc',
             'user_id': 'fake-user', 'project_id': 'fake_project',
             'created_at': None, 'updated_at': None, 'deleted_at': None,
             'deleted': False},
            {'id': 2, 'name': 'secgroup2', 'description': 'fake-desc',
             'user_id': 'fake-user', 'project_id': 'fake_project',
             'created_at': None, 'updated_at': None, 'deleted_at': None,
             'deleted': False},
            ]
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'security_group_update')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst)
        db.security_group_update(self.context, 1, {'description': 'changed'}
                                 ).AndReturn(fake_inst['security_groups'][0])
        self.mox.ReplayAll()
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertEqual(2, len(inst.security_groups))
        for index, group in enumerate(fake_inst['security_groups']):
            for key in group:
                self.assertEqual(group[key],
                                 inst.security_groups[index][key])
                self.assertIsInstance(inst.security_groups[index],
                                      security_group.SecurityGroup)
        # Nothing should be dirty right after loading; saving the changed
        # description must clear the changed-set again.
        self.assertEqual(set(), inst.security_groups.obj_what_changed())
        inst.security_groups[0].description = 'changed'
        inst.save()
        self.assertEqual(set(), inst.security_groups.obj_what_changed())

    def test_with_empty_security_groups(self):
        # No security-group rows in the DB -> empty list on the object.
        fake_inst = dict(self.fake_instance, security_groups=[])
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertEqual(0, len(inst.security_groups))

    def test_with_empty_pci_devices(self):
        # No PCI device rows in the DB -> empty list on the object.
        fake_inst = dict(self.fake_instance, pci_devices=[])
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['pci_devices']
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
                                            ['pci_devices'])
        self.assertEqual(0, len(inst.pci_devices))

    def test_with_pci_devices(self):
        # Two allocated PCI device rows are hydrated and keep the owning
        # instance's uuid.
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        fake_inst['pci_devices'] = [
            {'created_at': None,
             'updated_at': None,
             'deleted_at': None,
             'deleted': None,
             'id': 2,
             'compute_node_id': 1,
             'address': 'a1',
             'vendor_id': 'v1',
             'numa_node': 0,
             'product_id': 'p1',
             'dev_type': fields.PciDeviceType.STANDARD,
             'status': fields.PciDeviceStatus.ALLOCATED,
             'dev_id': 'i',
             'label': 'l',
             'instance_uuid': fake_uuid,
             'request_id': None,
             'parent_addr': None,
             'extra_info': '{}'},
            {'created_at': None,
             'updated_at': None,
             'deleted_at': None,
             'deleted': None,
             'id': 1,
             'compute_node_id': 1,
             'address': 'a',
             'vendor_id': 'v',
             'numa_node': 1,
             'product_id': 'p',
             'dev_type': fields.PciDeviceType.STANDARD,
             'status': fields.PciDeviceStatus.ALLOCATED,
             'dev_id': 'i',
             'label': 'l',
             'instance_uuid': fake_uuid,
             'request_id': None,
             'parent_addr': 'a1',
             'extra_info': '{}'},
            ]
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['pci_devices']
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
                                            ['pci_devices'])
        self.assertEqual(2, len(inst.pci_devices))
        self.assertEqual(fake_uuid, inst.pci_devices[0].instance_uuid)
        self.assertEqual(fake_uuid, inst.pci_devices[1].instance_uuid)

    def test_with_fault(self):
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        # (continuation of test_with_fault) Fault rows keyed by the
        # instance uuid; expected_attrs=['fault'] should attach the first.
        fake_faults = [dict(x, instance_uuid=fake_uuid)
                       for x in test_instance_fault.fake_faults['fake-uuid']]
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=[]
                                ).AndReturn(self.fake_instance)
        db.instance_fault_get_by_instance_uuids(
            self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults})
        self.mox.ReplayAll()
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
                                            expected_attrs=['fault'])
        self.assertEqual(fake_faults[0], dict(inst.fault.items()))

    @mock.patch('nova.objects.EC2Ids.get_by_instance')
    @mock.patch('nova.db.instance_get_by_uuid')
    def test_with_ec2_ids(self, mock_get, mock_ec2):
        # expected_attrs=['ec2_ids'] populates the EC2 id mapping through
        # EC2Ids.get_by_instance.
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        mock_get.return_value = fake_inst
        fake_ec2_ids = objects.EC2Ids(instance_id='fake-inst',
                                      ami_id='fake-ami')
        mock_ec2.return_value = fake_ec2_ids
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
                                            expected_attrs=['ec2_ids'])
        mock_ec2.assert_called_once_with(self.context, mock.ANY)
        self.assertEqual(fake_ec2_ids.instance_id, inst.ec2_ids.instance_id)

    @mock.patch('nova.db.instance_get_by_uuid')
    def test_with_image_meta(self, mock_get):
        # expected_attrs=['image_meta'] yields an ImageMeta object built
        # from the fake instance's image properties.
        fake_inst = dict(self.fake_instance)
        mock_get.return_value = fake_inst
        inst = instance.Instance.get_by_uuid(self.context,
                                             fake_inst['uuid'],
                                             expected_attrs=['image_meta'])
        image_meta = inst.image_meta
        self.assertIsInstance(image_meta, objects.ImageMeta)
        self.assertEqual(100, image_meta.min_ram)
        self.assertEqual('ide', image_meta.properties.hw_disk_bus)
        self.assertEqual('ne2k_pci', image_meta.properties.hw_vif_model)

    def test_iteritems_with_extra_attrs(self):
        # items() must include synthetic attributes such as 'name'.
        self.stubs.Set(objects.Instance, 'name', 'foo')
        inst = objects.Instance(uuid=uuids.instance)
        self.assertEqual(sorted({'uuid': uuids.instance,
                                 'name': 'foo',
                                }.items()),
                         sorted(inst.items()))

    def _test_metadata_change_tracking(self, which):
        # Mutating the named metadata dict in place must be tracked as a
        # change, and obj_reset_changes() must clear it again.
        inst = objects.Instance(uuid=uuids.instance)
        setattr(inst, which, {})
        inst.obj_reset_changes()
        getattr(inst, which)['foo'] = 'bar'
        self.assertEqual(set([which]), inst.obj_what_changed())
        inst.obj_reset_changes()
        self.assertEqual(set(), inst.obj_what_changed())

    def test_create_skip_scheduled_at(self):
        # scheduled_at=None must not be passed through to instance_create;
        # the mox expectation on `vals` (without scheduled_at) enforces it.
        self.mox.StubOutWithMock(db, 'instance_create')
        vals = {'host': 'foo-host',
                'memory_mb': 128,
                'system_metadata': {'foo': 'bar'},
                'extra': {
                    'vcpu_model': None,
                    'numa_topology': None,
                    'pci_requests': None,
                }}
        fake_inst = fake_instance.fake_db_instance(**vals)
        db.instance_create(self.context, vals).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = objects.Instance(context=self.context,
                                host='foo-host', memory_mb=128,
                                scheduled_at=None,
                                system_metadata={'foo': 'bar'})
        inst.create()
        self.assertEqual('foo-host', inst.host)

    def test_metadata_change_tracking(self):
        self._test_metadata_change_tracking('metadata')

    def test_system_metadata_change_tracking(self):
        self._test_metadata_change_tracking('system_metadata')

    def test_create_stubbed(self):
        # create() hands exactly the set fields (plus empty extras) to
        # instance_create.
        self.mox.StubOutWithMock(db, 'instance_create')
        vals = {'host': 'foo-host',
                'memory_mb': 128,
                'system_metadata': {'foo': 'bar'},
                'extra': {
                    'vcpu_model': None,
                    'numa_topology': None,
                    'pci_requests': None,
                }}
        fake_inst = fake_instance.fake_db_instance(**vals)
        db.instance_create(self.context, vals).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = objects.Instance(context=self.context,
                                host='foo-host', memory_mb=128,
                                system_metadata={'foo': 'bar'})
        inst.create()

    def test_create(self):
        # A bare create() still persists the (empty) extra columns and the
        # object exposes ec2_ids afterwards.
        self.mox.StubOutWithMock(db, 'instance_create')
        extras = {'vcpu_model': None,
                  'numa_topology': None,
                  'pci_requests': None}
        db.instance_create(self.context, {'extra': extras}).AndReturn(
            self.fake_instance)
        self.mox.ReplayAll()
        inst = objects.Instance(context=self.context)
        inst.create()
        self.assertEqual(self.fake_instance['id'], inst.id)
        self.assertIsNotNone(inst.ec2_ids)

    def test_create_with_values(self):
        # Values set before create() round-trip through the real DB layer.
        inst1 = objects.Instance(context=self.context,
                                 user_id=self.context.user_id,
                                 project_id=self.context.project_id,
                                 host='foo-host')
        inst1.create()
        self.assertEqual('foo-host', inst1.host)
        inst2 = objects.Instance.get_by_uuid(self.context, inst1.uuid)
        self.assertEqual('foo-host', inst2.host)

    def test_create_with_extras(self):
        # numa_topology, pci_requests and vcpu_model given at create time
        # must be persisted and readable back via their own objects.
        inst = objects.Instance(context=self.context,
            uuid=self.fake_instance['uuid'],
            numa_topology=test_instance_numa_topology.fake_obj_numa_topology,
            pci_requests=objects.InstancePCIRequests(
                requests=[
                    objects.InstancePCIRequest(count=123,
                                               spec=[])]),
            vcpu_model=test_vcpu_model.fake_vcpumodel,
            )
        inst.create()
        self.assertIsNotNone(inst.numa_topology)
        self.assertIsNotNone(inst.pci_requests)
        self.assertEqual(1, len(inst.pci_requests.requests))
        self.assertIsNotNone(inst.vcpu_model)
        got_numa_topo = objects.InstanceNUMATopology.get_by_instance_uuid(
            self.context, inst.uuid)
        self.assertEqual(inst.numa_topology.instance_uuid,
                         got_numa_topo.instance_uuid)
        got_pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
            self.context, inst.uuid)
        self.assertEqual(123, got_pci_requests.requests[0].count)
        vcpu_model = objects.VirtCPUModel.get_by_instance_uuid(
            self.context, inst.uuid)
        self.assertEqual('fake-model', vcpu_model.model)

    def test_recreate_fails(self):
        # Calling create() twice on the same object is an error.
        inst = objects.Instance(context=self.context,
                                user_id=self.context.user_id,
                                project_id=self.context.project_id,
                                host='foo-host')
        inst.create()
        self.assertRaises(exception.ObjectActionError, inst.create)

    def test_create_with_special_things(self):
        # security_groups and info_cache objects must be flattened into
        # the plain values dict handed to instance_create.
        self.mox.StubOutWithMock(db, 'instance_create')
        fake_inst = fake_instance.fake_db_instance()
        db.instance_create(self.context,
                           {'host': 'foo-host',
                            'security_groups': ['foo', 'bar'],
                            'info_cache': {'network_info': '[]'},
                            'extra': {
                                'vcpu_model': None,
                                'numa_topology': None,
                                'pci_requests': None,
                            },
                            }
                           ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        secgroups = security_group.SecurityGroupList()
        secgroups.objects = []
        for name in ('foo', 'bar'):
            secgroup = security_group.SecurityGroup()
            secgroup.name = name
            secgroups.objects.append(secgroup)
        info_cache = instance_info_cache.InstanceInfoCache()
        info_cache.network_info = network_model.NetworkInfo()
        inst = objects.Instance(context=self.context,
                                host='foo-host', security_groups=secgroups,
                                info_cache=info_cache)
        inst.create()

    def test_destroy_stubbed(self):
        # destroy() must copy deleted/deleted_at from the DB return value.
        self.mox.StubOutWithMock(db, 'instance_destroy')
        deleted_at = datetime.datetime(1955, 11, 6)
        fake_inst = fake_instance.fake_db_instance(deleted_at=deleted_at,
                                                   deleted=True)
        db.instance_destroy(self.context, uuids.instance,
                            constraint=None).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = objects.Instance(context=self.context, id=1,
                                uuid=uuids.instance, host='foo')
        inst.destroy()
        self.assertEqual(timeutils.normalize_time(deleted_at),
                         timeutils.normalize_time(inst.deleted_at))
        self.assertTrue(inst.deleted)

    def test_destroy(self):
        # Real-DB destroy: the row must be gone afterwards.
        values = {'user_id': self.context.user_id,
                  'project_id': self.context.project_id}
        db_inst = db.instance_create(self.context, values)
        inst = objects.Instance(context=self.context, id=db_inst['id'],
                                uuid=db_inst['uuid'])
        inst.destroy()
        self.assertRaises(exception.InstanceNotFound,
                          db.instance_get_by_uuid, self.context,
                          db_inst['uuid'])

    def test_destroy_host_constraint(self):
        # Changing host after load violates the destroy host constraint.
        values = {'user_id': self.context.user_id,
                  'project_id': self.context.project_id,
                  'host': 'foo'}
        db_inst = db.instance_create(self.context, values)
        inst = objects.Instance.get_by_uuid(self.context, db_inst['uuid'])
        inst.host = None
        self.assertRaises(exception.ObjectActionError,
                          inst.destroy)

    @mock.patch.object(cells_rpcapi.CellsAPI, 'instance_destroy_at_top')
    @mock.patch.object(db, 'instance_destroy')
    def test_destroy_cell_sync_to_top(self, mock_destroy,
                                      mock_destroy_at_top):
        # In a compute cell, destroy is propagated to the top cell with an
        # Instance object as payload.
        self.flags(enable=True, cell_type='compute', group='cells')
        fake_inst = fake_instance.fake_db_instance(deleted=True)
        mock_destroy.return_value = fake_inst
        inst = objects.Instance(context=self.context, id=1,
                                uuid=uuids.instance)
        inst.destroy()
        mock_destroy_at_top.assert_called_once_with(self.context,
                                                    mock.ANY)
        actual_inst = mock_destroy_at_top.call_args[0][1]
        self.assertIsInstance(actual_inst, instance.Instance)

    @mock.patch.object(cells_rpcapi.CellsAPI, 'instance_destroy_at_top')
    @mock.patch.object(db, 'instance_destroy')
    def test_destroy_no_cell_sync_to_top(self, mock_destroy,
                                         mock_destroy_at_top):
        # Without cells enabled there must be no upward sync on destroy.
        fake_inst = fake_instance.fake_db_instance(deleted=True)
        mock_destroy.return_value = fake_inst
        inst = objects.Instance(context=self.context, id=1,
                                uuid=uuids.instance)
        inst.destroy()
        self.assertFalse(mock_destroy_at_top.called)

    def test_name_does_not_trigger_lazy_loads(self):
        # Rendering the instance name template must not lazy-load 'fault'.
        values = {'user_id': self.context.user_id,
                  'project_id': self.context.project_id,
                  'host': 'foo'}
        db_inst = db.instance_create(self.context, values)
        inst = objects.Instance.get_by_uuid(self.context, db_inst['uuid'])
        self.assertFalse(inst.obj_attr_is_set('fault'))
        self.flags(instance_name_template='foo-%(uuid)s')
        self.assertEqual('foo-%s' % db_inst['uuid'], inst.name)
        self.assertFalse(inst.obj_attr_is_set('fault'))

    def test_from_db_object_not_overwrite_info_cache(self):
        # An info_cache already set on the object survives _from_db_object.
        info_cache = instance_info_cache.InstanceInfoCache()
        inst = objects.Instance(context=self.context,
                                info_cache=info_cache)
        db_inst = fake_instance.fake_db_instance()
        db_inst['info_cache'] = dict(
            test_instance_info_cache.fake_info_cache)
        inst._from_db_object(self.context, inst, db_inst,
                             expected_attrs=['info_cache'])
        self.assertIs(info_cache, inst.info_cache)

    def test_from_db_object_info_cache_not_set(self):
        # A DB row without an info_cache column leaves the attribute None.
        inst = instance.Instance(context=self.context,
                                 info_cache=None)
        db_inst = fake_instance.fake_db_instance()
        db_inst.pop('info_cache')
        inst._from_db_object(self.context, inst, db_inst,
                             expected_attrs=['info_cache'])
        self.assertIsNone(inst.info_cache)

    def test_from_db_object_security_groups_net_set(self):
        # A DB row without security_groups hydrates as an empty list.
        inst = instance.Instance(context=self.context,
                                 info_cache=None)
        db_inst = fake_instance.fake_db_instance()
        db_inst.pop('security_groups')
        inst._from_db_object(self.context, inst, db_inst,
                             expected_attrs=['security_groups'])
        self.assertEqual([], inst.security_groups.objects)

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    def test_get_with_pci_requests(self, mock_get):
        # expected_attrs=['pci_requests'] sets the attribute at load time.
        mock_get.return_value = objects.InstancePCIRequests()
        db_instance = db.instance_create(self.context, {
            'user_id': self.context.user_id,
            'project_id': self.context.project_id})
        instance = objects.Instance.get_by_uuid(
            self.context, db_instance['uuid'],
            expected_attrs=['pci_requests'])
        self.assertTrue(instance.obj_attr_is_set('pci_requests'))
        self.assertIsNotNone(instance.pci_requests)

    def test_get_flavor(self):
        # Default namespace reads the current flavor.
        db_flavor = flavors.get_default_flavor()
        inst = objects.Instance(flavor=db_flavor)
        self.assertEqual(db_flavor['flavorid'],
                         inst.get_flavor().flavorid)

    def test_get_flavor_namespace(self):
        # get_flavor('old') reads the old_flavor attribute.
        db_flavor = flavors.get_default_flavor()
        inst = objects.Instance(old_flavor=db_flavor)
        self.assertEqual(db_flavor['flavorid'],
                         inst.get_flavor('old').flavorid)

    @mock.patch.object(db, 'instance_metadata_delete')
    def test_delete_metadata_key(self, db_delete):
        # delete_metadata_key() updates the dict in place without leaving
        # the object dirty, and issues the DB delete directly.
        inst = objects.Instance(context=self.context, id=1,
                                uuid=uuids.instance)
        inst.metadata = {'foo': '1', 'bar': '2'}
        inst.obj_reset_changes()
        inst.delete_metadata_key('foo')
        self.assertEqual({'bar': '2'}, inst.metadata)
        self.assertEqual({}, inst.obj_get_changes())
        db_delete.assert_called_once_with(self.context, inst.uuid, 'foo')

    def test_reset_changes(self):
        # obj_reset_changes(['metadata']) snapshots only that field into
        # _orig_metadata; system_metadata's snapshot stays empty.
        inst = objects.Instance()
        inst.metadata = {'1985': 'present'}
        inst.system_metadata = {'1955': 'past'}
        self.assertEqual({}, inst._orig_metadata)
        inst.obj_reset_changes(['metadata'])
        self.assertEqual({'1985': 'present'}, inst._orig_metadata)
        self.assertEqual({}, inst._orig_system_metadata)

    def test_load_generic_calls_handler(self):
        # Lazy-loading an unset field goes through _load_generic.
        inst = objects.Instance(context=self.context,
                                uuid=uuids.instance)
        with mock.patch.object(inst, '_load_generic') as mock_load:
            def fake_load(name):
                inst.system_metadata = {}

            mock_load.side_effect = fake_load
            inst.system_metadata
            mock_load.assert_called_once_with('system_metadata')
    def test_load_fault_calls_handler(self):
        # Lazy-loading 'fault' must use the dedicated _load_fault handler.
        inst = objects.Instance(context=self.context,
                                uuid=uuids.instance)
        with mock.patch.object(inst, '_load_fault') as mock_load:
            def fake_load():
                inst.fault = None

            mock_load.side_effect = fake_load
            inst.fault
            mock_load.assert_called_once_with()

    def test_load_ec2_ids_calls_handler(self):
        # Lazy-loading 'ec2_ids' must use the dedicated _load_ec2_ids.
        inst = objects.Instance(context=self.context,
                                uuid=uuids.instance)
        with mock.patch.object(inst, '_load_ec2_ids') as mock_load:
            def fake_load():
                inst.ec2_ids = objects.EC2Ids(instance_id='fake-inst',
                                              ami_id='fake-ami')

            mock_load.side_effect = fake_load
            inst.ec2_ids
            mock_load.assert_called_once_with()

    def test_load_migration_context(self):
        # Lazy-loading migration_context queries by instance uuid.
        inst = instance.Instance(context=self.context,
                                 uuid=uuids.instance)
        with mock.patch.object(
                objects.MigrationContext, 'get_by_instance_uuid',
                return_value=test_mig_ctxt.fake_migration_context_obj
        ) as mock_get:
            inst.migration_context
            mock_get.assert_called_once_with(self.context, inst.uuid)

    def test_load_migration_context_no_context(self):
        # A missing migration context loads as None, not as an error.
        inst = instance.Instance(context=self.context,
                                 uuid=uuids.instance)
        with mock.patch.object(
                objects.MigrationContext, 'get_by_instance_uuid',
                side_effect=exception.MigrationContextNotFound(
                    instance_uuid=inst.uuid)
        ) as mock_get:
            mig_ctxt = inst.migration_context
            mock_get.assert_called_once_with(self.context, inst.uuid)
            self.assertIsNone(mig_ctxt)

    def test_load_migration_context_no_data(self):
        # With db_context=None there is nothing to load and no DB call.
        inst = instance.Instance(context=self.context,
                                 uuid=uuids.instance)
        with mock.patch.object(
                objects.MigrationContext, 'get_by_instance_uuid') as mock_get:
            loaded_ctxt = inst._load_migration_context(db_context=None)
            self.assertFalse(mock_get.called)
            self.assertIsNone(loaded_ctxt)

    def test_apply_revert_migration_context(self):
        # apply sets the new NUMA topology; revert restores the old (None).
        inst = instance.Instance(context=self.context,
                                 uuid=uuids.instance, numa_topology=None)
        inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
            self.context)
        inst.apply_migration_context()
        self.assertIsInstance(inst.numa_topology,
                              objects.InstanceNUMATopology)
        inst.revert_migration_context()
        self.assertIsNone(inst.numa_topology)

    def test_drop_migration_context(self):
        # Dropping clears the attribute and nulls the DB extra column.
        inst = instance.Instance(context=self.context,
                                 uuid=uuids.instance)
        inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
            self.context)
        inst.migration_context.instance_uuid = inst.uuid
        inst.migration_context.id = 7
        with mock.patch(
                'nova.db.instance_extra_update_by_uuid') as update_extra:
            inst.drop_migration_context()
            self.assertIsNone(inst.migration_context)
            update_extra.assert_called_once_with(self.context, inst.uuid,
                                                 {"migration_context": None})

    def test_mutated_migration_context(self):
        # Inside the context manager the new NUMA topology is active; the
        # original is restored on exit.
        numa_topology = (test_instance_numa_topology.
                         fake_obj_numa_topology.obj_clone())
        numa_topology.cells[0].memory = 1024
        numa_topology.cells[1].memory = 1024
        inst = instance.Instance(context=self.context,
                                 uuid=uuids.instance,
                                 numa_topology=numa_topology)
        inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
            self.context)
        with inst.mutated_migration_context():
            self.assertIs(inst.numa_topology,
                          inst.migration_context.new_numa_topology)
        self.assertIs(numa_topology, inst.numa_topology)

    def test_clear_numa_topology(self):
        # Clearing marks the field changed and resets each cell id to -1.
        numa_topology = (test_instance_numa_topology.
                         fake_obj_numa_topology.obj_clone())
        numa_topology.cells[0].id = 42
        numa_topology.cells[1].id = 43
        inst = instance.Instance(context=self.context,
                                 uuid=uuids.instance,
                                 numa_topology=numa_topology)
        inst.obj_reset_changes()
        inst.clear_numa_topology()
        self.assertIn('numa_topology', inst.obj_what_changed())
        self.assertEqual(-1, numa_topology.cells[0].id)
        self.assertEqual(-1, numa_topology.cells[1].id)

    @mock.patch.object(objects.Instance, 'get_by_uuid')
    def test_load_generic(self, mock_get):
        # A generic lazy-load re-fetches the instance and copies the field.
        inst2 = instance.Instance(metadata={'foo': 'bar'})
        mock_get.return_value = inst2
        inst = instance.Instance(context=self.context,
                                 uuid=uuids.instance)
        inst.metadata

    @mock.patch('nova.db.instance_fault_get_by_instance_uuids')
    def test_load_fault(self, mock_get):
        # Loading 'fault' must not dirty unrelated fields like metadata.
        fake_fault = test_instance_fault.fake_faults['fake-uuid'][0]
        mock_get.return_value = {uuids.load_fault_instance: [fake_fault]}
        inst = objects.Instance(context=self.context,
                                uuid=uuids.load_fault_instance)
        fault = inst.fault
        mock_get.assert_called_once_with(self.context,
                                         [uuids.load_fault_instance])
        self.assertEqual(fake_fault['id'], fault.id)
        self.assertNotIn('metadata', inst.obj_what_changed())

    @mock.patch('nova.objects.EC2Ids.get_by_instance')
    def test_load_ec2_ids(self, mock_get):
        # Lazy-loading ec2_ids passes the whole instance to the getter.
        fake_ec2_ids = objects.EC2Ids(instance_id='fake-inst',
                                      ami_id='fake-ami')
        mock_get.return_value = fake_ec2_ids
        inst = objects.Instance(context=self.context, uuid=uuids.instance)
        ec2_ids = inst.ec2_ids
        mock_get.assert_called_once_with(self.context, inst)
        self.assertEqual(fake_ec2_ids, ec2_ids)

    @mock.patch('nova.objects.SecurityGroupList.get_by_instance')
    def test_load_security_groups(self, mock_get):
        # Lazy-loading security_groups passes the instance to the getter.
        secgroups = []
        for name in ('foo', 'bar'):
            secgroup = security_group.SecurityGroup()
            secgroup.name = name
            secgroups.append(secgroup)
        fake_secgroups = security_group.SecurityGroupList(objects=secgroups)
        mock_get.return_value = fake_secgroups
        inst = objects.Instance(context=self.context, uuid='fake')
        secgroups = inst.security_groups
        mock_get.assert_called_once_with(self.context, inst)
        self.assertEqual(fake_secgroups, secgroups)

    @mock.patch('nova.objects.PciDeviceList.get_by_instance_uuid')
    def test_load_pci_devices(self, mock_get):
        # Lazy-loading pci_devices queries by the instance uuid.
        fake_pci_devices = pci_device.PciDeviceList()
        mock_get.return_value = fake_pci_devices
        inst = objects.Instance(context=self.context,
                                uuid=uuids.pci_devices)
        pci_devices = inst.pci_devices
        mock_get.assert_called_once_with(self.context, uuids.pci_devices)
        self.assertEqual(fake_pci_devices, pci_devices)

    def test_get_with_extras(self):
        # pci_requests is only present on the loaded object when it was
        # explicitly requested via expected_attrs.
        pci_requests = objects.InstancePCIRequests(requests=[
            objects.InstancePCIRequest(count=123, spec=[])])
        inst = objects.Instance(context=self.context,
                                user_id=self.context.user_id,
                                project_id=self.context.project_id,
                                pci_requests=pci_requests)
        inst.create()
        uuid = inst.uuid
        inst = objects.Instance.get_by_uuid(self.context, uuid)
        self.assertFalse(inst.obj_attr_is_set('pci_requests'))
        inst = objects.Instance.get_by_uuid(
            self.context, uuid, expected_attrs=['pci_requests'])
        self.assertTrue(inst.obj_attr_is_set('pci_requests'))


class TestInstanceObject(test_objects._LocalTest,
                         _TestInstanceObject):
    def _test_save_objectfield_fk_constraint_fails(self, foreign_key,
                                                   expected_exception):
        # NOTE(danms): Do this here and not in the remote test because
        # we're mocking out obj_attr_is_set() without the thing actually
        # being set, which confuses the heck out of the serialization
        # stuff.
error = db_exc.DBReferenceError('table', 'constraint', foreign_key, 'key_table') # Prevent lazy-loading any fields, results in InstanceNotFound attrs = objects.instance.INSTANCE_OPTIONAL_ATTRS instance = fake_instance.fake_instance_obj(self.context, expected_attrs=attrs) fields_with_save_methods = [field for field in instance.fields if hasattr(instance, '_save_%s' % field)] for field in fields_with_save_methods: @mock.patch.object(instance, '_save_%s' % field) @mock.patch.object(instance, 'obj_attr_is_set') def _test(mock_is_set, mock_save_field): mock_is_set.return_value = True mock_save_field.side_effect = error instance.obj_reset_changes(fields=[field]) instance._changed_fields.add(field) self.assertRaises(expected_exception, instance.save) instance.obj_reset_changes(fields=[field]) _test() def test_save_objectfield_missing_instance_row(self): self._test_save_objectfield_fk_constraint_fails( 'instance_uuid', exception.InstanceNotFound) def test_save_objectfield_reraises_if_not_instance_related(self): self._test_save_objectfield_fk_constraint_fails( 'other_foreign_key', db_exc.DBReferenceError) class TestRemoteInstanceObject(test_objects._RemoteTest, _TestInstanceObject): pass class _TestInstanceListObject(object): def fake_instance(self, id, updates=None): db_inst = fake_instance.fake_db_instance(id=2, access_ip_v4='1.2.3.4', access_ip_v6='::1') db_inst['terminated_at'] = None db_inst['deleted_at'] = None db_inst['created_at'] = None db_inst['updated_at'] = None db_inst['launched_at'] = datetime.datetime(1955, 11, 12, 22, 4, 0) db_inst['security_groups'] = [] db_inst['deleted'] = 0 db_inst['info_cache'] = dict(test_instance_info_cache.fake_info_cache, instance_uuid=db_inst['uuid']) if updates: db_inst.update(updates) return db_inst def test_get_all_by_filters(self): fakes = [self.fake_instance(1), self.fake_instance(2)] self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid', 'asc', 
limit=None, marker=None, columns_to_join=['metadata'] ).AndReturn(fakes) self.mox.ReplayAll() inst_list = objects.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, 'uuid', 'asc', expected_attrs=['metadata'], use_slave=False) for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid) def test_get_all_by_filters_sorted(self): fakes = [self.fake_instance(1), self.fake_instance(2)] self.mox.StubOutWithMock(db, 'instance_get_all_by_filters_sort') db.instance_get_all_by_filters_sort(self.context, {'foo': 'bar'}, limit=None, marker=None, columns_to_join=['metadata'], sort_keys=['uuid'], sort_dirs=['asc']).AndReturn(fakes) self.mox.ReplayAll() inst_list = objects.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, expected_attrs=['metadata'], use_slave=False, sort_keys=['uuid'], sort_dirs=['asc']) for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid) @mock.patch.object(db, 'instance_get_all_by_filters_sort') @mock.patch.object(db, 'instance_get_all_by_filters') def test_get_all_by_filters_calls_non_sort(self, mock_get_by_filters, mock_get_by_filters_sort): '''Verifies InstanceList.get_by_filters calls correct DB function.''' # Single sort key/direction is set, call non-sorted DB function objects.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, sort_key='key', sort_dir='dir', limit=100, marker='uuid', use_slave=True) mock_get_by_filters.assert_called_once_with( self.context, {'foo': 'bar'}, 'key', 'dir', limit=100, marker='uuid', columns_to_join=None) self.assertEqual(0, mock_get_by_filters_sort.call_count) @mock.patch.object(db, 'instance_get_all_by_filters_sort') @mock.patch.object(db, 'instance_get_all_by_filters') def test_get_all_by_filters_calls_sort(self, mock_get_by_filters, mock_get_by_filters_sort): '''Verifies InstanceList.get_by_filters 
calls correct DB function.''' # Multiple sort keys/directions are set, call sorted DB function objects.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, limit=100, marker='uuid', use_slave=True, sort_keys=['key1', 'key2'], sort_dirs=['dir1', 'dir2']) mock_get_by_filters_sort.assert_called_once_with( self.context, {'foo': 'bar'}, limit=100, marker='uuid', columns_to_join=None, sort_keys=['key1', 'key2'], sort_dirs=['dir1', 'dir2']) self.assertEqual(0, mock_get_by_filters.call_count) def test_get_all_by_filters_works_for_cleaned(self): fakes = [self.fake_instance(1), self.fake_instance(2, updates={'deleted': 2, 'cleaned': None})] self.context.read_deleted = 'yes' self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') db.instance_get_all_by_filters(self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc', limit=None, marker=None, columns_to_join=['metadata']).AndReturn( [fakes[1]]) self.mox.ReplayAll() inst_list = objects.InstanceList.get_by_filters( self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc', expected_attrs=['metadata'], use_slave=False) self.assertEqual(1, len(inst_list)) self.assertIsInstance(inst_list.objects[0], instance.Instance) self.assertEqual(fakes[1]['uuid'], inst_list.objects[0].uuid) def test_get_by_host(self): fakes = [self.fake_instance(1), self.fake_instance(2)] self.mox.StubOutWithMock(db, 'instance_get_all_by_host') db.instance_get_all_by_host(self.context, 'foo', columns_to_join=None).AndReturn(fakes) self.mox.ReplayAll() inst_list = objects.InstanceList.get_by_host(self.context, 'foo') for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid) self.assertEqual(self.context, inst_list.objects[i]._context) self.assertEqual(set(), inst_list.obj_what_changed()) def test_get_by_host_and_node(self): fakes = [self.fake_instance(1), self.fake_instance(2)] self.mox.StubOutWithMock(db, 
'instance_get_all_by_host_and_node') db.instance_get_all_by_host_and_node(self.context, 'foo', 'bar', columns_to_join=None).AndReturn( fakes) self.mox.ReplayAll() inst_list = objects.InstanceList.get_by_host_and_node(self.context, 'foo', 'bar') for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid) def test_get_by_host_and_not_type(self): fakes = [self.fake_instance(1), self.fake_instance(2)] self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_not_type') db.instance_get_all_by_host_and_not_type(self.context, 'foo', type_id='bar').AndReturn( fakes) self.mox.ReplayAll() inst_list = objects.InstanceList.get_by_host_and_not_type( self.context, 'foo', 'bar') for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid) @mock.patch('nova.objects.instance._expected_cols') @mock.patch('nova.db.instance_get_all') def test_get_all(self, mock_get_all, mock_exp): fakes = [self.fake_instance(1), self.fake_instance(2)] mock_get_all.return_value = fakes mock_exp.return_value = mock.sentinel.exp_att inst_list = objects.InstanceList.get_all( self.context, expected_attrs='fake') mock_get_all.assert_called_once_with( self.context, columns_to_join=mock.sentinel.exp_att) for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid) def test_get_hung_in_rebooting(self): fakes = [self.fake_instance(1), self.fake_instance(2)] dt = utils.isotime() self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting') db.instance_get_all_hung_in_rebooting(self.context, dt).AndReturn( fakes) self.mox.ReplayAll() inst_list = objects.InstanceList.get_hung_in_rebooting(self.context, dt) for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) 
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)

    def test_get_active_by_window_joined(self):
        fakes = [self.fake_instance(1), self.fake_instance(2)]
        # NOTE(mriedem): Send in a timezone-naive datetime since the
        # InstanceList.get_active_by_window_joined method should convert it
        # to tz-aware for the DB API call, which we'll assert with our stub.
        dt = timeutils.utcnow()

        def fake_instance_get_active_by_window_joined(context, begin, end,
                                                      project_id, host,
                                                      columns_to_join):
            # make sure begin is tz-aware
            self.assertIsNotNone(begin.utcoffset())
            self.assertIsNone(end)
            self.assertEqual(['metadata'], columns_to_join)
            return fakes

        with mock.patch.object(db, 'instance_get_active_by_window_joined',
                               fake_instance_get_active_by_window_joined):
            inst_list = objects.InstanceList.get_active_by_window_joined(
                            self.context, dt, expected_attrs=['metadata'])
        for fake, obj in zip(fakes, inst_list.objects):
            self.assertIsInstance(obj, instance.Instance)
            self.assertEqual(fake['uuid'], obj.uuid)

    def test_with_fault(self):
        # Faults attach per-instance; instances with no fault row get
        # fault=None.
        fake_insts = [
            fake_instance.fake_db_instance(uuid=uuids.faults_instance,
                                           host='host'),
            fake_instance.fake_db_instance(uuid=uuids.faults_instance_nonexist,
                                           host='host'),
            ]
        fake_faults = test_instance_fault.fake_faults
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        db.instance_get_all_by_host(self.context, 'host',
                                    columns_to_join=[]).AndReturn(fake_insts)
        db.instance_fault_get_by_instance_uuids(
            self.context, [x['uuid'] for x in fake_insts]
            ).AndReturn(fake_faults)
        self.mox.ReplayAll()
        instances = objects.InstanceList.get_by_host(self.context, 'host',
                                                     expected_attrs=['fault'],
                                                     use_slave=False)
        self.assertEqual(2, len(instances))
        self.assertEqual(fake_faults['fake-uuid'][0],
                         dict(instances[0].fault))
        self.assertIsNone(instances[1].fault)

    def test_fill_faults(self):
        # fill_faults() attaches the first fault per instance, returns the
        # uuids that had one, and leaves nothing dirty.
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        inst1 = objects.Instance(uuid=uuids.db_fault_1)
        inst2 = objects.Instance(uuid=uuids.db_fault_2)
        insts = [inst1, inst2]
        for inst in insts:
            inst.obj_reset_changes()
        db_faults = {
            'uuid1': [{'id': 123,
                       'instance_uuid': uuids.db_fault_1,
                       'code': 456,
                       'message': 'Fake message',
                       'details': 'No details',
                       'host': 'foo',
                       'deleted': False,
                       'deleted_at': None,
                       'updated_at': None,
                       'created_at': None,
                       }
                      ]}

        db.instance_fault_get_by_instance_uuids(self.context,
                                                [x.uuid for x in insts],
                                                ).AndReturn(db_faults)
        self.mox.ReplayAll()
        inst_list = objects.InstanceList()
        inst_list._context = self.context
        inst_list.objects = insts
        faulty = inst_list.fill_faults()
        self.assertEqual([uuids.db_fault_1], list(faulty))
        self.assertEqual(db_faults['uuid1'][0]['message'],
                         inst_list[0].fault.message)
        self.assertIsNone(inst_list[1].fault)
        for inst in inst_list:
            self.assertEqual(set(), inst.obj_what_changed())

    @mock.patch('nova.objects.instance.Instance.obj_make_compatible')
    def test_get_by_security_group(self, mock_compat):
        # Instances hydrated from a secgroup row keep system_metadata when
        # the row had it.
        fake_secgroup = dict(test_security_group.fake_secgroup)
        fake_secgroup['instances'] = [
            fake_instance.fake_db_instance(id=1,
                                           system_metadata={'foo': 'bar'}),
            fake_instance.fake_db_instance(id=2),
            ]
        with mock.patch.object(db, 'security_group_get') as sgg:
            sgg.return_value = fake_secgroup
            secgroup = security_group.SecurityGroup()
            secgroup.id = fake_secgroup['id']
            instances = instance.InstanceList.get_by_security_group(
                self.context, secgroup)
        self.assertEqual(2, len(instances))
        self.assertEqual([1, 2], [x.id for x in instances])
        self.assertTrue(instances[0].obj_attr_is_set('system_metadata'))
        self.assertEqual({'foo': 'bar'}, instances[0].system_metadata)

    def test_get_by_grantee_security_group_ids(self):
        fake_instances = [
            fake_instance.fake_db_instance(id=1),
            fake_instance.fake_db_instance(id=2)
            ]
        with mock.patch.object(
            db, 'instance_get_all_by_grantee_security_groups') as igabgsg:
            igabgsg.return_value = fake_instances
            secgroup_ids = [1]
            instances = objects.InstanceList.get_by_grantee_security_group_ids(
                self.context, secgroup_ids)
igabgsg.assert_called_once_with(self.context, secgroup_ids) self.assertEqual(2, len(instances)) self.assertEqual([1, 2], [x.id for x in instances]) class TestInstanceListObject(test_objects._LocalTest, _TestInstanceListObject): pass class TestRemoteInstanceListObject(test_objects._RemoteTest, _TestInstanceListObject): pass class TestInstanceObjectMisc(test.TestCase): def test_expected_cols(self): self.stubs.Set(instance, '_INSTANCE_OPTIONAL_JOINED_FIELDS', ['bar']) self.assertEqual(['bar'], instance._expected_cols(['foo', 'bar'])) self.assertIsNone(instance._expected_cols(None)) def test_expected_cols_extra(self): self.assertEqual(['metadata', 'extra', 'extra.numa_topology'], instance._expected_cols(['metadata', 'numa_topology'])) nova-13.0.0/nova/tests/unit/objects/__init__.py0000664000567000056710000000000012701407773022523 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/objects/test_pci_device_pool.py0000664000567000056710000001056112701407773025163 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from nova import objects from nova.objects import pci_device_pool from nova import test from nova.tests.unit import fake_pci_device_pools as fake_pci from nova.tests.unit.objects import test_objects class _TestPciDevicePoolObject(object): def test_pci_pool_from_dict_not_distructive(self): test_dict = copy.copy(fake_pci.fake_pool_dict) objects.PciDevicePool.from_dict(test_dict) self.assertEqual(fake_pci.fake_pool_dict, test_dict) def test_pci_pool_from_dict(self): pool_obj = objects.PciDevicePool.from_dict(fake_pci.fake_pool_dict) self.assertEqual(pool_obj.product_id, 'fake-product') self.assertEqual(pool_obj.vendor_id, 'fake-vendor') self.assertEqual(pool_obj.numa_node, 1) self.assertEqual(pool_obj.tags, {'t1': 'v1', 't2': 'v2'}) self.assertEqual(pool_obj.count, 2) def test_pci_pool_from_dict_bad_tags(self): bad_dict = copy.deepcopy(fake_pci.fake_pool_dict) bad_dict['bad'] = {'foo': 'bar'} self.assertRaises(ValueError, objects.PciDevicePool.from_dict, value=bad_dict) def test_pci_pool_from_dict_no_tags(self): dict_notag = copy.copy(fake_pci.fake_pool_dict) dict_notag.pop('t1') dict_notag.pop('t2') pool_obj = objects.PciDevicePool.from_dict(dict_notag) self.assertEqual(pool_obj.tags, {}) def test_pci_pool_to_dict(self): tags = {'t1': 'foo', 't2': 'bar'} pool_obj = objects.PciDevicePool(product_id='pid', tags=tags) pool_dict = pool_obj.to_dict() self.assertEqual({'product_id': 'pid', 't1': 'foo', 't2': 'bar'}, pool_dict) def test_pci_pool_to_dict_no_tags(self): pool_obj = objects.PciDevicePool(product_id='pid', tags={}) pool_dict = pool_obj.to_dict() self.assertEqual({'product_id': 'pid'}, pool_dict) def test_pci_pool_to_dict_with_tags_unset(self): pool_obj = objects.PciDevicePool(product_id='pid') pool_dict = pool_obj.to_dict() self.assertEqual({'product_id': 'pid'}, pool_dict) def test_obj_make_compatible(self): pool_obj = objects.PciDevicePool(product_id='pid', numa_node=1) primitive = pool_obj.obj_to_primitive() self.assertIn('numa_node', 
primitive['nova_object.data']) pool_obj.obj_make_compatible(primitive['nova_object.data'], '1.0') self.assertNotIn('numa_node', primitive['nova_object.data']) class TestPciDevicePoolObject(test_objects._LocalTest, _TestPciDevicePoolObject): pass class TestRemotePciDevicePoolObject(test_objects._RemoteTest, _TestPciDevicePoolObject): pass class TestConvertPciStats(test.NoDBTestCase): def test_from_pci_stats_obj(self): prim = fake_pci.fake_pool_list_primitive pools = pci_device_pool.from_pci_stats(prim) self.assertIsInstance(pools, pci_device_pool.PciDevicePoolList) self.assertEqual(len(pools), 1) def test_from_pci_stats_dict(self): prim = fake_pci.fake_pool_dict pools = pci_device_pool.from_pci_stats(prim) self.assertIsInstance(pools, pci_device_pool.PciDevicePoolList) self.assertEqual(len(pools), 1) def test_from_pci_stats_list_of_dicts(self): prim = fake_pci.fake_pool_dict pools = pci_device_pool.from_pci_stats([prim, prim]) self.assertIsInstance(pools, pci_device_pool.PciDevicePoolList) self.assertEqual(len(pools), 2) def test_from_pci_stats_bad(self): prim = "not a valid json string for an object" pools = pci_device_pool.from_pci_stats(prim) self.assertEqual(len(pools), 0) nova-13.0.0/nova/tests/unit/objects/test_security_group.py0000664000567000056710000001565612701407773025135 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova import db
from nova.objects import instance
from nova.objects import security_group
from nova.tests.unit.objects import test_objects


# A fake DB row for a security group, in the shape the sqlalchemy API
# layer would return it ('deleted' is None here; the object layer
# normalizes it to False, see _fix_deleted below).
fake_secgroup = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': None,
    'id': 1,
    'name': 'fake-name',
    'description': 'fake-desc',
    'user_id': 'fake-user',
    'project_id': 'fake-project',
    }


class _TestSecurityGroupObject(object):
    """SecurityGroup object tests run in both local and remote modes.

    NOTE: these use mox expectations, so the StubOutWithMock /
    expectation / ReplayAll statement order in each test is significant.
    """

    def _fix_deleted(self, db_secgroup):
        # NOTE(danms): Account for the difference in 'deleted'
        return dict(db_secgroup.items(), deleted=False)

    def test_get(self):
        """get() loads the row and leaves no pending changes."""
        self.mox.StubOutWithMock(db, 'security_group_get')
        db.security_group_get(self.context, 1).AndReturn(fake_secgroup)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup.get(self.context, 1)
        self.assertEqual(self._fix_deleted(fake_secgroup),
                         dict(secgroup.items()))
        self.assertEqual(secgroup.obj_what_changed(), set())

    def test_get_by_name(self):
        """get_by_name() passes project and name through to the DB."""
        self.mox.StubOutWithMock(db, 'security_group_get_by_name')
        db.security_group_get_by_name(self.context, 'fake-project',
                                      'fake-name').AndReturn(fake_secgroup)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup.get_by_name(self.context,
                                                            'fake-project',
                                                            'fake-name')
        self.assertEqual(self._fix_deleted(fake_secgroup),
                         dict(secgroup.items()))
        self.assertEqual(secgroup.obj_what_changed(), set())

    def test_in_use(self):
        """in_use() proxies to db.security_group_in_use by id."""
        self.mox.StubOutWithMock(db, 'security_group_in_use')
        db.security_group_in_use(self.context, 123).AndReturn(True)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup(context=self.context)
        secgroup.id = 123
        self.assertTrue(secgroup.in_use())

    def test_save(self):
        """save() sends only the changed fields to the DB update call."""
        self.mox.StubOutWithMock(db, 'security_group_update')
        # The DB is allowed to return a row with other fields changed
        # too (project_id here); save() must absorb that.
        updated_secgroup = dict(fake_secgroup, project_id='changed')
        db.security_group_update(self.context, 1,
                                 {'description': 'foobar'}).AndReturn(
                                     updated_secgroup)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup._from_db_object(
            self.context, security_group.SecurityGroup(), fake_secgroup)
        secgroup.description = 'foobar'
        secgroup.save()
self.assertEqual(self._fix_deleted(updated_secgroup), dict(secgroup.items())) self.assertEqual(secgroup.obj_what_changed(), set()) def test_save_no_changes(self): self.mox.StubOutWithMock(db, 'security_group_update') self.mox.ReplayAll() secgroup = security_group.SecurityGroup._from_db_object( self.context, security_group.SecurityGroup(), fake_secgroup) secgroup.save() def test_refresh(self): updated_secgroup = dict(fake_secgroup, description='changed') self.mox.StubOutWithMock(db, 'security_group_get') db.security_group_get(self.context, 1).AndReturn(updated_secgroup) self.mox.ReplayAll() secgroup = security_group.SecurityGroup._from_db_object( self.context, security_group.SecurityGroup(self.context), fake_secgroup) secgroup.refresh() self.assertEqual(self._fix_deleted(updated_secgroup), dict(secgroup.items())) self.assertEqual(secgroup.obj_what_changed(), set()) class TestSecurityGroupObject(test_objects._LocalTest, _TestSecurityGroupObject): pass class TestSecurityGroupObjectRemote(test_objects._RemoteTest, _TestSecurityGroupObject): pass fake_secgroups = [ dict(fake_secgroup, id=1, name='secgroup1'), dict(fake_secgroup, id=2, name='secgroup2'), ] class _TestSecurityGroupListObject(object): def test_get_all(self): self.mox.StubOutWithMock(db, 'security_group_get_all') db.security_group_get_all(self.context).AndReturn(fake_secgroups) self.mox.ReplayAll() secgroup_list = security_group.SecurityGroupList.get_all(self.context) for i in range(len(fake_secgroups)): self.assertIsInstance(secgroup_list[i], security_group.SecurityGroup) self.assertEqual(fake_secgroups[i]['id'], secgroup_list[i]['id']) self.assertEqual(secgroup_list[i]._context, self.context) def test_get_by_project(self): self.mox.StubOutWithMock(db, 'security_group_get_by_project') db.security_group_get_by_project(self.context, 'fake-project').AndReturn( fake_secgroups) self.mox.ReplayAll() secgroup_list = security_group.SecurityGroupList.get_by_project( self.context, 'fake-project') for i in 
range(len(fake_secgroups)): self.assertIsInstance(secgroup_list[i], security_group.SecurityGroup) self.assertEqual(fake_secgroups[i]['id'], secgroup_list[i]['id']) def test_get_by_instance(self): inst = instance.Instance() inst.uuid = 'fake-inst-uuid' self.mox.StubOutWithMock(db, 'security_group_get_by_instance') db.security_group_get_by_instance(self.context, 'fake-inst-uuid').AndReturn( fake_secgroups) self.mox.ReplayAll() secgroup_list = security_group.SecurityGroupList.get_by_instance( self.context, inst) for i in range(len(fake_secgroups)): self.assertIsInstance(secgroup_list[i], security_group.SecurityGroup) self.assertEqual(fake_secgroups[i]['id'], secgroup_list[i]['id']) class TestSecurityGroupListObject(test_objects._LocalTest, _TestSecurityGroupListObject): pass class TestSecurityGroupListObjectRemote(test_objects._RemoteTest, _TestSecurityGroupListObject): pass nova-13.0.0/nova/tests/unit/objects/test_resource_provider.py0000664000567000056710000002533212701407773025603 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import exception from nova import objects from nova.tests.unit.objects import test_objects from nova.tests import uuidsentinel as uuids _RESOURCE_CLASS_NAME = 'DISK_GB' _RESOURCE_CLASS_ID = 2 _RESOURCE_PROVIDER_ID = 1 _RESOURCE_PROVIDER_UUID = uuids.resource_provider _RESOURCE_PROVIDER_DB = { 'id': _RESOURCE_PROVIDER_ID, 'uuid': _RESOURCE_PROVIDER_UUID, } _INVENTORY_ID = 2 _INVENTORY_DB = { 'id': _INVENTORY_ID, 'resource_provider_id': _RESOURCE_PROVIDER_ID, 'resource_class_id': _RESOURCE_CLASS_ID, 'total': 16, 'reserved': 2, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 1.0, } class _TestResourceProviderNoDB(object): @mock.patch('nova.objects.ResourceProvider._get_by_uuid_from_db', return_value=_RESOURCE_PROVIDER_DB) def test_object_get_by_uuid(self, mock_db_get): resource_provider_object = objects.ResourceProvider.get_by_uuid( mock.sentinel.ctx, _RESOURCE_PROVIDER_UUID) self.assertEqual(_RESOURCE_PROVIDER_ID, resource_provider_object.id) self.assertEqual(_RESOURCE_PROVIDER_UUID, resource_provider_object.uuid) @mock.patch('nova.objects.ResourceProvider._create_in_db', return_value=_RESOURCE_PROVIDER_DB) def test_create(self, mock_db_create): obj = objects.ResourceProvider(context=self.context, uuid=_RESOURCE_PROVIDER_UUID) obj.create() self.assertEqual(_RESOURCE_PROVIDER_UUID, obj.uuid) self.assertIsInstance(obj.id, int) mock_db_create.assert_called_once_with( self.context, {'uuid': _RESOURCE_PROVIDER_UUID}) def test_create_id_fail(self): obj = objects.ResourceProvider(context=self.context, uuid=_RESOURCE_PROVIDER_UUID, id=_RESOURCE_PROVIDER_ID) self.assertRaises(exception.ObjectActionError, obj.create) def test_create_no_uuid_fail(self): obj = objects.ResourceProvider(context=self.context) self.assertRaises(exception.ObjectActionError, obj.create) class TestResourceProviderNoDB(test_objects._LocalTest, _TestResourceProviderNoDB): USES_DB = False class TestRemoteResourceProviderNoDB(test_objects._RemoteTest, 
_TestResourceProviderNoDB): USES_DB = False class TestResourceProvider(test_objects._LocalTest): def test_create_in_db(self): updates = {'uuid': _RESOURCE_PROVIDER_UUID} db_rp = objects.ResourceProvider._create_in_db( self.context, updates) self.assertIsInstance(db_rp.id, int) self.assertEqual(_RESOURCE_PROVIDER_UUID, db_rp.uuid) def test_get_by_uuid_from_db(self): rp = objects.ResourceProvider(context=self.context, uuid=_RESOURCE_PROVIDER_UUID) rp.create() retrieved_rp = objects.ResourceProvider._get_by_uuid_from_db( self.context, _RESOURCE_PROVIDER_UUID) self.assertEqual(rp.uuid, retrieved_rp.uuid) self.assertRaises(exception.NotFound, objects.ResourceProvider._get_by_uuid_from_db, self.context, uuids.missing) class _TestInventoryNoDB(object): @mock.patch('nova.objects.Inventory._create_in_db', return_value=_INVENTORY_DB) def test_create(self, mock_db_create): rp = objects.ResourceProvider(id=_RESOURCE_PROVIDER_ID, uuid=_RESOURCE_PROVIDER_UUID) obj = objects.Inventory(context=self.context, resource_provider=rp, resource_class=_RESOURCE_CLASS_NAME, total=16, reserved=2, min_unit=1, max_unit=8, step_size=1, allocation_ratio=1.0) obj.create() self.assertEqual(_INVENTORY_ID, obj.id) expected = dict(_INVENTORY_DB) expected.pop('id') mock_db_create.assert_called_once_with(self.context, expected) @mock.patch('nova.objects.Inventory._update_in_db', return_value=_INVENTORY_DB) def test_save(self, mock_db_save): obj = objects.Inventory(context=self.context, id=_INVENTORY_ID, reserved=4) obj.save() mock_db_save.assert_called_once_with(self.context, _INVENTORY_ID, {'reserved': 4}) @mock.patch('nova.objects.InventoryList._get_all_by_resource_provider') def test_get_all_by_resource_provider(self, mock_get): expected = [dict(_INVENTORY_DB, resource_provider=dict(_RESOURCE_PROVIDER_DB)), dict(_INVENTORY_DB, id=_INVENTORY_DB['id'] + 1, resource_provider=dict(_RESOURCE_PROVIDER_DB))] mock_get.return_value = expected objs = objects.InventoryList.get_all_by_resource_provider_uuid( 
self.context, _RESOURCE_PROVIDER_DB['uuid']) self.assertEqual(2, len(objs)) self.assertEqual(_INVENTORY_DB['id'], objs[0].id) self.assertEqual(_INVENTORY_DB['id'] + 1, objs[1].id) class TestInventoryNoDB(test_objects._LocalTest, _TestInventoryNoDB): USES_DB = False class TestRemoteInventoryNoDB(test_objects._RemoteTest, _TestInventoryNoDB): USES_DB = False class TestInventory(test_objects._LocalTest): def _make_inventory(self): db_rp = objects.ResourceProvider( context=self.context, uuid=uuids.inventory_resource_provider) db_rp.create() updates = dict(_INVENTORY_DB, resource_provider_id=db_rp.id) updates.pop('id') db_inventory = objects.Inventory._create_in_db( self.context, updates) return db_rp, db_inventory def test_create_in_db(self): updates = dict(_INVENTORY_DB) updates.pop('id') db_inventory = objects.Inventory._create_in_db( self.context, updates) self.assertEqual(_INVENTORY_DB['total'], db_inventory.total) def test_update_in_db(self): db_rp, db_inventory = self._make_inventory() objects.Inventory._update_in_db(self.context, db_inventory.id, {'total': 32}) inventories = objects.InventoryList.\ get_all_by_resource_provider_uuid(self.context, db_rp.uuid) self.assertEqual(32, inventories[0].total) def test_update_in_db_fails_bad_id(self): db_rp, db_inventory = self._make_inventory() self.assertRaises(exception.NotFound, objects.Inventory._update_in_db, self.context, 99, {'total': 32}) def test_get_all_by_resource_provider_uuid(self): db_rp, db_inventory = self._make_inventory() retrieved_inventories = ( objects.InventoryList._get_all_by_resource_provider( self.context, db_rp.uuid) ) self.assertEqual(1, len(retrieved_inventories)) self.assertEqual(db_inventory.id, retrieved_inventories[0].id) self.assertEqual(db_inventory.total, retrieved_inventories[0].total) retrieved_inventories = ( objects.InventoryList._get_all_by_resource_provider( self.context, uuids.bad_rp_uuid) ) self.assertEqual(0, len(retrieved_inventories)) def 
test_create_requires_resource_provider(self): inventory_dict = dict(_INVENTORY_DB) inventory_dict.pop('id') inventory_dict.pop('resource_provider_id') inventory_dict.pop('resource_class_id') inventory_dict['resource_class'] = _RESOURCE_CLASS_NAME inventory = objects.Inventory(context=self.context, **inventory_dict) error = self.assertRaises(exception.ObjectActionError, inventory.create) self.assertIn('resource_provider required', str(error)) def test_create_requires_created_resource_provider(self): rp = objects.ResourceProvider( context=self.context, uuid=uuids.inventory_resource_provider) inventory_dict = dict(_INVENTORY_DB) inventory_dict.pop('id') inventory_dict.pop('resource_provider_id') inventory_dict.pop('resource_class_id') inventory_dict['resource_provider'] = rp inventory = objects.Inventory(context=self.context, **inventory_dict) error = self.assertRaises(exception.ObjectActionError, inventory.create) self.assertIn('resource_provider required', str(error)) def test_create_requires_resource_class(self): rp = objects.ResourceProvider( context=self.context, uuid=uuids.inventory_resource_provider) rp.create() inventory_dict = dict(_INVENTORY_DB) inventory_dict.pop('id') inventory_dict.pop('resource_provider_id') inventory_dict.pop('resource_class_id') inventory_dict['resource_provider'] = rp inventory = objects.Inventory(context=self.context, **inventory_dict) error = self.assertRaises(exception.ObjectActionError, inventory.create) self.assertIn('resource_class required', str(error)) def test_create_id_fails(self): inventory = objects.Inventory(self.context, **_INVENTORY_DB) self.assertRaises(exception.ObjectActionError, inventory.create) def test_save_without_id_fails(self): inventory_dict = dict(_INVENTORY_DB) inventory_dict.pop('id') inventory = objects.Inventory(self.context, **inventory_dict) self.assertRaises(exception.ObjectActionError, inventory.save) 
nova-13.0.0/nova/tests/unit/objects/test_instance_group.py0000664000567000056710000003427612701407773025071 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid import mock from oslo_utils import timeutils from nova import exception from nova import objects from nova.tests.unit.objects import test_objects _TS_NOW = timeutils.utcnow(with_timezone=True) # o.vo.fields.DateTimeField converts to tz-aware and # in process we lose microsecond resolution. 
_TS_NOW = _TS_NOW.replace(microsecond=0)
_DB_UUID = str(uuid.uuid4())
# Fake DB row for an instance group, as the DB API would return it.
_INST_GROUP_DB = {
    'id': 1,
    'uuid': _DB_UUID,
    'user_id': 'fake_user',
    'project_id': 'fake_project',
    'name': 'fake_name',
    'policies': ['policy1', 'policy2'],
    'members': ['instance_id1', 'instance_id2'],
    'deleted': False,
    'created_at': _TS_NOW,
    'updated_at': _TS_NOW,
    'deleted_at': None,
}


class _TestInstanceGroupObject(object):
    """InstanceGroup object tests run in both local and remote modes."""

    @mock.patch('nova.db.instance_group_get', return_value=_INST_GROUP_DB)
    def test_get_by_uuid(self, mock_db_get):
        """get_by_uuid() maps every DB field onto the object."""
        obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID)
        mock_db_get.assert_called_once_with(mock.sentinel.ctx, _DB_UUID)
        self.assertEqual(_INST_GROUP_DB['members'], obj.members)
        self.assertEqual(_INST_GROUP_DB['policies'], obj.policies)
        self.assertEqual(_DB_UUID, obj.uuid)
        self.assertEqual(_INST_GROUP_DB['project_id'], obj.project_id)
        self.assertEqual(_INST_GROUP_DB['user_id'], obj.user_id)
        self.assertEqual(_INST_GROUP_DB['name'], obj.name)

    @mock.patch('nova.db.instance_group_get_by_instance',
                return_value=_INST_GROUP_DB)
    def test_get_by_instance_uuid(self, mock_db_get):
        """get_by_instance_uuid() passes the instance uuid to the DB."""
        objects.InstanceGroup.get_by_instance_uuid(
            mock.sentinel.ctx, mock.sentinel.instance_uuid)
        mock_db_get.assert_called_once_with(
            mock.sentinel.ctx, mock.sentinel.instance_uuid)

    @mock.patch('nova.db.instance_group_get')
    def test_refresh(self, mock_db_get):
        """refresh() re-reads the DB and clears pending changes."""
        changed_group = copy.deepcopy(_INST_GROUP_DB)
        changed_group['name'] = 'new_name'
        # First DB read backs get_by_uuid(), second backs refresh().
        mock_db_get.side_effect = [_INST_GROUP_DB, changed_group]
        obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID)
        self.assertEqual(_INST_GROUP_DB['name'], obj.name)
        obj.refresh()
        self.assertEqual('new_name', obj.name)
        self.assertEqual(set([]), obj.obj_what_changed())

    @mock.patch('nova.compute.utils.notify_about_server_group_update')
    @mock.patch('nova.db.instance_group_update')
    @mock.patch('nova.db.instance_group_get')
    def test_save(self, mock_db_get, mock_db_update, mock_notify):
        changed_group = copy.deepcopy(_INST_GROUP_DB)
changed_group['name'] = 'new_name' mock_db_get.side_effect = [_INST_GROUP_DB, changed_group] obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID) self.assertEqual(obj.name, 'fake_name') obj.name = 'new_name' obj.policies = ['policy1'] # Remove policy 2 obj.members = ['instance_id1'] # Remove member 2 obj.save() mock_db_update.assert_called_once_with(mock.sentinel.ctx, _DB_UUID, {'name': 'new_name', 'members': ['instance_id1'], 'policies': ['policy1']}) mock_notify.assert_called_once_with(mock.sentinel.ctx, "update", {'name': 'new_name', 'members': ['instance_id1'], 'policies': ['policy1'], 'server_group_id': _DB_UUID}) @mock.patch('nova.compute.utils.notify_about_server_group_update') @mock.patch('nova.db.instance_group_update') @mock.patch('nova.db.instance_group_get') def test_save_without_hosts(self, mock_db_get, mock_db_update, mock_notify): mock_db_get.side_effect = [_INST_GROUP_DB, _INST_GROUP_DB] obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID) obj.hosts = ['fake-host1'] self.assertRaises(exception.InstanceGroupSaveException, obj.save) # make sure that we can save by removing hosts from what is updated obj.obj_reset_changes(['hosts']) obj.save() # since hosts was the only update, there is no actual call self.assertFalse(mock_db_update.called) self.assertFalse(mock_notify.called) @mock.patch('nova.compute.utils.notify_about_server_group_update') @mock.patch('nova.db.instance_group_create', return_value=_INST_GROUP_DB) def test_create(self, mock_db_create, mock_notify): obj = objects.InstanceGroup(context=mock.sentinel.ctx) obj.uuid = _DB_UUID obj.name = _INST_GROUP_DB['name'] obj.user_id = _INST_GROUP_DB['user_id'] obj.project_id = _INST_GROUP_DB['project_id'] obj.members = _INST_GROUP_DB['members'] obj.policies = _INST_GROUP_DB['policies'] obj.updated_at = _TS_NOW obj.created_at = _TS_NOW obj.deleted_at = None obj.deleted = False obj.create() mock_db_create.assert_called_once_with( mock.sentinel.ctx, {'uuid': _DB_UUID, 
'name': _INST_GROUP_DB['name'], 'user_id': _INST_GROUP_DB['user_id'], 'project_id': _INST_GROUP_DB['project_id'], 'created_at': _TS_NOW, 'updated_at': _TS_NOW, 'deleted_at': None, 'deleted': False, }, members=_INST_GROUP_DB['members'], policies=_INST_GROUP_DB['policies']) mock_notify.assert_called_once_with( mock.sentinel.ctx, "create", {'uuid': _DB_UUID, 'name': _INST_GROUP_DB['name'], 'user_id': _INST_GROUP_DB['user_id'], 'project_id': _INST_GROUP_DB['project_id'], 'created_at': _TS_NOW, 'updated_at': _TS_NOW, 'deleted_at': None, 'deleted': False, 'members': _INST_GROUP_DB['members'], 'policies': _INST_GROUP_DB['policies'], 'server_group_id': _DB_UUID}) self.assertRaises(exception.ObjectActionError, obj.create) @mock.patch('nova.compute.utils.notify_about_server_group_update') @mock.patch('nova.db.instance_group_delete') def test_destroy(self, mock_db_delete, mock_notify): obj = objects.InstanceGroup(context=mock.sentinel.ctx) obj.uuid = _DB_UUID obj.destroy() mock_db_delete.assert_called_once_with(mock.sentinel.ctx, _DB_UUID) mock_notify.assert_called_once_with(mock.sentinel.ctx, "delete", {'server_group_id': _DB_UUID}) @mock.patch('nova.compute.utils.notify_about_server_group_update') @mock.patch('nova.db.instance_group_members_add') def test_add_members(self, mock_members_add_db, mock_notify): mock_members_add_db.return_value = [mock.sentinel.members] members = objects.InstanceGroup.add_members(mock.sentinel.ctx, _DB_UUID, mock.sentinel.members) self.assertEqual([mock.sentinel.members], members) mock_members_add_db.assert_called_once_with( mock.sentinel.ctx, _DB_UUID, mock.sentinel.members) mock_notify.assert_called_once_with( mock.sentinel.ctx, "addmember", {'instance_uuids': mock.sentinel.members, 'server_group_id': _DB_UUID}) @mock.patch('nova.objects.InstanceList.get_by_filters') @mock.patch('nova.db.instance_group_get', return_value=_INST_GROUP_DB) def test_count_members_by_user(self, mock_get_db, mock_il_get): mock_il_get.return_value = [mock.ANY] obj = 
objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID) expected_filters = { 'uuid': ['instance_id1', 'instance_id2'], 'user_id': 'fake_user', 'deleted': False } self.assertEqual(1, obj.count_members_by_user('fake_user')) mock_il_get.assert_called_once_with(mock.sentinel.ctx, filters=expected_filters) @mock.patch('nova.objects.InstanceList.get_by_filters') @mock.patch('nova.db.instance_group_get', return_value=_INST_GROUP_DB) def test_get_hosts(self, mock_get_db, mock_il_get): mock_il_get.return_value = [objects.Instance(host='host1'), objects.Instance(host='host2'), objects.Instance(host=None)] obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID) hosts = obj.get_hosts() self.assertEqual(['instance_id1', 'instance_id2'], obj.members) expected_filters = { 'uuid': ['instance_id1', 'instance_id2'], 'deleted': False } mock_il_get.assert_called_once_with(mock.sentinel.ctx, filters=expected_filters) self.assertEqual(2, len(hosts)) self.assertIn('host1', hosts) self.assertIn('host2', hosts) # Test manual exclusion mock_il_get.reset_mock() hosts = obj.get_hosts(exclude=['instance_id1']) expected_filters = { 'uuid': set(['instance_id2']), 'deleted': False } mock_il_get.assert_called_once_with(mock.sentinel.ctx, filters=expected_filters) @mock.patch('nova.db.instance_group_get', return_value=_INST_GROUP_DB) def test_obj_make_compatible(self, mock_db_get): obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID) obj_primitive = obj.obj_to_primitive() self.assertNotIn('metadetails', obj_primitive) obj.obj_make_compatible(obj_primitive, '1.6') self.assertEqual({}, obj_primitive['metadetails']) @mock.patch.object(objects.InstanceList, 'get_by_filters') def test_load_hosts(self, mock_get_by_filt): mock_get_by_filt.return_value = [objects.Instance(host='host1'), objects.Instance(host='host2')] obj = objects.InstanceGroup(mock.sentinel.ctx, members=['uuid1']) self.assertEqual(2, len(obj.hosts)) self.assertIn('host1', obj.hosts) 
self.assertIn('host2', obj.hosts) self.assertNotIn('hosts', obj.obj_what_changed()) def test_load_anything_else_but_hosts(self): obj = objects.InstanceGroup(mock.sentinel.ctx) self.assertRaises(exception.ObjectActionError, getattr, obj, 'members') class TestInstanceGroupObject(test_objects._LocalTest, _TestInstanceGroupObject): pass class TestRemoteInstanceGroupObject(test_objects._RemoteTest, _TestInstanceGroupObject): pass def _mock_db_list_get(*args): instances = [(str(uuid.uuid4()), 'f1', 'p1'), (str(uuid.uuid4()), 'f2', 'p1'), (str(uuid.uuid4()), 'f3', 'p2'), (str(uuid.uuid4()), 'f4', 'p2')] result = [] for instance in instances: values = copy.deepcopy(_INST_GROUP_DB) values['uuid'] = instance[0] values['name'] = instance[1] values['project_id'] = instance[2] result.append(values) return result class _TestInstanceGroupListObject(object): @mock.patch('nova.db.instance_group_get_all') def test_list_all(self, mock_db_get): mock_db_get.side_effect = _mock_db_list_get inst_list = objects.InstanceGroupList.get_all(mock.sentinel.ctx) self.assertEqual(4, len(inst_list.objects)) mock_db_get.assert_called_once_with(mock.sentinel.ctx) @mock.patch('nova.db.instance_group_get_all_by_project_id') def test_list_by_project_id(self, mock_db_get): mock_db_get.side_effect = _mock_db_list_get objects.InstanceGroupList.get_by_project_id( mock.sentinel.ctx, mock.sentinel.project_id) mock_db_get.assert_called_once_with( mock.sentinel.ctx, mock.sentinel.project_id) @mock.patch('nova.db.instance_group_get_all_by_project_id') def test_get_by_name(self, mock_db_get): mock_db_get.side_effect = _mock_db_list_get # Need the project_id value set, otherwise we'd use mock.sentinel mock_ctx = mock.MagicMock() mock_ctx.project_id = 'fake_project' ig = objects.InstanceGroup.get_by_name(mock_ctx, 'f1') mock_db_get.assert_called_once_with(mock_ctx, 'fake_project') self.assertEqual('f1', ig.name) self.assertRaises(exception.InstanceGroupNotFound, objects.InstanceGroup.get_by_name, mock_ctx, 
'unknown') @mock.patch('nova.objects.InstanceGroup.get_by_uuid') @mock.patch('nova.objects.InstanceGroup.get_by_name') def test_get_by_hint(self, mock_name, mock_uuid): objects.InstanceGroup.get_by_hint(mock.sentinel.ctx, _DB_UUID) mock_uuid.assert_called_once_with(mock.sentinel.ctx, _DB_UUID) objects.InstanceGroup.get_by_hint(mock.sentinel.ctx, 'name') mock_name.assert_called_once_with(mock.sentinel.ctx, 'name') class TestInstanceGroupListObject(test_objects._LocalTest, _TestInstanceGroupListObject): pass class TestRemoteInstanceGroupListObject(test_objects._RemoteTest, _TestInstanceGroupListObject): pass nova-13.0.0/nova/tests/unit/objects/test_pci_device.py0000664000567000056710000007462512701407773024145 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from oslo_utils import timeutils from nova import context from nova import db from nova import exception from nova import objects from nova.objects import fields from nova.objects import instance from nova.objects import pci_device from nova import test from nova.tests.unit.objects import test_objects dev_dict = { 'compute_node_id': 1, 'address': 'a', 'product_id': 'p', 'vendor_id': 'v', 'numa_node': 0, 'dev_type': fields.PciDeviceType.STANDARD, 'parent_addr': None, 'status': fields.PciDeviceStatus.AVAILABLE} fake_db_dev = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'parent_addr': None, 'id': 1, 'compute_node_id': 1, 'address': 'a', 'vendor_id': 'v', 'product_id': 'p', 'numa_node': 0, 'dev_type': fields.PciDeviceType.STANDARD, 'status': fields.PciDeviceStatus.AVAILABLE, 'dev_id': 'i', 'label': 'l', 'instance_uuid': None, 'extra_info': '{}', 'request_id': None, } fake_db_dev_1 = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'id': 2, 'parent_addr': 'a', 'compute_node_id': 1, 'address': 'a1', 'vendor_id': 'v1', 'product_id': 'p1', 'numa_node': 1, 'dev_type': fields.PciDeviceType.STANDARD, 'status': fields.PciDeviceStatus.AVAILABLE, 'dev_id': 'i', 'label': 'l', 'instance_uuid': None, 'extra_info': '{}', 'request_id': None, } fake_db_dev_old = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'id': 2, 'parent_addr': None, 'compute_node_id': 1, 'address': 'a1', 'vendor_id': 'v1', 'product_id': 'p1', 'numa_node': 1, 'dev_type': fields.PciDeviceType.SRIOV_VF, 'status': fields.PciDeviceStatus.AVAILABLE, 'dev_id': 'i', 'label': 'l', 'instance_uuid': None, 'extra_info': '{"phys_function": "blah"}', 'request_id': None, } class _TestPciDeviceObject(object): def _create_fake_instance(self): self.inst = instance.Instance() self.inst.uuid = 'fake-inst-uuid' self.inst.pci_devices = pci_device.PciDeviceList() def _create_fake_pci_device(self, ctxt=None): if not 
ctxt: ctxt = context.get_admin_context() self.mox.StubOutWithMock(db, 'pci_device_get_by_addr') db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev) self.mox.ReplayAll() self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a') def test_create_pci_device(self): self.pci_device = pci_device.PciDevice.create(None, dev_dict) self.assertEqual(self.pci_device.product_id, 'p') self.assertEqual(self.pci_device.obj_what_changed(), set(['compute_node_id', 'product_id', 'vendor_id', 'numa_node', 'status', 'address', 'extra_info', 'dev_type', 'parent_addr'])) def test_pci_device_extra_info(self): self.dev_dict = copy.copy(dev_dict) self.dev_dict['k1'] = 'v1' self.dev_dict['k2'] = 'v2' self.pci_device = pci_device.PciDevice.create(None, self.dev_dict) extra_value = self.pci_device.extra_info self.assertEqual(extra_value.get('k1'), 'v1') self.assertEqual(set(extra_value.keys()), set(('k1', 'k2'))) self.assertEqual(self.pci_device.obj_what_changed(), set(['compute_node_id', 'address', 'product_id', 'vendor_id', 'numa_node', 'status', 'extra_info', 'dev_type', 'parent_addr'])) def test_update_device(self): self.pci_device = pci_device.PciDevice.create(None, dev_dict) self.pci_device.obj_reset_changes() changes = {'product_id': 'p2', 'vendor_id': 'v2'} self.pci_device.update_device(changes) self.assertEqual(self.pci_device.vendor_id, 'v2') self.assertEqual(self.pci_device.obj_what_changed(), set(['vendor_id', 'product_id', 'parent_addr'])) def test_update_device_same_value(self): self.pci_device = pci_device.PciDevice.create(None, dev_dict) self.pci_device.obj_reset_changes() changes = {'product_id': 'p', 'vendor_id': 'v2'} self.pci_device.update_device(changes) self.assertEqual(self.pci_device.product_id, 'p') self.assertEqual(self.pci_device.vendor_id, 'v2') self.assertEqual(self.pci_device.obj_what_changed(), set(['vendor_id', 'product_id', 'parent_addr'])) def test_get_by_dev_addr(self): ctxt = context.get_admin_context() self.mox.StubOutWithMock(db, 
'pci_device_get_by_addr') db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev) self.mox.ReplayAll() self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a') self.assertEqual(self.pci_device.product_id, 'p') self.assertEqual(self.pci_device.obj_what_changed(), set()) def test_get_by_dev_id(self): ctxt = context.get_admin_context() self.mox.StubOutWithMock(db, 'pci_device_get_by_id') db.pci_device_get_by_id(ctxt, 1).AndReturn(fake_db_dev) self.mox.ReplayAll() self.pci_device = pci_device.PciDevice.get_by_dev_id(ctxt, 1) self.assertEqual(self.pci_device.product_id, 'p') self.assertEqual(self.pci_device.obj_what_changed(), set()) def test_from_db_obj_pre_1_4_format(self): ctxt = context.get_admin_context() dev = pci_device.PciDevice._from_db_object( ctxt, pci_device.PciDevice(), fake_db_dev_old) self.assertEqual('blah', dev.parent_addr) self.assertEqual({'phys_function': 'blah'}, dev.extra_info) def test_from_db_obj_pre_1_5_format(self): ctxt = context.get_admin_context() fake_dev_pre_1_5 = copy.deepcopy(fake_db_dev_old) fake_dev_pre_1_5['status'] = fields.PciDeviceStatus.UNAVAILABLE dev = pci_device.PciDevice._from_db_object( ctxt, pci_device.PciDevice(), fake_dev_pre_1_5) self.assertRaises(exception.ObjectActionError, dev.obj_to_primitive, '1.4') def test_save_empty_parent_addr(self): ctxt = context.get_admin_context() dev = pci_device.PciDevice._from_db_object( ctxt, pci_device.PciDevice(), fake_db_dev) dev.parent_addr = None with mock.patch.object(db, 'pci_device_update', return_value=fake_db_dev): dev.save() self.assertIsNone(dev.parent_addr) self.assertEqual({}, dev.extra_info) def test_save(self): ctxt = context.get_admin_context() self._create_fake_pci_device(ctxt=ctxt) return_dev = dict(fake_db_dev, status=fields.PciDeviceStatus.AVAILABLE, instance_uuid='fake-uuid-3') self.pci_device.status = fields.PciDeviceStatus.ALLOCATED self.pci_device.instance_uuid = 'fake-uuid-2' expected_updates = dict(status=fields.PciDeviceStatus.ALLOCATED, 
instance_uuid='fake-uuid-2') self.mox.StubOutWithMock(db, 'pci_device_update') db.pci_device_update(ctxt, 1, 'a', expected_updates).AndReturn(return_dev) self.mox.ReplayAll() self.pci_device.save() self.assertEqual(self.pci_device.status, fields.PciDeviceStatus.AVAILABLE) self.assertEqual(self.pci_device.instance_uuid, 'fake-uuid-3') def test_save_no_extra_info(self): return_dev = dict(fake_db_dev, status=fields.PciDeviceStatus.AVAILABLE, instance_uuid='fake-uuid-3') def _fake_update(ctxt, node_id, addr, updates): self.extra_info = updates.get('extra_info') return return_dev ctxt = context.get_admin_context() self.stub_out('nova.db.pci_device_update', _fake_update) self.pci_device = pci_device.PciDevice.create(None, dev_dict) self.pci_device._context = ctxt self.pci_device.save() self.assertEqual(self.extra_info, '{}') def test_save_removed(self): ctxt = context.get_admin_context() self._create_fake_pci_device(ctxt=ctxt) self.pci_device.status = fields.PciDeviceStatus.REMOVED self.mox.StubOutWithMock(db, 'pci_device_destroy') db.pci_device_destroy(ctxt, 1, 'a') self.mox.ReplayAll() self.pci_device.save() self.assertEqual(self.pci_device.status, fields.PciDeviceStatus.DELETED) def test_save_deleted(self): def _fake_destroy(ctxt, node_id, addr): self.called = True def _fake_update(ctxt, node_id, addr, updates): self.called = True self.stub_out('nova.db.pci_device_destroy', _fake_destroy) self.stub_out('nova.db.pci_device_update', _fake_update) self._create_fake_pci_device() self.pci_device.status = fields.PciDeviceStatus.DELETED self.called = False self.pci_device.save() self.assertFalse(self.called) @mock.patch.object(objects.Service, 'get_minimum_version', return_value=4) def test_save_migrate_parent_addr(self, get_min_ver_mock): ctxt = context.get_admin_context() dev = pci_device.PciDevice._from_db_object( ctxt, pci_device.PciDevice(), fake_db_dev_old) with mock.patch.object(db, 'pci_device_update', return_value=fake_db_dev_old) as update_mock: dev.save() 
update_mock.assert_called_once_with( ctxt, dev.compute_node_id, dev.address, {'extra_info': '{}', 'parent_addr': 'blah'}) @mock.patch.object(objects.Service, 'get_minimum_version', return_value=4) def test_save_migrate_parent_addr_updated(self, get_min_ver_mock): ctxt = context.get_admin_context() dev = pci_device.PciDevice._from_db_object( ctxt, pci_device.PciDevice(), fake_db_dev_old) # Note that the pci manager code will never update parent_addr alone, # but we want to make it future proof so we guard against it dev.parent_addr = 'doh!' with mock.patch.object(db, 'pci_device_update', return_value=fake_db_dev_old) as update_mock: dev.save() update_mock.assert_called_once_with( ctxt, dev.compute_node_id, dev.address, {'extra_info': '{}', 'parent_addr': 'doh!'}) @mock.patch.object(objects.Service, 'get_minimum_version', return_value=2) def test_save_dont_migrate_parent_addr(self, get_min_ver_mock): ctxt = context.get_admin_context() dev = pci_device.PciDevice._from_db_object( ctxt, pci_device.PciDevice(), fake_db_dev_old) dev.extra_info['other'] = "blahtoo" with mock.patch.object(db, 'pci_device_update', return_value=fake_db_dev_old) as update_mock: dev.save() self.assertEqual("blah", update_mock.call_args[0][3]['parent_addr']) self.assertIn("phys_function", update_mock.call_args[0][3]['extra_info']) self.assertIn("other", update_mock.call_args[0][3]['extra_info']) def test_update_numa_node(self): self.pci_device = pci_device.PciDevice.create(None, dev_dict) self.assertEqual(0, self.pci_device.numa_node) self.dev_dict = copy.copy(dev_dict) self.dev_dict['numa_node'] = '1' self.pci_device = pci_device.PciDevice.create(None, self.dev_dict) self.assertEqual(1, self.pci_device.numa_node) def test_pci_device_equivalent(self): pci_device1 = pci_device.PciDevice.create(None, dev_dict) pci_device2 = pci_device.PciDevice.create(None, dev_dict) self.assertEqual(pci_device1, pci_device2) def test_pci_device_equivalent_with_ignore_field(self): pci_device1 = 
pci_device.PciDevice.create(None, dev_dict) pci_device2 = pci_device.PciDevice.create(None, dev_dict) pci_device2.updated_at = timeutils.utcnow() self.assertEqual(pci_device1, pci_device2) def test_pci_device_not_equivalent1(self): pci_device1 = pci_device.PciDevice.create(None, dev_dict) dev_dict2 = copy.copy(dev_dict) dev_dict2['address'] = 'b' pci_device2 = pci_device.PciDevice.create(None, dev_dict2) self.assertNotEqual(pci_device1, pci_device2) def test_pci_device_not_equivalent2(self): pci_device1 = pci_device.PciDevice.create(None, dev_dict) pci_device2 = pci_device.PciDevice.create(None, dev_dict) delattr(pci_device2, 'address') self.assertNotEqual(pci_device1, pci_device2) def test_pci_device_not_equivalent_with_none(self): pci_device1 = pci_device.PciDevice.create(None, dev_dict) pci_device2 = pci_device.PciDevice.create(None, dev_dict) pci_device1.instance_uuid = 'aaa' pci_device2.instance_uuid = None self.assertNotEqual(pci_device1, pci_device2) def test_claim_device(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.claim(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.CLAIMED) self.assertEqual(devobj.instance_uuid, self.inst.uuid) self.assertEqual(len(self.inst.pci_devices), 0) def test_claim_device_fail(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.status = fields.PciDeviceStatus.ALLOCATED self.assertRaises(exception.PciDeviceInvalidStatus, devobj.claim, self.inst) def test_allocate_device(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.claim(self.inst) devobj.allocate(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED) self.assertEqual(devobj.instance_uuid, 'fake-inst-uuid') self.assertEqual(len(self.inst.pci_devices), 1) self.assertEqual(self.inst.pci_devices[0].vendor_id, 'v') self.assertEqual(self.inst.pci_devices[0].status, fields.PciDeviceStatus.ALLOCATED) def 
test_allocate_device_fail_status(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.status = 'removed' self.assertRaises(exception.PciDeviceInvalidStatus, devobj.allocate, self.inst) def test_allocate_device_fail_owner(self): self._create_fake_instance() inst_2 = instance.Instance() inst_2.uuid = 'fake-inst-uuid-2' devobj = pci_device.PciDevice.create(None, dev_dict) devobj.claim(self.inst) self.assertRaises(exception.PciDeviceInvalidOwner, devobj.allocate, inst_2) def test_free_claimed_device(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.claim(self.inst) devobj.free(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.AVAILABLE) self.assertIsNone(devobj.instance_uuid) def test_free_allocated_device(self): self._create_fake_instance() ctx = context.get_admin_context() devobj = pci_device.PciDevice._from_db_object( ctx, pci_device.PciDevice(), fake_db_dev) devobj.claim(self.inst) devobj.allocate(self.inst) self.assertEqual(len(self.inst.pci_devices), 1) devobj.free(self.inst) self.assertEqual(len(self.inst.pci_devices), 0) self.assertEqual(devobj.status, fields.PciDeviceStatus.AVAILABLE) self.assertIsNone(devobj.instance_uuid) def test_free_device_fail(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.status = fields.PciDeviceStatus.REMOVED self.assertRaises(exception.PciDeviceInvalidStatus, devobj.free) def test_remove_device(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.remove() self.assertEqual(devobj.status, fields.PciDeviceStatus.REMOVED) self.assertIsNone(devobj.instance_uuid) def test_remove_device_fail(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.claim(self.inst) self.assertRaises(exception.PciDeviceInvalidStatus, devobj.remove) class TestPciDeviceObject(test_objects._LocalTest, _TestPciDeviceObject): pass 
class TestPciDeviceObjectRemote(test_objects._RemoteTest, _TestPciDeviceObject): pass fake_pci_devs = [fake_db_dev, fake_db_dev_1] class _TestPciDeviceListObject(object): def test_get_by_compute_node(self): ctxt = context.get_admin_context() self.mox.StubOutWithMock(db, 'pci_device_get_all_by_node') db.pci_device_get_all_by_node(ctxt, 1).AndReturn(fake_pci_devs) self.mox.ReplayAll() devs = pci_device.PciDeviceList.get_by_compute_node(ctxt, 1) for i in range(len(fake_pci_devs)): self.assertIsInstance(devs[i], pci_device.PciDevice) self.assertEqual(fake_pci_devs[i]['vendor_id'], devs[i].vendor_id) def test_get_by_instance_uuid(self): ctxt = context.get_admin_context() fake_db_1 = dict(fake_db_dev, address='a1', status=fields.PciDeviceStatus.ALLOCATED, instance_uuid='1') fake_db_2 = dict(fake_db_dev, address='a2', status=fields.PciDeviceStatus.ALLOCATED, instance_uuid='1') self.mox.StubOutWithMock(db, 'pci_device_get_all_by_instance_uuid') db.pci_device_get_all_by_instance_uuid(ctxt, '1').AndReturn( [fake_db_1, fake_db_2]) self.mox.ReplayAll() devs = pci_device.PciDeviceList.get_by_instance_uuid(ctxt, '1') self.assertEqual(len(devs), 2) for i in range(len(fake_pci_devs)): self.assertIsInstance(devs[i], pci_device.PciDevice) self.assertEqual(devs[0].vendor_id, 'v') self.assertEqual(devs[1].vendor_id, 'v') class TestPciDeviceListObject(test_objects._LocalTest, _TestPciDeviceListObject): pass class TestPciDeviceListObjectRemote(test_objects._RemoteTest, _TestPciDeviceListObject): pass class _TestSRIOVPciDeviceObject(object): def _create_pci_devices(self, vf_product_id=1515, pf_product_id=1528, num_pfs=2, num_vfs=8): self.sriov_pf_devices = [] for dev in range(num_pfs): pci_dev = {'compute_node_id': 1, 'address': '0000:81:00.%d' % dev, 'vendor_id': '8086', 'product_id': '%d' % pf_product_id, 'status': 'available', 'request_id': None, 'dev_type': fields.PciDeviceType.SRIOV_PF, 'parent_addr': None, 'numa_node': 0} pci_dev_obj = objects.PciDevice.create(None, pci_dev) 
pci_dev_obj.id = num_pfs + 81 self.sriov_pf_devices.append(pci_dev_obj) self.sriov_vf_devices = [] for dev in range(num_vfs): pci_dev = {'compute_node_id': 1, 'address': '0000:81:10.%d' % dev, 'vendor_id': '8086', 'product_id': '%d' % vf_product_id, 'status': 'available', 'request_id': None, 'dev_type': fields.PciDeviceType.SRIOV_VF, 'parent_addr': '0000:81:00.%d' % int(dev / 4), 'numa_node': 0} pci_dev_obj = objects.PciDevice.create(None, pci_dev) pci_dev_obj.id = num_vfs + 1 self.sriov_vf_devices.append(pci_dev_obj) def _create_fake_instance(self): self.inst = instance.Instance() self.inst.uuid = 'fake-inst-uuid' self.inst.pci_devices = pci_device.PciDeviceList() def _create_fake_pci_device(self, ctxt=None): if not ctxt: ctxt = context.get_admin_context() self.mox.StubOutWithMock(db, 'pci_device_get_by_addr') db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev) self.mox.ReplayAll() self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a') def _fake_get_by_parent_address(self, ctxt, node_id, addr): vf_devs = [] for dev in self.sriov_vf_devices: if dev.parent_addr == addr: vf_devs.append(dev) return vf_devs def _fake_pci_device_get_by_addr(self, ctxt, id, addr): for dev in self.sriov_pf_devices: if dev.address == addr: return dev def test_claim_PF(self): self._create_fake_instance() with mock.patch.object(objects.PciDeviceList, 'get_by_parent_address', side_effect=self._fake_get_by_parent_address): self._create_pci_devices() devobj = self.sriov_pf_devices[0] devobj.claim(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.CLAIMED) self.assertEqual(devobj.instance_uuid, self.inst.uuid) self.assertEqual(len(self.inst.pci_devices), 0) # check if the all the dependants are UNCLAIMABLE self.assertTrue(all( [dev.status == fields.PciDeviceStatus.UNCLAIMABLE for dev in self._fake_get_by_parent_address(None, None, self.sriov_pf_devices[0].address)])) def test_claim_VF(self): self._create_fake_instance() with 
mock.patch.object(objects.PciDevice, 'get_by_dev_addr', side_effect=self._fake_pci_device_get_by_addr): self._create_pci_devices() devobj = self.sriov_vf_devices[0] devobj.claim(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.CLAIMED) self.assertEqual(devobj.instance_uuid, self.inst.uuid) self.assertEqual(len(self.inst.pci_devices), 0) # check if parent device status has been changed to UNCLAIMABLE parent = self._fake_pci_device_get_by_addr(None, None, devobj.parent_addr) self.assertTrue(fields.PciDeviceStatus.UNCLAIMABLE, parent.status) def test_allocate_PF(self): self._create_fake_instance() with mock.patch.object(objects.PciDeviceList, 'get_by_parent_address', side_effect=self._fake_get_by_parent_address): self._create_pci_devices() devobj = self.sriov_pf_devices[0] devobj.claim(self.inst) devobj.allocate(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED) self.assertEqual(devobj.instance_uuid, self.inst.uuid) self.assertEqual(len(self.inst.pci_devices), 1) # check if the all the dependants are UNAVAILABLE self.assertTrue(all( [dev.status == fields.PciDeviceStatus.UNAVAILABLE for dev in self._fake_get_by_parent_address(None, None, self.sriov_pf_devices[0].address)])) def test_allocate_VF(self): self._create_fake_instance() with mock.patch.object(objects.PciDevice, 'get_by_dev_addr', side_effect=self._fake_pci_device_get_by_addr): self._create_pci_devices() devobj = self.sriov_vf_devices[0] devobj.claim(self.inst) devobj.allocate(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED) self.assertEqual(devobj.instance_uuid, self.inst.uuid) self.assertEqual(len(self.inst.pci_devices), 1) # check if parent device status has been changed to UNAVAILABLE parent = self._fake_pci_device_get_by_addr(None, None, devobj.parent_addr) self.assertTrue(fields.PciDeviceStatus.UNAVAILABLE, parent.status) def test_claim_PF_fail(self): self._create_fake_instance() with mock.patch.object(objects.PciDeviceList, 
'get_by_parent_address', side_effect=self._fake_get_by_parent_address): self._create_pci_devices() devobj = self.sriov_pf_devices[0] self.sriov_vf_devices[0].status = fields.PciDeviceStatus.CLAIMED self.assertRaises(exception.PciDeviceVFInvalidStatus, devobj.claim, self.inst) def test_claim_VF_fail(self): self._create_fake_instance() with mock.patch.object(objects.PciDevice, 'get_by_dev_addr', side_effect=self._fake_pci_device_get_by_addr): self._create_pci_devices() devobj = self.sriov_vf_devices[0] parent = self._fake_pci_device_get_by_addr(None, None, devobj.parent_addr) parent.status = fields.PciDeviceStatus.CLAIMED self.assertRaises(exception.PciDevicePFInvalidStatus, devobj.claim, self.inst) def test_allocate_PF_fail(self): self._create_fake_instance() with mock.patch.object(objects.PciDeviceList, 'get_by_parent_address', side_effect=self._fake_get_by_parent_address): self._create_pci_devices() devobj = self.sriov_pf_devices[0] self.sriov_vf_devices[0].status = fields.PciDeviceStatus.CLAIMED self.assertRaises(exception.PciDeviceVFInvalidStatus, devobj.allocate, self.inst) def test_allocate_VF_fail(self): self._create_fake_instance() with mock.patch.object(objects.PciDevice, 'get_by_dev_addr', side_effect=self._fake_pci_device_get_by_addr): self._create_pci_devices() devobj = self.sriov_vf_devices[0] parent = self._fake_pci_device_get_by_addr(None, None, devobj.parent_addr) parent.status = fields.PciDeviceStatus.CLAIMED self.assertRaises(exception.PciDevicePFInvalidStatus, devobj.allocate, self.inst) def test_free_allocated_PF(self): self._create_fake_instance() with mock.patch.object(objects.PciDeviceList, 'get_by_parent_address', side_effect=self._fake_get_by_parent_address): self._create_pci_devices() devobj = self.sriov_pf_devices[0] devobj.claim(self.inst) devobj.allocate(self.inst) devobj.free(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.AVAILABLE) self.assertIsNone(devobj.instance_uuid) # check if the all the dependants are 
AVAILABLE self.assertTrue(all( [dev.status == fields.PciDeviceStatus.AVAILABLE for dev in self._fake_get_by_parent_address(None, None, self.sriov_pf_devices[0].address)])) def test_free_allocated_VF(self): self._create_fake_instance() with test.nested( mock.patch.object(objects.PciDevice, 'get_by_dev_addr', side_effect=self._fake_pci_device_get_by_addr), mock.patch.object(objects.PciDeviceList, 'get_by_parent_address', side_effect=self._fake_get_by_parent_address)): self._create_pci_devices() vf = self.sriov_vf_devices[0] dependents = self._fake_get_by_parent_address(None, None, vf.parent_addr) for devobj in dependents: devobj.claim(self.inst) devobj.allocate(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED) for devobj in dependents[:3]: devobj.free(self.inst) # check if parent device status is still UNAVAILABLE parent = self._fake_pci_device_get_by_addr(None, None, devobj.parent_addr) self.assertTrue(fields.PciDeviceStatus.UNAVAILABLE, parent.status) for devobj in dependents[3:]: devobj.free(self.inst) # check if parent device status is now AVAILABLE parent = self._fake_pci_device_get_by_addr(None, None, devobj.parent_addr) self.assertTrue(fields.PciDeviceStatus.AVAILABLE, parent.status) class TestSRIOVPciDeviceListObject(test_objects._LocalTest, _TestSRIOVPciDeviceObject): pass class TestSRIOVPciDeviceListObjectRemote(test_objects._RemoteTest, _TestSRIOVPciDeviceObject): pass nova-13.0.0/nova/tests/unit/objects/test_virt_cpu_topology.py0000664000567000056710000000256512701407773025634 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import objects from nova.tests.unit.objects import test_objects _top_dict = { 'sockets': 2, 'cores': 4, 'threads': 8 } class _TestVirtCPUTopologyObject(object): def test_object_from_dict(self): top_obj = objects.VirtCPUTopology.from_dict(_top_dict) self.compare_obj(top_obj, _top_dict) def test_object_to_dict(self): top_obj = objects.VirtCPUTopology() top_obj.sockets = 2 top_obj.cores = 4 top_obj.threads = 8 spec = top_obj.to_dict() self.assertEqual(_top_dict, spec) class TestVirtCPUTopologyObject(test_objects._LocalTest, _TestVirtCPUTopologyObject): pass class TestRemoteVirtCPUTopologyObject(test_objects._RemoteTest, _TestVirtCPUTopologyObject): pass nova-13.0.0/nova/tests/unit/objects/test_build_request.py0000664000567000056710000000613712701407773024713 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import exception from nova import objects from nova.objects import build_request from nova.tests.unit import fake_build_request from nova.tests.unit.objects import test_objects class _TestBuildRequestObject(object): @mock.patch.object(build_request.BuildRequest, '_get_by_instance_uuid_from_db') def test_get_by_instance_uuid(self, get_by_uuid): fake_req = fake_build_request.fake_db_req() get_by_uuid.return_value = fake_req req_obj = build_request.BuildRequest.get_by_instance_uuid(self.context, fake_req['request_spec']['instance_uuid']) self.assertEqual(fake_req['request_spec']['instance_uuid'], req_obj.request_spec.instance_uuid) self.assertEqual(fake_req['project_id'], req_obj.project_id) self.assertIsInstance(req_obj.request_spec, objects.RequestSpec) get_by_uuid.assert_called_once_with(self.context, fake_req['request_spec']['instance_uuid']) @mock.patch.object(build_request.BuildRequest, '_create_in_db') def test_create(self, create_in_db): fake_req = fake_build_request.fake_db_req() req_obj = fake_build_request.fake_req_obj(self.context, fake_req) def _test_create_args(self2, context, changes): for field in [fields for fields in build_request.BuildRequest.fields if fields not in ['created_at', 'updated_at', 'request_spec', 'id']]: self.assertEqual(fake_req[field], changes[field]) self.assertEqual(fake_req['request_spec']['id'], changes['request_spec_id']) return fake_req with mock.patch.object(build_request.BuildRequest, '_create_in_db', _test_create_args): req_obj.create() def test_create_id_set(self): req_obj = build_request.BuildRequest(self.context) req_obj.id = 3 self.assertRaises(exception.ObjectActionError, req_obj.create) @mock.patch.object(build_request.BuildRequest, '_destroy_in_db') def test_destroy(self, destroy_in_db): req_obj = build_request.BuildRequest(self.context) req_obj.id = 1 req_obj.destroy() destroy_in_db.assert_called_once_with(self.context, req_obj.id) class TestBuildRequestObject(test_objects._LocalTest, 
_TestBuildRequestObject): pass class TestRemoteBuildRequestObject(test_objects._RemoteTest, _TestBuildRequestObject): pass nova-13.0.0/nova/tests/unit/objects/test_service.py0000664000567000056710000005164312701410011023461 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import timeutils from oslo_versionedobjects import base as ovo_base from oslo_versionedobjects import exception as ovo_exc from nova.compute import manager as compute_manager from nova import context from nova import db from nova import exception from nova import objects from nova.objects import aggregate from nova.objects import fields from nova.objects import service from nova import test from nova.tests.unit.objects import test_compute_node from nova.tests.unit.objects import test_objects NOW = timeutils.utcnow().replace(microsecond=0) def _fake_service(**kwargs): fake_service = { 'created_at': NOW, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 123, 'host': 'fake-host', 'binary': 'nova-fake', 'topic': 'fake-service-topic', 'report_count': 1, 'forced_down': False, 'disabled': False, 'disabled_reason': None, 'last_seen_up': None, 'version': service.SERVICE_VERSION, } fake_service.update(kwargs) return fake_service fake_service = _fake_service() OPTIONAL = ['availability_zone', 'compute_node'] class _TestServiceObject(object): def supported_hv_specs_comparator(self, expected, obj_val): obj_val = 
[inst.to_list() for inst in obj_val] self.assertJsonEqual(expected, obj_val) def pci_device_pools_comparator(self, expected, obj_val): obj_val = obj_val.obj_to_primitive() self.assertJsonEqual(expected, obj_val) def comparators(self): return {'stats': self.assertJsonEqual, 'host_ip': self.assertJsonEqual, 'supported_hv_specs': self.supported_hv_specs_comparator, 'pci_device_pools': self.pci_device_pools_comparator} def subs(self): return {'supported_hv_specs': 'supported_instances', 'pci_device_pools': 'pci_stats'} def _test_query(self, db_method, obj_method, *args, **kwargs): self.mox.StubOutWithMock(db, db_method) db_exception = kwargs.pop('db_exception', None) if db_exception: getattr(db, db_method)(self.context, *args, **kwargs).AndRaise( db_exception) else: getattr(db, db_method)(self.context, *args, **kwargs).AndReturn( fake_service) self.mox.ReplayAll() obj = getattr(service.Service, obj_method)(self.context, *args, **kwargs) if db_exception: self.assertIsNone(obj) else: self.compare_obj(obj, fake_service, allow_missing=OPTIONAL) def test_get_by_id(self): self._test_query('service_get', 'get_by_id', 123) def test_get_by_host_and_topic(self): self._test_query('service_get_by_host_and_topic', 'get_by_host_and_topic', 'fake-host', 'fake-topic') def test_get_by_host_and_binary(self): self._test_query('service_get_by_host_and_binary', 'get_by_host_and_binary', 'fake-host', 'fake-binary') def test_get_by_host_and_binary_raises(self): self._test_query('service_get_by_host_and_binary', 'get_by_host_and_binary', 'fake-host', 'fake-binary', db_exception=exception.HostBinaryNotFound( host='fake-host', binary='fake-binary')) def test_get_by_compute_host(self): self._test_query('service_get_by_compute_host', 'get_by_compute_host', 'fake-host') def test_get_by_args(self): self._test_query('service_get_by_host_and_binary', 'get_by_args', 'fake-host', 'fake-binary') def test_create(self): self.mox.StubOutWithMock(db, 'service_create') db.service_create(self.context, 
{'host': 'fake-host', 'version': fake_service['version']} ).AndReturn(fake_service) self.mox.ReplayAll() service_obj = service.Service(context=self.context) service_obj.host = 'fake-host' service_obj.create() self.assertEqual(fake_service['id'], service_obj.id) self.assertEqual(service.SERVICE_VERSION, service_obj.version) def test_recreate_fails(self): self.mox.StubOutWithMock(db, 'service_create') db.service_create(self.context, {'host': 'fake-host', 'version': fake_service['version']} ).AndReturn(fake_service) self.mox.ReplayAll() service_obj = service.Service(context=self.context) service_obj.host = 'fake-host' service_obj.create() self.assertRaises(exception.ObjectActionError, service_obj.create) def test_save(self): self.mox.StubOutWithMock(db, 'service_update') db.service_update(self.context, 123, {'host': 'fake-host', 'version': fake_service['version']} ).AndReturn(fake_service) self.mox.ReplayAll() service_obj = service.Service(context=self.context) service_obj.id = 123 service_obj.host = 'fake-host' service_obj.save() self.assertEqual(service.SERVICE_VERSION, service_obj.version) @mock.patch.object(db, 'service_create', return_value=fake_service) def test_set_id_failure(self, db_mock): service_obj = service.Service(context=self.context, binary='nova-compute') service_obj.create() self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr, service_obj, 'id', 124) def _test_destroy(self): self.mox.StubOutWithMock(db, 'service_destroy') db.service_destroy(self.context, 123) self.mox.ReplayAll() service_obj = service.Service(context=self.context) service_obj.id = 123 service_obj.destroy() def test_destroy(self): # The test harness needs db.service_destroy to work, # so avoid leaving it broken here after we're done orig_service_destroy = db.service_destroy try: self._test_destroy() finally: db.service_destroy = orig_service_destroy def test_get_by_topic(self): self.mox.StubOutWithMock(db, 'service_get_all_by_topic') db.service_get_all_by_topic(self.context, 
'fake-topic').AndReturn( [fake_service]) self.mox.ReplayAll() services = service.ServiceList.get_by_topic(self.context, 'fake-topic') self.assertEqual(1, len(services)) self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL) @mock.patch('nova.db.service_get_all_by_binary') def test_get_by_binary(self, mock_get): mock_get.return_value = [fake_service] services = service.ServiceList.get_by_binary(self.context, 'fake-binary') self.assertEqual(1, len(services)) mock_get.assert_called_once_with(self.context, 'fake-binary', include_disabled=False) @mock.patch('nova.db.service_get_all_by_binary') def test_get_by_binary_disabled(self, mock_get): mock_get.return_value = [_fake_service(disabled=True)] services = service.ServiceList.get_by_binary(self.context, 'fake-binary', include_disabled=True) self.assertEqual(1, len(services)) mock_get.assert_called_once_with(self.context, 'fake-binary', include_disabled=True) @mock.patch('nova.db.service_get_all_by_binary') def test_get_by_binary_both(self, mock_get): mock_get.return_value = [_fake_service(), _fake_service(disabled=True)] services = service.ServiceList.get_by_binary(self.context, 'fake-binary', include_disabled=True) self.assertEqual(2, len(services)) mock_get.assert_called_once_with(self.context, 'fake-binary', include_disabled=True) def test_get_by_host(self): self.mox.StubOutWithMock(db, 'service_get_all_by_host') db.service_get_all_by_host(self.context, 'fake-host').AndReturn( [fake_service]) self.mox.ReplayAll() services = service.ServiceList.get_by_host(self.context, 'fake-host') self.assertEqual(1, len(services)) self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL) def test_get_all(self): self.mox.StubOutWithMock(db, 'service_get_all') db.service_get_all(self.context, disabled=False).AndReturn( [fake_service]) self.mox.ReplayAll() services = service.ServiceList.get_all(self.context, disabled=False) self.assertEqual(1, len(services)) self.compare_obj(services[0], fake_service, 
allow_missing=OPTIONAL) def test_get_all_with_az(self): self.mox.StubOutWithMock(db, 'service_get_all') self.mox.StubOutWithMock(aggregate.AggregateList, 'get_by_metadata_key') db.service_get_all(self.context, disabled=None).AndReturn( [dict(fake_service, topic='compute')]) agg = aggregate.Aggregate(context=self.context) agg.name = 'foo' agg.metadata = {'availability_zone': 'test-az'} agg.create() agg.hosts = [fake_service['host']] aggregate.AggregateList.get_by_metadata_key(self.context, 'availability_zone', hosts=set(agg.hosts)).AndReturn([agg]) self.mox.ReplayAll() services = service.ServiceList.get_all(self.context, set_zones=True) self.assertEqual(1, len(services)) self.assertEqual('test-az', services[0].availability_zone) def test_compute_node(self): fake_compute_node = objects.ComputeNode._from_db_object( self.context, objects.ComputeNode(), test_compute_node.fake_compute_node) self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all_by_host') objects.ComputeNodeList.get_all_by_host( self.context, 'fake-host').AndReturn( [fake_compute_node]) self.mox.ReplayAll() service_obj = service.Service(id=123, host="fake-host", binary="nova-compute") service_obj._context = self.context self.assertEqual(service_obj.compute_node, fake_compute_node) # Make sure it doesn't re-fetch this service_obj.compute_node def test_load_when_orphaned(self): service_obj = service.Service() service_obj.id = 123 self.assertRaises(exception.OrphanedObjectError, getattr, service_obj, 'compute_node') @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host') def test_obj_make_compatible_for_compute_node(self, get_all_by_host): service_obj = objects.Service(context=self.context) fake_service_dict = fake_service.copy() fake_compute_obj = objects.ComputeNode(host=fake_service['host'], service_id=fake_service['id']) get_all_by_host.return_value = [fake_compute_obj] versions = ovo_base.obj_tree_get_versions('Service') versions['ComputeNode'] = '1.10' 
service_obj.obj_make_compatible_from_manifest(fake_service_dict, '1.9', versions) self.assertEqual( fake_compute_obj.obj_to_primitive(target_version='1.10', version_manifest=versions), fake_service_dict['compute_node']) @mock.patch('nova.db.service_get_minimum_version') def test_get_minimum_version_none(self, mock_get): mock_get.return_value = None self.assertEqual(0, objects.Service.get_minimum_version(self.context, 'nova-compute')) mock_get.assert_called_once_with(self.context, 'nova-compute') @mock.patch('nova.db.service_get_minimum_version') def test_get_minimum_version(self, mock_get): mock_get.return_value = 123 self.assertEqual(123, objects.Service.get_minimum_version(self.context, 'nova-compute')) mock_get.assert_called_once_with(self.context, 'nova-compute') @mock.patch('nova.db.service_get_minimum_version') @mock.patch('nova.objects.service.LOG') def test_get_minimum_version_checks_binary(self, mock_log, mock_get): mock_get.return_value = None self.assertEqual(0, objects.Service.get_minimum_version(self.context, 'nova-compute')) self.assertFalse(mock_log.warning.called) self.assertRaises(exception.ObjectActionError, objects.Service.get_minimum_version, self.context, 'compute') self.assertTrue(mock_log.warning.called) @mock.patch('nova.db.service_get_minimum_version') def test_get_minimum_version_with_caching(self, mock_get): objects.Service.enable_min_version_cache() mock_get.return_value = 123 self.assertEqual(123, objects.Service.get_minimum_version(self.context, 'nova-compute')) self.assertEqual({"nova-compute": 123}, objects.Service._MIN_VERSION_CACHE) self.assertEqual(123, objects.Service.get_minimum_version(self.context, 'nova-compute')) mock_get.assert_called_once_with(self.context, 'nova-compute') objects.Service._SERVICE_VERSION_CACHING = False objects.Service.clear_min_version_cache() @mock.patch('nova.db.service_get_minimum_version', return_value=2) def test_create_above_minimum(self, mock_get): with 
mock.patch('nova.objects.service.SERVICE_VERSION', new=3): objects.Service(context=self.context, binary='nova-compute').create() @mock.patch('nova.db.service_get_minimum_version', return_value=2) def test_create_equal_to_minimum(self, mock_get): with mock.patch('nova.objects.service.SERVICE_VERSION', new=2): objects.Service(context=self.context, binary='nova-compute').create() @mock.patch('nova.db.service_get_minimum_version', return_value=2) def test_create_below_minimum(self, mock_get): with mock.patch('nova.objects.service.SERVICE_VERSION', new=1): self.assertRaises(exception.ServiceTooOld, objects.Service(context=self.context, binary='nova-compute', ).create) class TestServiceObject(test_objects._LocalTest, _TestServiceObject): pass class TestRemoteServiceObject(test_objects._RemoteTest, _TestServiceObject): pass class TestServiceVersion(test.TestCase): def setUp(self): self.ctxt = context.get_admin_context() super(TestServiceVersion, self).setUp() def _collect_things(self): data = { 'compute_rpc': compute_manager.ComputeManager.target.version, } return data def test_version(self): calculated = self._collect_things() self.assertEqual( len(service.SERVICE_VERSION_HISTORY), service.SERVICE_VERSION + 1, 'Service version %i has no history. Please update ' 'nova.objects.service.SERVICE_VERSION_HISTORY ' 'and add %s to it' % (service.SERVICE_VERSION, repr(calculated))) current = service.SERVICE_VERSION_HISTORY[service.SERVICE_VERSION] self.assertEqual( current, calculated, 'Changes detected that require a SERVICE_VERSION change. 
Please ' 'increment nova.objects.service.SERVICE_VERSION, and make sure it' 'is equal to nova.compute.manager.ComputeManager.target.version.') def test_version_in_init(self): self.assertRaises(exception.ObjectActionError, objects.Service, version=123) def test_version_set_on_init(self): self.assertEqual(service.SERVICE_VERSION, objects.Service().version) def test_version_loaded_from_db(self): fake_version = fake_service['version'] + 1 fake_different_service = dict(fake_service) fake_different_service['version'] = fake_version obj = objects.Service() obj._from_db_object(self.ctxt, obj, fake_different_service) self.assertEqual(fake_version, obj.version) def test_save_noop_with_only_version(self): o = objects.Service(context=self.ctxt, id=fake_service['id']) o.obj_reset_changes(['id']) self.assertEqual(set(['version']), o.obj_what_changed()) with mock.patch('nova.db.service_update') as mock_update: o.save() self.assertFalse(mock_update.called) o.host = 'foo' with mock.patch('nova.db.service_update') as mock_update: mock_update.return_value = fake_service o.save() mock_update.assert_called_once_with( self.ctxt, fake_service['id'], {'version': service.SERVICE_VERSION, 'host': 'foo'}) class TestServiceStatusNotification(test.TestCase): def setUp(self): self.ctxt = context.get_admin_context() super(TestServiceStatusNotification, self).setUp() @mock.patch('nova.objects.service.ServiceStatusNotification') def _verify_notification(self, service_obj, mock_notification): service_obj.save() self.assertTrue(mock_notification.called) event_type = mock_notification.call_args[1]['event_type'] priority = mock_notification.call_args[1]['priority'] publisher = mock_notification.call_args[1]['publisher'] payload = mock_notification.call_args[1]['payload'] self.assertEqual(service_obj.host, publisher.host) self.assertEqual(service_obj.binary, publisher.binary) self.assertEqual(fields.NotificationPriority.INFO, priority) self.assertEqual('service', event_type.object) 
self.assertEqual(fields.NotificationAction.UPDATE, event_type.action) for field in service.ServiceStatusPayload.SCHEMA: if field in fake_service: self.assertEqual(fake_service[field], getattr(payload, field)) mock_notification.return_value.emit.assert_called_once_with(self.ctxt) @mock.patch('nova.db.service_update') def test_service_update_with_notification(self, mock_db_service_update): service_obj = objects.Service(context=self.ctxt, id=fake_service['id']) mock_db_service_update.return_value = fake_service for key, value in {'disabled': True, 'disabled_reason': 'my reason', 'forced_down': True}.items(): setattr(service_obj, key, value) self._verify_notification(service_obj) @mock.patch('nova.objects.service.ServiceStatusNotification') @mock.patch('nova.db.service_update') def test_service_update_without_notification(self, mock_db_service_update, mock_notification): service_obj = objects.Service(context=self.ctxt, id=fake_service['id']) mock_db_service_update.return_value = fake_service for key, value in {'report_count': 13, 'last_seen_up': timeutils.utcnow()}.items(): setattr(service_obj, key, value) service_obj.save() self.assertFalse(mock_notification.called) nova-13.0.0/nova/tests/unit/objects/test_quotas.py0000664000567000056710000001467512701407773023366 0ustar jenkinsjenkins00000000000000# Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import context from nova.objects import quotas as quotas_obj from nova import quota from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_objects QUOTAS = quota.QUOTAS class TestQuotasModule(test.NoDBTestCase): def setUp(self): super(TestQuotasModule, self).setUp() self.context = context.RequestContext('fake_user1', 'fake_proj1') self.instance = fake_instance.fake_db_instance( project_id='fake_proj2', user_id='fake_user2') def test_ids_from_instance_non_admin(self): project_id, user_id = quotas_obj.ids_from_instance( self.context, self.instance) self.assertEqual('fake_user2', user_id) self.assertEqual('fake_proj1', project_id) def test_ids_from_instance_admin(self): project_id, user_id = quotas_obj.ids_from_instance( self.context.elevated(), self.instance) self.assertEqual('fake_user2', user_id) self.assertEqual('fake_proj2', project_id) class _TestQuotasObject(object): def setUp(self): super(_TestQuotasObject, self).setUp() self.context = context.RequestContext('fake_user1', 'fake_proj1') self.instance = fake_instance.fake_db_instance( project_id='fake_proj2', user_id='fake_user2') def test_from_reservations(self): fake_reservations = ['1', '2'] quotas = quotas_obj.Quotas.from_reservations( self.context, fake_reservations) self.assertEqual(self.context, quotas._context) self.assertEqual(fake_reservations, quotas.reservations) self.assertIsNone(quotas.project_id) self.assertIsNone(quotas.user_id) def test_from_reservations_bogus(self): fake_reservations = [_TestQuotasObject, _TestQuotasObject] self.assertRaises(ValueError, quotas_obj.Quotas.from_reservations, self.context, fake_reservations) def test_from_reservations_instance(self): fake_reservations = ['1', '2'] quotas = quotas_obj.Quotas.from_reservations( self.context, fake_reservations, instance=self.instance) self.assertEqual(self.context, quotas._context) self.assertEqual(fake_reservations, quotas.reservations) 
self.assertEqual('fake_proj1', quotas.project_id) self.assertEqual('fake_user2', quotas.user_id) def test_from_reservations_instance_admin(self): fake_reservations = ['1', '2'] elevated = self.context.elevated() quotas = quotas_obj.Quotas.from_reservations( elevated, fake_reservations, instance=self.instance) self.assertEqual(elevated, quotas._context) self.assertEqual(fake_reservations, quotas.reservations) self.assertEqual('fake_proj2', quotas.project_id) self.assertEqual('fake_user2', quotas.user_id) def test_reserve(self): fake_reservations = ['1', '2'] quotas = quotas_obj.Quotas(context=self.context) self.mox.StubOutWithMock(QUOTAS, 'reserve') QUOTAS.reserve(self.context, expire='expire', project_id='project_id', user_id='user_id', moo='cow').AndReturn(fake_reservations) self.mox.ReplayAll() quotas.reserve(expire='expire', project_id='project_id', user_id='user_id', moo='cow') self.assertEqual(self.context, quotas._context) self.assertEqual(fake_reservations, quotas.reservations) self.assertEqual('project_id', quotas.project_id) self.assertEqual('user_id', quotas.user_id) def test_commit(self): fake_reservations = ['1', '2'] quotas = quotas_obj.Quotas.from_reservations( self.context, fake_reservations) self.mox.StubOutWithMock(QUOTAS, 'commit') QUOTAS.commit(self.context, fake_reservations, project_id=None, user_id=None) self.mox.ReplayAll() quotas.commit() self.assertIsNone(quotas.reservations) def test_commit_none_reservations(self): quotas = quotas_obj.Quotas.from_reservations(self.context, None) self.mox.StubOutWithMock(QUOTAS, 'commit') self.mox.ReplayAll() quotas.commit() def test_rollback(self): fake_reservations = ['1', '2'] quotas = quotas_obj.Quotas.from_reservations( self.context, fake_reservations) self.mox.StubOutWithMock(QUOTAS, 'rollback') QUOTAS.rollback(self.context, fake_reservations, project_id=None, user_id=None) self.mox.ReplayAll() quotas.rollback() self.assertIsNone(quotas.reservations) def test_rollback_none_reservations(self): quotas = 
quotas_obj.Quotas.from_reservations(self.context, None) self.mox.StubOutWithMock(QUOTAS, 'rollback') self.mox.ReplayAll() quotas.rollback() @mock.patch('nova.db.quota_create') def test_create_limit(self, mock_create): quotas_obj.Quotas.create_limit(self.context, 'fake-project', 'foo', 10, user_id='user') mock_create.assert_called_once_with(self.context, 'fake-project', 'foo', 10, user_id='user') @mock.patch('nova.db.quota_update') def test_update_limit(self, mock_update): quotas_obj.Quotas.update_limit(self.context, 'fake-project', 'foo', 10, user_id='user') mock_update.assert_called_once_with(self.context, 'fake-project', 'foo', 10, user_id='user') class TestQuotasObject(_TestQuotasObject, test_objects._LocalTest): pass class TestRemoteQuotasObject(_TestQuotasObject, test_objects._RemoteTest): pass nova-13.0.0/nova/tests/unit/objects/test_flavor.py0000664000567000056710000002375412701410011023314 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import db from nova import exception from nova.objects import flavor as flavor_obj from nova.tests.unit.objects import test_objects fake_flavor = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'id': 1, 'name': 'm1.foo', 'memory_mb': 1024, 'vcpus': 4, 'root_gb': 20, 'ephemeral_gb': 0, 'flavorid': 'm1.foo', 'swap': 0, 'rxtx_factor': 1.0, 'vcpu_weight': 1, 'disabled': False, 'is_public': True, 'extra_specs': {'foo': 'bar'}, } class _TestFlavor(object): @staticmethod def _compare(test, db, obj): for field, value in db.items(): test.assertEqual(db[field], obj[field]) def test_get_by_id(self): with mock.patch.object(db, 'flavor_get') as get: get.return_value = fake_flavor flavor = flavor_obj.Flavor.get_by_id(self.context, 1) self._compare(self, fake_flavor, flavor) def test_get_by_name(self): with mock.patch.object(db, 'flavor_get_by_name') as get_by_name: get_by_name.return_value = fake_flavor flavor = flavor_obj.Flavor.get_by_name(self.context, 'm1.foo') self._compare(self, fake_flavor, flavor) def test_get_by_flavor_id(self): with mock.patch.object(db, 'flavor_get_by_flavor_id') as get_by_id: get_by_id.return_value = fake_flavor flavor = flavor_obj.Flavor.get_by_flavor_id(self.context, 'm1.foo') self._compare(self, fake_flavor, flavor) def test_add_access(self): elevated = self.context.elevated() flavor = flavor_obj.Flavor(context=elevated, flavorid='123') with mock.patch.object(db, 'flavor_access_add') as add: flavor.add_access('456') add.assert_called_once_with(elevated, '123', '456') def test_add_access_with_dirty_projects(self): flavor = flavor_obj.Flavor(context=self.context, projects=['1']) self.assertRaises(exception.ObjectActionError, flavor.add_access, '2') def test_remove_access(self): elevated = self.context.elevated() flavor = flavor_obj.Flavor(context=elevated, flavorid='123') with mock.patch.object(db, 'flavor_access_remove') as remove: flavor.remove_access('456') remove.assert_called_once_with(elevated, 
'123', '456') def test_create(self): flavor = flavor_obj.Flavor(context=self.context) flavor.name = 'm1.foo' flavor.extra_specs = fake_flavor['extra_specs'] with mock.patch.object(db, 'flavor_create') as create: create.return_value = fake_flavor flavor.create() self.assertEqual(self.context, flavor._context) # NOTE(danms): Orphan this to avoid lazy-loads flavor._context = None self._compare(self, fake_flavor, flavor) def test_create_with_projects(self): context = self.context.elevated() flavor = flavor_obj.Flavor(context=context) flavor.name = 'm1.foo' flavor.extra_specs = fake_flavor['extra_specs'] flavor.projects = ['project-1', 'project-2'] db_flavor = dict(fake_flavor, projects=list(flavor.projects)) with mock.patch.multiple(db, flavor_create=mock.DEFAULT, flavor_access_get_by_flavor_id=mock.DEFAULT ) as methods: methods['flavor_create'].return_value = db_flavor methods['flavor_access_get_by_flavor_id'].return_value = [ {'project_id': 'project-1'}, {'project_id': 'project-2'}] flavor.create() methods['flavor_create'].assert_called_once_with( context, {'name': 'm1.foo', 'extra_specs': fake_flavor['extra_specs']}, projects=['project-1', 'project-2']) self.assertEqual(context, flavor._context) # NOTE(danms): Orphan this to avoid lazy-loads flavor._context = None self._compare(self, fake_flavor, flavor) self.assertEqual(['project-1', 'project-2'], flavor.projects) def test_create_with_id(self): flavor = flavor_obj.Flavor(context=self.context, id=123) self.assertRaises(exception.ObjectActionError, flavor.create) @mock.patch('nova.db.flavor_access_add') @mock.patch('nova.db.flavor_access_remove') @mock.patch('nova.db.flavor_extra_specs_delete') @mock.patch('nova.db.flavor_extra_specs_update_or_create') def test_save(self, mock_update, mock_delete, mock_remove, mock_add): ctxt = self.context.elevated() extra_specs = {'key1': 'value1', 'key2': 'value2'} projects = ['project-1', 'project-2'] flavor = flavor_obj.Flavor(context=ctxt, flavorid='foo', 
extra_specs=extra_specs, projects=projects) flavor.obj_reset_changes() # Test deleting an extra_specs key and project del flavor.extra_specs['key1'] del flavor.projects[-1] self.assertEqual(set(['extra_specs', 'projects']), flavor.obj_what_changed()) flavor.save() self.assertEqual({'key2': 'value2'}, flavor.extra_specs) mock_delete.assert_called_once_with(ctxt, 'foo', 'key1') self.assertEqual(['project-1'], flavor.projects) mock_remove.assert_called_once_with(ctxt, 'foo', 'project-2') # Test updating an extra_specs key value flavor.extra_specs['key2'] = 'foobar' self.assertEqual(set(['extra_specs']), flavor.obj_what_changed()) flavor.save() self.assertEqual({'key2': 'foobar'}, flavor.extra_specs) mock_update.assert_called_with(ctxt, 'foo', {'key2': 'foobar'}) # Test adding an extra_specs and project flavor.extra_specs['key3'] = 'value3' flavor.projects.append('project-3') self.assertEqual(set(['extra_specs', 'projects']), flavor.obj_what_changed()) flavor.save() self.assertEqual({'key2': 'foobar', 'key3': 'value3'}, flavor.extra_specs) mock_update.assert_called_with(ctxt, 'foo', {'key2': 'foobar', 'key3': 'value3'}) self.assertEqual(['project-1', 'project-3'], flavor.projects) mock_add.assert_called_once_with(ctxt, 'foo', 'project-3') @mock.patch('nova.db.flavor_create') @mock.patch('nova.db.flavor_extra_specs_delete') @mock.patch('nova.db.flavor_extra_specs_update_or_create') def test_save_deleted_extra_specs(self, mock_update, mock_delete, mock_create): mock_create.return_value = dict(fake_flavor, extra_specs={'key1': 'value1'}) ctxt = self.context.elevated() flavor = flavor_obj.Flavor(context=ctxt) flavor.flavorid = 'test' flavor.extra_specs = {'key1': 'value1'} flavor.create() flavor.extra_specs = {} flavor.save() mock_delete.assert_called_once_with(ctxt, flavor.flavorid, 'key1') self.assertFalse(mock_update.called) def test_save_invalid_fields(self): flavor = flavor_obj.Flavor(id=123) self.assertRaises(exception.ObjectActionError, flavor.save) def 
test_destroy(self): flavor = flavor_obj.Flavor(context=self.context, id=123, name='foo') with mock.patch.object(db, 'flavor_destroy') as destroy: flavor.destroy() destroy.assert_called_once_with(self.context, flavor.name) def test_load_projects(self): flavor = flavor_obj.Flavor(context=self.context, flavorid='foo') with mock.patch.object(db, 'flavor_access_get_by_flavor_id') as get: get.return_value = [{'project_id': 'project-1'}] projects = flavor.projects self.assertEqual(['project-1'], projects) self.assertNotIn('projects', flavor.obj_what_changed()) def test_load_anything_else(self): flavor = flavor_obj.Flavor() self.assertRaises(exception.ObjectActionError, getattr, flavor, 'name') class TestFlavor(test_objects._LocalTest, _TestFlavor): pass class TestFlavorRemote(test_objects._RemoteTest, _TestFlavor): pass class _TestFlavorList(object): def test_get_all(self): with mock.patch.object(db, 'flavor_get_all') as get_all: get_all.return_value = [fake_flavor] filters = {'min_memory_mb': 4096} flavors = flavor_obj.FlavorList.get_all(self.context, inactive=False, filters=filters, sort_key='id', sort_dir='asc') self.assertEqual(1, len(flavors)) _TestFlavor._compare(self, fake_flavor, flavors[0]) get_all.assert_called_once_with(self.context, inactive=False, filters=filters, sort_key='id', sort_dir='asc', limit=None, marker=None) class TestFlavorList(test_objects._LocalTest, _TestFlavorList): pass class TestFlavorListRemote(test_objects._RemoteTest, _TestFlavorList): pass nova-13.0.0/nova/tests/unit/objects/test_instance_info_cache.py0000664000567000056710000001366412701407773026011 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import mock from oslo_utils import timeutils from nova.cells import opts as cells_opts from nova.cells import rpcapi as cells_rpcapi from nova import db from nova import exception from nova.network import model as network_model from nova.objects import instance_info_cache from nova.tests.unit.objects import test_objects from nova.tests import uuidsentinel as uuids fake_info_cache = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'instance_uuid': uuids.info_instance, 'network_info': '[]', } class _TestInstanceInfoCacheObject(object): def test_get_by_instance_uuid(self): nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}]) self.mox.StubOutWithMock(db, 'instance_info_cache_get') db.instance_info_cache_get( self.context, uuids.info_instance).AndReturn( dict(fake_info_cache, network_info=nwinfo.json())) self.mox.ReplayAll() obj = instance_info_cache.InstanceInfoCache.get_by_instance_uuid( self.context, uuids.info_instance) self.assertEqual(uuids.info_instance, obj.instance_uuid) self.assertEqual(nwinfo, obj.network_info) def test_get_by_instance_uuid_no_entries(self): self.mox.StubOutWithMock(db, 'instance_info_cache_get') db.instance_info_cache_get(self.context, uuids.info_instance).AndReturn(None) self.mox.ReplayAll() self.assertRaises( exception.InstanceInfoCacheNotFound, instance_info_cache.InstanceInfoCache.get_by_instance_uuid, self.context, uuids.info_instance) def test_new(self): obj = instance_info_cache.InstanceInfoCache.new(self.context, uuids.info_instance) self.assertEqual(set(['instance_uuid', 
'network_info']), obj.obj_what_changed()) self.assertEqual(uuids.info_instance, obj.instance_uuid) self.assertIsNone(obj.network_info) def _save_helper(self, cell_type, update_cells): obj = instance_info_cache.InstanceInfoCache() cells_api = cells_rpcapi.CellsAPI() self.mox.StubOutWithMock(db, 'instance_info_cache_update') self.mox.StubOutWithMock(cells_opts, 'get_cell_type') self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI', use_mock_anything=True) self.mox.StubOutWithMock(cells_api, 'instance_info_cache_update_at_top') nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}]) new_info_cache = fake_info_cache.copy() new_info_cache['network_info'] = nwinfo.json() db.instance_info_cache_update( self.context, uuids.info_instance, {'network_info': nwinfo.json()}).AndReturn(new_info_cache) if update_cells: cells_opts.get_cell_type().AndReturn(cell_type) if cell_type == 'compute': cells_rpcapi.CellsAPI().AndReturn(cells_api) cells_api.instance_info_cache_update_at_top( self.context, 'foo') self.mox.ReplayAll() obj._context = self.context obj.instance_uuid = uuids.info_instance obj.network_info = nwinfo obj.save(update_cells=update_cells) def test_save_with_update_cells_and_compute_cell(self): self._save_helper('compute', True) def test_save_with_update_cells_and_non_compute_cell(self): self._save_helper(None, True) def test_save_without_update_cells(self): self._save_helper(None, False) @mock.patch.object(db, 'instance_info_cache_update') def test_save_updates_self(self, mock_update): fake_updated_at = datetime.datetime(2015, 1, 1) nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}]) nwinfo_json = nwinfo.json() new_info_cache = fake_info_cache.copy() new_info_cache['id'] = 1 new_info_cache['updated_at'] = fake_updated_at new_info_cache['network_info'] = nwinfo_json mock_update.return_value = new_info_cache obj = instance_info_cache.InstanceInfoCache(context=self.context) obj.instance_uuid = uuids.info_instance obj.network_info = nwinfo_json obj.save() 
mock_update.assert_called_once_with(self.context, uuids.info_instance, {'network_info': nwinfo_json}) self.assertEqual(timeutils.normalize_time(fake_updated_at), timeutils.normalize_time(obj.updated_at)) def test_refresh(self): obj = instance_info_cache.InstanceInfoCache.new(self.context, uuids.info_instance_1) self.mox.StubOutWithMock(db, 'instance_info_cache_get') db.instance_info_cache_get( self.context, uuids.info_instance_1).AndReturn(fake_info_cache) self.mox.ReplayAll() obj.refresh() self.assertEqual(fake_info_cache['instance_uuid'], obj.instance_uuid) class TestInstanceInfoCacheObject(test_objects._LocalTest, _TestInstanceInfoCacheObject): pass class TestInstanceInfoCacheObjectRemote(test_objects._RemoteTest, _TestInstanceInfoCacheObject): pass nova-13.0.0/nova/tests/unit/objects/test_instance_action.py0000664000567000056710000004031012701407773025174 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import traceback import mock from oslo_utils import fixture as utils_fixture from oslo_utils import timeutils import six from nova import db from nova.objects import instance_action from nova import test from nova.tests.unit.objects import test_objects NOW = timeutils.utcnow().replace(microsecond=0) fake_action = { 'created_at': NOW, 'deleted_at': None, 'updated_at': None, 'deleted': False, 'id': 123, 'action': 'fake-action', 'instance_uuid': 'fake-uuid', 'request_id': 'fake-request', 'user_id': 'fake-user', 'project_id': 'fake-project', 'start_time': NOW, 'finish_time': None, 'message': 'foo', } fake_event = { 'created_at': NOW, 'deleted_at': None, 'updated_at': None, 'deleted': False, 'id': 123, 'event': 'fake-event', 'action_id': 123, 'start_time': NOW, 'finish_time': None, 'result': 'fake-result', 'traceback': 'fake-tb', } class _TestInstanceActionObject(object): @mock.patch.object(db, 'action_get_by_request_id') def test_get_by_request_id(self, mock_get): context = self.context mock_get.return_value = fake_action action = instance_action.InstanceAction.get_by_request_id( context, 'fake-uuid', 'fake-request') self.compare_obj(action, fake_action) mock_get.assert_called_once_with(context, 'fake-uuid', 'fake-request') def test_pack_action_start(self): values = instance_action.InstanceAction.pack_action_start( self.context, 'fake-uuid', 'fake-action') self.assertEqual(values['request_id'], self.context.request_id) self.assertEqual(values['user_id'], self.context.user_id) self.assertEqual(values['project_id'], self.context.project_id) self.assertEqual(values['instance_uuid'], 'fake-uuid') self.assertEqual(values['action'], 'fake-action') self.assertEqual(values['start_time'].replace(tzinfo=None), self.context.timestamp) def test_pack_action_finish(self): self.useFixture(utils_fixture.TimeFixture(NOW)) values = instance_action.InstanceAction.pack_action_finish( self.context, 'fake-uuid') self.assertEqual(values['request_id'], self.context.request_id) 
self.assertEqual(values['instance_uuid'], 'fake-uuid') self.assertEqual(values['finish_time'].replace(tzinfo=None), NOW) @mock.patch.object(db, 'action_start') def test_action_start(self, mock_start): test_class = instance_action.InstanceAction expected_packed_values = test_class.pack_action_start( self.context, 'fake-uuid', 'fake-action') mock_start.return_value = fake_action action = instance_action.InstanceAction.action_start( self.context, 'fake-uuid', 'fake-action', want_result=True) mock_start.assert_called_once_with(self.context, expected_packed_values) self.compare_obj(action, fake_action) @mock.patch.object(db, 'action_start') def test_action_start_no_result(self, mock_start): test_class = instance_action.InstanceAction expected_packed_values = test_class.pack_action_start( self.context, 'fake-uuid', 'fake-action') mock_start.return_value = fake_action action = instance_action.InstanceAction.action_start( self.context, 'fake-uuid', 'fake-action', want_result=False) mock_start.assert_called_once_with(self.context, expected_packed_values) self.assertIsNone(action) @mock.patch.object(db, 'action_finish') def test_action_finish(self, mock_finish): self.useFixture(utils_fixture.TimeFixture(NOW)) test_class = instance_action.InstanceAction expected_packed_values = test_class.pack_action_finish( self.context, 'fake-uuid') mock_finish.return_value = fake_action action = instance_action.InstanceAction.action_finish( self.context, 'fake-uuid', want_result=True) mock_finish.assert_called_once_with(self.context, expected_packed_values) self.compare_obj(action, fake_action) @mock.patch.object(db, 'action_finish') def test_action_finish_no_result(self, mock_finish): self.useFixture(utils_fixture.TimeFixture(NOW)) test_class = instance_action.InstanceAction expected_packed_values = test_class.pack_action_finish( self.context, 'fake-uuid') mock_finish.return_value = fake_action action = instance_action.InstanceAction.action_finish( self.context, 'fake-uuid', 
want_result=False) mock_finish.assert_called_once_with(self.context, expected_packed_values) self.assertIsNone(action) @mock.patch.object(db, 'action_finish') @mock.patch.object(db, 'action_start') def test_finish(self, mock_start, mock_finish): self.useFixture(utils_fixture.TimeFixture(NOW)) expected_packed_action_start = { 'request_id': self.context.request_id, 'user_id': self.context.user_id, 'project_id': self.context.project_id, 'instance_uuid': 'fake-uuid', 'action': 'fake-action', 'start_time': self.context.timestamp, } expected_packed_action_finish = { 'request_id': self.context.request_id, 'instance_uuid': 'fake-uuid', 'finish_time': NOW, } mock_start.return_value = fake_action mock_finish.return_value = fake_action action = instance_action.InstanceAction.action_start( self.context, 'fake-uuid', 'fake-action') action.finish() mock_start.assert_called_once_with(self.context, expected_packed_action_start) mock_finish.assert_called_once_with(self.context, expected_packed_action_finish) self.compare_obj(action, fake_action) @mock.patch.object(db, 'actions_get') def test_get_list(self, mock_get): fake_actions = [dict(fake_action, id=1234), dict(fake_action, id=5678)] mock_get.return_value = fake_actions obj_list = instance_action.InstanceActionList.get_by_instance_uuid( self.context, 'fake-uuid') for index, action in enumerate(obj_list): self.compare_obj(action, fake_actions[index]) mock_get.assert_called_once_with(self.context, 'fake-uuid') class TestInstanceActionObject(test_objects._LocalTest, _TestInstanceActionObject): pass class TestRemoteInstanceActionObject(test_objects._RemoteTest, _TestInstanceActionObject): pass class _TestInstanceActionEventObject(object): @mock.patch.object(db, 'action_event_get_by_id') def test_get_by_id(self, mock_get): mock_get.return_value = fake_event event = instance_action.InstanceActionEvent.get_by_id( self.context, 'fake-action-id', 'fake-event-id') self.compare_obj(event, fake_event) 
mock_get.assert_called_once_with(self.context, 'fake-action-id', 'fake-event-id') @mock.patch.object(db, 'action_event_start') def test_event_start(self, mock_start): self.useFixture(utils_fixture.TimeFixture(NOW)) test_class = instance_action.InstanceActionEvent expected_packed_values = test_class.pack_action_event_start( self.context, 'fake-uuid', 'fake-event') mock_start.return_value = fake_event event = instance_action.InstanceActionEvent.event_start( self.context, 'fake-uuid', 'fake-event', want_result=True) mock_start.assert_called_once_with(self.context, expected_packed_values) self.compare_obj(event, fake_event) @mock.patch.object(db, 'action_event_start') def test_event_start_no_result(self, mock_start): self.useFixture(utils_fixture.TimeFixture(NOW)) test_class = instance_action.InstanceActionEvent expected_packed_values = test_class.pack_action_event_start( self.context, 'fake-uuid', 'fake-event') mock_start.return_value = fake_event event = instance_action.InstanceActionEvent.event_start( self.context, 'fake-uuid', 'fake-event', want_result=False) mock_start.assert_called_once_with(self.context, expected_packed_values) self.assertIsNone(event) @mock.patch.object(db, 'action_event_finish') def test_event_finish(self, mock_finish): self.useFixture(utils_fixture.TimeFixture(NOW)) test_class = instance_action.InstanceActionEvent expected_packed_values = test_class.pack_action_event_finish( self.context, 'fake-uuid', 'fake-event') expected_packed_values['finish_time'] = NOW mock_finish.return_value = fake_event event = instance_action.InstanceActionEvent.event_finish( self.context, 'fake-uuid', 'fake-event', want_result=True) mock_finish.assert_called_once_with(self.context, expected_packed_values) self.compare_obj(event, fake_event) @mock.patch.object(db, 'action_event_finish') def test_event_finish_no_result(self, mock_finish): self.useFixture(utils_fixture.TimeFixture(NOW)) test_class = instance_action.InstanceActionEvent expected_packed_values = 
test_class.pack_action_event_finish( self.context, 'fake-uuid', 'fake-event') expected_packed_values['finish_time'] = NOW mock_finish.return_value = fake_event event = instance_action.InstanceActionEvent.event_finish( self.context, 'fake-uuid', 'fake-event', want_result=False) mock_finish.assert_called_once_with(self.context, expected_packed_values) self.assertIsNone(event) @mock.patch.object(traceback, 'format_tb') @mock.patch.object(db, 'action_event_finish') def test_event_finish_with_failure(self, mock_finish, mock_tb): self.useFixture(utils_fixture.TimeFixture(NOW)) test_class = instance_action.InstanceActionEvent expected_packed_values = test_class.pack_action_event_finish( self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb') expected_packed_values['finish_time'] = NOW mock_finish.return_value = fake_event event = test_class.event_finish_with_failure( self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb', want_result=True) mock_finish.assert_called_once_with(self.context, expected_packed_values) self.compare_obj(event, fake_event) @mock.patch.object(traceback, 'format_tb') @mock.patch.object(db, 'action_event_finish') def test_event_finish_with_failure_legacy(self, mock_finish, mock_tb): # Tests that exc_tb is serialized when it's not a string type. 
mock_tb.return_value = 'fake-tb' self.useFixture(utils_fixture.TimeFixture(NOW)) test_class = instance_action.InstanceActionEvent expected_packed_values = test_class.pack_action_event_finish( self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb') expected_packed_values['finish_time'] = NOW mock_finish.return_value = fake_event fake_tb = mock.sentinel.fake_tb event = test_class.event_finish_with_failure( self.context, 'fake-uuid', 'fake-event', exc_val='val', exc_tb=fake_tb, want_result=True) mock_finish.assert_called_once_with(self.context, expected_packed_values) self.compare_obj(event, fake_event) mock_tb.assert_called_once_with(fake_tb) @mock.patch.object(db, 'action_event_finish') def test_event_finish_with_failure_legacy_unicode(self, mock_finish): # Tests that traceback.format_tb is not called when exc_tb is unicode. self.useFixture(utils_fixture.TimeFixture(NOW)) test_class = instance_action.InstanceActionEvent expected_packed_values = test_class.pack_action_event_finish( self.context, 'fake-uuid', 'fake-event', 'val', six.text_type('fake-tb')) expected_packed_values['finish_time'] = NOW mock_finish.return_value = fake_event event = test_class.event_finish_with_failure( self.context, 'fake-uuid', 'fake-event', exc_val='val', exc_tb=six.text_type('fake-tb'), want_result=True) mock_finish.assert_called_once_with(self.context, expected_packed_values) self.compare_obj(event, fake_event) @mock.patch.object(traceback, 'format_tb') @mock.patch.object(db, 'action_event_finish') def test_event_finish_with_failure_no_result(self, mock_finish, mock_tb): # Tests that traceback.format_tb is not called when exc_tb is a str # and want_result is False, so no event should come back. 
mock_tb.return_value = 'fake-tb' self.useFixture(utils_fixture.TimeFixture(NOW)) test_class = instance_action.InstanceActionEvent expected_packed_values = test_class.pack_action_event_finish( self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb') expected_packed_values['finish_time'] = NOW mock_finish.return_value = fake_event event = test_class.event_finish_with_failure( self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb', want_result=False) mock_finish.assert_called_once_with(self.context, expected_packed_values) self.assertIsNone(event) self.assertFalse(mock_tb.called) @mock.patch.object(db, 'action_events_get') def test_get_by_action(self, mock_get): fake_events = [dict(fake_event, id=1234), dict(fake_event, id=5678)] mock_get.return_value = fake_events obj_list = instance_action.InstanceActionEventList.get_by_action( self.context, 'fake-action-id') for index, event in enumerate(obj_list): self.compare_obj(event, fake_events[index]) mock_get.assert_called_once_with(self.context, 'fake-action-id') @mock.patch('nova.objects.instance_action.InstanceActionEvent.' 
'pack_action_event_finish') @mock.patch('traceback.format_tb') def test_event_finish_with_failure_serialized(self, mock_format, mock_pack): mock_format.return_value = 'traceback' mock_pack.side_effect = test.TestingException self.assertRaises( test.TestingException, instance_action.InstanceActionEvent.event_finish_with_failure, self.context, 'fake-uuid', 'fake-event', exc_val=mock.sentinel.exc_val, exc_tb=mock.sentinel.exc_tb) mock_pack.assert_called_once_with(self.context, 'fake-uuid', 'fake-event', exc_val=str(mock.sentinel.exc_val), exc_tb='traceback') mock_format.assert_called_once_with(mock.sentinel.exc_tb) class TestInstanceActionEventObject(test_objects._LocalTest, _TestInstanceActionEventObject): pass class TestRemoteInstanceActionEventObject(test_objects._RemoteTest, _TestInstanceActionEventObject): pass nova-13.0.0/nova/tests/unit/objects/test_aggregate.py0000664000567000056710000002073312701407773023770 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_utils import timeutils from nova import db from nova import exception from nova.objects import aggregate from nova.tests.unit import fake_notifier from nova.tests.unit.objects import test_objects from nova.tests import uuidsentinel NOW = timeutils.utcnow().replace(microsecond=0) fake_aggregate = { 'created_at': NOW, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 123, 'uuid': uuidsentinel.fake_aggregate, 'name': 'fake-aggregate', 'hosts': ['foo', 'bar'], 'metadetails': {'this': 'that'}, } SUBS = {'metadata': 'metadetails'} class _TestAggregateObject(object): def test_get_by_id(self): self.mox.StubOutWithMock(db, 'aggregate_get') db.aggregate_get(self.context, 123).AndReturn(fake_aggregate) self.mox.ReplayAll() agg = aggregate.Aggregate.get_by_id(self.context, 123) self.compare_obj(agg, fake_aggregate, subs=SUBS) @mock.patch('nova.objects.Aggregate.save') @mock.patch('nova.db.aggregate_get') def test_load_allocates_uuid(self, mock_get, mock_save): fake_agg = dict(fake_aggregate) del fake_agg['uuid'] mock_get.return_value = fake_agg uuid = uuidsentinel.aggregate with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_g: mock_g.return_value = uuid obj = aggregate.Aggregate.get_by_id(self.context, 123) mock_g.assert_called_once_with() self.assertEqual(uuid, obj.uuid) mock_save.assert_called_once_with() def test_create(self): self.mox.StubOutWithMock(db, 'aggregate_create') db.aggregate_create(self.context, {'name': 'foo', 'uuid': uuidsentinel.fake_agg}, metadata={'one': 'two'}).AndReturn(fake_aggregate) self.mox.ReplayAll() agg = aggregate.Aggregate(context=self.context) agg.name = 'foo' agg.metadata = {'one': 'two'} agg.uuid = uuidsentinel.fake_agg agg.create() self.compare_obj(agg, fake_aggregate, subs=SUBS) def test_recreate_fails(self): self.mox.StubOutWithMock(db, 'aggregate_create') db.aggregate_create(self.context, {'name': 'foo', 'uuid': uuidsentinel.fake_agg}, metadata={'one': 'two'}).AndReturn(fake_aggregate) 
self.mox.ReplayAll() agg = aggregate.Aggregate(context=self.context) agg.name = 'foo' agg.metadata = {'one': 'two'} agg.uuid = uuidsentinel.fake_agg agg.create() self.assertRaises(exception.ObjectActionError, agg.create) def test_save(self): self.mox.StubOutWithMock(db, 'aggregate_update') db.aggregate_update(self.context, 123, {'name': 'baz'}).AndReturn( fake_aggregate) self.mox.ReplayAll() agg = aggregate.Aggregate(context=self.context) agg.id = 123 agg.name = 'baz' agg.save() self.compare_obj(agg, fake_aggregate, subs=SUBS) def test_save_and_create_no_hosts(self): agg = aggregate.Aggregate(context=self.context) agg.id = 123 agg.hosts = ['foo', 'bar'] self.assertRaises(exception.ObjectActionError, agg.create) self.assertRaises(exception.ObjectActionError, agg.save) def test_update_metadata(self): self.mox.StubOutWithMock(db, 'aggregate_metadata_delete') self.mox.StubOutWithMock(db, 'aggregate_metadata_add') db.aggregate_metadata_delete(self.context, 123, 'todelete') db.aggregate_metadata_add(self.context, 123, {'toadd': 'myval'}) self.mox.ReplayAll() fake_notifier.NOTIFICATIONS = [] agg = aggregate.Aggregate() agg._context = self.context agg.id = 123 agg.metadata = {'foo': 'bar'} agg.obj_reset_changes() agg.update_metadata({'todelete': None, 'toadd': 'myval'}) self.assertEqual(2, len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual('aggregate.updatemetadata.start', msg.event_type) self.assertEqual({'todelete': None, 'toadd': 'myval'}, msg.payload['meta_data']) msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual('aggregate.updatemetadata.end', msg.event_type) self.assertEqual({'todelete': None, 'toadd': 'myval'}, msg.payload['meta_data']) self.assertEqual({'foo': 'bar', 'toadd': 'myval'}, agg.metadata) def test_destroy(self): self.mox.StubOutWithMock(db, 'aggregate_delete') db.aggregate_delete(self.context, 123) self.mox.ReplayAll() agg = aggregate.Aggregate(context=self.context) agg.id = 123 agg.destroy() def 
test_add_host(self): self.mox.StubOutWithMock(db, 'aggregate_host_add') db.aggregate_host_add(self.context, 123, 'bar' ).AndReturn({'host': 'bar'}) self.mox.ReplayAll() agg = aggregate.Aggregate() agg.id = 123 agg.hosts = ['foo'] agg._context = self.context agg.add_host('bar') self.assertEqual(agg.hosts, ['foo', 'bar']) def test_delete_host(self): self.mox.StubOutWithMock(db, 'aggregate_host_delete') db.aggregate_host_delete(self.context, 123, 'foo') self.mox.ReplayAll() agg = aggregate.Aggregate() agg.id = 123 agg.hosts = ['foo', 'bar'] agg._context = self.context agg.delete_host('foo') self.assertEqual(agg.hosts, ['bar']) def test_availability_zone(self): agg = aggregate.Aggregate() agg.metadata = {'availability_zone': 'foo'} self.assertEqual('foo', agg.availability_zone) def test_get_all(self): self.mox.StubOutWithMock(db, 'aggregate_get_all') db.aggregate_get_all(self.context).AndReturn([fake_aggregate]) self.mox.ReplayAll() aggs = aggregate.AggregateList.get_all(self.context) self.assertEqual(1, len(aggs)) self.compare_obj(aggs[0], fake_aggregate, subs=SUBS) def test_by_host(self): self.mox.StubOutWithMock(db, 'aggregate_get_by_host') db.aggregate_get_by_host(self.context, 'fake-host', key=None, ).AndReturn([fake_aggregate]) self.mox.ReplayAll() aggs = aggregate.AggregateList.get_by_host(self.context, 'fake-host') self.assertEqual(1, len(aggs)) self.compare_obj(aggs[0], fake_aggregate, subs=SUBS) @mock.patch('nova.db.aggregate_get_by_metadata_key') def test_get_by_metadata_key(self, get_by_metadata_key): get_by_metadata_key.return_value = [fake_aggregate] aggs = aggregate.AggregateList.get_by_metadata_key( self.context, 'this') self.assertEqual(1, len(aggs)) self.compare_obj(aggs[0], fake_aggregate, subs=SUBS) @mock.patch('nova.db.aggregate_get_by_metadata_key') def test_get_by_metadata_key_and_hosts_no_match(self, get_by_metadata_key): get_by_metadata_key.return_value = [fake_aggregate] aggs = aggregate.AggregateList.get_by_metadata_key( self.context, 'this', 
hosts=['baz']) self.assertEqual(0, len(aggs)) @mock.patch('nova.db.aggregate_get_by_metadata_key') def test_get_by_metadata_key_and_hosts_match(self, get_by_metadata_key): get_by_metadata_key.return_value = [fake_aggregate] aggs = aggregate.AggregateList.get_by_metadata_key( self.context, 'this', hosts=['foo', 'bar']) self.assertEqual(1, len(aggs)) self.compare_obj(aggs[0], fake_aggregate, subs=SUBS) class TestAggregateObject(test_objects._LocalTest, _TestAggregateObject): pass class TestRemoteAggregateObject(test_objects._RemoteTest, _TestAggregateObject): pass nova-13.0.0/nova/tests/unit/objects/test_migration_context.py0000664000567000056710000001136412701407773025577 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid import mock from oslo_serialization import jsonutils from nova import exception from nova import objects from nova.tests.unit.objects import test_instance_numa_topology from nova.tests.unit.objects import test_objects fake_instance_uuid = str(uuid.uuid4()) fake_migration_context_obj = objects.MigrationContext() fake_migration_context_obj.instance_uuid = fake_instance_uuid fake_migration_context_obj.migration_id = 42 fake_migration_context_obj.new_numa_topology = ( test_instance_numa_topology.fake_obj_numa_topology.obj_clone()) fake_migration_context_obj.old_numa_topology = None fake_db_context = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'instance_uuid': fake_instance_uuid, 'migration_context': jsonutils.dumps( fake_migration_context_obj.obj_to_primitive()), } def get_fake_migration_context_obj(ctxt): obj = fake_migration_context_obj.obj_clone() obj._context = ctxt return obj class _TestMigrationContext(object): @mock.patch('nova.db.instance_extra_update_by_uuid') def test_create(self, mock_update): ctxt_obj = get_fake_migration_context_obj(self.context) ctxt_obj._save() self.assertEqual(1, len(mock_update.call_args_list)) update_call = mock_update.call_args self.assertEqual(self.context, update_call[0][0]) self.assertEqual(fake_instance_uuid, update_call[0][1]) self.assertIsInstance(ctxt_obj.new_numa_topology, objects.InstanceNUMATopology) self.assertIsNone(ctxt_obj.old_numa_topology) @mock.patch('nova.db.instance_extra_update_by_uuid') def test_destroy(self, mock_update): objects.MigrationContext._destroy(self.context, fake_instance_uuid) self.assertEqual(1, len(mock_update.call_args_list)) update_call = mock_update.call_args self.assertEqual(self.context, update_call[0][0]) self.assertEqual(fake_instance_uuid, update_call[0][1]) self.assertEqual({'migration_context': None}, update_call[0][2]) def _test_get_by_instance_uuid(self, db_data): mig_context = objects.MigrationContext.get_by_instance_uuid( self.context, 
fake_db_context['instance_uuid']) if mig_context: self.assertEqual(fake_db_context['instance_uuid'], mig_context.instance_uuid) expected_mig_context = db_data and db_data.get('migration_context') expected_mig_context = objects.MigrationContext.obj_from_db_obj( expected_mig_context) self.assertEqual(expected_mig_context.instance_uuid, mig_context.instance_uuid) self.assertEqual(expected_mig_context.migration_id, mig_context.migration_id) self.assertIsInstance(expected_mig_context.new_numa_topology, mig_context.new_numa_topology.__class__) self.assertIsInstance(expected_mig_context.old_numa_topology, mig_context.old_numa_topology.__class__) else: self.assertIsNone(mig_context) @mock.patch('nova.db.instance_extra_get_by_instance_uuid') def test_get_by_instance_uuid(self, mock_get): mock_get.return_value = fake_db_context self._test_get_by_instance_uuid(fake_db_context) @mock.patch('nova.db.instance_extra_get_by_instance_uuid') def test_get_by_instance_uuid_none(self, mock_get): db_context = fake_db_context.copy() db_context['migration_context'] = None mock_get.return_value = db_context self._test_get_by_instance_uuid(db_context) @mock.patch('nova.db.instance_extra_get_by_instance_uuid') def test_get_by_instance_uuid_missing(self, mock_get): mock_get.return_value = None self.assertRaises( exception.MigrationContextNotFound, objects.MigrationContext.get_by_instance_uuid, self.context, 'fake_uuid') class TestMigrationContext(test_objects._LocalTest, _TestMigrationContext): pass class TestMigrationContextRemote(test_objects._RemoteTest, _TestMigrationContext): pass nova-13.0.0/nova/tests/unit/objects/test_volume_usage.py0000664000567000056710000000610412701407773024531 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import timeutils from nova import objects from nova.tests.unit.objects import test_objects NOW = timeutils.utcnow().replace(microsecond=0) fake_vol_usage = { 'created_at': NOW, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'id': 1, 'volume_id': 'fake-vol-id', 'instance_uuid': 'fake-inst-uuid', 'project_id': 'fake-project-id', 'user_id': 'fake-user-id', 'availability_zone': None, 'tot_last_refreshed': None, 'tot_reads': 0, 'tot_read_bytes': 0, 'tot_writes': 0, 'tot_write_bytes': 0, 'curr_last_refreshed': NOW, 'curr_reads': 10, 'curr_read_bytes': 20, 'curr_writes': 30, 'curr_write_bytes': 40, } class _TestVolumeUsage(object): @mock.patch('nova.db.vol_usage_update', return_value=fake_vol_usage) def test_save(self, mock_upd): vol_usage = objects.VolumeUsage(self.context) vol_usage.volume_id = 'fake-vol-id' vol_usage.instance_uuid = 'fake-inst-uuid' vol_usage.project_id = 'fake-project-id' vol_usage.user_id = 'fake-user-id' vol_usage.availability_zone = None vol_usage.curr_reads = 10 vol_usage.curr_read_bytes = 20 vol_usage.curr_writes = 30 vol_usage.curr_write_bytes = 40 vol_usage.save() mock_upd.assert_called_once_with( self.context, 'fake-vol-id', 10, 20, 30, 40, 'fake-inst-uuid', 'fake-project-id', 'fake-user-id', None, update_totals=False) self.compare_obj(vol_usage, fake_vol_usage) @mock.patch('nova.db.vol_usage_update', return_value=fake_vol_usage) def test_save_update_totals(self, mock_upd): vol_usage = objects.VolumeUsage(self.context) vol_usage.volume_id = 'fake-vol-id' vol_usage.instance_uuid = 'fake-inst-uuid' 
vol_usage.project_id = 'fake-project-id' vol_usage.user_id = 'fake-user-id' vol_usage.availability_zone = None vol_usage.curr_reads = 10 vol_usage.curr_read_bytes = 20 vol_usage.curr_writes = 30 vol_usage.curr_write_bytes = 40 vol_usage.save(update_totals=True) mock_upd.assert_called_once_with( self.context, 'fake-vol-id', 10, 20, 30, 40, 'fake-inst-uuid', 'fake-project-id', 'fake-user-id', None, update_totals=True) self.compare_obj(vol_usage, fake_vol_usage) class TestVolumeUsage(test_objects._LocalTest, _TestVolumeUsage): pass class TestRemoteVolumeUsage(test_objects._RemoteTest, _TestVolumeUsage): pass nova-13.0.0/nova/tests/unit/objects/test_block_device.py0000664000567000056710000005267212701407773024462 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import six from nova.cells import rpcapi as cells_rpcapi from nova import context from nova import db from nova import exception from nova import objects from nova.objects import block_device as block_device_obj from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_objects class _TestBlockDeviceMappingObject(object): def fake_bdm(self, instance=None): instance = instance or {} fake_bdm = fake_block_device.FakeDbBlockDeviceDict({ 'id': 123, 'instance_uuid': instance.get('uuid') or 'fake-instance', 'device_name': '/dev/sda2', 'source_type': 'snapshot', 'destination_type': 'volume', 'connection_info': "{'fake': 'connection_info'}", 'snapshot_id': 'fake-snapshot-id-1', 'boot_index': -1 }) if instance: fake_bdm['instance'] = instance return fake_bdm def _test_save(self, cell_type=None, update_device_name=False): if cell_type: self.flags(enable=True, cell_type=cell_type, group='cells') else: self.flags(enable=False, group='cells') create = False fake_bdm = self.fake_bdm() with test.nested( mock.patch.object( db, 'block_device_mapping_update', return_value=fake_bdm), mock.patch.object( cells_rpcapi.CellsAPI, 'bdm_update_or_create_at_top') ) as (bdm_update_mock, cells_update_mock): bdm_object = objects.BlockDeviceMapping(context=self.context) bdm_object.id = 123 bdm_object.volume_id = 'fake_volume_id' if update_device_name: bdm_object.device_name = '/dev/vda' create = None bdm_object.save() if update_device_name: bdm_update_mock.assert_called_once_with( self.context, 123, {'volume_id': 'fake_volume_id', 'device_name': '/dev/vda'}, legacy=False) else: bdm_update_mock.assert_called_once_with( self.context, 123, {'volume_id': 'fake_volume_id'}, legacy=False) if cell_type != 'compute': self.assertFalse(cells_update_mock.called) else: self.assertEqual(1, cells_update_mock.call_count) self.assertTrue(len(cells_update_mock.call_args[0]) > 1) 
self.assertIsInstance(cells_update_mock.call_args[0][1], block_device_obj.BlockDeviceMapping) self.assertEqual({'create': create}, cells_update_mock.call_args[1]) def test_save_nocells(self): self._test_save() def test_save_apicell(self): self._test_save(cell_type='api') def test_save_computecell(self): self._test_save(cell_type='compute') def test_save_computecell_device_name_changed(self): self._test_save(cell_type='compute', update_device_name=True) def test_save_instance_changed(self): bdm_object = objects.BlockDeviceMapping(context=self.context) bdm_object.instance = objects.Instance() self.assertRaises(exception.ObjectActionError, bdm_object.save) @mock.patch.object(db, 'block_device_mapping_update', return_value=None) def test_save_not_found(self, bdm_update): bdm_object = objects.BlockDeviceMapping(context=self.context) bdm_object.id = 123 self.assertRaises(exception.BDMNotFound, bdm_object.save) @mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id') def test_get_by_volume_id(self, get_by_vol_id): # NOTE(danms): Include two results to make sure the first was picked. # An invalid second item shouldn't be touched -- if it is, it'll # fail from_db_object(). 
get_by_vol_id.return_value = [self.fake_bdm(), None] vol_bdm = objects.BlockDeviceMapping.get_by_volume_id( self.context, 'fake-volume-id') for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS: self.assertFalse(vol_bdm.obj_attr_is_set(attr)) @mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id') def test_get_by_volume_id_not_found(self, get_by_vol_id): get_by_vol_id.return_value = None self.assertRaises(exception.VolumeBDMNotFound, objects.BlockDeviceMapping.get_by_volume_id, self.context, 'fake-volume-id') @mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id') def test_get_by_volume_instance_uuid_missmatch(self, get_by_vol_id): fake_bdm_vol = self.fake_bdm(instance={'uuid': 'other-fake-instance'}) get_by_vol_id.return_value = [fake_bdm_vol] self.assertRaises(exception.InvalidVolume, objects.BlockDeviceMapping.get_by_volume_id, self.context, 'fake-volume-id', instance_uuid='fake-instance') @mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id') def test_get_by_volume_id_with_expected(self, get_by_vol_id): get_by_vol_id.return_value = [self.fake_bdm( fake_instance.fake_db_instance())] vol_bdm = objects.BlockDeviceMapping.get_by_volume_id( self.context, 'fake-volume-id', expected_attrs=['instance']) for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS: self.assertTrue(vol_bdm.obj_attr_is_set(attr)) get_by_vol_id.assert_called_once_with(self.context, 'fake-volume-id', ['instance']) @mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id') def test_get_by_volume_returned_single(self, get_all): fake_bdm_vol = self.fake_bdm() get_all.return_value = [fake_bdm_vol] vol_bdm = objects.BlockDeviceMapping.get_by_volume( self.context, 'fake-volume-id') self.assertEqual(fake_bdm_vol['id'], vol_bdm.id) @mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id') def test_get_by_volume_returned_multiple(self, get_all): fake_bdm_vol1 = self.fake_bdm() fake_bdm_vol2 = self.fake_bdm() get_all.return_value = 
[fake_bdm_vol1, fake_bdm_vol2] self.assertRaises(exception.VolumeBDMIsMultiAttach, objects.BlockDeviceMapping.get_by_volume, self.context, 'fake-volume-id') @mock.patch.object(db, 'block_device_mapping_get_by_instance_and_volume_id') def test_get_by_instance_and_volume_id(self, mock_get): fake_inst = fake_instance.fake_db_instance() mock_get.return_value = self.fake_bdm(fake_inst) obj_bdm = objects.BlockDeviceMapping vol_bdm = obj_bdm.get_by_volume_and_instance( self.context, 'fake-volume-id', 'fake-instance-id') for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS: self.assertFalse(vol_bdm.obj_attr_is_set(attr)) @mock.patch.object(db, 'block_device_mapping_get_by_instance_and_volume_id') def test_test_get_by_instance_and_volume_id_with_expected(self, mock_get): fake_inst = fake_instance.fake_db_instance() mock_get.return_value = self.fake_bdm(fake_inst) obj_bdm = objects.BlockDeviceMapping vol_bdm = obj_bdm.get_by_volume_and_instance( self.context, 'fake-volume-id', fake_inst['uuid'], expected_attrs=['instance']) for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS: self.assertTrue(vol_bdm.obj_attr_is_set(attr)) mock_get.assert_called_once_with(self.context, 'fake-volume-id', fake_inst['uuid'], ['instance']) @mock.patch.object(db, 'block_device_mapping_get_by_instance_and_volume_id') def test_get_by_instance_and_volume_id_not_found(self, mock_get): mock_get.return_value = None obj_bdm = objects.BlockDeviceMapping self.assertRaises(exception.VolumeBDMNotFound, obj_bdm.get_by_volume_and_instance, self.context, 'fake-volume-id', 'fake-instance-id') def _test_create_mocked(self, cell_type=None, update_or_create=False, device_name=None): if cell_type: self.flags(enable=True, cell_type=cell_type, group='cells') else: self.flags(enable=False, group='cells') values = {'source_type': 'volume', 'volume_id': 'fake-vol-id', 'destination_type': 'volume', 'instance_uuid': 'fake-instance'} if device_name: values['device_name'] = device_name fake_bdm = 
fake_block_device.FakeDbBlockDeviceDict(values) with test.nested( mock.patch.object( db, 'block_device_mapping_create', return_value=fake_bdm), mock.patch.object( db, 'block_device_mapping_update_or_create', return_value=fake_bdm), mock.patch.object(cells_rpcapi.CellsAPI, 'bdm_update_or_create_at_top') ) as (bdm_create_mock, bdm_update_or_create_mock, cells_update_mock): bdm = objects.BlockDeviceMapping(context=self.context, **values) if update_or_create: method = bdm.update_or_create else: method = bdm.create if cell_type == 'api': self.assertRaises(exception.ObjectActionError, method) else: method() if update_or_create: bdm_update_or_create_mock.assert_called_once_with( self.context, values, legacy=False) else: bdm_create_mock.assert_called_once_with( self.context, values, legacy=False) if cell_type == 'compute' and 'device_name' in values: self.assertEqual(1, cells_update_mock.call_count) self.assertTrue(len(cells_update_mock.call_args[0]) > 1) self.assertEqual(self.context, cells_update_mock.call_args[0][0]) self.assertIsInstance(cells_update_mock.call_args[0][1], block_device_obj.BlockDeviceMapping) self.assertEqual({'create': update_or_create or None}, cells_update_mock.call_args[1]) else: self.assertFalse(cells_update_mock.called) def test_create_nocells(self): self._test_create_mocked() def test_update_or_create(self): self._test_create_mocked(update_or_create=True) def test_create_apicell(self): self._test_create_mocked(cell_type='api') def test_update_or_create_apicell(self): self._test_create_mocked(cell_type='api', update_or_create=True) def test_create_computecell(self): self._test_create_mocked(cell_type='compute') def test_update_or_create_computecell(self): self._test_create_mocked(cell_type='compute', update_or_create=True) def test_device_name_compute_cell(self): self._test_create_mocked(cell_type='compute', device_name='/dev/xvdb') def test_create(self): values = {'source_type': 'volume', 'volume_id': 'fake-vol-id', 'destination_type': 'volume', 
'instance_uuid': 'fake-instance'} bdm = objects.BlockDeviceMapping(context=self.context, **values) with mock.patch.object(cells_rpcapi.CellsAPI, 'bdm_update_or_create_at_top'): bdm.create() for k, v in six.iteritems(values): self.assertEqual(v, getattr(bdm, k)) def test_create_fails(self): values = {'source_type': 'volume', 'volume_id': 'fake-vol-id', 'destination_type': 'volume', 'instance_uuid': 'fake-instance'} bdm = objects.BlockDeviceMapping(context=self.context, **values) bdm.create() self.assertRaises(exception.ObjectActionError, bdm.create) def test_create_fails_instance(self): values = {'source_type': 'volume', 'volume_id': 'fake-vol-id', 'destination_type': 'volume', 'instance_uuid': 'fake-instance', 'instance': objects.Instance()} bdm = objects.BlockDeviceMapping(context=self.context, **values) self.assertRaises(exception.ObjectActionError, bdm.create) def _test_destroy_mocked(self, cell_type=None): values = {'source_type': 'volume', 'volume_id': 'fake-vol-id', 'destination_type': 'volume', 'id': 1, 'instance_uuid': 'fake-instance', 'device_name': 'fake'} if cell_type: self.flags(enable=True, cell_type=cell_type, group='cells') else: self.flags(enable=False, group='cells') with test.nested( mock.patch.object(db, 'block_device_mapping_destroy'), mock.patch.object(cells_rpcapi.CellsAPI, 'bdm_destroy_at_top') ) as (bdm_del, cells_destroy): bdm = objects.BlockDeviceMapping(context=self.context, **values) bdm.destroy() bdm_del.assert_called_once_with(self.context, values['id']) if cell_type != 'compute': self.assertFalse(cells_destroy.called) else: cells_destroy.assert_called_once_with( self.context, values['instance_uuid'], device_name=values['device_name'], volume_id=values['volume_id']) def test_destroy_nocells(self): self._test_destroy_mocked() def test_destroy_apicell(self): self._test_destroy_mocked(cell_type='api') def test_destroy_computecell(self): self._test_destroy_mocked(cell_type='compute') def test_is_image_true(self): bdm = 
objects.BlockDeviceMapping(context=self.context, source_type='image') self.assertTrue(bdm.is_image) def test_is_image_false(self): bdm = objects.BlockDeviceMapping(context=self.context, source_type='snapshot') self.assertFalse(bdm.is_image) def test_is_volume_true(self): bdm = objects.BlockDeviceMapping(context=self.context, destination_type='volume') self.assertTrue(bdm.is_volume) def test_is_volume_false(self): bdm = objects.BlockDeviceMapping(context=self.context, destination_type='local') self.assertFalse(bdm.is_volume) class TestBlockDeviceMappingObject(test_objects._LocalTest, _TestBlockDeviceMappingObject): pass class TestRemoteBlockDeviceMappingObject(test_objects._RemoteTest, _TestBlockDeviceMappingObject): pass class _TestBlockDeviceMappingListObject(object): def fake_bdm(self, bdm_id, boot_index=-1, instance_uuid='fake-instance'): fake_bdm = fake_block_device.FakeDbBlockDeviceDict({ 'id': bdm_id, 'boot_index': boot_index, 'instance_uuid': instance_uuid, 'device_name': '/dev/sda2', 'source_type': 'snapshot', 'destination_type': 'volume', 'connection_info': "{'fake': 'connection_info'}", 'snapshot_id': 'fake-snapshot-id-1', }) return fake_bdm @mock.patch.object(db, 'block_device_mapping_get_all_by_instance_uuids') def test_bdms_by_instance_uuid(self, get_all_by_inst_uuids): fakes = [self.fake_bdm(123), self.fake_bdm(456)] get_all_by_inst_uuids.return_value = fakes bdms_by_uuid = objects.BlockDeviceMappingList.bdms_by_instance_uuid( self.context, ['fake-instance']) self.assertEqual(['fake-instance'], list(bdms_by_uuid.keys())) self.assertIsInstance( bdms_by_uuid['fake-instance'], objects.BlockDeviceMappingList) for faked, got in zip(fakes, bdms_by_uuid['fake-instance']): self.assertIsInstance(got, objects.BlockDeviceMapping) self.assertEqual(faked['id'], got.id) @mock.patch.object(db, 'block_device_mapping_get_all_by_instance_uuids') def test_bdms_by_instance_uuid_no_result(self, get_all_by_inst_uuids): get_all_by_inst_uuids.return_value = None bdms_by_uuid 
= objects.BlockDeviceMappingList.bdms_by_instance_uuid( self.context, ['fake-instance']) self.assertEqual({}, bdms_by_uuid) @mock.patch.object(db, 'block_device_mapping_get_all_by_instance_uuids') def test_get_by_instance_uuids(self, get_all_by_inst_uuids): fakes = [self.fake_bdm(123), self.fake_bdm(456)] get_all_by_inst_uuids.return_value = fakes bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuids( self.context, ['fake-instance']) for faked, got in zip(fakes, bdm_list): self.assertIsInstance(got, objects.BlockDeviceMapping) self.assertEqual(faked['id'], got.id) @mock.patch.object(db, 'block_device_mapping_get_all_by_instance_uuids') def test_get_by_instance_uuids_no_result(self, get_all_by_inst_uuids): get_all_by_inst_uuids.return_value = None bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuids( self.context, ['fake-instance']) self.assertEqual(0, len(bdm_list)) @mock.patch.object(db, 'block_device_mapping_get_all_by_instance') def test_get_by_instance_uuid(self, get_all_by_inst): fakes = [self.fake_bdm(123), self.fake_bdm(456)] get_all_by_inst.return_value = fakes bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, 'fake-instance') for faked, got in zip(fakes, bdm_list): self.assertIsInstance(got, objects.BlockDeviceMapping) self.assertEqual(faked['id'], got.id) @mock.patch.object(db, 'block_device_mapping_get_all_by_instance') def test_get_by_instance_uuid_no_result(self, get_all_by_inst): get_all_by_inst.return_value = None bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, 'fake-instance') self.assertEqual(0, len(bdm_list)) @mock.patch.object(db, 'block_device_mapping_get_all_by_instance') def test_root_bdm(self, get_all_by_inst): fakes = [self.fake_bdm(123), self.fake_bdm(456, boot_index=0)] get_all_by_inst.return_value = fakes bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, 'fake-instance') self.assertEqual(456, bdm_list.root_bdm().id) 
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance') def test_root_bdm_empty_bdm_list(self, get_all_by_inst): get_all_by_inst.return_value = None bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, 'fake-instance') self.assertIsNone(bdm_list.root_bdm()) @mock.patch.object(db, 'block_device_mapping_get_all_by_instance') def test_root_bdm_undefined(self, get_all_by_inst): fakes = [ self.fake_bdm(123, instance_uuid='uuid_1'), self.fake_bdm(456, instance_uuid='uuid_2') ] get_all_by_inst.return_value = fakes bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, 'fake-instance') self.assertRaises(exception.UndefinedRootBDM, bdm_list.root_bdm) class TestBlockDeviceMappingListObject(test_objects._LocalTest, _TestBlockDeviceMappingListObject): pass class TestRemoteBlockDeviceMappingListObject( test_objects._RemoteTest, _TestBlockDeviceMappingListObject): pass class TestBlockDeviceUtils(test.NoDBTestCase): def test_make_list_from_dicts(self): ctx = context.get_admin_context() dicts = [{'id': 1}, {'id': 2}] objs = block_device_obj.block_device_make_list_from_dicts(ctx, dicts) self.assertIsInstance(objs, block_device_obj.BlockDeviceMappingList) self.assertEqual(2, len(objs)) self.assertEqual(1, objs[0].id) self.assertEqual(2, objs[1].id) def test_make_list_from_dicts_empty(self): ctx = context.get_admin_context() objs = block_device_obj.block_device_make_list_from_dicts(ctx, []) self.assertIsInstance(objs, block_device_obj.BlockDeviceMappingList) self.assertEqual(0, len(objs)) nova-13.0.0/nova/tests/unit/objects/test_notification.py0000664000567000056710000002222012701407773024521 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import timeutils from nova import objects from nova.objects import base from nova.objects import fields from nova.objects import notification from nova import test class TestNotificationBase(test.NoDBTestCase): @base.NovaObjectRegistry.register_if(False) class TestObject(base.NovaObject): VERSION = '1.0' fields = { 'field_1': fields.StringField(), 'field_2': fields.IntegerField(), 'not_important_field': fields.IntegerField(), } @base.NovaObjectRegistry.register_if(False) class TestNotificationPayload(notification.NotificationPayloadBase): VERSION = '1.0' SCHEMA = { 'field_1': ('source_field', 'field_1'), 'field_2': ('source_field', 'field_2'), } fields = { 'extra_field': fields.StringField(), # filled by ctor 'field_1': fields.StringField(), # filled by the schema 'field_2': fields.IntegerField(), # filled by the schema } def populate_schema(self, source_field): super(TestNotificationBase.TestNotificationPayload, self).populate_schema(source_field=source_field) @base.NovaObjectRegistry.register_if(False) class TestNotificationPayloadEmptySchema( notification.NotificationPayloadBase): VERSION = '1.0' fields = { 'extra_field': fields.StringField(), # filled by ctor } @base.NovaObjectRegistry.register_if(False) class TestNotification(notification.NotificationBase): VERSION = '1.0' fields = { 'payload': fields.ObjectField('TestNotificationPayload') } @base.NovaObjectRegistry.register_if(False) class TestNotificationEmptySchema(notification.NotificationBase): VERSION = '1.0' fields = { 'payload': 
fields.ObjectField('TestNotificationPayloadEmptySchema') } fake_service = { 'created_at': timeutils.utcnow().replace(microsecond=0), 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 123, 'host': 'fake-host', 'binary': 'nova-fake', 'topic': 'fake-service-topic', 'report_count': 1, 'forced_down': False, 'disabled': False, 'disabled_reason': None, 'last_seen_up': None, 'version': 1} expected_payload = { 'nova_object.name': 'TestNotificationPayload', 'nova_object.data': { 'extra_field': 'test string', 'field_1': 'test1', 'field_2': 42}, 'nova_object.version': '1.0', 'nova_object.namespace': 'nova'} def setUp(self): super(TestNotificationBase, self).setUp() with mock.patch('nova.db.service_update') as mock_db_service_update: self.service_obj = objects.Service(context=mock.sentinel.context, id=self.fake_service['id']) self.service_obj.obj_reset_changes(['version']) mock_db_service_update.return_value = self.fake_service self.service_obj.save() self.my_obj = self.TestObject(field_1='test1', field_2=42, not_important_field=13) self.payload = self.TestNotificationPayload( extra_field='test string') self.payload.populate_schema(source_field=self.my_obj) self.notification = self.TestNotification( event_type=notification.EventType( object='test_object', action=fields.NotificationAction.UPDATE, phase=fields.NotificationPhase.START), publisher=notification.NotificationPublisher.from_service_obj( self.service_obj), priority=fields.NotificationPriority.INFO, payload=self.payload) def _verify_notification(self, mock_notifier, mock_context, expected_event_type, expected_payload): mock_notifier.prepare.assert_called_once_with( publisher_id='nova-fake:fake-host') mock_notify = mock_notifier.prepare.return_value.info self.assertTrue(mock_notify.called) self.assertEqual(mock_notify.call_args[0][0], mock_context) self.assertEqual(mock_notify.call_args[1]['event_type'], expected_event_type) actual_payload = mock_notify.call_args[1]['payload'] 
self.assertJsonEqual(expected_payload, actual_payload) @mock.patch('nova.rpc.LEGACY_NOTIFIER') @mock.patch('nova.rpc.NOTIFIER') def test_emit_notification(self, mock_notifier, mock_legacy): mock_context = mock.Mock() mock_context.to_dict.return_value = {} self.notification.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update.start', expected_payload=self.expected_payload) self.assertFalse(mock_legacy.called) @mock.patch('nova.rpc.NOTIFIER') def test_emit_with_host_and_binary_as_publisher(self, mock_notifier): noti = self.TestNotification( event_type=notification.EventType( object='test_object', action=fields.NotificationAction.UPDATE), publisher=notification.NotificationPublisher(host='fake-host', binary='nova-fake'), priority=fields.NotificationPriority.INFO, payload=self.payload) mock_context = mock.Mock() mock_context.to_dict.return_value = {} noti.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update', expected_payload=self.expected_payload) @mock.patch('nova.rpc.LEGACY_NOTIFIER') @mock.patch('nova.rpc.NOTIFIER') def test_emit_event_type_without_phase(self, mock_notifier, mock_legacy): noti = self.TestNotification( event_type=notification.EventType( object='test_object', action=fields.NotificationAction.UPDATE), publisher=notification.NotificationPublisher.from_service_obj( self.service_obj), priority=fields.NotificationPriority.INFO, payload=self.payload) mock_context = mock.Mock() mock_context.to_dict.return_value = {} noti.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update', expected_payload=self.expected_payload) self.assertFalse(mock_legacy.called) @mock.patch('nova.rpc.NOTIFIER') def test_not_possible_to_emit_if_not_populated(self, mock_notifier): non_populated_payload = self.TestNotificationPayload( extra_field='test string') noti = self.TestNotification( 
event_type=notification.EventType( object='test_object', action=fields.NotificationAction.UPDATE), publisher=notification.NotificationPublisher.from_service_obj( self.service_obj), priority=fields.NotificationPriority.INFO, payload=non_populated_payload) mock_context = mock.Mock() self.assertRaises(AssertionError, noti.emit, mock_context) self.assertFalse(mock_notifier.called) @mock.patch('nova.rpc.NOTIFIER') def test_empty_schema(self, mock_notifier): non_populated_payload = self.TestNotificationPayloadEmptySchema( extra_field='test string') noti = self.TestNotificationEmptySchema( event_type=notification.EventType( object='test_object', action=fields.NotificationAction.UPDATE), publisher=notification.NotificationPublisher.from_service_obj( self.service_obj), priority=fields.NotificationPriority.INFO, payload=non_populated_payload) mock_context = mock.Mock() mock_context.to_dict.return_value = {} noti.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update', expected_payload= {'nova_object.name': 'TestNotificationPayloadEmptySchema', 'nova_object.data': {'extra_field': u'test string'}, 'nova_object.version': '1.0', 'nova_object.namespace': 'nova'}) nova-13.0.0/nova/tests/unit/objects/test_hv_spec.py0000664000567000056710000000440412701407773023466 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.compute import arch from nova.compute import hv_type from nova.compute import vm_mode from nova import objects from nova.tests.unit.objects import test_objects spec_dict = { 'arch': arch.I686, 'hv_type': hv_type.KVM, 'vm_mode': vm_mode.HVM } spec_list = [ arch.I686, hv_type.KVM, vm_mode.HVM ] spec_dict_vz = { 'arch': arch.I686, 'hv_type': hv_type.VIRTUOZZO, 'vm_mode': vm_mode.HVM } spec_dict_parallels = { 'arch': arch.I686, 'hv_type': hv_type.PARALLELS, 'vm_mode': vm_mode.HVM } class _TestHVSpecObject(object): def test_hv_spec_from_list(self): spec_obj = objects.HVSpec.from_list(spec_list) self.compare_obj(spec_obj, spec_dict) def test_hv_spec_to_list(self): spec_obj = objects.HVSpec() spec_obj.arch = arch.I686 spec_obj.hv_type = hv_type.KVM spec_obj.vm_mode = vm_mode.HVM spec = spec_obj.to_list() self.assertEqual(spec_list, spec) def test_hv_spec_obj_make_compatible(self): spec_dict_vz_copy = spec_dict_vz.copy() # check 1.1->1.0 compatibility objects.HVSpec().obj_make_compatible(spec_dict_vz_copy, '1.0') self.assertEqual(spec_dict_parallels, spec_dict_vz_copy) # check that nothing changed objects.HVSpec().obj_make_compatible(spec_dict_vz_copy, '1.1') self.assertEqual(spec_dict_parallels, spec_dict_vz_copy) class TestHVSpecObject(test_objects._LocalTest, _TestHVSpecObject): pass class TestRemoteHVSpecObject(test_objects._RemoteTest, _TestHVSpecObject): pass nova-13.0.0/nova/tests/unit/objects/test_tag.py0000664000567000056710000000725612701407773022622 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from nova.objects import tag from nova.tests.unit.objects import test_objects RESOURCE_ID = '123' TAG_NAME1 = 'fake-tag1' TAG_NAME2 = 'fake-tag2' fake_tag1 = { 'resource_id': RESOURCE_ID, 'tag': TAG_NAME1, } fake_tag2 = { 'resource_id': RESOURCE_ID, 'tag': TAG_NAME1, } fake_tag_list = [fake_tag1, fake_tag2] def _get_tag(resource_id, tag_name, context=None): t = tag.Tag(context=context) t.resource_id = resource_id t.tag = tag_name return t class _TestTagObject(object): @mock.patch('nova.db.instance_tag_add') def test_create(self, tag_add): tag_add.return_value = fake_tag1 tag_obj = _get_tag(RESOURCE_ID, TAG_NAME1, context=self.context) tag_obj.create() tag_add.assert_called_once_with(self.context, RESOURCE_ID, TAG_NAME1) self.compare_obj(tag_obj, fake_tag1) @mock.patch('nova.db.instance_tag_delete') def test_destroy(self, tag_delete): tag.Tag.destroy(self.context, RESOURCE_ID, TAG_NAME1) tag_delete.assert_called_once_with(self.context, RESOURCE_ID, TAG_NAME1) @mock.patch('nova.db.instance_tag_exists') def test_exists(self, instance_tag_exists): tag.Tag.exists(self.context, RESOURCE_ID, TAG_NAME1) instance_tag_exists.assert_called_once_with( self.context, RESOURCE_ID, TAG_NAME1) class TestMigrationObject(test_objects._LocalTest, _TestTagObject): pass class TestRemoteMigrationObject(test_objects._RemoteTest, _TestTagObject): pass class _TestTagList(object): def _compare_tag_list(self, tag_list, tag_list_obj): self.assertEqual(len(tag_list), len(tag_list_obj)) for obj, fake in zip(tag_list_obj, tag_list): self.assertIsInstance(obj, tag.Tag) self.assertEqual(obj.tag, fake['tag']) self.assertEqual(obj.resource_id, fake['resource_id']) @mock.patch('nova.db.instance_tag_get_by_instance_uuid') def test_get_by_resource_id(self, get_by_inst): get_by_inst.return_value = fake_tag_list tag_list_obj = tag.TagList.get_by_resource_id( self.context, RESOURCE_ID) 
get_by_inst.assert_called_once_with(self.context, RESOURCE_ID) self._compare_tag_list(fake_tag_list, tag_list_obj) @mock.patch('nova.db.instance_tag_set') def test_create(self, tag_set): tag_set.return_value = fake_tag_list tag_list_obj = tag.TagList.create( self.context, RESOURCE_ID, [TAG_NAME1, TAG_NAME2]) tag_set.assert_called_once_with(self.context, RESOURCE_ID, [TAG_NAME1, TAG_NAME2]) self._compare_tag_list(fake_tag_list, tag_list_obj) @mock.patch('nova.db.instance_tag_delete_all') def test_destroy(self, tag_delete_all): tag.TagList.destroy(self.context, RESOURCE_ID) tag_delete_all.assert_called_once_with(self.context, RESOURCE_ID) class TestTagList(test_objects._LocalTest, _TestTagList): pass class TestTagListRemote(test_objects._RemoteTest, _TestTagList): pass nova-13.0.0/nova/tests/unit/objects/test_instance_mapping.py0000664000567000056710000001114412701410011025330 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_utils import uuidutils from nova import objects from nova.objects import instance_mapping from nova.tests.unit.objects import test_objects def get_db_mapping(**updates): db_mapping = { 'id': 1, 'instance_uuid': uuidutils.generate_uuid(), 'cell_id': 42, 'project_id': 'fake-project', 'created_at': None, 'updated_at': None, } db_mapping.update(updates) return db_mapping class _TestInstanceMappingObject(object): @mock.patch.object(instance_mapping.InstanceMapping, '_get_by_instance_uuid_from_db') def test_get_by_instance_uuid(self, uuid_from_db): db_mapping = get_db_mapping() uuid_from_db.return_value = db_mapping mapping_obj = objects.InstanceMapping().get_by_instance_uuid( self.context, db_mapping['instance_uuid']) uuid_from_db.assert_called_once_with(self.context, db_mapping['instance_uuid']) self.compare_obj(mapping_obj, db_mapping) @mock.patch.object(instance_mapping.InstanceMapping, '_create_in_db') def test_create(self, create_in_db): db_mapping = get_db_mapping() uuid = db_mapping['instance_uuid'] create_in_db.return_value = db_mapping mapping_obj = objects.InstanceMapping(self.context) mapping_obj.instance_uuid = uuid mapping_obj.cell_id = db_mapping['cell_id'] mapping_obj.project_id = db_mapping['project_id'] mapping_obj.create() create_in_db.assert_called_once_with(self.context, {'instance_uuid': uuid, 'cell_id': db_mapping['cell_id'], 'project_id': db_mapping['project_id']}) self.compare_obj(mapping_obj, db_mapping) @mock.patch.object(instance_mapping.InstanceMapping, '_save_in_db') def test_save(self, save_in_db): db_mapping = get_db_mapping() uuid = db_mapping['instance_uuid'] save_in_db.return_value = db_mapping mapping_obj = objects.InstanceMapping(self.context) mapping_obj.instance_uuid = uuid mapping_obj.cell_id = 3 mapping_obj.save() save_in_db.assert_called_once_with(self.context, db_mapping['instance_uuid'], {'cell_id': 3, 'instance_uuid': uuid}) self.compare_obj(mapping_obj, db_mapping) 
@mock.patch.object(instance_mapping.InstanceMapping, '_destroy_in_db') def test_destroy(self, destroy_in_db): uuid = uuidutils.generate_uuid() mapping_obj = objects.InstanceMapping(self.context) mapping_obj.instance_uuid = uuid mapping_obj.destroy() destroy_in_db.assert_called_once_with(self.context, uuid) def test_cell_id_nullable(self): mapping_obj = objects.InstanceMapping(self.context) # Just ensure this doesn't raise an exception mapping_obj.cell_id = None class TestInstanceMappingObject(test_objects._LocalTest, _TestInstanceMappingObject): pass class TestRemoteInstanceMappingObject(test_objects._RemoteTest, _TestInstanceMappingObject): pass class _TestInstanceMappingListObject(object): @mock.patch.object(instance_mapping.InstanceMappingList, '_get_by_project_id_from_db') def test_get_by_project_id(self, project_id_from_db): db_mapping = get_db_mapping() project_id_from_db.return_value = [db_mapping] mapping_obj = objects.InstanceMappingList().get_by_project_id( self.context, db_mapping['project_id']) project_id_from_db.assert_called_once_with(self.context, db_mapping['project_id']) self.compare_obj(mapping_obj.objects[0], db_mapping) class TestInstanceMappingListObject(test_objects._LocalTest, _TestInstanceMappingListObject): pass class TestRemoteInstanceMappingListObject(test_objects._RemoteTest, _TestInstanceMappingListObject): pass nova-13.0.0/nova/tests/unit/objects/test_bandwidth_usage.py0000664000567000056710000001364612701410011025152 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 import mock from oslo_utils import timeutils from nova import context from nova import db from nova.objects import bandwidth_usage from nova import test from nova.tests.unit.objects import test_objects class _TestBandwidthUsage(test.TestCase): def setUp(self): super(_TestBandwidthUsage, self).setUp() self.user_id = 'fake_user' self.project_id = 'fake_project' self.context = context.RequestContext(self.user_id, self.project_id) now, start_period = self._time_now_and_start_period() self.expected_bw_usage = self._fake_bw_usage( time=now, start_period=start_period) @staticmethod def _compare(test, db, obj, ignored_fields=None): if ignored_fields is None: ignored_fields = [] for field, value in db.items(): if field in ignored_fields: continue obj_field = field if obj_field == 'uuid': obj_field = 'instance_uuid' test.assertEqual(db[field], obj[obj_field], 'Field %s is not equal' % field) @staticmethod def _fake_bw_usage(time=None, start_period=None, bw_in=100, bw_out=200, last_ctr_in=12345, last_ctr_out=67890): fake_bw_usage = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'uuid': 'fake_uuid1', 'mac': 'fake_mac1', 'start_period': start_period, 'bw_in': bw_in, 'bw_out': bw_out, 'last_ctr_in': last_ctr_in, 'last_ctr_out': last_ctr_out, 'last_refreshed': time } return fake_bw_usage @staticmethod def _time_now_and_start_period(): now = timeutils.utcnow().replace(tzinfo=iso8601.iso8601.Utc(), microsecond=0) start_period = now - datetime.timedelta(seconds=10) return now, start_period @mock.patch.object(db, 'bw_usage_get') def test_get_by_instance_uuid_and_mac(self, mock_get): mock_get.return_value = self.expected_bw_usage bw_usage = bandwidth_usage.BandwidthUsage.get_by_instance_uuid_and_mac( self.context, 'fake_uuid', 'fake_mac', start_period=self.expected_bw_usage['start_period']) self._compare(self, 
self.expected_bw_usage, bw_usage) @mock.patch.object(db, 'bw_usage_get_by_uuids') def test_get_by_uuids(self, mock_get_by_uuids): mock_get_by_uuids.return_value = [self.expected_bw_usage] bw_usages = bandwidth_usage.BandwidthUsageList.get_by_uuids( self.context, ['fake_uuid'], start_period=self.expected_bw_usage['start_period']) self.assertEqual(1, len(bw_usages)) self._compare(self, self.expected_bw_usage, bw_usages[0]) @mock.patch.object(db, 'bw_usage_update') def test_create(self, mock_create): mock_create.return_value = self.expected_bw_usage bw_usage = bandwidth_usage.BandwidthUsage(context=self.context) bw_usage.create('fake_uuid', 'fake_mac', 100, 200, 12345, 67890, start_period=self.expected_bw_usage['start_period']) self._compare(self, self.expected_bw_usage, bw_usage) def test_update_with_db(self): expected_bw_usage1 = self._fake_bw_usage( time=self.expected_bw_usage['last_refreshed'], start_period=self.expected_bw_usage['start_period'], last_ctr_in=42, last_ctr_out=42) bw_usage = bandwidth_usage.BandwidthUsage(context=self.context) bw_usage.create('fake_uuid1', 'fake_mac1', 100, 200, 42, 42, start_period=self.expected_bw_usage['start_period']) self._compare(self, expected_bw_usage1, bw_usage, ignored_fields=['last_refreshed', 'created_at']) bw_usage.create('fake_uuid1', 'fake_mac1', 100, 200, 12345, 67890, start_period=self.expected_bw_usage['start_period']) self._compare(self, self.expected_bw_usage, bw_usage, ignored_fields=['last_refreshed', 'created_at', 'updated_at']) @mock.patch.object(db, 'bw_usage_update') def test_update(self, mock_update): expected_bw_usage1 = self._fake_bw_usage( time=self.expected_bw_usage['last_refreshed'], start_period=self.expected_bw_usage['start_period'], last_ctr_in=42, last_ctr_out=42) mock_update.side_effect = [expected_bw_usage1, self.expected_bw_usage] bw_usage = bandwidth_usage.BandwidthUsage(context=self.context) bw_usage.create('fake_uuid1', 'fake_mac1', 100, 200, 42, 42, 
start_period=self.expected_bw_usage['start_period']) self._compare(self, expected_bw_usage1, bw_usage) bw_usage.create('fake_uuid1', 'fake_mac1', 100, 200, 12345, 67890, start_period=self.expected_bw_usage['start_period']) self._compare(self, self.expected_bw_usage, bw_usage) class TestBandwidthUsageObject(test_objects._LocalTest, _TestBandwidthUsage): pass class TestRemoteBandwidthUsageObject(test_objects._RemoteTest, _TestBandwidthUsage): pass nova-13.0.0/nova/tests/unit/objects/test_fixed_ip.py0000664000567000056710000004053012701407773023626 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import iso8601 import mock import netaddr from oslo_utils import timeutils from oslo_versionedobjects import base as ovo_base from nova import exception from nova.objects import fixed_ip from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_network from nova.tests.unit.objects import test_objects from nova import utils fake_fixed_ip = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 123, 'address': '192.168.1.100', 'network_id': None, 'virtual_interface_id': None, 'instance_uuid': None, 'allocated': False, 'leased': False, 'reserved': False, 'host': None, 'network': None, 'virtual_interface': None, 'floating_ips': [], } class _TestFixedIPObject(object): def _compare(self, obj, db_obj): for field in obj.fields: if field in ('default_route', 'floating_ips'): continue if field in fixed_ip.FIXED_IP_OPTIONAL_ATTRS: if obj.obj_attr_is_set(field) and db_obj[field] is not None: obj_val = obj[field].uuid db_val = db_obj[field]['uuid'] else: continue else: obj_val = obj[field] db_val = db_obj[field] if isinstance(obj_val, netaddr.IPAddress): obj_val = str(obj_val) self.assertEqual(db_val, obj_val) @mock.patch('nova.db.fixed_ip_get') def test_get_by_id(self, get): get.return_value = fake_fixed_ip fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123) get.assert_called_once_with(self.context, 123, get_network=False) self._compare(fixedip, fake_fixed_ip) @mock.patch('nova.db.fixed_ip_get') @mock.patch('nova.db.network_get') def test_get_by_id_with_extras(self, network_get, fixed_get): db_fixed = dict(fake_fixed_ip, network=test_network.fake_network) fixed_get.return_value = db_fixed fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123, expected_attrs=['network']) fixed_get.assert_called_once_with(self.context, 123, get_network=True) self._compare(fixedip, db_fixed) self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid) self.assertFalse(network_get.called) 
@mock.patch('nova.db.fixed_ip_get_by_address') def test_get_by_address(self, get): get.return_value = fake_fixed_ip fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4') get.assert_called_once_with(self.context, '1.2.3.4', columns_to_join=[]) self._compare(fixedip, fake_fixed_ip) @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.instance_get') def test_get_by_address_with_extras(self, instance_get, network_get, fixed_get): db_fixed = dict(fake_fixed_ip, network=test_network.fake_network, instance=fake_instance.fake_db_instance()) fixed_get.return_value = db_fixed fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4', expected_attrs=['network', 'instance']) fixed_get.assert_called_once_with(self.context, '1.2.3.4', columns_to_join=['network', 'instance']) self._compare(fixedip, db_fixed) self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid) self.assertEqual(db_fixed['instance']['uuid'], fixedip.instance.uuid) self.assertFalse(network_get.called) self.assertFalse(instance_get.called) @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.instance_get') def test_get_by_address_with_extras_deleted_instance(self, instance_get, network_get, fixed_get): db_fixed = dict(fake_fixed_ip, network=test_network.fake_network, instance=None) fixed_get.return_value = db_fixed fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4', expected_attrs=['network', 'instance']) fixed_get.assert_called_once_with(self.context, '1.2.3.4', columns_to_join=['network', 'instance']) self._compare(fixedip, db_fixed) self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid) self.assertIsNone(fixedip.instance) self.assertFalse(network_get.called) self.assertFalse(instance_get.called) @mock.patch('nova.db.fixed_ip_get_by_floating_address') def test_get_by_floating_address(self, get): get.return_value = fake_fixed_ip fixedip = 
fixed_ip.FixedIP.get_by_floating_address(self.context, '1.2.3.4') get.assert_called_once_with(self.context, '1.2.3.4') self._compare(fixedip, fake_fixed_ip) @mock.patch('nova.db.fixed_ip_get_by_floating_address') def test_get_by_floating_address_none(self, get): get.return_value = None fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context, '1.2.3.4') get.assert_called_once_with(self.context, '1.2.3.4') self.assertIsNone(fixedip) @mock.patch('nova.db.fixed_ip_get_by_network_host') def test_get_by_network_and_host(self, get): get.return_value = fake_fixed_ip fixedip = fixed_ip.FixedIP.get_by_network_and_host(self.context, 123, 'host') get.assert_called_once_with(self.context, 123, 'host') self._compare(fixedip, fake_fixed_ip) @mock.patch('nova.db.fixed_ip_associate') def test_associate(self, associate): associate.return_value = fake_fixed_ip fixedip = fixed_ip.FixedIP.associate(self.context, '1.2.3.4', 'fake-uuid') associate.assert_called_with(self.context, '1.2.3.4', 'fake-uuid', network_id=None, reserved=False, virtual_interface_id=None) self._compare(fixedip, fake_fixed_ip) @mock.patch('nova.db.fixed_ip_associate') def test_associate_with_vif(self, associate): associate.return_value = fake_fixed_ip fixedip = fixed_ip.FixedIP.associate(self.context, '1.2.3.4', 'fake-uuid', vif_id=0) associate.assert_called_with(self.context, '1.2.3.4', 'fake-uuid', network_id=None, reserved=False, virtual_interface_id=0) self._compare(fixedip, fake_fixed_ip) @mock.patch('nova.db.fixed_ip_associate_pool') def test_associate_pool(self, associate): associate.return_value = fake_fixed_ip fixedip = fixed_ip.FixedIP.associate_pool(self.context, 123, 'fake-uuid', 'host') associate.assert_called_with(self.context, 123, instance_uuid='fake-uuid', host='host', virtual_interface_id=None) self._compare(fixedip, fake_fixed_ip) @mock.patch('nova.db.fixed_ip_associate_pool') def test_associate_pool_with_vif(self, associate): associate.return_value = fake_fixed_ip fixedip = 
fixed_ip.FixedIP.associate_pool(self.context, 123, 'fake-uuid', 'host', vif_id=0) associate.assert_called_with(self.context, 123, instance_uuid='fake-uuid', host='host', virtual_interface_id=0) self._compare(fixedip, fake_fixed_ip) @mock.patch('nova.db.fixed_ip_disassociate') def test_disassociate_by_address(self, disassociate): fixed_ip.FixedIP.disassociate_by_address(self.context, '1.2.3.4') disassociate.assert_called_with(self.context, '1.2.3.4') @mock.patch('nova.db.fixed_ip_disassociate_all_by_timeout') def test_disassociate_all_by_timeout(self, disassociate): now = timeutils.utcnow() now_tz = timeutils.parse_isotime( utils.isotime(now)).replace( tzinfo=iso8601.iso8601.Utc()) disassociate.return_value = 123 result = fixed_ip.FixedIP.disassociate_all_by_timeout(self.context, 'host', now) self.assertEqual(123, result) # NOTE(danms): be pedantic about timezone stuff args, kwargs = disassociate.call_args_list[0] self.assertEqual(now_tz, args[2]) self.assertEqual((self.context, 'host'), args[:2]) self.assertEqual({}, kwargs) @mock.patch('nova.db.fixed_ip_create') def test_create(self, create): create.return_value = fake_fixed_ip fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4') fixedip.create() create.assert_called_once_with( self.context, {'address': '1.2.3.4'}) self._compare(fixedip, fake_fixed_ip) @mock.patch('nova.db.fixed_ip_update') def test_save(self, update): update.return_value = fake_fixed_ip fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4', instance_uuid='fake-uuid') self.assertRaises(exception.ObjectActionError, fixedip.save) fixedip.obj_reset_changes(['address']) fixedip.save() update.assert_called_once_with(self.context, '1.2.3.4', {'instance_uuid': 'fake-uuid'}) @mock.patch('nova.db.fixed_ip_disassociate') def test_disassociate(self, disassociate): fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4', instance_uuid='fake-uuid') fixedip.obj_reset_changes() fixedip.disassociate() 
disassociate.assert_called_once_with(self.context, '1.2.3.4') self.assertIsNone(fixedip.instance_uuid) @mock.patch('nova.db.fixed_ip_get_all') def test_get_all(self, get_all): get_all.return_value = [fake_fixed_ip] fixedips = fixed_ip.FixedIPList.get_all(self.context) self.assertEqual(1, len(fixedips)) get_all.assert_called_once_with(self.context) self._compare(fixedips[0], fake_fixed_ip) @mock.patch('nova.db.fixed_ip_get_by_instance') def test_get_by_instance(self, get): get.return_value = [fake_fixed_ip] fixedips = fixed_ip.FixedIPList.get_by_instance_uuid(self.context, 'fake-uuid') self.assertEqual(1, len(fixedips)) get.assert_called_once_with(self.context, 'fake-uuid') self._compare(fixedips[0], fake_fixed_ip) @mock.patch('nova.db.fixed_ip_get_by_host') def test_get_by_host(self, get): get.return_value = [fake_fixed_ip] fixedips = fixed_ip.FixedIPList.get_by_host(self.context, 'host') self.assertEqual(1, len(fixedips)) get.assert_called_once_with(self.context, 'host') self._compare(fixedips[0], fake_fixed_ip) @mock.patch('nova.db.fixed_ips_by_virtual_interface') def test_get_by_virtual_interface_id(self, get): get.return_value = [fake_fixed_ip] fixedips = fixed_ip.FixedIPList.get_by_virtual_interface_id( self.context, 123) self.assertEqual(1, len(fixedips)) get.assert_called_once_with(self.context, 123) self._compare(fixedips[0], fake_fixed_ip) def test_floating_ips_do_not_lazy_load(self): fixedip = fixed_ip.FixedIP() self.assertRaises(NotImplementedError, lambda: fixedip.floating_ips) @mock.patch('nova.db.fixed_ip_bulk_create') def test_bulk_create(self, bulk): fixed_ips = [fixed_ip.FixedIP(address='192.168.1.1'), fixed_ip.FixedIP(address='192.168.1.2')] fixed_ip.FixedIPList.bulk_create(self.context, fixed_ips) bulk.assert_called_once_with(self.context, [{'address': '192.168.1.1'}, {'address': '192.168.1.2'}]) @mock.patch('nova.db.network_get_associated_fixed_ips') def test_get_by_network(self, get): info = {'address': '1.2.3.4', 'instance_uuid': 'fake-uuid', 
'network_id': 0, 'vif_id': 1, 'vif_address': 'de:ad:be:ee:f0:00', 'instance_hostname': 'fake-host', 'instance_updated': datetime.datetime(1955, 11, 5), 'instance_created': datetime.datetime(1955, 11, 5), 'allocated': True, 'leased': True, 'default_route': True, } get.return_value = [info] fixed_ips = fixed_ip.FixedIPList.get_by_network( self.context, {'id': 0}, host='fake-host') get.assert_called_once_with(self.context, 0, host='fake-host') self.assertEqual(1, len(fixed_ips)) fip = fixed_ips[0] self.assertEqual('1.2.3.4', str(fip.address)) self.assertEqual('fake-uuid', fip.instance_uuid) self.assertEqual(0, fip.network_id) self.assertEqual(1, fip.virtual_interface_id) self.assertTrue(fip.allocated) self.assertTrue(fip.leased) self.assertEqual('fake-uuid', fip.instance.uuid) self.assertEqual('fake-host', fip.instance.hostname) self.assertIsInstance(fip.instance.created_at, datetime.datetime) self.assertIsInstance(fip.instance.updated_at, datetime.datetime) self.assertEqual(1, fip.virtual_interface.id) self.assertEqual(info['vif_address'], fip.virtual_interface.address) @mock.patch('nova.db.network_get_associated_fixed_ips') def test_backport_default_route(self, mock_get): info = {'address': '1.2.3.4', 'instance_uuid': 'fake-uuid', 'network_id': 0, 'vif_id': 1, 'vif_address': 'de:ad:be:ee:f0:00', 'instance_hostname': 'fake-host', 'instance_updated': datetime.datetime(1955, 11, 5), 'instance_created': datetime.datetime(1955, 11, 5), 'allocated': True, 'leased': True, 'default_route': True, } mock_get.return_value = [info] fixed_ips = fixed_ip.FixedIPList.get_by_network( self.context, {'id': 0}, host='fake-host') primitive = fixed_ips[0].obj_to_primitive() self.assertIn('default_route', primitive['nova_object.data']) versions = ovo_base.obj_tree_get_versions('FixedIP') fixed_ips[0].obj_make_compatible_from_manifest( primitive['nova_object.data'], target_version='1.1', version_manifest=versions) self.assertNotIn('default_route', primitive['nova_object.data']) class 
TestFixedIPObject(test_objects._LocalTest, _TestFixedIPObject): pass class TestRemoteFixedIPObject(test_objects._RemoteTest, _TestFixedIPObject): pass nova-13.0.0/nova/tests/unit/objects/test_floating_ip.py0000664000567000056710000002647712701407773024350 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr from oslo_versionedobjects import base as ovo_base from nova import exception from nova import objects from nova.objects import floating_ip from nova.tests.unit.objects import test_fixed_ip from nova.tests.unit.objects import test_network from nova.tests.unit.objects import test_objects fake_floating_ip = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 123, 'address': '172.17.0.1', 'fixed_ip_id': None, 'project_id': None, 'host': None, 'auto_assigned': False, 'pool': None, 'interface': None, 'fixed_ip': None, } class _TestFloatingIPObject(object): def _compare(self, obj, db_obj): for field in obj.fields: if field in floating_ip.FLOATING_IP_OPTIONAL_ATTRS: if obj.obj_attr_is_set(field): obj_val = obj[field].id db_val = db_obj[field]['id'] else: continue else: obj_val = obj[field] db_val = db_obj[field] if isinstance(obj_val, netaddr.IPAddress): obj_val = str(obj_val) self.assertEqual(db_val, obj_val) @mock.patch('nova.db.floating_ip_get') def test_get_by_id(self, get): db_floatingip = dict(fake_floating_ip, fixed_ip=test_fixed_ip.fake_fixed_ip) 
get.return_value = db_floatingip floatingip = floating_ip.FloatingIP.get_by_id(self.context, 123) get.assert_called_once_with(self.context, 123) self._compare(floatingip, db_floatingip) @mock.patch('nova.db.floating_ip_get_by_address') def test_get_by_address(self, get): get.return_value = fake_floating_ip floatingip = floating_ip.FloatingIP.get_by_address(self.context, '1.2.3.4') get.assert_called_once_with(self.context, '1.2.3.4') self._compare(floatingip, fake_floating_ip) @mock.patch('nova.db.floating_ip_get_pools') def test_get_pool_names(self, get): get.return_value = [{'name': 'a'}, {'name': 'b'}] self.assertEqual(['a', 'b'], floating_ip.FloatingIP.get_pool_names(self.context)) @mock.patch('nova.db.floating_ip_allocate_address') def test_allocate_address(self, allocate): allocate.return_value = '1.2.3.4' self.assertEqual('1.2.3.4', floating_ip.FloatingIP.allocate_address(self.context, 'project', 'pool')) allocate.assert_called_with(self.context, 'project', 'pool', auto_assigned=False) @mock.patch('nova.db.floating_ip_fixed_ip_associate') def test_associate(self, associate): db_fixed = dict(test_fixed_ip.fake_fixed_ip, network=test_network.fake_network) associate.return_value = db_fixed floatingip = floating_ip.FloatingIP.associate(self.context, '172.17.0.1', '192.168.1.1', 'host') associate.assert_called_with(self.context, '172.17.0.1', '192.168.1.1', 'host') self.assertEqual(db_fixed['id'], floatingip.fixed_ip.id) self.assertEqual('172.17.0.1', str(floatingip.address)) self.assertEqual('host', floatingip.host) @mock.patch('nova.db.floating_ip_deallocate') def test_deallocate(self, deallocate): floating_ip.FloatingIP.deallocate(self.context, '1.2.3.4') deallocate.assert_called_with(self.context, '1.2.3.4') @mock.patch('nova.db.floating_ip_destroy') def test_destroy(self, destroy): floating_ip.FloatingIP.destroy(self.context, '1.2.3.4') destroy.assert_called_with(self.context, '1.2.3.4') @mock.patch('nova.db.floating_ip_disassociate') def 
test_disassociate(self, disassociate): db_fixed = dict(test_fixed_ip.fake_fixed_ip, network=test_network.fake_network) disassociate.return_value = db_fixed floatingip = floating_ip.FloatingIP.disassociate(self.context, '1.2.3.4') disassociate.assert_called_with(self.context, '1.2.3.4') self.assertEqual(db_fixed['id'], floatingip.fixed_ip.id) self.assertEqual('1.2.3.4', str(floatingip.address)) @mock.patch('nova.db.floating_ip_update') def test_save(self, update): update.return_value = fake_floating_ip floatingip = floating_ip.FloatingIP(context=self.context, id=123, address='1.2.3.4', host='foo') floatingip.obj_reset_changes(['address', 'id']) floatingip.save() self.assertEqual(set(), floatingip.obj_what_changed()) update.assert_called_with(self.context, '1.2.3.4', {'host': 'foo'}) def test_save_errors(self): floatingip = floating_ip.FloatingIP(context=self.context, id=123, host='foo') floatingip.obj_reset_changes() floating_ip.address = '1.2.3.4' self.assertRaises(exception.ObjectActionError, floatingip.save) floatingip.obj_reset_changes() floatingip.fixed_ip_id = 1 self.assertRaises(exception.ObjectActionError, floatingip.save) @mock.patch('nova.db.floating_ip_update') def test_save_no_fixedip(self, update): update.return_value = fake_floating_ip floatingip = floating_ip.FloatingIP(context=self.context, id=123) floatingip.fixed_ip = objects.FixedIP(context=self.context, id=456) self.assertNotIn('fixed_ip', update.calls[1]) @mock.patch('nova.db.floating_ip_get_all') def test_get_all(self, get): get.return_value = [fake_floating_ip] floatingips = floating_ip.FloatingIPList.get_all(self.context) self.assertEqual(1, len(floatingips)) self._compare(floatingips[0], fake_floating_ip) get.assert_called_with(self.context) @mock.patch('nova.db.floating_ip_get_all_by_host') def test_get_by_host(self, get): get.return_value = [fake_floating_ip] floatingips = floating_ip.FloatingIPList.get_by_host(self.context, 'host') self.assertEqual(1, len(floatingips)) 
self._compare(floatingips[0], fake_floating_ip) get.assert_called_with(self.context, 'host') @mock.patch('nova.db.floating_ip_get_all_by_project') def test_get_by_project(self, get): get.return_value = [fake_floating_ip] floatingips = floating_ip.FloatingIPList.get_by_project(self.context, 'project') self.assertEqual(1, len(floatingips)) self._compare(floatingips[0], fake_floating_ip) get.assert_called_with(self.context, 'project') @mock.patch('nova.db.floating_ip_get_by_fixed_address') def test_get_by_fixed_address(self, get): get.return_value = [fake_floating_ip] floatingips = floating_ip.FloatingIPList.get_by_fixed_address( self.context, '1.2.3.4') self.assertEqual(1, len(floatingips)) self._compare(floatingips[0], fake_floating_ip) get.assert_called_with(self.context, '1.2.3.4') @mock.patch('nova.db.floating_ip_get_by_fixed_ip_id') def test_get_by_fixed_ip_id(self, get): get.return_value = [fake_floating_ip] floatingips = floating_ip.FloatingIPList.get_by_fixed_ip_id( self.context, 123) self.assertEqual(1, len(floatingips)) self._compare(floatingips[0], fake_floating_ip) get.assert_called_with(self.context, 123) @mock.patch('nova.db.instance_floating_address_get_all') def test_get_addresses_by_instance(self, get_all): expected = ['1.2.3.4', '4.5.6.7'] get_all.return_value = list(expected) ips = floating_ip.FloatingIP.get_addresses_by_instance( self.context, {'uuid': '1234'}) self.assertEqual(expected, ips) get_all.assert_called_once_with(self.context, '1234') def test_make_ip_info(self): result = objects.FloatingIPList.make_ip_info('1.2.3.4', 'pool', 'eth0') self.assertEqual({'address': '1.2.3.4', 'pool': 'pool', 'interface': 'eth0'}, result) @mock.patch('nova.db.floating_ip_bulk_create') def test_bulk_create(self, create_mock): def fake_create(ctxt, ip_info, want_result=False): return [{'id': 1, 'address': ip['address'], 'fixed_ip_id': 1, 'project_id': 'foo', 'host': 'host', 'auto_assigned': False, 'pool': ip['pool'], 'interface': ip['interface'], 'fixed_ip': 
None, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False} for ip in ip_info] create_mock.side_effect = fake_create ips = [objects.FloatingIPList.make_ip_info('1.1.1.1', 'pool', 'eth0'), objects.FloatingIPList.make_ip_info('1.1.1.2', 'loop', 'eth1')] result = objects.FloatingIPList.create(None, ips) self.assertIs(result, None) result = objects.FloatingIPList.create(None, ips, want_result=True) self.assertEqual('1.1.1.1', str(result[0].address)) self.assertEqual('1.1.1.2', str(result[1].address)) @mock.patch('nova.db.floating_ip_bulk_destroy') def test_bulk_destroy(self, destroy_mock): ips = [{'address': '1.2.3.4'}, {'address': '4.5.6.7'}] objects.FloatingIPList.destroy(None, ips) destroy_mock.assert_called_once_with(None, ips) def test_backport_fixedip_1_1(self): floating = objects.FloatingIP() fixed = objects.FixedIP() floating.fixed_ip = fixed versions = ovo_base.obj_tree_get_versions('FloatingIP') versions['FixedIP'] = '1.1' primitive = floating.obj_to_primitive(target_version='1.1', version_manifest=versions) self.assertEqual('1.1', primitive['nova_object.data']['fixed_ip']['nova_object.version']) class TestFloatingIPObject(test_objects._LocalTest, _TestFloatingIPObject): pass class TestRemoteFloatingIPObject(test_objects._RemoteTest, _TestFloatingIPObject): pass nova-13.0.0/nova/tests/unit/objects/test_numa.py0000664000567000056710000002534712701407773023010 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova import exception from nova import objects from nova.tests.unit.objects import test_objects fake_obj_numa = objects.NUMATopology( cells=[ objects.NUMACell( id=0, cpuset=set([1, 2]), memory=512, cpu_usage=2, memory_usage=256, mempages=[], pinned_cpus=set([]), siblings=[]), objects.NUMACell( id=1, cpuset=set([3, 4]), memory=512, cpu_usage=1, memory_usage=128, mempages=[], pinned_cpus=set([]), siblings=[])]) class _TestNUMA(object): def test_convert_wipe(self): d1 = fake_obj_numa._to_dict() d2 = objects.NUMATopology.obj_from_primitive(d1)._to_dict() self.assertEqual(d1, d2) def test_from_legacy_limits(self): old_style = {"cells": [ {"mem": { "total": 1024, "limit": 2048}, "cpu_limit": 96.0, "cpus": "0,1,2,3,4,5", "id": 0}]} limits = objects.NUMATopologyLimits.obj_from_db_obj(old_style) self.assertEqual(16.0, limits.cpu_allocation_ratio) self.assertEqual(2.0, limits.ram_allocation_ratio) def test_to_legacy_limits(self): limits = objects.NUMATopologyLimits( cpu_allocation_ratio=16, ram_allocation_ratio=2) host_topo = objects.NUMATopology(cells=[ objects.NUMACell(id=0, cpuset=set([1, 2]), memory=1024) ]) old_style = {'cells': [ {'mem': {'total': 1024, 'limit': 2048.0}, 'id': 0, 'cpus': '1,2', 'cpu_limit': 32.0}]} self.assertEqual(old_style, limits.to_dict_legacy(host_topo)) def test_free_cpus(self): obj = objects.NUMATopology(cells=[ objects.NUMACell( id=0, cpuset=set([1, 2]), memory=512, cpu_usage=2, memory_usage=256, pinned_cpus=set([1]), siblings=[], mempages=[]), objects.NUMACell( id=1, cpuset=set([3, 4]), memory=512, cpu_usage=1, memory_usage=128, pinned_cpus=set([]), siblings=[], mempages=[]) ] ) self.assertEqual(set([2]), obj.cells[0].free_cpus) self.assertEqual(set([3, 4]), obj.cells[1].free_cpus) def test_pinning_logic(self): numacell = objects.NUMACell(id=0, cpuset=set([1, 2, 3, 4]), memory=512, cpu_usage=2, memory_usage=256, pinned_cpus=set([1]), siblings=[], mempages=[]) numacell.pin_cpus(set([2, 3])) self.assertEqual(set([4]), numacell.free_cpus) 
self.assertRaises(exception.CPUPinningUnknown, numacell.pin_cpus, set([1, 55])) self.assertRaises(exception.CPUPinningInvalid, numacell.pin_cpus, set([1, 4])) self.assertRaises(exception.CPUPinningUnknown, numacell.unpin_cpus, set([1, 55])) self.assertRaises(exception.CPUPinningInvalid, numacell.unpin_cpus, set([1, 4])) numacell.unpin_cpus(set([1, 2, 3])) self.assertEqual(set([1, 2, 3, 4]), numacell.free_cpus) def test_pinning_with_siblings(self): numacell = objects.NUMACell(id=0, cpuset=set([1, 2, 3, 4]), memory=512, cpu_usage=2, memory_usage=256, pinned_cpus=set([]), siblings=[set([1, 3]), set([2, 4])], mempages=[]) numacell.pin_cpus_with_siblings(set([1, 2])) self.assertEqual(set(), numacell.free_cpus) numacell.unpin_cpus_with_siblings(set([1])) self.assertEqual(set([1, 3]), numacell.free_cpus) self.assertRaises(exception.CPUPinningInvalid, numacell.unpin_cpus_with_siblings, set([3])) self.assertRaises(exception.CPUPinningInvalid, numacell.pin_cpus_with_siblings, set([4])) self.assertRaises(exception.CPUPinningInvalid, numacell.unpin_cpus_with_siblings, set([3, 4])) self.assertEqual(set([1, 3]), numacell.free_cpus) numacell.unpin_cpus_with_siblings(set([4])) self.assertEqual(set([1, 2, 3, 4]), numacell.free_cpus) def test_pages_topology_wipe(self): pages_topology = objects.NUMAPagesTopology( size_kb=2048, total=1024, used=512) self.assertEqual(2048, pages_topology.size_kb) self.assertEqual(1024, pages_topology.total) self.assertEqual(512, pages_topology.used) self.assertEqual(512, pages_topology.free) self.assertEqual(1048576, pages_topology.free_kb) def test_can_fit_hugepages(self): cell = objects.NUMACell( id=0, cpuset=set([1, 2]), memory=1024, siblings=[], pinned_cpus=set([]), mempages=[ objects.NUMAPagesTopology( size_kb=4, total=1548736, used=0), objects.NUMAPagesTopology( size_kb=2048, total=513, used=0)]) # 1,002G pagesize = 2048 self.assertTrue(cell.can_fit_hugepages(pagesize, 2 ** 20)) self.assertFalse(cell.can_fit_hugepages(pagesize, 2 ** 21)) 
self.assertFalse(cell.can_fit_hugepages(pagesize, 2 ** 19 + 1)) self.assertRaises( exception.MemoryPageSizeNotSupported, cell.can_fit_hugepages, 12345, 2 ** 20) def test_default_behavior(self): inst_cell = objects.NUMACell() self.assertEqual(0, len(inst_cell.obj_get_changes())) def test_numa_pages_equivalent(self): pt1 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0) pt2 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0) self.assertEqual(pt1, pt2) def test_numa_pages_not_equivalent(self): pt1 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0) pt2 = objects.NUMAPagesTopology(size_kb=1024, total=33, used=0) self.assertNotEqual(pt1, pt2) def test_numa_pages_not_equivalent_missing_a(self): pt1 = objects.NUMAPagesTopology(size_kb=1024, used=0) pt2 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0) self.assertNotEqual(pt1, pt2) def test_numa_pages_not_equivalent_missing_b(self): pt1 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0) pt2 = objects.NUMAPagesTopology(size_kb=1024, used=0) self.assertNotEqual(pt1, pt2) def test_numa_cell_equivalent(self): cell1 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32, cpu_usage=10, pinned_cpus=set([3, 4]), siblings=[set([5, 6])]) cell2 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32, cpu_usage=10, pinned_cpus=set([3, 4]), siblings=[set([5, 6])]) self.assertEqual(cell1, cell2) def test_numa_cell_not_equivalent(self): cell1 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32, cpu_usage=10, pinned_cpus=set([3, 4]), siblings=[set([5, 6])]) cell2 = objects.NUMACell(id=2, cpuset=set([1, 2]), memory=32, cpu_usage=10, pinned_cpus=set([3, 4]), siblings=[set([5, 6])]) self.assertNotEqual(cell1, cell2) def test_numa_cell_not_equivalent_missing_a(self): cell1 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32, pinned_cpus=set([3, 4]), siblings=[set([5, 6])]) cell2 = objects.NUMACell(id=2, cpuset=set([1, 2]), memory=32, cpu_usage=10, pinned_cpus=set([3, 4]), siblings=[set([5, 
6])]) self.assertNotEqual(cell1, cell2) def test_numa_cell_not_equivalent_missing_b(self): cell1 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32, cpu_usage=10, pinned_cpus=set([3, 4]), siblings=[set([5, 6])]) cell2 = objects.NUMACell(id=2, cpuset=set([1, 2]), memory=32, pinned_cpus=set([3, 4]), siblings=[set([5, 6])]) self.assertNotEqual(cell1, cell2) def test_numa_cell_equivalent_different_pages(self): pt1 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0) pt2 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0) cell1 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32, cpu_usage=10, pinned_cpus=set([3, 4]), siblings=[set([5, 6])], mempages=[pt1]) cell2 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32, cpu_usage=10, pinned_cpus=set([3, 4]), siblings=[set([5, 6])], mempages=[pt2]) self.assertEqual(cell1, cell2) def test_numa_cell_not_equivalent_different_pages(self): pt1 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0) pt2 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=1) cell1 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32, cpu_usage=10, pinned_cpus=set([3, 4]), siblings=[set([5, 6])], mempages=[pt1]) cell2 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32, cpu_usage=10, pinned_cpus=set([3, 4]), siblings=[set([5, 6])], mempages=[pt2]) self.assertNotEqual(cell1, cell2) class TestNUMA(test_objects._LocalTest, _TestNUMA): pass class TestNUMARemote(test_objects._RemoteTest, _TestNUMA): pass nova-13.0.0/nova/tests/unit/objects/test_agent.py0000664000567000056710000000727312701407773023144 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from nova import exception from nova.objects import agent as agent_obj from nova.tests.unit.objects import test_objects fake_agent = { 'id': 1, 'hypervisor': 'novavm', 'os': 'linux', 'architecture': 'DISC', 'version': '1.0', 'url': 'http://openstack.org/novavm/agents/novavm_agent_v1.0.rpm', 'md5hash': '8cb151f3adc23a92db8ddbe084796823', 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, } class _TestAgent(object): @staticmethod def _compare(test, db, obj): for field, value in db.items(): test.assertEqual(db[field], getattr(obj, field)) @mock.patch('nova.db.agent_build_get_by_triple') def test_get_by_triple(self, mock_get): mock_get.return_value = fake_agent agent = agent_obj.Agent.get_by_triple(self.context, 'novavm', 'linux', 'DISC') self._compare(self, fake_agent, agent) @mock.patch('nova.db.agent_build_get_by_triple') def test_get_by_triple_none(self, mock_get): mock_get.return_value = None agent = agent_obj.Agent.get_by_triple(self.context, 'novavm', 'linux', 'DISC') self.assertIsNone(agent) @mock.patch('nova.db.agent_build_create') def test_create(self, mock_create): mock_create.return_value = fake_agent agent = agent_obj.Agent(context=self.context) agent.hypervisor = 'novavm' agent.create() mock_create.assert_called_once_with(self.context, {'hypervisor': 'novavm'}) self._compare(self, fake_agent, agent) @mock.patch('nova.db.agent_build_create') def test_create_with_id(self, mock_create): agent = agent_obj.Agent(context=self.context, id=123) self.assertRaises(exception.ObjectActionError, agent.create) 
self.assertFalse(mock_create.called) @mock.patch('nova.db.agent_build_destroy') def test_destroy(self, mock_destroy): agent = agent_obj.Agent(context=self.context, id=123) agent.destroy() mock_destroy.assert_called_once_with(self.context, 123) @mock.patch('nova.db.agent_build_update') def test_save(self, mock_update): mock_update.return_value = fake_agent agent = agent_obj.Agent(context=self.context, id=123) agent.obj_reset_changes() agent.hypervisor = 'novavm' agent.save() mock_update.assert_called_once_with(self.context, 123, {'hypervisor': 'novavm'}) @mock.patch('nova.db.agent_build_get_all') def test_get_all(self, mock_get_all): mock_get_all.return_value = [fake_agent] agents = agent_obj.AgentList.get_all(self.context, hypervisor='novavm') self.assertEqual(1, len(agents)) self._compare(self, fake_agent, agents[0]) mock_get_all.assert_called_once_with(self.context, hypervisor='novavm') class TestAgent(test_objects._LocalTest, _TestAgent): pass class TestAgentRemote(test_objects._RemoteTest, _TestAgent): pass nova-13.0.0/nova/tests/unit/objects/test_instance_numa_topology.py0000664000567000056710000001640312701407773026621 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import uuid import mock from oslo_serialization import jsonutils from nova import exception from nova import objects from nova.objects import fields from nova.tests.unit.objects import test_objects fake_instance_uuid = str(uuid.uuid4()) fake_obj_numa_topology = objects.InstanceNUMATopology( instance_uuid = fake_instance_uuid, cells=[ objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=512, pagesize=2048), objects.InstanceNUMACell( id=1, cpuset=set([3, 4]), memory=512, pagesize=2048) ]) fake_numa_topology = fake_obj_numa_topology._to_dict() fake_db_topology = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'id': 1, 'instance_uuid': fake_instance_uuid, 'numa_topology': fake_obj_numa_topology._to_json() } fake_old_db_topology = dict(fake_db_topology) # copy fake_old_db_topology['numa_topology'] = jsonutils.dumps(fake_numa_topology) def get_fake_obj_numa_topology(context): fake_obj_numa_topology_cpy = fake_obj_numa_topology.obj_clone() fake_obj_numa_topology_cpy._context = context return fake_obj_numa_topology_cpy class _TestInstanceNUMATopology(object): @mock.patch('nova.db.instance_extra_update_by_uuid') def test_create(self, mock_update): topo_obj = get_fake_obj_numa_topology(self.context) topo_obj.instance_uuid = fake_db_topology['instance_uuid'] topo_obj.create() self.assertEqual(1, len(mock_update.call_args_list)) @mock.patch('nova.db.instance_extra_update_by_uuid') def test_save(self, mock_update): topo_obj = get_fake_obj_numa_topology(self.context) topo_obj.instance_uuid = fake_db_topology['instance_uuid'] topo_obj._save() self.assertEqual(1, len(mock_update.call_args_list)) def _test_get_by_instance_uuid(self): numa_topology = objects.InstanceNUMATopology.get_by_instance_uuid( self.context, fake_db_topology['instance_uuid']) self.assertEqual(fake_db_topology['instance_uuid'], numa_topology.instance_uuid) for obj_cell, topo_cell in zip( numa_topology.cells, fake_obj_numa_topology['cells']): 
self.assertIsInstance(obj_cell, objects.InstanceNUMACell) self.assertEqual(topo_cell.id, obj_cell.id) self.assertEqual(topo_cell.cpuset, obj_cell.cpuset) self.assertEqual(topo_cell.memory, obj_cell.memory) self.assertEqual(topo_cell.pagesize, obj_cell.pagesize) @mock.patch('nova.db.instance_extra_get_by_instance_uuid') def test_get_by_instance_uuid(self, mock_get): mock_get.return_value = fake_db_topology self._test_get_by_instance_uuid() @mock.patch('nova.db.instance_extra_get_by_instance_uuid') def test_get_by_instance_uuid_old(self, mock_get): mock_get.return_value = fake_old_db_topology self._test_get_by_instance_uuid() @mock.patch('nova.db.instance_extra_get_by_instance_uuid') def test_get_by_instance_uuid_missing(self, mock_get): mock_get.return_value = None self.assertRaises( exception.NumaTopologyNotFound, objects.InstanceNUMATopology.get_by_instance_uuid, self.context, 'fake_uuid') def test_siblings(self): inst_cell = objects.InstanceNUMACell( cpuset=set([0, 1, 2])) self.assertEqual([], inst_cell.siblings) topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=0) inst_cell = objects.InstanceNUMACell( cpuset=set([0, 1, 2]), cpu_topology=topo) self.assertEqual([], inst_cell.siblings) # One thread actually means no threads topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1) inst_cell = objects.InstanceNUMACell( cpuset=set([0, 1, 2]), cpu_topology=topo) self.assertEqual([], inst_cell.siblings) topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2) inst_cell = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), cpu_topology=topo) self.assertEqual([set([0, 1]), set([2, 3])], inst_cell.siblings) topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4) inst_cell = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), cpu_topology=topo) self.assertEqual([set([0, 1, 2, 3])], inst_cell.siblings) def test_pin(self): inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), cpu_pinning=None) inst_cell.pin(0, 14) self.assertEqual({0: 14}, 
inst_cell.cpu_pinning) inst_cell.pin(12, 14) self.assertEqual({0: 14}, inst_cell.cpu_pinning) inst_cell.pin(1, 16) self.assertEqual({0: 14, 1: 16}, inst_cell.cpu_pinning) def test_pin_vcpus(self): inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), cpu_pinning=None) inst_cell.pin_vcpus((0, 14), (1, 15), (2, 16), (3, 17)) self.assertEqual({0: 14, 1: 15, 2: 16, 3: 17}, inst_cell.cpu_pinning) def test_default_behavior(self): inst_cell = objects.InstanceNUMACell() self.assertEqual(0, len(inst_cell.obj_get_changes())) def test_cpu_pinning_requested_cell(self): inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), cpu_pinning=None) self.assertFalse(inst_cell.cpu_pinning_requested) inst_cell.cpu_policy = fields.CPUAllocationPolicy.DEDICATED self.assertTrue(inst_cell.cpu_pinning_requested) def test_cpu_pinning_requested(self): fake_topo_obj = copy.deepcopy(fake_obj_numa_topology) self.assertFalse(fake_topo_obj.cpu_pinning_requested) for cell in fake_topo_obj.cells: cell.cpu_policy = fields.CPUAllocationPolicy.DEDICATED self.assertTrue(fake_topo_obj.cpu_pinning_requested) def test_clear_host_pinning(self): topo_obj = get_fake_obj_numa_topology(self.context) topo_obj.cells[0].pin_vcpus((1, 10), (2, 11)) topo_obj.cells[0].id = 3 topo_obj.cells[1].pin_vcpus((3, 0), (4, 1)) topo_obj.cells[1].id = 0 topo_obj.clear_host_pinning() self.assertEqual({}, topo_obj.cells[0].cpu_pinning) self.assertEqual(-1, topo_obj.cells[0].id) self.assertEqual({}, topo_obj.cells[1].cpu_pinning) self.assertEqual(-1, topo_obj.cells[1].id) class TestInstanceNUMATopology(test_objects._LocalTest, _TestInstanceNUMATopology): pass class TestInstanceNUMATopologyRemote(test_objects._RemoteTest, _TestInstanceNUMATopology): pass nova-13.0.0/nova/tests/unit/objects/test_monitor_metric.py0000664000567000056710000000727112701407773025076 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslo_utils import timeutils from nova import objects from nova.objects import fields from nova.tests.unit.objects import test_objects _ts_now = timeutils.utcnow() _monitor_metric_spec = { 'name': fields.MonitorMetricType.CPU_FREQUENCY, 'value': 1000, 'timestamp': _ts_now.isoformat(), 'source': 'nova.virt.libvirt.driver' } _monitor_metric_perc_spec = { 'name': fields.MonitorMetricType.CPU_PERCENT, 'value': 0.17, 'timestamp': _ts_now.isoformat(), 'source': 'nova.virt.libvirt.driver' } _monitor_numa_metric_spec = { 'name': fields.MonitorMetricType.NUMA_MEM_BW_CURRENT, 'numa_membw_values': {"0": 10, "1": 43}, 'timestamp': _ts_now.isoformat(), 'source': 'nova.virt.libvirt.driver' } _monitor_metric_list_spec = [_monitor_metric_spec] class _TestMonitorMetricObject(object): def test_monitor_metric_to_dict(self): obj = objects.MonitorMetric(name='cpu.frequency', value=1000, timestamp=_ts_now, source='nova.virt.libvirt.driver') self.assertEqual(_monitor_metric_spec, obj.to_dict()) def test_monitor_metric_perc_to_dict(self): """Test to ensure division by 100.0 occurs on percentage value.""" obj = objects.MonitorMetric(name='cpu.percent', value=17, timestamp=_ts_now, source='nova.virt.libvirt.driver') self.assertEqual(_monitor_metric_perc_spec, obj.to_dict()) def test_monitor_metric_list_to_list(self): obj = objects.MonitorMetric(name='cpu.frequency', value=1000, timestamp=_ts_now, source='nova.virt.libvirt.driver') list_obj = objects.MonitorMetricList(objects=[obj]) self.assertEqual(_monitor_metric_list_spec, 
list_obj.to_list()) def test_monitor_NUMA_metric_to_dict(self): obj = objects.MonitorMetric(name='numa.membw.current', numa_membw_values={"0": 10, "1": 43}, timestamp=_ts_now, source='nova.virt.libvirt.driver') self.assertEqual(_monitor_numa_metric_spec, obj.to_dict()) def test_conversion_in_monitor_metric_list_from_json(self): spec_list = [_monitor_metric_spec, _monitor_metric_perc_spec] metrics = objects.MonitorMetricList.from_json( jsonutils.dumps(spec_list)) for metric, spec in zip(metrics, spec_list): exp = spec['value'] if (spec['name'] in objects.monitor_metric.FIELDS_REQUIRING_CONVERSION): exp = spec['value'] * 100 self.assertEqual(exp, metric.value) class TestMonitorMetricObject(test_objects._LocalTest, _TestMonitorMetricObject): pass class TestRemoteMonitorMetricObject(test_objects._RemoteTest, _TestMonitorMetricObject): pass nova-13.0.0/nova/tests/unit/objects/test_vcpu_model.py0000664000567000056710000000716312701407773024201 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.compute import arch from nova.compute import cpumodel from nova import objects from nova.tests.unit.objects import test_objects fake_cpu_model_feature = { 'policy': cpumodel.POLICY_REQUIRE, 'name': 'sse2', } fake_cpu_model_feature_obj = objects.VirtCPUFeature( **fake_cpu_model_feature) fake_vcpumodel_dict = { 'arch': arch.I686, 'vendor': 'fake-vendor', 'match': cpumodel.MATCH_EXACT, 'topology': objects.VirtCPUTopology(sockets=1, cores=1, threads=1), 'features': [fake_cpu_model_feature_obj], 'mode': cpumodel.MODE_HOST_MODEL, 'model': 'fake-model', } fake_vcpumodel = objects.VirtCPUModel(**fake_vcpumodel_dict) class _TestVirtCPUFeatureObj(object): def test_policy_limitation(self): obj = objects.VirtCPUFeature() self.assertRaises(ValueError, setattr, obj, 'policy', 'foo') class TestVirtCPUFeatureObj(test_objects._LocalTest, _TestVirtCPUFeatureObj): pass class TestRemoteVirtCPUFeatureObj(test_objects._LocalTest, _TestVirtCPUFeatureObj): pass class _TestVirtCPUModel(object): def test_create(self): model = objects.VirtCPUModel(**fake_vcpumodel_dict) self.assertEqual(fake_vcpumodel_dict['model'], model.model) self.assertEqual(fake_vcpumodel_dict['topology'].sockets, model.topology.sockets) feature = model.features[0] self.assertEqual(fake_cpu_model_feature['policy'], feature.policy) def test_defaults(self): model = objects.VirtCPUModel() self.assertIsNone(model.mode) self.assertIsNone(model.model) self.assertIsNone(model.vendor) self.assertIsNone(model.arch) self.assertIsNone(model.match) self.assertEqual([], model.features) self.assertIsNone(model.topology) def test_arch_field(self): model = objects.VirtCPUModel(**fake_vcpumodel_dict) self.assertRaises(ValueError, setattr, model, 'arch', 'foo') def test_serialize(self): modelin = objects.VirtCPUModel(**fake_vcpumodel_dict) modelout = objects.VirtCPUModel.from_json(modelin.to_json()) self.assertEqual(modelin.mode, modelout.mode) self.assertEqual(modelin.model, modelout.model) self.assertEqual(modelin.vendor, 
modelout.vendor) self.assertEqual(modelin.arch, modelout.arch) self.assertEqual(modelin.match, modelout.match) self.assertEqual(modelin.features[0].policy, modelout.features[0].policy) self.assertEqual(modelin.features[0].name, modelout.features[0].name) self.assertEqual(modelin.topology.sockets, modelout.topology.sockets) self.assertEqual(modelin.topology.cores, modelout.topology.cores) self.assertEqual(modelin.topology.threads, modelout.topology.threads) class TestVirtCPUModel(test_objects._LocalTest, _TestVirtCPUModel): pass class TestRemoteVirtCPUModel(test_objects._LocalTest, _TestVirtCPUModel): pass nova-13.0.0/nova/tests/unit/objects/test_security_group_rule.py0000664000567000056710000001113512701407773026150 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock
from oslo_versionedobjects import exception as ovo_exc

from nova import db
from nova import objects
from nova.tests.unit.objects import test_objects
from nova.tests.unit.objects import test_security_group

# Canonical DB row for a single TCP/22 rule open to the world.
fake_rule = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': False,
    'id': 1,
    'protocol': 'tcp',
    'from_port': 22,
    'to_port': 22,
    'cidr': '0.0.0.0/0',
}


class _TestSecurityGroupRuleObject(object):
    """Tests for the SecurityGroupRule object against a mocked DB."""

    def test_get_by_id(self):
        with mock.patch.object(db, 'security_group_rule_get') as sgrg:
            sgrg.return_value = fake_rule
            rule = objects.SecurityGroupRule.get_by_id(
                self.context, 1)
            for field in fake_rule:
                if field == 'cidr':
                    # The cidr field is an IP-address type; compare its
                    # string form against the raw DB value.
                    self.assertEqual(fake_rule[field],
                                     str(getattr(rule, field)))
                else:
                    self.assertEqual(fake_rule[field],
                                     getattr(rule, field))
            sgrg.assert_called_with(self.context, 1)

    def test_get_by_security_group(self):
        secgroup = objects.SecurityGroup()
        secgroup.id = 123
        rule = dict(fake_rule)
        rule['grantee_group'] = dict(test_security_group.fake_secgroup,
                                     id=123)
        stupid_method = 'security_group_rule_get_by_security_group'
        with mock.patch.object(db, stupid_method) as sgrgbsg:
            sgrgbsg.return_value = [rule]
            rules = (objects.SecurityGroupRuleList.
                     get_by_security_group(self.context, secgroup))
            self.assertEqual(1, len(rules))
            self.assertEqual(123, rules[0].grantee_group.id)

    @mock.patch.object(db, 'security_group_rule_create',
                       return_value=fake_rule)
    def test_create(self, db_mock):
        rule = objects.SecurityGroupRule(context=self.context)
        rule.protocol = 'tcp'
        secgroup = objects.SecurityGroup()
        secgroup.id = 123
        parentgroup = objects.SecurityGroup()
        parentgroup.id = 223
        rule.grantee_group = secgroup
        rule.parent_group = parentgroup
        rule.create()
        # The related groups are flattened to ids in the DB update dict.
        updates = db_mock.call_args[0][1]
        self.assertEqual(fake_rule['id'], rule.id)
        self.assertEqual(updates['group_id'], rule.grantee_group.id)
        self.assertEqual(updates['parent_group_id'], rule.parent_group.id)

    @mock.patch.object(db, 'security_group_rule_create',
                       return_value=fake_rule)
    def test_set_id_failure(self, db_mock):
        # The id field is read-only once the rule has been created.
        rule = objects.SecurityGroupRule(context=self.context)
        rule.create()
        self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr,
                          rule, 'id', 124)


class TestSecurityGroupRuleObject(test_objects._LocalTest,
                                  _TestSecurityGroupRuleObject):
    pass


class TestSecurityGroupRuleObjectRemote(test_objects._RemoteTest,
                                        _TestSecurityGroupRuleObject):
    pass


fake_rules = [
    dict(fake_rule, id=1, grantee_group=test_security_group.fake_secgroup),
    dict(fake_rule, id=2, grantee_group=test_security_group.fake_secgroup),
]


class _TestSecurityGroupRuleListObject(object):
    @mock.patch('nova.db.security_group_rule_get_by_instance')
    def test_get_by_instance(self, mock_get):
        mock_get.return_value = fake_rules
        instance = objects.Instance(uuid='fake-uuid')
        rules = objects.SecurityGroupRuleList.get_by_instance(
            self.context, instance)
        mock_get.assert_called_once_with(self.context, instance.uuid)
        self.assertEqual(2, len(rules))
        self.assertEqual([1, 2], [x.id for x in rules])


class TestSecurityGroupRuleListObject(test_objects._LocalTest,
                                      _TestSecurityGroupRuleListObject):
    pass


class TestSecurityGroupRuleListObjectRemote(
        test_objects._RemoteTest, _TestSecurityGroupRuleListObject):
    pass
nova-13.0.0/nova/tests/unit/objects/test_image_meta.py0000664000567000056710000003014012701407773024123 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from nova import exception from nova import objects from nova import test class TestImageMeta(test.NoDBTestCase): def test_basic_attrs(self): image = {'status': 'active', 'container_format': 'bare', 'min_ram': 0, 'updated_at': '2014-12-12T11:16:36.000000', # Testing string -> int conversion 'min_disk': '0', 'owner': '2d8b9502858c406ebee60f0849486222', # Testing string -> bool conversion 'protected': 'yes', 'properties': { 'os_type': 'Linux', 'hw_video_model': 'vga', 'hw_video_ram': '512', 'hw_qemu_guest_agent': 'yes', 'hw_scsi_model': 'virtio-scsi', }, 'size': 213581824, 'name': 'f16-x86_64-openstack-sda', 'checksum': '755122332caeb9f661d5c978adb8b45f', 'created_at': '2014-12-10T16:23:14.000000', 'disk_format': 'qcow2', 'id': 'c8b1790e-a07d-4971-b137-44f2432936cd' } image_meta = objects.ImageMeta.from_dict(image) self.assertEqual('active', image_meta.status) self.assertEqual('bare', image_meta.container_format) self.assertEqual(0, image_meta.min_ram) self.assertIsInstance(image_meta.updated_at, datetime.datetime) self.assertEqual(0, image_meta.min_disk) self.assertEqual('2d8b9502858c406ebee60f0849486222', image_meta.owner) self.assertTrue(image_meta.protected) self.assertEqual(213581824, image_meta.size) self.assertEqual('f16-x86_64-openstack-sda', 
image_meta.name) self.assertEqual('755122332caeb9f661d5c978adb8b45f', image_meta.checksum) self.assertIsInstance(image_meta.created_at, datetime.datetime) self.assertEqual('qcow2', image_meta.disk_format) self.assertEqual('c8b1790e-a07d-4971-b137-44f2432936cd', image_meta.id) self.assertIsInstance(image_meta.properties, objects.ImageMetaProps) def test_no_props(self): image_meta = objects.ImageMeta.from_dict({}) self.assertIsInstance(image_meta.properties, objects.ImageMetaProps) def test_volume_backed_image(self): image = {'container_format': None, 'size': 0, 'checksum': None, 'disk_format': None, } image_meta = objects.ImageMeta.from_dict(image) self.assertEqual('', image_meta.container_format) self.assertEqual(0, image_meta.size) self.assertEqual('', image_meta.checksum) self.assertEqual('', image_meta.disk_format) def test_null_substitution(self): image = {'name': None, 'checksum': None, 'owner': None, 'size': None, 'virtual_size': None, 'container_format': None, 'disk_format': None, } image_meta = objects.ImageMeta.from_dict(image) self.assertEqual('', image_meta.name) self.assertEqual('', image_meta.checksum) self.assertEqual('', image_meta.owner) self.assertEqual(0, image_meta.size) self.assertEqual(0, image_meta.virtual_size) self.assertEqual('', image_meta.container_format) self.assertEqual('', image_meta.disk_format) class TestImageMetaProps(test.NoDBTestCase): def test_normal_props(self): props = {'os_type': 'windows', 'hw_video_model': 'vga', 'hw_video_ram': '512', 'hw_qemu_guest_agent': 'yes', # Fill sane values for the rest here } virtprops = objects.ImageMetaProps.from_dict(props) self.assertEqual('windows', virtprops.os_type) self.assertEqual('vga', virtprops.hw_video_model) self.assertEqual(512, virtprops.hw_video_ram) self.assertTrue(virtprops.hw_qemu_guest_agent) def test_default_props(self): props = {} virtprops = objects.ImageMetaProps.from_dict(props) for prop in virtprops.fields: self.assertIsNone(virtprops.get(prop)) def 
test_default_prop_value(self): props = {} virtprops = objects.ImageMetaProps.from_dict(props) self.assertEqual("hvm", virtprops.get("hw_vm_mode", "hvm")) def test_non_existent_prop(self): props = {} virtprops = objects.ImageMetaProps.from_dict(props) self.assertRaises(AttributeError, virtprops.get, "doesnotexist") def test_legacy_compat(self): legacy_props = { 'architecture': 'x86_64', 'owner_id': '123', 'vmware_adaptertype': 'lsiLogic', 'vmware_disktype': 'preallocated', 'vmware_image_version': '2', 'vmware_ostype': 'rhel3_64Guest', 'auto_disk_config': 'yes', 'ipxe_boot': 'yes', 'xenapi_device_id': '3', 'xenapi_image_compression_level': '2', 'vmware_linked_clone': 'false', 'xenapi_use_agent': 'yes', 'xenapi_skip_agent_inject_ssh': 'no', 'xenapi_skip_agent_inject_files_at_boot': 'no', 'cache_in_nova': 'yes', 'vm_mode': 'hvm', 'bittorrent': 'yes', 'mappings': [], 'block_device_mapping': [], 'bdm_v2': 'yes', 'root_device_name': '/dev/vda', 'hypervisor_version_requires': '>=1.5.3', 'hypervisor_type': 'qemu', } image_meta = objects.ImageMetaProps.from_dict(legacy_props) self.assertEqual('x86_64', image_meta.hw_architecture) self.assertEqual('123', image_meta.img_owner_id) self.assertEqual('lsilogic', image_meta.hw_scsi_model) self.assertEqual('preallocated', image_meta.hw_disk_type) self.assertEqual(2, image_meta.img_version) self.assertEqual('rhel3_64Guest', image_meta.os_distro) self.assertTrue(image_meta.hw_auto_disk_config) self.assertTrue(image_meta.hw_ipxe_boot) self.assertEqual(3, image_meta.hw_device_id) self.assertEqual(2, image_meta.img_compression_level) self.assertFalse(image_meta.img_linked_clone) self.assertTrue(image_meta.img_use_agent) self.assertFalse(image_meta.os_skip_agent_inject_ssh) self.assertFalse(image_meta.os_skip_agent_inject_files_at_boot) self.assertTrue(image_meta.img_cache_in_nova) self.assertTrue(image_meta.img_bittorrent) self.assertEqual([], image_meta.img_mappings) self.assertEqual([], image_meta.img_block_device_mapping) 
self.assertTrue(image_meta.img_bdm_v2) self.assertEqual("/dev/vda", image_meta.img_root_device_name) self.assertEqual('>=1.5.3', image_meta.img_hv_requested_version) self.assertEqual('qemu', image_meta.img_hv_type) def test_legacy_compat_vmware_adapter_types(self): legacy_types = ['lsiLogic', 'busLogic', 'ide', 'lsiLogicsas', 'paraVirtual', None, ''] for legacy_type in legacy_types: legacy_props = { 'vmware_adaptertype': legacy_type, } image_meta = objects.ImageMetaProps.from_dict(legacy_props) if legacy_type == 'ide': self.assertEqual('ide', image_meta.hw_disk_bus) elif not legacy_type: self.assertFalse(image_meta.obj_attr_is_set('hw_disk_bus')) self.assertFalse(image_meta.obj_attr_is_set('hw_scsi_model')) else: self.assertEqual('scsi', image_meta.hw_disk_bus) if legacy_type == 'lsiLogicsas': expected = 'lsisas1068' elif legacy_type == 'paraVirtual': expected = 'vmpvscsi' else: expected = legacy_type.lower() self.assertEqual(expected, image_meta.hw_scsi_model) def test_duplicate_legacy_and_normal_props(self): # Both keys are referring to the same object field props = {'hw_scsi_model': 'virtio-scsi', 'vmware_adaptertype': 'lsiLogic', } virtprops = objects.ImageMetaProps.from_dict(props) # The normal property always wins vs. 
the legacy field since # _set_attr_from_current_names is called finally self.assertEqual('virtio-scsi', virtprops.hw_scsi_model) def test_get(self): props = objects.ImageMetaProps(os_distro='linux') self.assertEqual('linux', props.get('os_distro')) self.assertIsNone(props.get('img_version')) self.assertEqual(1, props.get('img_version', 1)) def test_set_numa_mem(self): props = {'hw_numa_nodes': 2, 'hw_numa_mem.0': "2048", 'hw_numa_mem.1': "4096"} virtprops = objects.ImageMetaProps.from_dict(props) self.assertEqual(2, virtprops.hw_numa_nodes) self.assertEqual([2048, 4096], virtprops.hw_numa_mem) def test_set_numa_mem_sparse(self): props = {'hw_numa_nodes': 2, 'hw_numa_mem.0': "2048", 'hw_numa_mem.1': "1024", 'hw_numa_mem.3': "4096"} virtprops = objects.ImageMetaProps.from_dict(props) self.assertEqual(2, virtprops.hw_numa_nodes) self.assertEqual([2048, 1024], virtprops.hw_numa_mem) def test_set_numa_mem_no_count(self): props = {'hw_numa_mem.0': "2048", 'hw_numa_mem.3': "4096"} virtprops = objects.ImageMetaProps.from_dict(props) self.assertIsNone(virtprops.get("hw_numa_nodes")) self.assertEqual([2048], virtprops.hw_numa_mem) def test_set_numa_cpus(self): props = {'hw_numa_nodes': 2, 'hw_numa_cpus.0': "0-3", 'hw_numa_cpus.1': "4-7"} virtprops = objects.ImageMetaProps.from_dict(props) self.assertEqual(2, virtprops.hw_numa_nodes) self.assertEqual([set([0, 1, 2, 3]), set([4, 5, 6, 7])], virtprops.hw_numa_cpus) def test_set_numa_cpus_sparse(self): props = {'hw_numa_nodes': 4, 'hw_numa_cpus.0': "0-3", 'hw_numa_cpus.1': "4,5", 'hw_numa_cpus.3': "6-7"} virtprops = objects.ImageMetaProps.from_dict(props) self.assertEqual(4, virtprops.hw_numa_nodes) self.assertEqual([set([0, 1, 2, 3]), set([4, 5])], virtprops.hw_numa_cpus) def test_set_numa_cpus_no_count(self): props = {'hw_numa_cpus.0': "0-3", 'hw_numa_cpus.3': "4-7"} virtprops = objects.ImageMetaProps.from_dict(props) self.assertIsNone(virtprops.get("hw_numa_nodes")) self.assertEqual([set([0, 1, 2, 3])], 
virtprops.hw_numa_cpus) def test_obj_make_compatible(self): props = { 'img_config_drive': 'mandatory', 'os_admin_user': 'root', 'hw_vif_multiqueue_enabled': True, 'img_hv_type': 'kvm', 'img_hv_requested_version': '>= 1.0', 'os_require_quiesce': True, } obj = objects.ImageMetaProps(**props) primitive = obj.obj_to_primitive('1.0') self.assertFalse(any([x in primitive['nova_object.data'] for x in props])) for bus in ('lxc', 'uml'): obj.hw_disk_bus = bus self.assertRaises(exception.ObjectActionError, obj.obj_to_primitive, '1.0') nova-13.0.0/nova/tests/unit/objects/test_instance_fault.py0000664000567000056710000001223512701407773025037 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import db from nova import exception from nova.objects import instance_fault from nova.tests.unit.objects import test_objects from nova.tests import uuidsentinel as uuids fake_faults = { 'fake-uuid': [ {'id': 1, 'instance_uuid': uuids.faults_instance, 'code': 123, 'message': 'msg1', 'details': 'details', 'host': 'host', 'deleted': False, 'created_at': None, 'updated_at': None, 'deleted_at': None}, {'id': 2, 'instance_uuid': uuids.faults_instance, 'code': 456, 'message': 'msg2', 'details': 'details', 'host': 'host', 'deleted': False, 'created_at': None, 'updated_at': None, 'deleted_at': None}, ] } class _TestInstanceFault(object): def test_get_latest_for_instance(self): self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids') db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid'] ).AndReturn(fake_faults) self.mox.ReplayAll() fault = instance_fault.InstanceFault.get_latest_for_instance( self.context, 'fake-uuid') for key in fake_faults['fake-uuid'][0]: self.assertEqual(fake_faults['fake-uuid'][0][key], fault[key]) def test_get_latest_for_instance_with_none(self): self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids') db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid'] ).AndReturn({}) self.mox.ReplayAll() fault = instance_fault.InstanceFault.get_latest_for_instance( self.context, 'fake-uuid') self.assertIsNone(fault) def test_get_by_instance(self): self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids') db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid'] ).AndReturn(fake_faults) self.mox.ReplayAll() faults = instance_fault.InstanceFaultList.get_by_instance_uuids( self.context, ['fake-uuid']) for index, db_fault in enumerate(fake_faults['fake-uuid']): for key in db_fault: self.assertEqual(fake_faults['fake-uuid'][index][key], faults[index][key]) def test_get_by_instance_with_none(self): self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids') 
db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid'] ).AndReturn({}) self.mox.ReplayAll() faults = instance_fault.InstanceFaultList.get_by_instance_uuids( self.context, ['fake-uuid']) self.assertEqual(0, len(faults)) @mock.patch('nova.cells.rpcapi.CellsAPI.instance_fault_create_at_top') @mock.patch('nova.db.instance_fault_create') def _test_create(self, update_cells, mock_create, cells_fault_create): mock_create.return_value = fake_faults['fake-uuid'][1] fault = instance_fault.InstanceFault(context=self.context) fault.instance_uuid = uuids.faults_instance fault.code = 456 fault.message = 'foo' fault.details = 'you screwed up' fault.host = 'myhost' fault.create() self.assertEqual(2, fault.id) mock_create.assert_called_once_with(self.context, {'instance_uuid': uuids.faults_instance, 'code': 456, 'message': 'foo', 'details': 'you screwed up', 'host': 'myhost'}) if update_cells: cells_fault_create.assert_called_once_with( self.context, fake_faults['fake-uuid'][1]) else: self.assertFalse(cells_fault_create.called) def test_create_no_cells(self): self.flags(enable=False, group='cells') self._test_create(False) def test_create_api_cell(self): self.flags(cell_type='api', enable=True, group='cells') self._test_create(False) def test_create_compute_cell(self): self.flags(cell_type='compute', enable=True, group='cells') self._test_create(True) def test_create_already_created(self): fault = instance_fault.InstanceFault(context=self.context) fault.id = 1 self.assertRaises(exception.ObjectActionError, fault.create) class TestInstanceFault(test_objects._LocalTest, _TestInstanceFault): pass class TestInstanceFaultRemote(test_objects._RemoteTest, _TestInstanceFault): pass nova-13.0.0/nova/tests/unit/objects/test_external_event.py0000664000567000056710000000404212701407773025060 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from nova.objects import external_event as external_event_obj from nova.tests.unit.objects import test_objects class _TestInstanceExternalEventObject(object): def test_make_key(self): key = external_event_obj.InstanceExternalEvent.make_key('foo', 'bar') self.assertEqual('foo-bar', key) def test_make_key_no_tag(self): key = external_event_obj.InstanceExternalEvent.make_key('foo') self.assertEqual('foo', key) def test_key(self): event = external_event_obj.InstanceExternalEvent( name='network-changed', tag='bar') with mock.patch.object(event, 'make_key') as make_key: make_key.return_value = 'key' self.assertEqual('key', event.key) make_key.assert_called_once_with('network-changed', 'bar') def test_event_names(self): for event in external_event_obj.EVENT_NAMES: external_event_obj.InstanceExternalEvent(name=event, tag='bar') self.assertRaises(ValueError, external_event_obj.InstanceExternalEvent, name='foo', tag='bar') class TestInstanceExternalEventObject(test_objects._LocalTest, _TestInstanceExternalEventObject): pass class TestRemoteInstanceExternalEventObject(test_objects._RemoteTest, _TestInstanceExternalEventObject): pass nova-13.0.0/nova/tests/unit/objects/test_network.py0000664000567000056710000002234012701407773023527 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import netaddr

from nova.objects import network as network_obj
from nova.tests.unit.objects import test_objects
from nova.tests import uuidsentinel as uuids


# A fully-populated DB-style network record used by every test below.
fake_network = {
    'deleted': False,
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'id': 1,
    'label': 'Fake Network',
    'injected': False,
    'cidr': '192.168.1.0/24',
    'cidr_v6': '1234::/64',
    'multi_host': False,
    'netmask': '255.255.255.0',
    'gateway': '192.168.1.1',
    'broadcast': '192.168.1.255',
    'netmask_v6': 64,
    'gateway_v6': '1234::1',
    'bridge': 'br100',
    'bridge_interface': 'eth0',
    'dns1': '8.8.8.8',
    'dns2': '8.8.4.4',
    'vlan': None,
    'vpn_public_address': None,
    'vpn_public_port': None,
    'vpn_private_address': None,
    'dhcp_start': '192.168.1.10',
    'rxtx_base': None,
    'project_id': None,
    'priority': None,
    'host': None,
    'uuid': uuids.network_instance,
    'mtu': None,
    'dhcp_server': '192.168.1.1',
    'enable_dhcp': True,
    'share_address': False,
}


class _TestNetworkObject(object):
    """Network object tests shared by the local/remote variants below."""

    def _compare(self, obj, db_obj):
        # Field-by-field comparison. IP-typed fields come back from the
        # object as netaddr instances, so stringify before comparing with
        # the raw DB value.
        for field in obj.fields:
            db_val = db_obj[field]
            obj_val = obj[field]
            if isinstance(obj_val, netaddr.IPAddress):
                obj_val = str(obj_val)
            if isinstance(obj_val, netaddr.IPNetwork):
                obj_val = str(obj_val)
            if field == 'netmask_v6':
                # The DB stores the v6 netmask as a prefix length; the
                # object exposes the corresponding netmask address.
                db_val = str(netaddr.IPNetwork('1::/%i' % db_val).netmask)
            self.assertEqual(db_val, obj_val)

    @mock.patch('nova.db.network_get')
    def test_get_by_id(self, get):
        get.return_value = fake_network
        network = network_obj.Network.get_by_id(self.context, 'foo')
        self._compare(network, fake_network)
        get.assert_called_once_with(self.context, 'foo',
                                    project_only='allow_none')

    @mock.patch('nova.db.network_get_by_uuid')
    def test_get_by_uuid(self, get):
        get.return_value = fake_network
        network = network_obj.Network.get_by_uuid(self.context, 'foo')
        self._compare(network, fake_network)
        get.assert_called_once_with(self.context, 'foo')

    @mock.patch('nova.db.network_get_by_cidr')
    def test_get_by_cidr(self, get):
        get.return_value = fake_network
        network = network_obj.Network.get_by_cidr(self.context,
                                                  '192.168.1.0/24')
        self._compare(network, fake_network)
        get.assert_called_once_with(self.context, '192.168.1.0/24')

    @mock.patch('nova.db.network_update')
    @mock.patch('nova.db.network_set_host')
    def test_save(self, set_host, update):
        result = dict(fake_network, injected=True)
        network = network_obj.Network._from_db_object(self.context,
                                                      network_obj.Network(),
                                                      fake_network)
        network.obj_reset_changes()
        # A save with no pending changes must not touch the DB.
        network.save()
        network.label = 'bar'
        update.return_value = result
        network.save()
        # Only the changed field is written; host was untouched so
        # network_set_host is never called.
        update.assert_called_once_with(self.context, network.id,
                                       {'label': 'bar'})
        self.assertFalse(set_host.called)
        self._compare(network, result)

    @mock.patch('nova.db.network_update')
    @mock.patch('nova.db.network_set_host')
    @mock.patch('nova.db.network_get')
    def test_save_with_host(self, get, set_host, update):
        result = dict(fake_network, injected=True)
        network = network_obj.Network._from_db_object(self.context,
                                                      network_obj.Network(),
                                                      fake_network)
        network.obj_reset_changes()
        network.host = 'foo'
        get.return_value = result
        network.save()
        # A host-only change is routed to network_set_host, not
        # network_update.
        set_host.assert_called_once_with(self.context, network.id, 'foo')
        self.assertFalse(update.called)
        self._compare(network, result)

    @mock.patch('nova.db.network_update')
    @mock.patch('nova.db.network_set_host')
    def test_save_with_host_and_other(self, set_host, update):
        result = dict(fake_network, injected=True)
        network = network_obj.Network._from_db_object(self.context,
                                                      network_obj.Network(),
                                                      fake_network)
        network.obj_reset_changes()
        network.host = 'foo'
        network.label = 'bar'
        update.return_value = result
        network.save()
        # Mixed changes hit both DB APIs: set_host for 'host', update for
        # the remaining fields.
        set_host.assert_called_once_with(self.context, network.id, 'foo')
        update.assert_called_once_with(self.context, network.id,
                                       {'label': 'bar'})
        self._compare(network, result)

    @mock.patch('nova.db.network_associate')
    def test_associate(self, associate):
        network_obj.Network.associate(self.context, 'project',
                                      network_id=123)
        # force defaults to False at the DB layer.
        associate.assert_called_once_with(self.context, 'project',
                                          network_id=123, force=False)

    @mock.patch('nova.db.network_disassociate')
    def test_disassociate(self, disassociate):
        network_obj.Network.disassociate(self.context, 123,
                                         host=True, project=True)
        disassociate.assert_called_once_with(self.context, 123, True, True)

    @mock.patch('nova.db.network_create_safe')
    def test_create(self, create):
        create.return_value = fake_network
        network = network_obj.Network(context=self.context, label='foo')
        network.create()
        create.assert_called_once_with(self.context, {'label': 'foo'})
        self._compare(network, fake_network)

    @mock.patch('nova.db.network_delete_safe')
    def test_destroy(self, delete):
        network = network_obj.Network(context=self.context, id=123)
        network.destroy()
        delete.assert_called_once_with(self.context, 123)
        # destroy() flags the object deleted without marking the field
        # dirty.
        self.assertTrue(network.deleted)
        self.assertNotIn('deleted', network.obj_what_changed())

    @mock.patch('nova.db.network_get_all')
    def test_get_all(self, get_all):
        get_all.return_value = [fake_network]
        networks = network_obj.NetworkList.get_all(self.context)
        self.assertEqual(1, len(networks))
        get_all.assert_called_once_with(self.context, 'allow_none')
        self._compare(networks[0], fake_network)

    @mock.patch('nova.db.network_get_all_by_uuids')
    def test_get_all_by_uuids(self, get_all):
        get_all.return_value = [fake_network]
        networks = network_obj.NetworkList.get_by_uuids(self.context,
                                                        ['foo'])
        self.assertEqual(1, len(networks))
        get_all.assert_called_once_with(self.context, ['foo'], 'allow_none')
        self._compare(networks[0], fake_network)

    @mock.patch('nova.db.network_get_all_by_host')
    def test_get_all_by_host(self, get_all):
        get_all.return_value = [fake_network]
        networks = network_obj.NetworkList.get_by_host(self.context, 'host')
        self.assertEqual(1, len(networks))
        get_all.assert_called_once_with(self.context, 'host')
        self._compare(networks[0], fake_network)

    @mock.patch('nova.db.network_in_use_on_host')
    def test_in_use_on_host(self, in_use):
        in_use.return_value = True
        self.assertTrue(network_obj.Network.in_use_on_host(self.context,
                                                           123, 'foo'))
        in_use.assert_called_once_with(self.context, 123, 'foo')

    @mock.patch('nova.db.project_get_networks')
    def test_get_all_by_project(self, get_nets):
        get_nets.return_value = [fake_network]
        networks = network_obj.NetworkList.get_by_project(self.context, 123)
        self.assertEqual(1, len(networks))
        get_nets.assert_called_once_with(self.context, 123, associate=True)
        self._compare(networks[0], fake_network)

    def test_compat_version_1_1(self):
        # Fields added after object version 1.1 must be stripped when
        # backporting a primitive to 1.1.
        network = network_obj.Network._from_db_object(self.context,
                                                      network_obj.Network(),
                                                      fake_network)
        primitive = network.obj_to_primitive(target_version='1.1')
        self.assertNotIn('mtu', primitive)
        self.assertNotIn('enable_dhcp', primitive)
        self.assertNotIn('dhcp_server', primitive)
        self.assertNotIn('share_address', primitive)


class TestNetworkObject(test_objects._LocalTest,
                        _TestNetworkObject):
    pass


class TestRemoteNetworkObject(test_objects._RemoteTest,
                              _TestNetworkObject):
    pass
nova-13.0.0/nova/tests/unit/objects/test_network_request.py0000664000567000056710000000720712701410011025257 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import mock from nova import objects from nova.tests.unit.objects import test_objects FAKE_UUID = '0C5C9AD2-F967-4E92-A7F3-24410F697440' class _TestNetworkRequestObject(object): def test_basic(self): request = objects.NetworkRequest() request.network_id = '456' request.address = '1.2.3.4' request.port_id = FAKE_UUID def test_load(self): request = objects.NetworkRequest() self.assertIsNone(request.port_id) def test_to_tuple_neutron(self): request = objects.NetworkRequest(network_id='123', address='1.2.3.4', port_id=FAKE_UUID, ) with mock.patch('nova.utils.is_neutron', return_value=True): self.assertEqual(('123', '1.2.3.4', FAKE_UUID, None), request.to_tuple()) def test_to_tuple_nova(self): request = objects.NetworkRequest(network_id='123', address='1.2.3.4', port_id=FAKE_UUID) with mock.patch('nova.utils.is_neutron', return_value=False): self.assertEqual(('123', '1.2.3.4'), request.to_tuple()) def test_from_tuple_neutron(self): request = objects.NetworkRequest.from_tuple( ('123', '1.2.3.4', FAKE_UUID, None)) self.assertEqual('123', request.network_id) self.assertEqual('1.2.3.4', str(request.address)) self.assertEqual(FAKE_UUID, request.port_id) def test_from_tuple_nova(self): request = objects.NetworkRequest.from_tuple( ('123', '1.2.3.4')) self.assertEqual('123', request.network_id) self.assertEqual('1.2.3.4', str(request.address)) self.assertIsNone(request.port_id) @mock.patch('nova.utils.is_neutron', return_value=True) def test_list_as_tuples(self, is_neutron): requests = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='123'), objects.NetworkRequest(network_id='456')]) self.assertEqual( [('123', None, None, None), ('456', None, None, None)], requests.as_tuples()) def test_is_single_unspecified(self): requests = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='123')]) self.assertFalse(requests.is_single_unspecified) 
requests = objects.NetworkRequestList( objects=[objects.NetworkRequest(), objects.NetworkRequest()]) self.assertFalse(requests.is_single_unspecified) requests = objects.NetworkRequestList( objects=[objects.NetworkRequest()]) self.assertTrue(requests.is_single_unspecified) class TestNetworkRequestObject(test_objects._LocalTest, _TestNetworkRequestObject): pass class TestNetworkRequestRemoteObject(test_objects._RemoteTest, _TestNetworkRequestObject): pass nova-13.0.0/nova/tests/unit/objects/test_instance_pci_requests.py0000664000567000056710000001664012701407773026436 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_serialization import jsonutils from nova import objects from nova.tests.unit.objects import test_objects FAKE_UUID = '79a53d6b-0893-4838-a971-15f4f382e7c2' FAKE_REQUEST_UUID = '69b53d6b-0793-4839-c981-f5c4f382e7d2' # NOTE(danms): Yes, these are the same right now, but going forward, # we have changes to make which will be reflected in the format # in instance_extra, but not in system_metadata. 
# Current (instance_extra) format: carries is_new and request_id.
fake_pci_requests = [
    {'count': 2,
     'spec': [{'vendor_id': '8086', 'device_id': '1502'}],
     'alias_name': 'alias_1',
     'is_new': False,
     'request_id': FAKE_REQUEST_UUID},
    {'count': 2,
     'spec': [{'vendor_id': '6502', 'device_id': '07B5'}],
     'alias_name': 'alias_2',
     'is_new': True,
     'request_id': FAKE_REQUEST_UUID},
]

# Legacy (system_metadata) format: no is_new/request_id fields.
fake_legacy_pci_requests = [
    {'count': 2,
     'spec': [{'vendor_id': '8086', 'device_id': '1502'}],
     'alias_name': 'alias_1'},
    {'count': 1,
     'spec': [{'vendor_id': '6502', 'device_id': '07B5'}],
     'alias_name': 'alias_2'},
]


class _TestInstancePCIRequests(object):
    """InstancePCIRequest(s) tests shared by the local/remote variants."""

    @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
    def test_get_by_instance_uuid(self, mock_get):
        # Requests are loaded from the JSON blob in instance_extra.
        mock_get.return_value = {
            'instance_uuid': FAKE_UUID,
            'pci_requests': jsonutils.dumps(fake_pci_requests),
        }
        requests = objects.InstancePCIRequests.get_by_instance_uuid(
            self.context, FAKE_UUID)
        self.assertEqual(2, len(requests.requests))
        for index, request in enumerate(requests.requests):
            self.assertEqual(fake_pci_requests[index]['alias_name'],
                             request.alias_name)
            self.assertEqual(fake_pci_requests[index]['count'],
                             request.count)
            self.assertEqual(fake_pci_requests[index]['spec'],
                             [dict(x.items()) for x in request.spec])

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    def test_get_by_instance_uuid_and_newness(self, mock_get):
        # The is_new flag filters which requests are returned.
        pcir = objects.InstancePCIRequests
        mock_get.return_value = objects.InstancePCIRequests(
            instance_uuid='fake-uuid',
            requests=[objects.InstancePCIRequest(count=1, is_new=False),
                      objects.InstancePCIRequest(count=2, is_new=True)])
        old_req = pcir.get_by_instance_uuid_and_newness(self.context,
                                                        'fake-uuid',
                                                        False)
        mock_get.return_value = objects.InstancePCIRequests(
            instance_uuid='fake-uuid',
            requests=[objects.InstancePCIRequest(count=1, is_new=False),
                      objects.InstancePCIRequest(count=2, is_new=True)])
        new_req = pcir.get_by_instance_uuid_and_newness(self.context,
                                                        'fake-uuid',
                                                        True)
        self.assertEqual(1, old_req.requests[0].count)
        self.assertEqual(2, new_req.requests[0].count)

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    def test_get_by_instance_current(self, mock_get):
        # With no legacy data in system_metadata, get_by_instance falls
        # through to the uuid-based lookup.
        instance = objects.Instance(uuid='fake-uuid',
                                    system_metadata={})
        objects.InstancePCIRequests.get_by_instance(self.context,
                                                    instance)
        mock_get.assert_called_once_with(self.context, 'fake-uuid')

    def test_get_by_instance_legacy(self):
        # Legacy data lives in two system_metadata keys: old requests in
        # 'pci_requests', new ones in 'new_pci_requests'.
        fakesysmeta = {
            'pci_requests': jsonutils.dumps([fake_legacy_pci_requests[0]]),
            'new_pci_requests': jsonutils.dumps(
                [fake_legacy_pci_requests[1]]),
        }
        instance = objects.Instance(uuid='fake-uuid',
                                    system_metadata=fakesysmeta)
        requests = objects.InstancePCIRequests.get_by_instance(self.context,
                                                               instance)
        self.assertEqual(2, len(requests.requests))
        self.assertEqual('alias_1', requests.requests[0].alias_name)
        self.assertFalse(requests.requests[0].is_new)
        self.assertEqual('alias_2', requests.requests[1].alias_name)
        self.assertTrue(requests.requests[1].is_new)

    def test_new_compatibility(self):
        # 'new' is a compatibility alias for 'is_new'.
        request = objects.InstancePCIRequest(is_new=False)
        self.assertFalse(request.new)

    def test_backport_1_0(self):
        # request_id (added after 1.0) must be stripped on backport.
        requests = objects.InstancePCIRequests(
            requests=[objects.InstancePCIRequest(count=1,
                                                 request_id=FAKE_UUID),
                      objects.InstancePCIRequest(count=2,
                                                 request_id=FAKE_UUID)])
        primitive = requests.obj_to_primitive(target_version='1.0')
        backported = objects.InstancePCIRequests.obj_from_primitive(
            primitive)
        self.assertEqual('1.0', backported.VERSION)
        self.assertEqual(2, len(backported.requests))
        self.assertFalse(backported.requests[0].obj_attr_is_set('request_id'))
        self.assertFalse(backported.requests[1].obj_attr_is_set('request_id'))

    def test_obj_from_db(self):
        # A NULL DB blob yields an empty request list, not an error.
        req = objects.InstancePCIRequests.obj_from_db(None, FAKE_UUID, None)
        self.assertEqual(FAKE_UUID, req.instance_uuid)
        self.assertEqual(0, len(req.requests))
        db_req = jsonutils.dumps(fake_pci_requests)
        req = objects.InstancePCIRequests.obj_from_db(None, FAKE_UUID, db_req)
        self.assertEqual(FAKE_UUID, req.instance_uuid)
        self.assertEqual(2, len(req.requests))
        self.assertEqual('alias_1', req.requests[0].alias_name)

    def test_from_request_spec_instance_props(self):
        # Round-trip through the RequestSpec primitive form.
        requests = objects.InstancePCIRequests(
            requests=[objects.InstancePCIRequest(count=1,
                                                 request_id=FAKE_UUID,
                                                 spec=[{'vendor_id': '8086',
                                                        'device_id': '1502'}])
                      ],
            instance_uuid=FAKE_UUID)
        result = jsonutils.to_primitive(requests)
        result = objects.InstancePCIRequests.from_request_spec_instance_props(
            result)
        self.assertEqual(1, len(result.requests))
        self.assertEqual(1, result.requests[0].count)
        self.assertEqual(FAKE_UUID, result.requests[0].request_id)
        self.assertEqual([{'vendor_id': '8086', 'device_id': '1502'}],
                         result.requests[0].spec)


class TestInstancePCIRequests(test_objects._LocalTest,
                              _TestInstancePCIRequests):
    pass


class TestRemoteInstancePCIRequests(test_objects._RemoteTest,
                                    _TestInstancePCIRequests):
    pass
nova-13.0.0/nova/tests/unit/objects/test_virtual_interface.py0000664000567000056710000001104012701407773025537 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock from nova import db from nova.objects import virtual_interface as vif_obj from nova.tests.unit.objects import test_objects fake_vif = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'id': 1, 'address': '00:00:00:00:00:00', 'network_id': 123, 'instance_uuid': 'fake-uuid', 'uuid': 'fake-uuid-2', } class _TestVirtualInterface(object): @staticmethod def _compare(test, db, obj): for field, value in db.items(): test.assertEqual(db[field], getattr(obj, field)) def test_get_by_id(self): with mock.patch.object(db, 'virtual_interface_get') as get: get.return_value = fake_vif vif = vif_obj.VirtualInterface.get_by_id(self.context, 1) self._compare(self, fake_vif, vif) def test_get_by_uuid(self): with mock.patch.object(db, 'virtual_interface_get_by_uuid') as get: get.return_value = fake_vif vif = vif_obj.VirtualInterface.get_by_uuid(self.context, 'fake-uuid-2') self._compare(self, fake_vif, vif) def test_get_by_address(self): with mock.patch.object(db, 'virtual_interface_get_by_address') as get: get.return_value = fake_vif vif = vif_obj.VirtualInterface.get_by_address(self.context, '00:00:00:00:00:00') self._compare(self, fake_vif, vif) def test_get_by_instance_and_network(self): with mock.patch.object(db, 'virtual_interface_get_by_instance_and_network') as get: get.return_value = fake_vif vif = vif_obj.VirtualInterface.get_by_instance_and_network( self.context, 'fake-uuid', 123) self._compare(self, fake_vif, vif) def test_create(self): vif = vif_obj.VirtualInterface(context=self.context) vif.address = '00:00:00:00:00:00' vif.network_id = 123 vif.instance_uuid = 'fake-uuid' vif.uuid = 'fake-uuid-2' with mock.patch.object(db, 'virtual_interface_create') as create: create.return_value = fake_vif vif.create() self.assertEqual(self.context, vif._context) vif._context = None self._compare(self, fake_vif, vif) def test_delete_by_instance_uuid(self): with mock.patch.object(db, 'virtual_interface_delete_by_instance') as delete: 
vif_obj.VirtualInterface.delete_by_instance_uuid(self.context, 'fake-uuid') delete.assert_called_with(self.context, 'fake-uuid') class TestVirtualInterfaceObject(test_objects._LocalTest, _TestVirtualInterface): pass class TestRemoteVirtualInterfaceObject(test_objects._RemoteTest, _TestVirtualInterface): pass class _TestVirtualInterfaceList(object): def test_get_all(self): with mock.patch.object(db, 'virtual_interface_get_all') as get: get.return_value = [fake_vif] vifs = vif_obj.VirtualInterfaceList.get_all(self.context) self.assertEqual(1, len(vifs)) _TestVirtualInterface._compare(self, fake_vif, vifs[0]) def test_get_by_instance_uuid(self): with mock.patch.object(db, 'virtual_interface_get_by_instance') as get: get.return_value = [fake_vif] vifs = vif_obj.VirtualInterfaceList.get_by_instance_uuid( self.context, 'fake-uuid') self.assertEqual(1, len(vifs)) _TestVirtualInterface._compare(self, fake_vif, vifs[0]) class TestVirtualInterfaceList(test_objects._LocalTest, _TestVirtualInterfaceList): pass class TestRemoteVirtualInterfaceList(test_objects._RemoteTest, _TestVirtualInterfaceList): pass nova-13.0.0/nova/tests/unit/objects/test_dns_domain.py0000664000567000056710000000570312701407773024155 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock

from nova import db
from nova.objects import dns_domain
from nova.tests.unit.objects import test_objects


# Fake dns_domains DB row shared by all the tests below.
fake_dnsd = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': 0,
    'domain': 'blah.example.com',
    'scope': 'private',
    'availability_zone': 'overthere',
    'project_id': '867530niner',
}


class _TestDNSDomain(object):
    """Assertions shared by the local and remote test variants."""

    @staticmethod
    def _compare(test, db, obj):
        # Each column of the fake row must round-trip onto the object.
        for field, value in db.items():
            test.assertEqual(value, getattr(obj, field))

    def test_get_by_domain(self):
        with mock.patch.object(db, 'dnsdomain_get') as mock_get:
            mock_get.return_value = fake_dnsd
            dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
            self._compare(self, fake_dnsd, dnsd)

    def test_register_for_zone(self):
        dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
                                               'domain', 'zone')
        dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
        self.assertEqual('domain', dnsd.domain)
        self.assertEqual('zone', dnsd.availability_zone)

    def test_register_for_project(self):
        dns_domain.DNSDomain.register_for_project(self.context.elevated(),
                                                  'domain', 'project')
        dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
        self.assertEqual('domain', dnsd.domain)
        self.assertEqual('project', dnsd.project_id)

    def test_delete_by_domain(self):
        # Register a domain, verify it exists, then delete and verify gone.
        dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
                                               'domain', 'zone')
        dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
        self.assertEqual('domain', dnsd.domain)
        self.assertEqual('zone', dnsd.availability_zone)

        dns_domain.DNSDomain.delete_by_domain(self.context.elevated(),
                                              'domain')
        dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
        self.assertIsNone(dnsd)

    def test_get_all(self):
        with mock.patch.object(db, 'dnsdomain_get_all') as mock_get:
            mock_get.return_value = [fake_dnsd]
            dns_domain.DNSDomainList.get_all(self.context)


class TestDNSDomainObject(test_objects._LocalTest,
                          _TestDNSDomain):
    pass


class TestRemoteDNSDomainObject(test_objects._RemoteTest,
                                _TestDNSDomain):
    pass
nova-13.0.0/nova/tests/unit/objects/test_migrate_data.py0000664000567000056710000002642412701407773024466 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import objects from nova.objects import migrate_data from nova.tests.unit.objects import test_objects class _TestLiveMigrateData(object): def test_to_legacy_dict(self): obj = migrate_data.LiveMigrateData(is_volume_backed=False) self.assertEqual({'is_volume_backed': False}, obj.to_legacy_dict()) def test_from_legacy_dict(self): obj = migrate_data.LiveMigrateData() obj.from_legacy_dict({'is_volume_backed': False, 'ignore': 'foo'}) self.assertFalse(obj.is_volume_backed) def test_from_legacy_dict_migration(self): migration = objects.Migration() obj = migrate_data.LiveMigrateData() obj.from_legacy_dict({'is_volume_backed': False, 'ignore': 'foo', 'migration': migration}) self.assertFalse(obj.is_volume_backed) self.assertIsInstance(obj.migration, objects.Migration) def test_legacy_with_pre_live_migration_result(self): obj = migrate_data.LiveMigrateData(is_volume_backed=False) self.assertEqual({'pre_live_migration_result': {}, 'is_volume_backed': False}, obj.to_legacy_dict(pre_migration_result=True)) def test_detect_implementation_none(self): legacy = migrate_data.LiveMigrateData().to_legacy_dict() self.assertIsInstance( migrate_data.LiveMigrateData.detect_implementation(legacy), migrate_data.LiveMigrateData) def test_detect_implementation_libvirt(self): legacy 
= migrate_data.LibvirtLiveMigrateData( instance_relative_path='foo').to_legacy_dict() self.assertIsInstance( migrate_data.LiveMigrateData.detect_implementation(legacy), migrate_data.LibvirtLiveMigrateData) def test_detect_implementation_libvirt_early(self): legacy = migrate_data.LibvirtLiveMigrateData( image_type='foo').to_legacy_dict() self.assertIsInstance( migrate_data.LiveMigrateData.detect_implementation(legacy), migrate_data.LibvirtLiveMigrateData) def test_detect_implementation_xenapi(self): legacy = migrate_data.XenapiLiveMigrateData( migrate_send_data={}, destination_sr_ref='foo').to_legacy_dict() self.assertIsInstance( migrate_data.LiveMigrateData.detect_implementation(legacy), migrate_data.XenapiLiveMigrateData) class TestLiveMigrateData(test_objects._LocalTest, _TestLiveMigrateData): pass class TestRemoteLiveMigrateData(test_objects._RemoteTest, _TestLiveMigrateData): pass class _TestLibvirtLiveMigrateData(object): def test_bdm_to_disk_info(self): obj = migrate_data.LibvirtLiveMigrateBDMInfo( serial='foo', bus='scsi', dev='sda', type='disk') expected_info = { 'dev': 'sda', 'bus': 'scsi', 'type': 'disk', } self.assertEqual(expected_info, obj.as_disk_info()) obj.format = 'raw' obj.boot_index = 1 expected_info['format'] = 'raw' expected_info['boot_index'] = '1' self.assertEqual(expected_info, obj.as_disk_info()) def test_to_legacy_dict(self): obj = migrate_data.LibvirtLiveMigrateData( is_volume_backed=False, filename='foo', image_type='rbd', block_migration=False, disk_over_commit=False, disk_available_mb=123, is_shared_instance_path=False, is_shared_block_storage=False, instance_relative_path='foo/bar') expected = { 'is_volume_backed': False, 'filename': 'foo', 'image_type': 'rbd', 'block_migration': False, 'disk_over_commit': False, 'disk_available_mb': 123, 'is_shared_instance_path': False, 'is_shared_block_storage': False, 'instance_relative_path': 'foo/bar', } self.assertEqual(expected, obj.to_legacy_dict()) def test_from_legacy_dict(self): obj = 
migrate_data.LibvirtLiveMigrateData( is_volume_backed=False, filename='foo', image_type='rbd', block_migration=False, disk_over_commit=False, disk_available_mb=123, is_shared_instance_path=False, is_shared_block_storage=False, instance_relative_path='foo/bar') legacy = obj.to_legacy_dict() legacy['ignore_this_thing'] = True obj2 = migrate_data.LibvirtLiveMigrateData() obj2.from_legacy_dict(legacy) self.assertEqual(obj.filename, obj2.filename) def test_to_legacy_dict_with_pre_result(self): test_bdmi = migrate_data.LibvirtLiveMigrateBDMInfo( serial='123', bus='scsi', dev='/dev/sda', type='disk', format='qcow2', boot_index=1, connection_info='myinfo') obj = migrate_data.LibvirtLiveMigrateData( is_volume_backed=False, filename='foo', image_type='rbd', block_migration=False, disk_over_commit=False, disk_available_mb=123, is_shared_instance_path=False, is_shared_block_storage=False, instance_relative_path='foo/bar', graphics_listen_addr_vnc='127.0.0.1', serial_listen_addr='127.0.0.1', bdms=[test_bdmi]) legacy = obj.to_legacy_dict(pre_migration_result=True) self.assertIn('pre_live_migration_result', legacy) expected = { 'graphics_listen_addrs': {'vnc': '127.0.0.1', 'spice': None}, 'target_connect_addr': None, 'serial_listen_addr': '127.0.0.1', 'volume': { '123': { 'connection_info': 'myinfo', 'disk_info': { 'bus': 'scsi', 'dev': '/dev/sda', 'type': 'disk', 'format': 'qcow2', 'boot_index': '1', } } } } self.assertEqual(expected, legacy['pre_live_migration_result']) def test_from_legacy_with_pre_result(self): test_bdmi = migrate_data.LibvirtLiveMigrateBDMInfo( serial='123', bus='scsi', dev='/dev/sda', type='disk', format='qcow2', boot_index=1, connection_info='myinfo') obj = migrate_data.LibvirtLiveMigrateData( is_volume_backed=False, filename='foo', image_type='rbd', block_migration=False, disk_over_commit=False, disk_available_mb=123, is_shared_instance_path=False, is_shared_block_storage=False, instance_relative_path='foo/bar', graphics_listen_addrs={'vnc': '127.0.0.1'}, 
serial_listen_addr='127.0.0.1', bdms=[test_bdmi]) obj2 = migrate_data.LibvirtLiveMigrateData() obj2.from_legacy_dict(obj.to_legacy_dict(pre_migration_result=True)) self.assertEqual(obj.to_legacy_dict(), obj2.to_legacy_dict()) self.assertEqual(obj.bdms[0].serial, obj2.bdms[0].serial) class TestLibvirtLiveMigrateData(test_objects._LocalTest, _TestLibvirtLiveMigrateData): pass class TestRemoteLibvirtLiveMigrateData(test_objects._RemoteTest, _TestLibvirtLiveMigrateData): pass class _TestXenapiLiveMigrateData(object): def test_to_legacy_dict(self): obj = migrate_data.XenapiLiveMigrateData( is_volume_backed=False, block_migration=False, destination_sr_ref='foo', migrate_send_data={'key': 'val'}, sr_uuid_map={'apple': 'banana'}) expected = { 'is_volume_backed': False, 'block_migration': False, 'migrate_data': { 'destination_sr_ref': 'foo', 'migrate_send_data': {'key': 'val'}, } } self.assertEqual(expected, obj.to_legacy_dict()) def test_from_legacy_dict(self): obj = migrate_data.XenapiLiveMigrateData( is_volume_backed=False, block_migration=False, destination_sr_ref='foo', migrate_send_data={'key': 'val'}, sr_uuid_map={'apple': 'banana'}) legacy = obj.to_legacy_dict() legacy['ignore_this_thing'] = True obj2 = migrate_data.XenapiLiveMigrateData() obj2.from_legacy_dict(legacy) self.assertEqual(obj.destination_sr_ref, obj2.destination_sr_ref) def test_to_legacy_dict_missing_attrs(self): obj = migrate_data.XenapiLiveMigrateData( is_volume_backed=False, destination_sr_ref='foo', sr_uuid_map={'apple': 'banana'}) expected = { 'is_volume_backed': False, } self.assertEqual(expected, obj.to_legacy_dict()) obj = migrate_data.XenapiLiveMigrateData( is_volume_backed=False, destination_sr_ref='foo') expected = { 'is_volume_backed': False, 'pre_live_migration_result': { 'sr_uuid_map': {}, }, } self.assertEqual(expected, obj.to_legacy_dict(True)) def test_from_legacy_dict_missing_attrs(self): obj = migrate_data.XenapiLiveMigrateData( is_volume_backed=False, destination_sr_ref='foo', 
sr_uuid_map={'apple': 'banana'}) legacy = obj.to_legacy_dict() obj2 = migrate_data.XenapiLiveMigrateData() obj2.from_legacy_dict(legacy) self.assertFalse(obj2.block_migration) self.assertNotIn('migrate_send_data', obj2) self.assertNotIn('sr_uuid_map', obj2) def test_to_legacy_with_pre_result(self): obj = migrate_data.XenapiLiveMigrateData( sr_uuid_map={'a': 'b'}) self.assertNotIn('sr_uuid_map', obj.to_legacy_dict()) legacy = obj.to_legacy_dict(True) self.assertEqual( {'a': 'b'}, legacy['pre_live_migration_result']['sr_uuid_map']) obj2 = migrate_data.XenapiLiveMigrateData() obj2.from_legacy_dict(legacy) self.assertEqual({'a': 'b'}, obj2.sr_uuid_map) class TestXenapiLiveMigrateData(test_objects._LocalTest, _TestXenapiLiveMigrateData): pass class TestRemoteXenapiLiveMigrateData(test_objects._RemoteTest, _TestXenapiLiveMigrateData): pass nova-13.0.0/nova/tests/unit/fake_instance.py0000664000567000056710000001163612701407773022146 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import uuid from oslo_serialization import jsonutils from nova import objects from nova.objects import fields def fake_db_secgroups(instance, names): secgroups = [] for i, name in enumerate(names): group_name = 'secgroup-%i' % i if isinstance(name, dict) and name.get('name'): group_name = name.get('name') secgroups.append( {'id': i, 'instance_uuid': instance['uuid'], 'name': group_name, 'description': 'Fake secgroup', 'user_id': instance['user_id'], 'project_id': instance['project_id'], 'deleted': False, 'deleted_at': None, 'created_at': None, 'updated_at': None, }) return secgroups def fake_db_instance(**updates): if 'instance_type' in updates: if isinstance(updates['instance_type'], objects.Flavor): flavor = updates['instance_type'] else: flavor = objects.Flavor(**updates['instance_type']) flavorinfo = jsonutils.dumps({ 'cur': flavor.obj_to_primitive(), 'old': None, 'new': None, }) else: flavorinfo = None db_instance = { 'id': 1, 'deleted': False, 'uuid': str(uuid.uuid4()), 'user_id': 'fake-user', 'project_id': 'fake-project', 'host': 'fake-host', 'created_at': datetime.datetime(1955, 11, 5), 'pci_devices': [], 'security_groups': [], 'metadata': {}, 'system_metadata': {}, 'root_gb': 0, 'ephemeral_gb': 0, 'extra': {'pci_requests': None, 'flavor': flavorinfo, 'numa_topology': None, 'vcpu_model': None, }, 'tags': [], 'services': [] } for name, field in objects.Instance.fields.items(): if name in db_instance: continue if field.nullable: db_instance[name] = None elif field.default != fields.UnspecifiedDefault: db_instance[name] = field.default elif name in ['flavor', 'ec2_ids']: pass else: raise Exception('fake_db_instance needs help with %s' % name) if updates: db_instance.update(updates) if db_instance.get('security_groups'): db_instance['security_groups'] = fake_db_secgroups( db_instance, db_instance['security_groups']) return db_instance def fake_instance_obj(context, obj_instance_class=None, **updates): if obj_instance_class is None: 
obj_instance_class = objects.Instance expected_attrs = updates.pop('expected_attrs', None) flavor = updates.pop('flavor', None) if not flavor: flavor = objects.Flavor(id=1, name='flavor1', memory_mb=256, vcpus=1, root_gb=1, ephemeral_gb=1, flavorid='1', swap=0, rxtx_factor=1.0, vcpu_weight=1, disabled=False, is_public=True, extra_specs={}, projects=[]) flavor.obj_reset_changes() inst = obj_instance_class._from_db_object(context, obj_instance_class(), fake_db_instance(**updates), expected_attrs=expected_attrs) if flavor: inst.flavor = flavor inst.old_flavor = None inst.new_flavor = None inst.obj_reset_changes() return inst def fake_fault_obj(context, instance_uuid, code=404, message='HTTPNotFound', details='Stock details for test', **updates): fault = { 'id': 1, 'instance_uuid': instance_uuid, 'code': code, 'message': message, 'details': details, 'host': 'fake_host', 'deleted': False, 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0), 'updated_at': None, 'deleted_at': None } if updates: fault.update(updates) return objects.InstanceFault._from_db_object(context, objects.InstanceFault(), fault) nova-13.0.0/nova/tests/unit/test_block_device.py0000664000567000056710000007124112701407773023022 0ustar jenkinsjenkins00000000000000# Copyright 2011 Isaku Yamahata # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Block Device utility functions. 
""" from nova import block_device from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import matchers class BlockDeviceTestCase(test.NoDBTestCase): def setUp(self): super(BlockDeviceTestCase, self).setUp() BDM = block_device.BlockDeviceDict self.new_mapping = [ BDM({'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'volume_size': 1, 'guest_format': 'swap', 'boot_index': -1}), BDM({'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdc1', 'source_type': 'blank', 'destination_type': 'local', 'volume_size': 10, 'delete_on_termination': True, 'boot_index': -1}), BDM({'id': 3, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda1', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'connection_info': "{'fake': 'connection_info'}", 'boot_index': 0}), BDM({'id': 4, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'source_type': 'snapshot', 'destination_type': 'volume', 'connection_info': "{'fake': 'connection_info'}", 'snapshot_id': 'fake-snapshot-id-1', 'volume_id': 'fake-volume-id-2', 'boot_index': -1}), BDM({'id': 5, 'instance_uuid': 'fake-instance', 'no_device': True, 'device_name': '/dev/vdc'}), ] def test_properties(self): root_device0 = '/dev/sda' root_device1 = '/dev/sdb' mappings = [{'virtual': 'root', 'device': root_device0}] properties0 = {'mappings': mappings} properties1 = {'mappings': mappings, 'root_device_name': root_device1} self.assertIsNone(block_device.properties_root_device_name({})) self.assertEqual(root_device0, block_device.properties_root_device_name(properties0)) self.assertEqual(root_device1, block_device.properties_root_device_name(properties1)) def test_ephemeral(self): self.assertFalse(block_device.is_ephemeral('ephemeral')) self.assertTrue(block_device.is_ephemeral('ephemeral0')) 
self.assertTrue(block_device.is_ephemeral('ephemeral1')) self.assertTrue(block_device.is_ephemeral('ephemeral11')) self.assertFalse(block_device.is_ephemeral('root')) self.assertFalse(block_device.is_ephemeral('swap')) self.assertFalse(block_device.is_ephemeral('/dev/sda1')) self.assertEqual(0, block_device.ephemeral_num('ephemeral0')) self.assertEqual(1, block_device.ephemeral_num('ephemeral1')) self.assertEqual(11, block_device.ephemeral_num('ephemeral11')) self.assertFalse(block_device.is_swap_or_ephemeral('ephemeral')) self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral0')) self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral1')) self.assertTrue(block_device.is_swap_or_ephemeral('swap')) self.assertFalse(block_device.is_swap_or_ephemeral('root')) self.assertFalse(block_device.is_swap_or_ephemeral('/dev/sda1')) def test_mappings_prepend_dev(self): mapping = [ {'virtual': 'ami', 'device': '/dev/sda'}, {'virtual': 'root', 'device': 'sda'}, {'virtual': 'ephemeral0', 'device': 'sdb'}, {'virtual': 'swap', 'device': 'sdc'}, {'virtual': 'ephemeral1', 'device': 'sdd'}, {'virtual': 'ephemeral2', 'device': 'sde'}] expected = [ {'virtual': 'ami', 'device': '/dev/sda'}, {'virtual': 'root', 'device': 'sda'}, {'virtual': 'ephemeral0', 'device': '/dev/sdb'}, {'virtual': 'swap', 'device': '/dev/sdc'}, {'virtual': 'ephemeral1', 'device': '/dev/sdd'}, {'virtual': 'ephemeral2', 'device': '/dev/sde'}] prepended = block_device.mappings_prepend_dev(mapping) self.assertEqual(expected.sort(), prepended.sort()) def test_strip_dev(self): self.assertEqual('sda', block_device.strip_dev('/dev/sda')) self.assertEqual('sda', block_device.strip_dev('sda')) self.assertIsNone(block_device.strip_dev(None)) def test_strip_prefix(self): self.assertEqual('a', block_device.strip_prefix('/dev/sda')) self.assertEqual('a', block_device.strip_prefix('a')) self.assertEqual('a', block_device.strip_prefix('xvda')) self.assertEqual('a', block_device.strip_prefix('vda')) self.assertEqual('a', 
block_device.strip_prefix('hda')) self.assertIsNone(block_device.strip_prefix(None)) def test_get_device_letter(self): self.assertEqual('', block_device.get_device_letter('')) self.assertEqual('a', block_device.get_device_letter('/dev/sda1')) self.assertEqual('b', block_device.get_device_letter('/dev/xvdb')) self.assertEqual('d', block_device.get_device_letter('/dev/d')) self.assertEqual('a', block_device.get_device_letter('a')) self.assertEqual('b', block_device.get_device_letter('sdb2')) self.assertEqual('c', block_device.get_device_letter('vdc')) self.assertEqual('c', block_device.get_device_letter('hdc')) self.assertIsNone(block_device.get_device_letter(None)) def test_volume_in_mapping(self): swap = {'device_name': '/dev/sdb', 'swap_size': 1} ephemerals = [{'num': 0, 'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1', 'size': 1}, {'num': 2, 'virtual_name': 'ephemeral2', 'device_name': '/dev/sdd', 'size': 1}] block_device_mapping = [{'mount_device': '/dev/sde', 'device_path': 'fake_device'}, {'mount_device': '/dev/sdf', 'device_path': 'fake_device'}] block_device_info = { 'root_device_name': '/dev/sda', 'swap': swap, 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping} def _assert_volume_in_mapping(device_name, true_or_false): in_mapping = block_device.volume_in_mapping( device_name, block_device_info) self.assertEqual(true_or_false, in_mapping) _assert_volume_in_mapping('sda', False) _assert_volume_in_mapping('sdb', True) _assert_volume_in_mapping('sdc1', True) _assert_volume_in_mapping('sdd', True) _assert_volume_in_mapping('sde', True) _assert_volume_in_mapping('sdf', True) _assert_volume_in_mapping('sdg', False) _assert_volume_in_mapping('sdh1', False) def test_get_root_bdm(self): root_bdm = {'device_name': 'vda', 'boot_index': 0} bdms = [root_bdm, {'device_name': 'vdb', 'boot_index': 1}, {'device_name': 'vdc', 'boot_index': -1}, {'device_name': 'vdd'}] self.assertEqual(root_bdm, block_device.get_root_bdm(bdms)) 
self.assertEqual(root_bdm, block_device.get_root_bdm([bdms[0]])) self.assertIsNone(block_device.get_root_bdm(bdms[1:])) self.assertIsNone(block_device.get_root_bdm(bdms[2:])) self.assertIsNone(block_device.get_root_bdm(bdms[3:])) self.assertIsNone(block_device.get_root_bdm([])) def test_get_bdm_ephemeral_disk_size(self): size = block_device.get_bdm_ephemeral_disk_size(self.new_mapping) self.assertEqual(10, size) def test_get_bdm_swap_list(self): swap_list = block_device.get_bdm_swap_list(self.new_mapping) self.assertEqual(1, len(swap_list)) self.assertEqual(1, swap_list[0].get('id')) def test_get_bdm_local_disk_num(self): size = block_device.get_bdm_local_disk_num(self.new_mapping) self.assertEqual(2, size) def test_new_format_is_swap(self): expected_results = [True, False, False, False, False] for expected, bdm in zip(expected_results, self.new_mapping): res = block_device.new_format_is_swap(bdm) self.assertEqual(expected, res) def test_new_format_is_ephemeral(self): expected_results = [False, True, False, False, False] for expected, bdm in zip(expected_results, self.new_mapping): res = block_device.new_format_is_ephemeral(bdm) self.assertEqual(expected, res) def test_validate_device_name(self): for value in [' ', 10, None, 'a' * 260]: self.assertRaises(exception.InvalidBDMFormat, block_device.validate_device_name, value) def test_validate_and_default_volume_size(self): bdm = {} for value in [-1, 'a', 2.5]: bdm['volume_size'] = value self.assertRaises(exception.InvalidBDMFormat, block_device.validate_and_default_volume_size, bdm) def test_get_bdms_to_connect(self): root_bdm = {'device_name': 'vda', 'boot_index': 0} bdms = [root_bdm, {'device_name': 'vdb', 'boot_index': 1}, {'device_name': 'vdc', 'boot_index': -1}, {'device_name': 'vde', 'boot_index': None}, {'device_name': 'vdd'}] self.assertNotIn(root_bdm, block_device.get_bdms_to_connect(bdms, exclude_root_mapping=True)) self.assertIn(root_bdm, block_device.get_bdms_to_connect(bdms)) class 
TestBlockDeviceDict(test.NoDBTestCase): def setUp(self): super(TestBlockDeviceDict, self).setUp() BDM = block_device.BlockDeviceDict self.api_mapping = [ {'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': 'swap', 'boot_index': -1}, {'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdc1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'boot_index': -1}, {'id': 3, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda1', 'source_type': 'volume', 'destination_type': 'volume', 'uuid': 'fake-volume-id-1', 'boot_index': 0}, {'id': 4, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'source_type': 'snapshot', 'destination_type': 'volume', 'uuid': 'fake-snapshot-id-1', 'boot_index': -1}, {'id': 5, 'instance_uuid': 'fake-instance', 'no_device': True, 'device_name': '/dev/vdc'}, ] self.new_mapping = [ BDM({'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': 'swap', 'boot_index': -1}), BDM({'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdc1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'boot_index': -1}), BDM({'id': 3, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda1', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'connection_info': "{'fake': 'connection_info'}", 'boot_index': 0}), BDM({'id': 4, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'source_type': 'snapshot', 'destination_type': 'volume', 'connection_info': "{'fake': 'connection_info'}", 'snapshot_id': 'fake-snapshot-id-1', 'volume_id': 'fake-volume-id-2', 'boot_index': -1}), BDM({'id': 5, 'instance_uuid': 'fake-instance', 'no_device': True, 'device_name': '/dev/vdc'}), ] 
self.legacy_mapping = [ {'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'delete_on_termination': True, 'virtual_name': 'swap'}, {'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdc1', 'delete_on_termination': True, 'virtual_name': 'ephemeral0'}, {'id': 3, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda1', 'volume_id': 'fake-volume-id-1', 'connection_info': "{'fake': 'connection_info'}"}, {'id': 4, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'connection_info': "{'fake': 'connection_info'}", 'snapshot_id': 'fake-snapshot-id-1', 'volume_id': 'fake-volume-id-2'}, {'id': 5, 'instance_uuid': 'fake-instance', 'no_device': True, 'device_name': '/dev/vdc'}, ] self.new_mapping_source_image = [ BDM({'id': 6, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda3', 'source_type': 'image', 'destination_type': 'volume', 'connection_info': "{'fake': 'connection_info'}", 'volume_id': 'fake-volume-id-3', 'boot_index': -1}), BDM({'id': 7, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda4', 'source_type': 'image', 'destination_type': 'local', 'connection_info': "{'fake': 'connection_info'}", 'image_id': 'fake-image-id-2', 'boot_index': -1}), ] self.legacy_mapping_source_image = [ {'id': 6, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda3', 'connection_info': "{'fake': 'connection_info'}", 'volume_id': 'fake-volume-id-3'}, ] def test_init(self): def fake_validate(obj, dct): pass self.stub_out('nova.block_device.BlockDeviceDict._fields', set(['field1', 'field2'])) self.stub_out('nova.block_device.BlockDeviceDict._db_only_fields', set(['db_field1', 'db_field2'])) self.stub_out('nova.block_device.BlockDeviceDict._validate', fake_validate) # Make sure db fields are not picked up if they are not # in the original dict dev_dict = block_device.BlockDeviceDict({'field1': 'foo', 'field2': 'bar', 'db_field1': 'baz'}) self.assertIn('field1', dev_dict) self.assertIn('field2', dev_dict) 
self.assertIn('db_field1', dev_dict) self.assertNotIn('db_field2', dev_dict) # Make sure all expected fields are defaulted dev_dict = block_device.BlockDeviceDict({'field1': 'foo'}) self.assertIn('field1', dev_dict) self.assertIn('field2', dev_dict) self.assertIsNone(dev_dict['field2']) self.assertNotIn('db_field1', dev_dict) self.assertNotIn('db_field2', dev_dict) # Unless they are not meant to be dev_dict = block_device.BlockDeviceDict({'field1': 'foo'}, do_not_default=set(['field2'])) self.assertIn('field1', dev_dict) self.assertNotIn('field2', dev_dict) self.assertNotIn('db_field1', dev_dict) self.assertNotIn('db_field2', dev_dict) # Passing kwargs to constructor works dev_dict = block_device.BlockDeviceDict(field1='foo') self.assertIn('field1', dev_dict) self.assertIn('field2', dev_dict) self.assertIsNone(dev_dict['field2']) dev_dict = block_device.BlockDeviceDict( {'field1': 'foo'}, field2='bar') self.assertEqual('foo', dev_dict['field1']) self.assertEqual('bar', dev_dict['field2']) def test_init_prepend_dev_to_device_name(self): bdm = {'id': 3, 'instance_uuid': 'fake-instance', 'device_name': 'vda', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'boot_index': 0} bdm_dict = block_device.BlockDeviceDict(bdm) self.assertEqual('/dev/vda', bdm_dict['device_name']) bdm['device_name'] = '/dev/vdb' bdm_dict = block_device.BlockDeviceDict(bdm) self.assertEqual('/dev/vdb', bdm_dict['device_name']) bdm['device_name'] = None bdm_dict = block_device.BlockDeviceDict(bdm) self.assertIsNone(bdm_dict['device_name']) def test_init_boolify_delete_on_termination(self): # Make sure that when delete_on_termination is not passed it's # still set to False and not None bdm = {'id': 3, 'instance_uuid': 'fake-instance', 'device_name': 'vda', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'boot_index': 0} bdm_dict = block_device.BlockDeviceDict(bdm) self.assertFalse(bdm_dict['delete_on_termination']) def 
test_validate(self): self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, {'bogus_field': 'lame_val'}) lame_bdm = dict(self.new_mapping[2]) del lame_bdm['source_type'] self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, lame_bdm) lame_bdm['no_device'] = True block_device.BlockDeviceDict(lame_bdm) lame_dev_bdm = dict(self.new_mapping[2]) lame_dev_bdm['device_name'] = "not a valid name" self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, lame_dev_bdm) lame_dev_bdm['device_name'] = "" self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, lame_dev_bdm) cool_volume_size_bdm = dict(self.new_mapping[2]) cool_volume_size_bdm['volume_size'] = '42' cool_volume_size_bdm = block_device.BlockDeviceDict( cool_volume_size_bdm) self.assertEqual(42, cool_volume_size_bdm['volume_size']) lame_volume_size_bdm = dict(self.new_mapping[2]) lame_volume_size_bdm['volume_size'] = 'some_non_int_string' self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, lame_volume_size_bdm) truthy_bdm = dict(self.new_mapping[2]) truthy_bdm['delete_on_termination'] = '1' truthy_bdm = block_device.BlockDeviceDict(truthy_bdm) self.assertTrue(truthy_bdm['delete_on_termination']) verbose_bdm = dict(self.new_mapping[2]) verbose_bdm['boot_index'] = 'first' self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, verbose_bdm) def test_from_legacy(self): for legacy, new in zip(self.legacy_mapping, self.new_mapping): self.assertThat( block_device.BlockDeviceDict.from_legacy(legacy), matchers.IsSubDictOf(new)) def test_from_legacy_mapping(self): def _get_image_bdms(bdms): return [bdm for bdm in bdms if bdm['source_type'] == 'image'] def _get_bootable_bdms(bdms): return [bdm for bdm in bdms if bdm['boot_index'] >= 0] new_no_img = block_device.from_legacy_mapping(self.legacy_mapping) self.assertEqual(0, len(_get_image_bdms(new_no_img))) for new, expected in zip(new_no_img, 
self.new_mapping): self.assertThat(new, matchers.IsSubDictOf(expected)) new_with_img = block_device.from_legacy_mapping( self.legacy_mapping, 'fake_image_ref') image_bdms = _get_image_bdms(new_with_img) boot_bdms = _get_bootable_bdms(new_with_img) self.assertEqual(1, len(image_bdms)) self.assertEqual(1, len(boot_bdms)) self.assertEqual(0, image_bdms[0]['boot_index']) self.assertEqual('image', boot_bdms[0]['source_type']) new_with_img_and_root = block_device.from_legacy_mapping( self.legacy_mapping, 'fake_image_ref', 'sda1') image_bdms = _get_image_bdms(new_with_img_and_root) boot_bdms = _get_bootable_bdms(new_with_img_and_root) self.assertEqual(0, len(image_bdms)) self.assertEqual(1, len(boot_bdms)) self.assertEqual(0, boot_bdms[0]['boot_index']) self.assertEqual('volume', boot_bdms[0]['source_type']) new_no_root = block_device.from_legacy_mapping( self.legacy_mapping, 'fake_image_ref', 'sda1', no_root=True) self.assertEqual(0, len(_get_image_bdms(new_no_root))) self.assertEqual(0, len(_get_bootable_bdms(new_no_root))) def test_from_api(self): for api, new in zip(self.api_mapping, self.new_mapping): new['connection_info'] = None if new['snapshot_id']: new['volume_id'] = None self.assertThat( block_device.BlockDeviceDict.from_api(api, False), matchers.IsSubDictOf(new)) def test_from_api_invalid_blank_id(self): api_dict = {'id': 1, 'source_type': 'blank', 'destination_type': 'volume', 'uuid': 'fake-volume-id-1', 'delete_on_termination': True, 'boot_index': -1} self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict.from_api, api_dict, False) def test_from_api_invalid_source_to_local_mapping(self): api_dict = {'id': 1, 'source_type': 'image', 'destination_type': 'local', 'uuid': 'fake-volume-id-1'} self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict.from_api, api_dict, False) def test_from_api_valid_source_to_local_mapping(self): api_dict = {'id': 1, 'source_type': 'image', 'destination_type': 'local', 'volume_id': 
'fake-volume-id-1', 'uuid': 1, 'boot_index': 0} retexp = block_device.BlockDeviceDict( {'id': 1, 'source_type': 'image', 'image_id': 1, 'destination_type': 'local', 'volume_id': 'fake-volume-id-1', 'boot_index': 0}) self.assertEqual(retexp, block_device.BlockDeviceDict.from_api(api_dict, True)) def test_from_api_invalid_source_to_local_mapping_with_string_bi(self): api_dict = {'id': 1, 'source_type': 'image', 'destination_type': 'local', 'uuid': 'fake-volume-id-1', 'boot_index': 'aaaa0'} self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict.from_api, api_dict, False) def test_from_api_valid_source_to_local_mapping_with_string_bi(self): api_dict = {'id': 1, 'source_type': 'image', 'destination_type': 'local', 'volume_id': 'fake-volume-id-1', 'uuid': 1, 'boot_index': '0'} retexp = block_device.BlockDeviceDict( {'id': 1, 'source_type': 'image', 'image_id': 1, 'destination_type': 'local', 'volume_id': 'fake-volume-id-1', 'boot_index': 0}) self.assertEqual(retexp, block_device.BlockDeviceDict.from_api(api_dict, True)) def test_legacy(self): for legacy, new in zip(self.legacy_mapping, self.new_mapping): self.assertThat( legacy, matchers.IsSubDictOf(new.legacy())) def test_legacy_mapping(self): got_legacy = block_device.legacy_mapping(self.new_mapping) for legacy, expected in zip(got_legacy, self.legacy_mapping): self.assertThat(expected, matchers.IsSubDictOf(legacy)) def test_legacy_source_image(self): for legacy, new in zip(self.legacy_mapping_source_image, self.new_mapping_source_image): if new['destination_type'] == 'volume': self.assertThat(legacy, matchers.IsSubDictOf(new.legacy())) else: self.assertRaises(exception.InvalidBDMForLegacy, new.legacy) def test_legacy_mapping_source_image(self): got_legacy = block_device.legacy_mapping(self.new_mapping) for legacy, expected in zip(got_legacy, self.legacy_mapping): self.assertThat(expected, matchers.IsSubDictOf(legacy)) def test_legacy_mapping_from_object_list(self): bdm1 = 
objects.BlockDeviceMapping() bdm1 = objects.BlockDeviceMapping._from_db_object( None, bdm1, fake_block_device.FakeDbBlockDeviceDict( self.new_mapping[0])) bdm2 = objects.BlockDeviceMapping() bdm2 = objects.BlockDeviceMapping._from_db_object( None, bdm2, fake_block_device.FakeDbBlockDeviceDict( self.new_mapping[1])) bdmlist = objects.BlockDeviceMappingList() bdmlist.objects = [bdm1, bdm2] block_device.legacy_mapping(bdmlist) def test_image_mapping(self): removed_fields = ['id', 'instance_uuid', 'connection_info', 'created_at', 'updated_at', 'deleted_at', 'deleted'] for bdm in self.new_mapping: mapping_bdm = fake_block_device.FakeDbBlockDeviceDict( bdm).get_image_mapping() for fld in removed_fields: self.assertNotIn(fld, mapping_bdm) def _test_snapshot_from_bdm(self, template): snapshot = block_device.snapshot_from_bdm('new-snapshot-id', template) self.assertEqual('new-snapshot-id', snapshot['snapshot_id']) self.assertEqual('snapshot', snapshot['source_type']) self.assertEqual('volume', snapshot['destination_type']) self.assertEqual(template.volume_size, snapshot['volume_size']) self.assertEqual(template.delete_on_termination, snapshot['delete_on_termination']) self.assertEqual(template.device_name, snapshot['device_name']) for key in ['disk_bus', 'device_type', 'boot_index']: self.assertEqual(template[key], snapshot[key]) def test_snapshot_from_bdm(self): for bdm in self.new_mapping: self._test_snapshot_from_bdm(objects.BlockDeviceMapping(**bdm)) def test_snapshot_from_object(self): for bdm in self.new_mapping[:-1]: obj = objects.BlockDeviceMapping() obj = objects.BlockDeviceMapping._from_db_object( None, obj, fake_block_device.FakeDbBlockDeviceDict( bdm)) self._test_snapshot_from_bdm(obj) nova-13.0.0/nova/tests/unit/test_matchers.py0000664000567000056710000003327712701407773022226 0ustar jenkinsjenkins00000000000000# Copyright 2012 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import OrderedDict import testtools from testtools.tests.matchers import helpers from nova.tests.unit import matchers class TestDictMatches(testtools.TestCase, helpers.TestMatchersInterface): matches_dict = OrderedDict(sorted({'foo': 'bar', 'baz': 'DONTCARE', 'cat': {'tabby': True, 'fluffy': False}}.items())) matches_matcher = matchers.DictMatches( matches_dict ) matches_matches = [ {'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}}, {'foo': 'bar', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': False}}, ] matches_mismatches = [ {}, {'foo': 'bar', 'baz': 'qux'}, {'foo': 'bop', 'baz': 'qux', 'cat': {'tabby': True, 'fluffy': False}}, {'foo': 'bar', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': True}}, {'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}}, ] str_examples = [ ("DictMatches({0})".format(matches_dict), matches_matcher), ] describe_examples = [ ("Keys in d1 and not d2: {0}. Keys in d2 and not d1: []" .format(str(sorted(matches_dict.keys()))), {}, matches_matcher), ("Dictionaries do not match at fluffy. d1: False d2: True", {'foo': 'bar', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': True}}, matches_matcher), ("Dictionaries do not match at foo. 
d1: bar d2: bop", {'foo': 'bop', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': False}}, matches_matcher), ] class TestDictListMatches(testtools.TestCase, helpers.TestMatchersInterface): matches_matcher = matchers.DictListMatches( [{'foo': 'bar', 'baz': 'DONTCARE', 'cat': {'tabby': True, 'fluffy': False}}, {'dog': 'yorkie'}, ]) matches_matches = [ [{'foo': 'bar', 'baz': 'qoox', 'cat': {'tabby': True, 'fluffy': False}}, {'dog': 'yorkie'}], [{'foo': 'bar', 'baz': False, 'cat': {'tabby': True, 'fluffy': False}}, {'dog': 'yorkie'}], ] matches_mismatches = [ [], {}, [{'foo': 'bar', 'baz': 'qoox', 'cat': {'tabby': True, 'fluffy': True}}, {'dog': 'yorkie'}], [{'foo': 'bar', 'baz': False, 'cat': {'tabby': True, 'fluffy': False}}, {'cat': 'yorkie'}], [{'foo': 'bop', 'baz': False, 'cat': {'tabby': True, 'fluffy': False}}, {'dog': 'yorkie'}], ] str_examples = [ ("DictListMatches([{'baz': 'DONTCARE', 'cat':" " {'fluffy': False, 'tabby': True}, 'foo': 'bar'},\n" " {'dog': 'yorkie'}])", matches_matcher), ] describe_examples = [ ("Length mismatch: len(L1)=2 != len(L2)=0", {}, matches_matcher), ("Dictionaries do not match at fluffy. 
d1: True d2: False", [{'foo': 'bar', 'baz': 'qoox', 'cat': {'tabby': True, 'fluffy': True}}, {'dog': 'yorkie'}], matches_matcher), ] class TestIsSubDictOf(testtools.TestCase, helpers.TestMatchersInterface): matches_matcher = matchers.IsSubDictOf( OrderedDict(sorted({'foo': 'bar', 'baz': 'DONTCARE', 'cat': {'tabby': True, 'fluffy': False}}.items())) ) matches_matches = [ {'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}}, {'foo': 'bar', 'baz': 'quux'} ] matches_mismatches = [ {'foo': 'bop', 'baz': 'qux', 'cat': {'tabby': True, 'fluffy': False}}, {'foo': 'bar', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': True}}, {'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}, 'dog': None}, ] str_examples = [ ("IsSubDictOf({0})".format( str(OrderedDict(sorted({'foo': 'bar', 'baz': 'DONTCARE', 'cat': {'tabby': True, 'fluffy': False}}.items())))), matches_matcher), ] describe_examples = [ ("Dictionaries do not match at fluffy. d1: False d2: True", {'foo': 'bar', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': True}}, matches_matcher), ("Dictionaries do not match at foo. 
d1: bar d2: bop", {'foo': 'bop', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': False}}, matches_matcher), ] class TestXMLMatches(testtools.TestCase, helpers.TestMatchersInterface): matches_matcher = matchers.XMLMatches(""" some text here some other text here child 1 child 2 DONTCARE """, allow_mixed_nodes=False) matches_matches = [""" some text here some other text here child 1 child 2 child 3 """, """ some text here some other text here child 1 child 2 blah """, ] matches_mismatches = [""" some text here mismatch text child 1 child 2 child 3 """, """ some text here some other text here child 1 child 2 child 3 """, """ some text here some other text here child 1 child 2 child 3 """, """ some text here some other text here child 1 child 4 child 2 child 3 """, """ some text here some other text here child 1 child 2 """, """ some text here some other text here child 1 child 2 child 3 child 4 """, """ some text here some other text here child 2 child 1 DONTCARE """, """ some text here some other text here child 1 child 2 DONTCARE """, ] str_examples = [ ("XMLMatches('\\n" "\\n" " some text here\\n" " some other text here\\n" " \\n" " \\n" " \\n" " child 1\\n" " child 2\\n" " DONTCARE\\n" " \\n" " \\n" "')", matches_matcher), ] describe_examples = [ ("/root/text[1]: XML text value mismatch: expected text value: " "['some other text here']; actual value: ['mismatch text']", """ some text here mismatch text child 1 child 2 child 3 """, matches_matcher), ("/root/attrs[2]: XML attributes mismatch: keys only in expected: " "key2; keys only in actual: key3", """ some text here some other text here child 1 child 2 child 3 """, matches_matcher), ("/root/attrs[2]: XML attribute value mismatch: expected value of " "attribute key1: 'spam'; actual value: 'quux'", """ some text here some other text here child 1 child 2 child 3 """, matches_matcher), ("/root/children[3]: XML tag mismatch at index 1: expected tag " "; actual tag ", """ some text here some other text here child 1 
child 4 child 2 child 3 """, matches_matcher), ("/root/children[3]: XML expected child element not " "present at index 2", """ some text here some other text here child 1 child 2 """, matches_matcher), ("/root/children[3]: XML unexpected child element " "present at index 3", """ some text here some other text here child 1 child 2 child 3 child 4 """, matches_matcher), ("/root/children[3]: XML tag mismatch at index 0: " "expected tag ; actual tag ", """ some text here some other text here child 2 child 1 child 3 """, matches_matcher), ("/: XML information mismatch(version, encoding) " "expected version 1.0, expected encoding UTF-8; " "actual version 1.1, actual encoding UTF-8", """ some text here some other text here child 1 child 2 DONTCARE """, matches_matcher), ] class TestXMLMatchesUnorderedNodes(testtools.TestCase, helpers.TestMatchersInterface): matches_matcher = matchers.XMLMatches(""" some text here some other text here DONTCARE child 2 child 1 """, allow_mixed_nodes=True) matches_matches = [""" some text here child 1 child 2 child 3 some other text here """, ] matches_mismatches = [""" some text here mismatch text child 1 child 2 child 3 """, ] describe_examples = [ ("/root: XML expected child element not present at index 4", """ some text here mismatch text child 1 child 2 child 3 """, matches_matcher), ] str_examples = [] nova-13.0.0/nova/tests/unit/fake_crypto.py0000664000567000056710000001605312701407773021660 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. def ensure_ca_filesystem(): pass def fetch_ca(project_id=None): rootca = """-----BEGIN CERTIFICATE----- MIICyzCCAjSgAwIBAgIJAIJ/UoFWKoOUMA0GCSqGSIb3DQEBBAUAME4xEjAQBgNV BAoTCU5PVkEgUk9PVDEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UECBMK Q2FsaWZvcm5pYTELMAkGA1UEBhMCVVMwHhcNMTIxMDAyMTg1NzQ1WhcNMTMxMDAy MTg1NzQ1WjBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWlu IFZpZXcxEzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTMIGfMA0GCSqG SIb3DQEBAQUAA4GNADCBiQKBgQCg0Bn8WSqbJF3QNTZUxo1TzmFBxuqvhjZLKbnQ IiShdVIWUK7RC8frq8FJI7dgJNmvkIBn9njABWDoZmurQRCzD65yCSbUc4R2ea5H IK4wQIui0CJykvMBNjAe3bzztVVs8/ccDTsjtqq3F/KeQkKzQVfSWBrJSmYtG5tO G+dOSwIDAQABo4GwMIGtMAwGA1UdEwQFMAMBAf8wHQYDVR0OBBYEFCljRfaNOsA/ 9mHuq0io7Lt83FtaMH4GA1UdIwR3MHWAFCljRfaNOsA/9mHuq0io7Lt83FtaoVKk UDBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWluIFZpZXcx EzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTggkAgn9SgVYqg5QwDQYJ KoZIhvcNAQEEBQADgYEAEbpJOOlpKCh5omwfAwAfFg1ml4h/FJiCH3PETmOCc+3l CtWTBd4MG8AoH7A3PU2JKAGVQ5XWo6+ihpW1RgfQpCnloI6vIeGcws+rSLnlzULt IvfCJpRg7iQdR3jZGt3295behtP1GsCqipJEulOkOaEIs8iLlXgSOG94Mkwlb4Q= -----END CERTIFICATE----- """ return rootca def generate_x509_cert(user_id, project_id, bits=1024): pk = """-----BEGIN RSA PRIVATE KEY----- MIICXAIBAAKBgQC4h2d63ijt9l0fIBRY37D3Yj2FYajCMUlftSoHNA4lEw0uTXnH Jjbd0j7HNlSADWeAMuaoSDNp7CIsXMt6iA/ASN5nFFTZlLRqIzYoI0RHiiSJjvSG d1n4Yrar1eC8tK3Rld1Zo6rj6tOuIxfFVJajJVZykCAHjGNNvulgfhBXFwIDAQAB AoGBAIjfxx4YU/vO1lwUC4OwyS92q3OYcPk6XdakJryZHDTb4NcLmNzjt6bqIK7b 2enyB2fMWdNRWvGiueZ2HmiRLDyOGsAVdEsHvL4qbr9EZGTqC8Qxx+zTevWWf6pB F1zxzbXNQDFZDf9kVsSLCkbMHITnW1k4MrM++9gfCO3WrfehAkEA4nd8TyCCZazq KMOQwFLTNaiVLeTXCtvGopl4ZNiKYZ1qI3KDXb2wbAyArFuERlotxFlylXpwtlMo SlI/C/sYqwJBANCX1sdfRJq8DpdP44ThWqOkWFLB9rBiwyyBt8746fX8amwr8eyz H44/z5GT/Vyp8qFsjkuDzeP93eeDnr2qE0UCP1zipRnPO6x4P5J4o+Y+EmLvwkAQ nCLYAaCvUbILHrbq2Z2wWjEYnEO03RHUd2xjkGH4TgcBMTmW4e+ZzEIduwJACnIw 
LVfWBbG5QVac3EC021EVoz9XbUnk4Eu2usS4Yrs7USN6QBJQWD1V1cKFg6h3ICJh leKJ4wsJm9h5kKH9yQJBAN8CaX223MlTSuBOVuIOwNA+09iLfx4UCLiH1fGMKDpe xVcmkM3qCnTqNxrAPSFdT9IyB3IXiaLWbvzl7MfiOwQ= -----END RSA PRIVATE KEY----- """ csr = """Certificate: Data: Version: 1 (0x0) Serial Number: 23 (0x17) Signature Algorithm: md5WithRSAEncryption Issuer: O=NOVA ROOT, L=Mountain View, ST=California, C=US Validity Not Before: Oct 2 19:31:45 2012 GMT Not After : Oct 2 19:31:45 2013 GMT Subject: C=US, ST=California, O=OpenStack, OU=NovaDev, """ """CN=openstack-fake-2012-10-02T19:31:45Z Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public Key: (1024 bit) Modulus (1024 bit): 00:b8:87:67:7a:de:28:ed:f6:5d:1f:20:14:58:df: b0:f7:62:3d:85:61:a8:c2:31:49:5f:b5:2a:07:34: 0e:25:13:0d:2e:4d:79:c7:26:36:dd:d2:3e:c7:36: 54:80:0d:67:80:32:e6:a8:48:33:69:ec:22:2c:5c: cb:7a:88:0f:c0:48:de:67:14:54:d9:94:b4:6a:23: 36:28:23:44:47:8a:24:89:8e:f4:86:77:59:f8:62: b6:ab:d5:e0:bc:b4:ad:d1:95:dd:59:a3:aa:e3:ea: d3:ae:23:17:c5:54:96:a3:25:56:72:90:20:07:8c: 63:4d:be:e9:60:7e:10:57:17 Exponent: 65537 (0x10001) Signature Algorithm: md5WithRSAEncryption 32:82:ff:8b:92:0e:8d:9c:6b:ce:7e:fe:34:16:2a:4c:47:4f: c7:28:a2:33:1e:48:56:2e:4b:e8:e8:e3:48:b1:3d:a3:43:21: ef:83:e7:df:e2:10:91:7e:9a:c0:4d:1e:96:68:2b:b9:f7:84: 7f:ec:84:8a:bf:bc:5e:50:05:d9:ce:4a:1a:bf:d2:bf:0c:d1: 7e:ec:64:c3:a5:37:78:a3:a6:2b:a1:b7:1c:cc:c8:b9:78:61: 98:50:3c:e6:28:34:f1:0e:62:bb:b5:d7:a1:dd:1f:38:c6:0d: 58:9f:81:67:ff:9c:32:fc:52:7e:6d:8c:91:43:49:fe:e3:48: bb:40 -----BEGIN CERTIFICATE----- MIICMzCCAZwCARcwDQYJKoZIhvcNAQEEBQAwTjESMBAGA1UEChMJTk9WQSBST09U MRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQIEwpDYWxpZm9ybmlhMQsw CQYDVQQGEwJVUzAeFw0xMjEwMDIxOTMxNDVaFw0xMzEwMDIxOTMxNDVaMHYxCzAJ BgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRIwEAYDVQQKEwlPcGVuU3Rh Y2sxEDAOBgNVBAsTB05vdmFEZXYxLDAqBgNVBAMTI29wZW5zdGFjay1mYWtlLTIw MTItMTAtMDJUMTk6MzE6NDVaMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC4 
h2d63ijt9l0fIBRY37D3Yj2FYajCMUlftSoHNA4lEw0uTXnHJjbd0j7HNlSADWeA MuaoSDNp7CIsXMt6iA/ASN5nFFTZlLRqIzYoI0RHiiSJjvSGd1n4Yrar1eC8tK3R ld1Zo6rj6tOuIxfFVJajJVZykCAHjGNNvulgfhBXFwIDAQABMA0GCSqGSIb3DQEB BAUAA4GBADKC/4uSDo2ca85+/jQWKkxHT8coojMeSFYuS+jo40ixPaNDIe+D59/i EJF+msBNHpZoK7n3hH/shIq/vF5QBdnOShq/0r8M0X7sZMOlN3ijpiuhtxzMyLl4 YZhQPOYoNPEOYru116HdHzjGDVifgWf/nDL8Un5tjJFDSf7jSLtA -----END CERTIFICATE----- """ return pk, csr def get_x509_cert_and_fingerprint(): fingerprint = "a1:6f:6d:ea:a6:36:d0:3a:c6:eb:b6:ee:07:94:3e:2a:90:98:2b:c9" certif = ( "-----BEGIN CERTIFICATE-----\n" "MIIDIjCCAgqgAwIBAgIJAIE8EtWfZhhFMA0GCSqGSIb3DQEBCwUAMCQxIjAgBgNV\n" "BAMTGWNsb3VkYmFzZS1pbml0LXVzZXItMTM1NTkwHhcNMTUwMTI5MTgyMzE4WhcN\n" "MjUwMTI2MTgyMzE4WjAkMSIwIAYDVQQDExljbG91ZGJhc2UtaW5pdC11c2VyLTEz\n" "NTU5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv4lv95ofkXLIbALU\n" "UEb1f949TYNMUvMGNnLyLgGOY+D61TNG7RZn85cRg9GVJ7KDjSLN3e3LwH5rgv5q\n" "pU+nM/idSMhG0CQ1lZeExTsMEJVT3bG7LoU5uJ2fJSf5+hA0oih2M7/Kap5ggHgF\n" "h+h8MWvDC9Ih8x1aadkk/OEmJsTrziYm0C/V/FXPHEuXfZn8uDNKZ/tbyfI6hwEj\n" "nLz5Zjgg29n6tIPYMrnLNDHScCwtNZOcnixmWzsxCt1bxsAEA/y9gXUT7xWUf52t\n" "2+DGQbLYxo0PHjnPf3YnFXNavfTt+4c7ZdHhOQ6ZA8FGQ2LJHDHM1r2/8lK4ld2V\n" "qgNTcQIDAQABo1cwVTATBgNVHSUEDDAKBggrBgEFBQcDAjA+BgNVHREENzA1oDMG\n" "CisGAQQBgjcUAgOgJQwjY2xvdWRiYXNlLWluaXQtdXNlci0xMzU1OUBsb2NhbGhv\n" "c3QwDQYJKoZIhvcNAQELBQADggEBAHHX/ZUOMR0ZggQnfXuXLIHWlffVxxLOV/bE\n" "7JC/dtedHqi9iw6sRT5R6G1pJo0xKWr2yJVDH6nC7pfxCFkby0WgVuTjiu6iNRg2\n" "4zNJd8TGrTU+Mst+PPJFgsxrAY6vjwiaUtvZ/k8PsphHXu4ON+oLurtVDVgog7Vm\n" "fQCShx434OeJj1u8pb7o2WyYS5nDVrHBhlCAqVf2JPKu9zY+i9gOG2kimJwH7fJD\n" "xXpMIwAQ+flwlHR7OrE0L8TNcWwKPRAY4EPcXrT+cWo1k6aTqZDSK54ygW2iWtni\n" "ZBcstxwcB4GIwnp1DrPW9L2gw5eLe1Sl6wdz443TW8K/KPV9rWQ=\n" "-----END CERTIFICATE-----\n") return certif, fingerprint def get_ssh_public_key(): public_key = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg" "B4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0l" "RE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv" 
"9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYc" "pSxsIbECHw== Generated-by-Nova") return public_key nova-13.0.0/nova/tests/unit/test_wsgi.py0000664000567000056710000002722312701410011021336 0ustar jenkinsjenkins00000000000000# Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for `nova.wsgi`.""" import os.path import socket import tempfile import eventlet import eventlet.wsgi import mock from oslo_config import cfg import requests import testtools import webob import nova.exception from nova import test from nova.tests.unit import utils import nova.wsgi SSL_CERT_DIR = os.path.normpath(os.path.join( os.path.dirname(os.path.abspath(__file__)), 'ssl_cert')) CONF = cfg.CONF class TestLoaderNothingExists(test.NoDBTestCase): """Loader tests where os.path.exists always returns False.""" def setUp(self): super(TestLoaderNothingExists, self).setUp() self.stub_out('os.path.exists', lambda _: False) def test_relpath_config_not_found(self): self.flags(api_paste_config='api-paste.ini') self.assertRaises( nova.exception.ConfigNotFound, nova.wsgi.Loader, ) def test_asbpath_config_not_found(self): self.flags(api_paste_config='/etc/nova/api-paste.ini') self.assertRaises( nova.exception.ConfigNotFound, nova.wsgi.Loader, ) class TestLoaderNormalFilesystem(test.NoDBTestCase): """Loader tests with normal filesystem (unmodified 
os.path module).""" _paste_config = """ [app:test_app] use = egg:Paste#static document_root = /tmp """ def setUp(self): super(TestLoaderNormalFilesystem, self).setUp() self.config = tempfile.NamedTemporaryFile(mode="w+t") self.config.write(self._paste_config.lstrip()) self.config.seek(0) self.config.flush() self.loader = nova.wsgi.Loader(self.config.name) def test_config_found(self): self.assertEqual(self.config.name, self.loader.config_path) def test_app_not_found(self): self.assertRaises( nova.exception.PasteAppNotFound, self.loader.load_app, "nonexistent app", ) def test_app_found(self): url_parser = self.loader.load_app("test_app") self.assertEqual("/tmp", url_parser.directory) def tearDown(self): self.config.close() super(TestLoaderNormalFilesystem, self).tearDown() class TestWSGIServer(test.NoDBTestCase): """WSGI server tests.""" def test_no_app(self): server = nova.wsgi.Server("test_app", None) self.assertEqual("test_app", server.name) def test_custom_max_header_line(self): self.flags(max_header_line=4096) # Default value is 16384. 
nova.wsgi.Server("test_custom_max_header_line", None) self.assertEqual(CONF.max_header_line, eventlet.wsgi.MAX_HEADER_LINE) def test_start_random_port(self): server = nova.wsgi.Server("test_random_port", None, host="127.0.0.1", port=0) server.start() self.assertNotEqual(0, server.port) server.stop() server.wait() @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support") def test_start_random_port_with_ipv6(self): server = nova.wsgi.Server("test_random_port", None, host="::1", port=0) server.start() self.assertEqual("::1", server.host) self.assertNotEqual(0, server.port) server.stop() server.wait() @testtools.skipIf(not utils.is_linux(), 'SO_REUSEADDR behaves differently ' 'on OSX and BSD, see bugs ' '1436895 and 1467145') def test_socket_options_for_simple_server(self): # test normal socket options has set properly self.flags(tcp_keepidle=500) server = nova.wsgi.Server("test_socket_options", None, host="127.0.0.1", port=0) server.start() sock = server._socket self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)) self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE)) if hasattr(socket, 'TCP_KEEPIDLE'): self.assertEqual(CONF.tcp_keepidle, sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE)) server.stop() server.wait() def test_server_pool_waitall(self): # test pools waitall method gets called while stopping server server = nova.wsgi.Server("test_server", None, host="127.0.0.1") server.start() with mock.patch.object(server._pool, 'waitall') as mock_waitall: server.stop() server.wait() mock_waitall.assert_called_once_with() def test_uri_length_limit(self): server = nova.wsgi.Server("test_uri_length_limit", None, host="127.0.0.1", max_url_len=16384) server.start() uri = "http://127.0.0.1:%d/%s" % (server.port, 10000 * 'x') resp = requests.get(uri, proxies={"http": ""}) eventlet.sleep(0) self.assertNotEqual(resp.status_code, requests.codes.REQUEST_URI_TOO_LARGE) uri = "http://127.0.0.1:%d/%s" % (server.port, 
20000 * 'x') resp = requests.get(uri, proxies={"http": ""}) eventlet.sleep(0) self.assertEqual(resp.status_code, requests.codes.REQUEST_URI_TOO_LARGE) server.stop() server.wait() def test_reset_pool_size_to_default(self): server = nova.wsgi.Server("test_resize", None, host="127.0.0.1", max_url_len=16384) server.start() # Stopping the server, which in turn sets pool size to 0 server.stop() self.assertEqual(server._pool.size, 0) # Resetting pool size to default server.reset() server.start() self.assertEqual(server._pool.size, CONF.wsgi_default_pool_size) def test_client_socket_timeout(self): self.flags(client_socket_timeout=5) # mocking eventlet spawn method to check it is called with # configured 'client_socket_timeout' value. with mock.patch.object(eventlet, 'spawn') as mock_spawn: server = nova.wsgi.Server("test_app", None, host="127.0.0.1", port=0) server.start() _, kwargs = mock_spawn.call_args self.assertEqual(CONF.client_socket_timeout, kwargs['socket_timeout']) server.stop() def test_wsgi_keep_alive(self): self.flags(wsgi_keep_alive=False) # mocking eventlet spawn method to check it is called with # configured 'wsgi_keep_alive' value. 
with mock.patch.object(eventlet, 'spawn') as mock_spawn: server = nova.wsgi.Server("test_app", None, host="127.0.0.1", port=0) server.start() _, kwargs = mock_spawn.call_args self.assertEqual(CONF.wsgi_keep_alive, kwargs['keepalive']) server.stop() class TestWSGIServerWithSSL(test.NoDBTestCase): """WSGI server with SSL tests.""" def setUp(self): super(TestWSGIServerWithSSL, self).setUp() self.flags(enabled_ssl_apis=['fake_ssl'], ssl_cert_file=os.path.join(SSL_CERT_DIR, 'certificate.crt'), ssl_key_file=os.path.join(SSL_CERT_DIR, 'privatekey.key')) def test_ssl_server(self): def test_app(env, start_response): start_response('200 OK', {}) return ['PONG'] fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app, host="127.0.0.1", port=0, use_ssl=True) fake_ssl_server.start() self.assertNotEqual(0, fake_ssl_server.port) response = requests.post( 'https://127.0.0.1:%s/' % fake_ssl_server.port, verify=os.path.join(SSL_CERT_DIR, 'ca.crt'), data='PING') self.assertEqual(response.text, 'PONG') fake_ssl_server.stop() fake_ssl_server.wait() def test_two_servers(self): def test_app(env, start_response): start_response('200 OK', {}) return ['PONG'] fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app, host="127.0.0.1", port=0, use_ssl=True) fake_ssl_server.start() self.assertNotEqual(0, fake_ssl_server.port) fake_server = nova.wsgi.Server("fake", test_app, host="127.0.0.1", port=0) fake_server.start() self.assertNotEqual(0, fake_server.port) response = requests.post( 'https://127.0.0.1:%s/' % fake_ssl_server.port, verify=os.path.join(SSL_CERT_DIR, 'ca.crt'), data='PING') self.assertEqual(response.text, 'PONG') response = requests.post('http://127.0.0.1:%s/' % fake_server.port, data='PING') self.assertEqual(response.text, 'PONG') fake_ssl_server.stop() fake_ssl_server.wait() fake_server.stop() fake_server.wait() @testtools.skipIf(not utils.is_linux(), 'SO_REUSEADDR behaves differently ' 'on OSX and BSD, see bugs ' '1436895 and 1467145') def 
test_socket_options_for_ssl_server(self): # test normal socket options has set properly self.flags(tcp_keepidle=500) server = nova.wsgi.Server("test_socket_options", None, host="127.0.0.1", port=0, use_ssl=True) server.start() sock = server._socket self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)) self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE)) if hasattr(socket, 'TCP_KEEPIDLE'): self.assertEqual(CONF.tcp_keepidle, sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE)) server.stop() server.wait() @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support") def test_app_using_ipv6_and_ssl(self): greetings = 'Hello, World!!!' @webob.dec.wsgify def hello_world(req): return greetings server = nova.wsgi.Server("fake_ssl", hello_world, host="::1", port=0, use_ssl=True) server.start() response = requests.get('https://[::1]:%d/' % server.port, verify=os.path.join(SSL_CERT_DIR, 'ca.crt')) self.assertEqual(greetings, response.text) server.stop() server.wait() nova-13.0.0/nova/tests/unit/fake_processutils.py0000664000567000056710000000662412701407773023102 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""This modules stubs out functions in oslo_concurrency.processutils.""" import re from eventlet import greenthread from oslo_concurrency import processutils from oslo_log import log as logging import six LOG = logging.getLogger(__name__) _fake_execute_repliers = [] _fake_execute_log = [] def fake_execute_get_log(): return _fake_execute_log def fake_execute_clear_log(): global _fake_execute_log _fake_execute_log = [] def fake_execute_set_repliers(repliers): """Allows the client to configure replies to commands.""" global _fake_execute_repliers _fake_execute_repliers = repliers def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs): """A reply handler for commands that haven't been added to the reply list. Returns empty strings for stdout and stderr. """ return '', '' def fake_execute(*cmd_parts, **kwargs): """This function stubs out execute. It optionally executes a preconfigued function to return expected data. """ global _fake_execute_repliers process_input = kwargs.get('process_input', None) check_exit_code = kwargs.get('check_exit_code', 0) delay_on_retry = kwargs.get('delay_on_retry', True) attempts = kwargs.get('attempts', 1) run_as_root = kwargs.get('run_as_root', False) cmd_str = ' '.join(str(part) for part in cmd_parts) LOG.debug("Faking execution of cmd (subprocess): %s", cmd_str) _fake_execute_log.append(cmd_str) reply_handler = fake_execute_default_reply_handler for fake_replier in _fake_execute_repliers: if re.match(fake_replier[0], cmd_str): reply_handler = fake_replier[1] LOG.debug('Faked command matched %s', fake_replier[0]) break if isinstance(reply_handler, six.string_types): # If the reply handler is a string, return it as stdout reply = reply_handler, '' else: try: # Alternative is a function, so call it reply = reply_handler(cmd_parts, process_input=process_input, delay_on_retry=delay_on_retry, attempts=attempts, run_as_root=run_as_root, check_exit_code=check_exit_code) except processutils.ProcessExecutionError as e: 
LOG.debug('Faked command raised an exception %s', e) raise LOG.debug("Reply to faked command is stdout='%(stdout)s' " "stderr='%(stderr)s'", {'stdout': reply[0], 'stderr': reply[1]}) # Replicate the sleep call in the real function greenthread.sleep(0) return reply def stub_out_processutils_execute(stubs): fake_execute_set_repliers([]) fake_execute_clear_log() stubs.Set(processutils, 'execute', fake_execute) nova-13.0.0/nova/tests/unit/compute/0000775000567000056710000000000012701410205020427 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/compute/test_compute_cells.py0000664000567000056710000004527712701407773024735 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For Compute w/ Cells """ import functools import inspect import mock from mox3 import mox from oslo_utils import timeutils from nova import block_device from nova.cells import manager from nova.compute import api as compute_api from nova.compute import cells_api as compute_cells_api from nova.compute import flavors from nova.compute import utils as compute_utils from nova.compute import vm_states import nova.conf from nova import context from nova import db from nova import exception from nova import objects from nova import quota from nova import test from nova.tests.unit.compute import test_compute from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_flavor from nova.tests import uuidsentinel as uuids ORIG_COMPUTE_API = None CONF = nova.conf.CONF def stub_call_to_cells(context, instance, method, *args, **kwargs): fn = getattr(ORIG_COMPUTE_API, method) original_instance = kwargs.pop('original_instance', None) if original_instance: instance = original_instance # Restore this in 'child cell DB' db.instance_update(context, instance['uuid'], dict(vm_state=instance['vm_state'], task_state=instance['task_state'])) # Use NoopQuotaDriver in child cells. saved_quotas = quota.QUOTAS quota.QUOTAS = quota.QuotaEngine( quota_driver_class=quota.NoopQuotaDriver()) compute_api.QUOTAS = quota.QUOTAS try: return fn(context, instance, *args, **kwargs) finally: quota.QUOTAS = saved_quotas compute_api.QUOTAS = saved_quotas def stub_cast_to_cells(context, instance, method, *args, **kwargs): fn = getattr(ORIG_COMPUTE_API, method) original_instance = kwargs.pop('original_instance', None) if original_instance: instance = original_instance # Restore this in 'child cell DB' db.instance_update(context, instance['uuid'], dict(vm_state=instance['vm_state'], task_state=instance['task_state'])) # Use NoopQuotaDriver in child cells. 
saved_quotas = quota.QUOTAS quota.QUOTAS = quota.QuotaEngine( quota_driver_class=quota.NoopQuotaDriver()) compute_api.QUOTAS = quota.QUOTAS try: fn(context, instance, *args, **kwargs) finally: quota.QUOTAS = saved_quotas compute_api.QUOTAS = saved_quotas def deploy_stubs(stubs, api, original_instance=None): call = stub_call_to_cells cast = stub_cast_to_cells if original_instance: kwargs = dict(original_instance=original_instance) call = functools.partial(stub_call_to_cells, **kwargs) cast = functools.partial(stub_cast_to_cells, **kwargs) stubs.Set(api, '_call_to_cells', call) stubs.Set(api, '_cast_to_cells', cast) class CellsComputeAPITestCase(test_compute.ComputeAPITestCase): def setUp(self): super(CellsComputeAPITestCase, self).setUp() global ORIG_COMPUTE_API ORIG_COMPUTE_API = self.compute_api self.flags(enable=True, group='cells') def _fake_validate_cell(*args, **kwargs): return self.compute_api = compute_cells_api.ComputeCellsAPI() self.stubs.Set(self.compute_api, '_validate_cell', _fake_validate_cell) deploy_stubs(self.stubs, self.compute_api) def tearDown(self): global ORIG_COMPUTE_API self.compute_api = ORIG_COMPUTE_API super(CellsComputeAPITestCase, self).tearDown() def test_instance_metadata(self): self.skipTest("Test is incompatible with cells.") def test_evacuate(self): @mock.patch.object(compute_api.API, 'evacuate') def _test(mock_evacuate): instance = objects.Instance(uuid=uuids.evacuate_instance, cell_name='fake_cell_name') dest_host = 'fake_cell_name@fakenode2' self.compute_api.evacuate(self.context, instance, host=dest_host) mock_evacuate.assert_called_once_with( self.context, instance, 'fakenode2') _test() def test_error_evacuate(self): self.skipTest("Test is incompatible with cells.") def _test_delete_instance_no_cell(self, method_name): cells_rpcapi = self.compute_api.cells_rpcapi self.mox.StubOutWithMock(cells_rpcapi, 'instance_delete_everywhere') self.mox.StubOutWithMock(compute_api.API, '_local_delete') inst = self._create_fake_instance_obj() 
delete_type = method_name == 'soft_delete' and 'soft' or 'hard' cells_rpcapi.instance_delete_everywhere(self.context, inst, delete_type) compute_api.API._local_delete(self.context, inst, mox.IsA(objects.BlockDeviceMappingList), method_name, mox.IgnoreArg()) self.mox.ReplayAll() self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance', lambda *a, **kw: None) getattr(self.compute_api, method_name)(self.context, inst) def test_delete_instance_no_cell_constraint_failure_does_not_loop(self): with mock.patch.object(self.compute_api.cells_rpcapi, 'instance_delete_everywhere'): inst = self._create_fake_instance_obj() inst.cell_name = None inst.destroy = mock.MagicMock() inst.destroy.side_effect = exception.ObjectActionError(action='', reason='') inst.refresh = mock.MagicMock() self.assertRaises(exception.ObjectActionError, self.compute_api.delete, self.context, inst) inst.destroy.assert_called_once_with() def test_delete_instance_no_cell_constraint_failure_corrects_itself(self): def add_cell_name(context, instance, delete_type): instance.cell_name = 'fake_cell_name' @mock.patch.object(compute_api.API, 'delete') @mock.patch.object(self.compute_api.cells_rpcapi, 'instance_delete_everywhere', side_effect=add_cell_name) def _test(mock_delete_everywhere, mock_compute_delete): inst = self._create_fake_instance_obj() inst.cell_name = None inst.destroy = mock.MagicMock() inst.destroy.side_effect = exception.ObjectActionError(action='', reason='') inst.refresh = mock.MagicMock() self.compute_api.delete(self.context, inst) inst.destroy.assert_called_once_with() mock_compute_delete.assert_called_once_with(self.context, inst) _test() def test_delete_instance_no_cell_destroy_fails_already_deleted(self): # If the instance.destroy() is reached during _local_delete, # it will raise ObjectActionError if the instance has already # been deleted by a instance_destroy_at_top, and instance.refresh() # will raise InstanceNotFound instance = 
objects.Instance(uuid=uuids.destroy_instance, cell_name=None) actionerror = exception.ObjectActionError(action='destroy', reason='') notfound = exception.InstanceNotFound(instance_id=instance.uuid) @mock.patch.object(compute_api.API, 'delete') @mock.patch.object(self.compute_api.cells_rpcapi, 'instance_delete_everywhere') @mock.patch.object(compute_api.API, '_local_delete', side_effect=actionerror) @mock.patch.object(instance, 'refresh', side_effect=notfound) def _test(mock_refresh, mock_local_delete, mock_delete_everywhere, mock_compute_delete): self.compute_api.delete(self.context, instance) mock_delete_everywhere.assert_called_once_with(self.context, instance, 'hard') mock_local_delete.assert_called_once_with(self.context, instance, mock.ANY, 'delete', self.compute_api._do_delete) mock_refresh.assert_called_once_with() self.assertFalse(mock_compute_delete.called) _test() def test_delete_instance_no_cell_instance_not_found_already_deleted(self): # If anything in _local_delete accesses the instance causing a db # lookup before instance.destroy() is reached, if the instance has # already been deleted by a instance_destroy_at_top, # InstanceNotFound will be raised instance = objects.Instance(uuid=uuids.delete_instance, cell_name=None) notfound = exception.InstanceNotFound(instance_id=instance.uuid) @mock.patch.object(compute_api.API, 'delete') @mock.patch.object(self.compute_api.cells_rpcapi, 'instance_delete_everywhere') @mock.patch.object(compute_api.API, '_local_delete', side_effect=notfound) def _test(mock_local_delete, mock_delete_everywhere, mock_compute_delete): self.compute_api.delete(self.context, instance) mock_delete_everywhere.assert_called_once_with(self.context, instance, 'hard') mock_local_delete.assert_called_once_with(self.context, instance, mock.ANY, 'delete', self.compute_api._do_delete) self.assertFalse(mock_compute_delete.called) _test() def test_soft_delete_instance_no_cell(self): self._test_delete_instance_no_cell('soft_delete') def 
test_delete_instance_no_cell(self): self._test_delete_instance_no_cell('delete') def test_force_delete_instance_no_cell(self): self._test_delete_instance_no_cell('force_delete') def test_get_migrations(self): filters = {'cell_name': 'ChildCell', 'status': 'confirmed'} migrations = {'migrations': [{'id': 1234}]} cells_rpcapi = self.compute_api.cells_rpcapi self.mox.StubOutWithMock(cells_rpcapi, 'get_migrations') cells_rpcapi.get_migrations(self.context, filters).AndReturn(migrations) self.mox.ReplayAll() response = self.compute_api.get_migrations(self.context, filters) self.assertEqual(migrations, response) def test_create_block_device_mapping(self): instance_type = {'swap': 1, 'ephemeral_gb': 1} instance = self._create_fake_instance_obj() bdms = [block_device.BlockDeviceDict({'source_type': 'image', 'destination_type': 'local', 'image_id': 'fake-image', 'boot_index': 0})] self.compute_api._create_block_device_mapping( instance_type, instance.uuid, bdms) bdms = db.block_device_mapping_get_all_by_instance( self.context, instance['uuid']) self.assertEqual(0, len(bdms)) def test_create_bdm_from_flavor(self): self.skipTest("Test is incompatible with cells.") @mock.patch('nova.cells.messaging._TargetedMessage') def test_rebuild_sig(self, mock_msg): # TODO(belliott) Cells could benefit from better testing to ensure API # and manager signatures stay up to date def wire(version): # wire the rpc cast directly to the manager method to make sure # the signature matches cells_mgr = manager.CellsManager() def cast(context, method, *args, **kwargs): fn = getattr(cells_mgr, method) fn(context, *args, **kwargs) cells_mgr.cast = cast return cells_mgr cells_rpcapi = self.compute_api.cells_rpcapi client = cells_rpcapi.client with mock.patch.object(client, 'prepare', side_effect=wire): inst = self._create_fake_instance_obj() inst.cell_name = 'mycell' cells_rpcapi.rebuild_instance(self.context, inst, 'pass', None, None, None, None, None, recreate=False, on_shared_storage=False, 
host='host', preserve_ephemeral=True, kwargs=None) # one targeted message should have been created self.assertEqual(1, mock_msg.call_count) class CellsConductorAPIRPCRedirect(test.NoDBTestCase): def setUp(self): super(CellsConductorAPIRPCRedirect, self).setUp() self.compute_api = compute_cells_api.ComputeCellsAPI() self.cells_rpcapi = mock.MagicMock() self.compute_api.compute_task_api.cells_rpcapi = self.cells_rpcapi self.context = context.RequestContext('fake', 'fake') @mock.patch.object(compute_api.API, '_record_action_start') @mock.patch.object(compute_api.API, '_provision_instances') @mock.patch.object(compute_api.API, '_check_and_transform_bdm') @mock.patch.object(compute_api.API, '_get_image') @mock.patch.object(compute_api.API, '_validate_and_build_base_options') @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild') def test_build_instances(self, _checks_for_create_and_rebuild, _validate, _get_image, _check_bdm, _provision, _record_action_start): _get_image.return_value = (None, 'fake-image') _validate.return_value = ({}, 1) _check_bdm.return_value = objects.BlockDeviceMappingList() _provision.return_value = 'instances' self.compute_api.create(self.context, 'fake-flavor', 'fake-image') # Subsequent tests in class are verifying the hooking. We don't check # args since this is verified in compute test code. 
self.assertTrue(self.cells_rpcapi.build_instances.called) @mock.patch.object(compute_api.API, '_record_action_start') @mock.patch.object(compute_api.API, '_resize_cells_support') @mock.patch.object(compute_utils, 'reserve_quota_delta') @mock.patch.object(compute_utils, 'upsize_quota_delta') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(flavors, 'extract_flavor') @mock.patch.object(compute_api.API, '_check_auto_disk_config') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') def test_resize_instance(self, _bdms, _check, _extract, _save, _upsize, _reserve, _cells, _record): flavor = objects.Flavor(**test_flavor.fake_flavor) _extract.return_value = flavor orig_system_metadata = {} instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell', launched_at=timeutils.utcnow(), system_metadata=orig_system_metadata, expected_attrs=['system_metadata']) instance.flavor = flavor instance.old_flavor = instance.new_flavor = None self.compute_api.resize(self.context, instance) self.assertTrue(self.cells_rpcapi.resize_instance.called) @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid') @mock.patch.object(compute_api.API, '_record_action_start') @mock.patch.object(objects.Instance, 'save') def test_live_migrate_instance(self, instance_save, _record, _get_spec): orig_system_metadata = {} instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell', launched_at=timeutils.utcnow(), system_metadata=orig_system_metadata, expected_attrs=['system_metadata']) self.compute_api.live_migrate(self.context, instance, True, True, 'fake_dest_host') self.assertTrue(self.cells_rpcapi.live_migrate_instance.called) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.Instance, 'get_flavor') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(compute_api.API, '_get_image') 
@mock.patch.object(compute_api.API, '_check_auto_disk_config') @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild') @mock.patch.object(compute_api.API, '_record_action_start') def test_rebuild_instance(self, _record_action_start, _checks_for_create_and_rebuild, _check_auto_disk_config, _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save): orig_system_metadata = {} instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell', launched_at=timeutils.utcnow(), system_metadata=orig_system_metadata, expected_attrs=['system_metadata']) get_flavor.return_value = '' image_href = '' image = {"min_ram": 10, "min_disk": 1, "properties": {'architecture': 'x86_64'}} admin_pass = '' files_to_inject = [] bdms = objects.BlockDeviceMappingList() _get_image.return_value = (None, image) bdm_get_by_instance_uuid.return_value = bdms self.compute_api.rebuild(self.context, instance, image_href, admin_pass, files_to_inject) self.assertTrue(self.cells_rpcapi.rebuild_instance.called) def test_check_equal(self): task_api = self.compute_api.compute_task_api tests = set() for (name, value) in inspect.getmembers(self, inspect.ismethod): if name.startswith('test_') and name != 'test_check_equal': tests.add(name[5:]) if tests != set(task_api.cells_compatible): self.fail("Testcases not equivalent to cells_compatible list") class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase): def setUp(self): super(CellsComputePolicyTestCase, self).setUp() global ORIG_COMPUTE_API ORIG_COMPUTE_API = self.compute_api self.compute_api = compute_cells_api.ComputeCellsAPI() deploy_stubs(self.stubs, self.compute_api) def tearDown(self): global ORIG_COMPUTE_API self.compute_api = ORIG_COMPUTE_API super(CellsComputePolicyTestCase, self).tearDown() nova-13.0.0/nova/tests/unit/compute/test_compute.py0000664000567000056710000204445112701410011023521 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as 
represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for compute service.""" import base64 import datetime import operator import sys import time import traceback import uuid from eventlet import greenthread import mock from mox3 import mox from neutronclient.common import exceptions as neutron_exceptions from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_utils import fixture as utils_fixture from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import units from oslo_utils import uuidutils import six import testtools from testtools import matchers as testtools_matchers import nova from nova import availability_zones from nova import block_device from nova import compute from nova.compute import api as compute_api from nova.compute import arch from nova.compute import flavors from nova.compute import manager as compute_manager from nova.compute import power_state from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import manager as conductor_manager import nova.conf from nova.console import type as ctype from nova import context from nova import db from 
nova import exception from nova.image import api as image_api from nova.image import glance from nova.network import api as network_api from nova.network import model as network_model from nova.network.security_group import openstack_driver from nova import objects from nova.objects import block_device as block_device_obj from nova.objects import instance as instance_obj from nova.objects import migrate_data as migrate_data_obj from nova import policy from nova import quota from nova.scheduler import client as scheduler_client from nova import test from nova.tests import fixtures from nova.tests.unit.compute import eventlet_utils from nova.tests.unit.compute import fake_resource_tracker from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import fake_network from nova.tests.unit import fake_network_cache_model from nova.tests.unit import fake_notifier from nova.tests.unit import fake_server_actions from nova.tests.unit.image import fake as fake_image from nova.tests.unit import matchers from nova.tests.unit.objects import test_flavor from nova.tests.unit.objects import test_instance_numa_topology from nova.tests.unit.objects import test_migration from nova.tests.unit import utils as test_utils from nova.tests import uuidsentinel as uuids from nova import utils from nova.virt import block_device as driver_block_device from nova.virt import event from nova.virt import fake from nova.virt import hardware from nova.volume import cinder QUOTAS = quota.QUOTAS LOG = logging.getLogger(__name__) CONF = nova.conf.CONF CONF.import_opt('compute_manager', 'nova.service') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('live_migration_retry_count', 'nova.compute.manager') FAKE_IMAGE_REF = uuids.image_ref NODENAME = 'fakenode1' def fake_not_implemented(*args, **kwargs): raise NotImplementedError() def get_primitive_instance_by_uuid(context, instance_uuid): """Helper method to get an instance and then convert it to a 
primitive form using jsonutils. """ instance = db.instance_get_by_uuid(context, instance_uuid) return jsonutils.to_primitive(instance) def unify_instance(instance): """Return a dict-like instance for both object-initiated and model-initiated sources that can reasonably be compared. """ newdict = dict() for k, v in six.iteritems(instance): if isinstance(v, datetime.datetime): # NOTE(danms): DB models and Instance objects have different # timezone expectations v = v.replace(tzinfo=None) elif k == 'fault': # NOTE(danms): DB models don't have 'fault' continue elif k == 'pci_devices': # NOTE(yonlig.he) pci devices need lazy loading # fake db does not support it yet. continue newdict[k] = v return newdict class FakeComputeTaskAPI(object): def resize_instance(self, context, instance, extra_instance_updates, scheduler_hint, flavor, reservations): pass class BaseTestCase(test.TestCase): def setUp(self): super(BaseTestCase, self).setUp() self.flags(network_manager='nova.network.manager.FlatManager') fake.set_nodes([NODENAME]) self.flags(use_local=True, group='conductor') fake_notifier.stub_notifier(self.stubs) self.addCleanup(fake_notifier.reset) self.compute = importutils.import_object(CONF.compute_manager) # execute power syncing synchronously for testing: self.compute._sync_power_pool = eventlet_utils.SyncPool() # override tracker with a version that doesn't need the database: fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host, self.compute.driver, NODENAME) self.compute._resource_tracker_dict[NODENAME] = fake_rt def fake_get_compute_nodes_in_db(context, use_slave=False): fake_compute_nodes = [{'local_gb': 259, 'uuid': uuids.fake_compute_node, 'vcpus_used': 0, 'deleted': 0, 'hypervisor_type': 'powervm', 'created_at': '2013-04-01T00:27:06.000000', 'local_gb_used': 0, 'updated_at': '2013-04-03T00:35:41.000000', 'hypervisor_hostname': 'fake_phyp1', 'memory_mb_used': 512, 'memory_mb': 131072, 'current_workload': 0, 'vcpus': 16, 'cpu_info': 
'ppc64,powervm,3940', 'running_vms': 0, 'free_disk_gb': 259, 'service_id': 7, 'hypervisor_version': 7, 'disk_available_least': 265856, 'deleted_at': None, 'free_ram_mb': 130560, 'metrics': '', 'stats': '', 'numa_topology': '', 'id': 2, 'host': 'fake_phyp1', 'cpu_allocation_ratio': 16.0, 'ram_allocation_ratio': 1.5, 'disk_allocation_ratio': 1.0, 'host_ip': '127.0.0.1'}] return [objects.ComputeNode._from_db_object( context, objects.ComputeNode(), cn) for cn in fake_compute_nodes] def fake_compute_node_delete(context, compute_node_id): self.assertEqual(2, compute_node_id) self.stubs.Set(self.compute, '_get_compute_nodes_in_db', fake_get_compute_nodes_in_db) self.stub_out('nova.db.compute_node_delete', fake_compute_node_delete) self.compute.update_available_resource( context.get_admin_context()) self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) self.none_quotas = objects.Quotas.from_reservations( self.context, None) def fake_show(meh, context, id, **kwargs): if id: return {'id': id, 'name': 'fake_name', 'status': 'active', 'properties': {'kernel_id': 'fake_kernel_id', 'ramdisk_id': 'fake_ramdisk_id', 'something_else': 'meow'}} else: raise exception.ImageNotFound(image_id=id) fake_image.stub_out_image_service(self) self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) fake_taskapi = FakeComputeTaskAPI() self.stubs.Set(self.compute, 'compute_task_api', fake_taskapi) fake_network.set_stub_network_methods(self) fake_server_actions.stub_out_action_events(self.stubs) def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs): return network_model.NetworkInfo() self.stubs.Set(network_api.API, 'get_instance_nw_info', fake_get_nw_info) def fake_allocate_for_instance(cls, ctxt, instance, *args, **kwargs): self.assertFalse(ctxt.is_admin) return fake_network.fake_get_instance_nw_info(self, 1, 1) self.stubs.Set(network_api.API, 'allocate_for_instance', fake_allocate_for_instance) self.compute_api = 
compute.API() # Just to make long lines short self.rt = self.compute._get_resource_tracker(NODENAME) def tearDown(self): ctxt = context.get_admin_context() fake_image.FakeImageService_reset() instances = db.instance_get_all(ctxt) for instance in instances: db.instance_destroy(ctxt, instance['uuid']) fake.restore_nodes() super(BaseTestCase, self).tearDown() def _fake_instance(self, updates): return fake_instance.fake_instance_obj(None, **updates) def _create_fake_instance_obj(self, params=None, type_name='m1.tiny', services=False, context=None): flavor = flavors.get_flavor_by_name(type_name) inst = objects.Instance(context=context or self.context) inst.vm_state = vm_states.ACTIVE inst.task_state = None inst.power_state = power_state.RUNNING inst.image_ref = FAKE_IMAGE_REF inst.reservation_id = 'r-fakeres' inst.user_id = self.user_id inst.project_id = self.project_id inst.host = self.compute.host inst.node = NODENAME inst.instance_type_id = flavor.id inst.ami_launch_index = 0 inst.memory_mb = 0 inst.vcpus = 0 inst.root_gb = 0 inst.ephemeral_gb = 0 inst.architecture = arch.X86_64 inst.os_type = 'Linux' inst.system_metadata = ( params and params.get('system_metadata', {}) or {}) inst.locked = False inst.created_at = timeutils.utcnow() inst.updated_at = timeutils.utcnow() inst.launched_at = timeutils.utcnow() inst.security_groups = objects.SecurityGroupList(objects=[]) inst.flavor = flavor inst.old_flavor = None inst.new_flavor = None if params: inst.update(params) if services: _create_service_entries(self.context.elevated(), [['fake_zone', [inst.host]]]) inst.create() return inst def _create_instance_type(self, params=None): """Create a test instance type.""" if not params: params = {} context = self.context.elevated() inst = {} inst['name'] = 'm1.small' inst['memory_mb'] = 1024 inst['vcpus'] = 1 inst['root_gb'] = 20 inst['ephemeral_gb'] = 10 inst['flavorid'] = '1' inst['swap'] = 2048 inst['rxtx_factor'] = 1 inst.update(params) return db.flavor_create(context, 
inst)['id'] def _create_group(self): values = {'name': 'testgroup', 'description': 'testgroup', 'user_id': self.user_id, 'project_id': self.project_id} return db.security_group_create(self.context, values) def _stub_migrate_server(self): def _fake_migrate_server(*args, **kwargs): pass self.stubs.Set(conductor_manager.ComputeTaskManager, 'migrate_server', _fake_migrate_server) def _init_aggregate_with_host(self, aggr, aggr_name, zone, host): if not aggr: aggr = self.api.create_aggregate(self.context, aggr_name, zone) aggr = self.api.add_host_to_aggregate(self.context, aggr.id, host) return aggr class ComputeVolumeTestCase(BaseTestCase): def setUp(self): super(ComputeVolumeTestCase, self).setUp() self.fetched_attempts = 0 self.instance = { 'id': 'fake', 'uuid': uuids.instance, 'name': 'fake', 'root_device_name': '/dev/vda', } self.fake_volume = fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': uuids.volume_id, 'device_name': '/dev/vdb'}) self.instance_object = objects.Instance._from_db_object( self.context, objects.Instance(), fake_instance.fake_db_instance()) self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw: {'id': uuids.volume_id, 'size': 4, 'attach_status': 'detached'}) self.stubs.Set(self.compute.driver, 'get_volume_connector', lambda *a, **kw: None) self.stubs.Set(self.compute.volume_api, 'initialize_connection', lambda *a, **kw: {}) self.stubs.Set(self.compute.volume_api, 'terminate_connection', lambda *a, **kw: None) self.stubs.Set(self.compute.volume_api, 'attach', lambda *a, **kw: None) self.stubs.Set(self.compute.volume_api, 'detach', lambda *a, **kw: None) self.stubs.Set(self.compute.volume_api, 'check_attach', lambda *a, **kw: None) self.stubs.Set(greenthread, 'sleep', lambda *a, **kw: None) def store_cinfo(context, *args, **kwargs): self.cinfo = jsonutils.loads(args[-1].get('connection_info')) return self.fake_volume self.stub_out('nova.db.block_device_mapping_create', store_cinfo) 
self.stub_out('nova.db.block_device_mapping_update', store_cinfo) def test_attach_volume_serial(self): fake_bdm = objects.BlockDeviceMapping(context=self.context, **self.fake_volume) with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata', return_value={})): instance = self._create_fake_instance_obj() self.compute.attach_volume(self.context, instance, bdm=fake_bdm) self.assertEqual(self.cinfo.get('serial'), uuids.volume_id) def test_attach_volume_raises(self): fake_bdm = objects.BlockDeviceMapping(**self.fake_volume) instance = self._create_fake_instance_obj() def fake_attach(*args, **kwargs): raise test.TestingException with test.nested( mock.patch.object(driver_block_device.DriverVolumeBlockDevice, 'attach'), mock.patch.object(cinder.API, 'unreserve_volume'), mock.patch.object(objects.BlockDeviceMapping, 'destroy') ) as (mock_attach, mock_unreserve, mock_destroy): mock_attach.side_effect = fake_attach self.assertRaises( test.TestingException, self.compute.attach_volume, self.context, instance, fake_bdm) self.assertTrue(mock_unreserve.called) self.assertTrue(mock_destroy.called) def test_detach_volume_api_raises(self): fake_bdm = objects.BlockDeviceMapping(**self.fake_volume) instance = self._create_fake_instance_obj() with test.nested( mock.patch.object(self.compute, '_driver_detach_volume'), mock.patch.object(self.compute.volume_api, 'detach'), mock.patch.object(objects.BlockDeviceMapping, 'get_by_volume_and_instance'), mock.patch.object(fake_bdm, 'destroy') ) as (mock_internal_detach, mock_detach, mock_get, mock_destroy): mock_detach.side_effect = test.TestingException mock_get.return_value = fake_bdm self.assertRaises( test.TestingException, self.compute.detach_volume, self.context, 'fake', instance, 'fake_id') mock_internal_detach.assert_called_once_with(self.context, instance, fake_bdm) self.assertTrue(mock_destroy.called) def test_await_block_device_created_too_slow(self): self.flags(block_device_allocate_retries=2) 
self.flags(block_device_allocate_retries_interval=0.1) def never_get(context, vol_id): return { 'status': 'creating', 'id': 'blah', } self.stubs.Set(self.compute.volume_api, 'get', never_get) self.assertRaises(exception.VolumeNotCreated, self.compute._await_block_device_map_created, self.context, '1') def test_await_block_device_created_failed(self): c = self.compute fake_result = {'status': 'error', 'id': 'blah'} with mock.patch.object(c.volume_api, 'get', return_value=fake_result) as fake_get: self.assertRaises(exception.VolumeNotCreated, c._await_block_device_map_created, self.context, '1') fake_get.assert_called_once_with(self.context, '1') def test_await_block_device_created_slow(self): c = self.compute self.flags(block_device_allocate_retries=4) self.flags(block_device_allocate_retries_interval=0.1) def slow_get(context, vol_id): if self.fetched_attempts < 2: self.fetched_attempts += 1 return { 'status': 'creating', 'id': 'blah', } return { 'status': 'available', 'id': 'blah', } self.stubs.Set(c.volume_api, 'get', slow_get) attempts = c._await_block_device_map_created(self.context, '1') self.assertEqual(attempts, 3) def test_await_block_device_created_retries_negative(self): c = self.compute self.flags(block_device_allocate_retries=-1) self.flags(block_device_allocate_retries_interval=0.1) def volume_get(context, vol_id): return { 'status': 'available', 'id': 'blah', } self.stubs.Set(c.volume_api, 'get', volume_get) attempts = c._await_block_device_map_created(self.context, '1') self.assertEqual(1, attempts) def test_await_block_device_created_retries_zero(self): c = self.compute self.flags(block_device_allocate_retries=0) self.flags(block_device_allocate_retries_interval=0.1) def volume_get(context, vol_id): return { 'status': 'available', 'id': 'blah', } self.stubs.Set(c.volume_api, 'get', volume_get) attempts = c._await_block_device_map_created(self.context, '1') self.assertEqual(1, attempts) def test_boot_volume_serial(self): with ( 
mock.patch.object(objects.BlockDeviceMapping, 'save') ) as mock_save: block_device_mapping = [ block_device.BlockDeviceDict({ 'id': 1, 'no_device': None, 'source_type': 'volume', 'destination_type': 'volume', 'snapshot_id': None, 'volume_id': uuids.volume_id, 'device_name': '/dev/vdb', 'volume_size': 55, 'delete_on_termination': False, })] bdms = block_device_obj.block_device_make_list_from_dicts( self.context, block_device_mapping) prepped_bdm = self.compute._prep_block_device( self.context, self.instance_object, bdms) self.assertEqual(2, mock_save.call_count) volume_driver_bdm = prepped_bdm['block_device_mapping'][0] self.assertEqual(volume_driver_bdm['connection_info']['serial'], uuids.volume_id) def test_boot_volume_metadata(self, metadata=True): def volume_api_get(*args, **kwargs): if metadata: return { 'size': 1, 'volume_image_metadata': {'vol_test_key': 'vol_test_value', 'min_ram': u'128', 'min_disk': u'256', 'size': u'536870912' }, } else: return {} self.stubs.Set(self.compute_api.volume_api, 'get', volume_api_get) expected_no_metadata = {'min_disk': 0, 'min_ram': 0, 'properties': {}, 'size': 0, 'status': 'active'} block_device_mapping = [{ 'id': 1, 'device_name': 'vda', 'no_device': None, 'virtual_name': None, 'snapshot_id': None, 'volume_id': uuids.volume_id, 'delete_on_termination': False, }] image_meta = self.compute_api._get_bdm_image_metadata( self.context, block_device_mapping) if metadata: self.assertEqual(image_meta['properties']['vol_test_key'], 'vol_test_value') self.assertEqual(128, image_meta['min_ram']) self.assertEqual(256, image_meta['min_disk']) self.assertEqual(units.Gi, image_meta['size']) else: self.assertEqual(expected_no_metadata, image_meta) # Test it with new-style BDMs block_device_mapping = [{ 'boot_index': 0, 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': uuids.volume_id, 'delete_on_termination': False, }] image_meta = self.compute_api._get_bdm_image_metadata( self.context, block_device_mapping, 
legacy_bdm=False) if metadata: self.assertEqual(image_meta['properties']['vol_test_key'], 'vol_test_value') self.assertEqual(128, image_meta['min_ram']) self.assertEqual(256, image_meta['min_disk']) self.assertEqual(units.Gi, image_meta['size']) else: self.assertEqual(expected_no_metadata, image_meta) def test_boot_volume_no_metadata(self): self.test_boot_volume_metadata(metadata=False) def test_boot_image_metadata(self, metadata=True): def image_api_get(*args, **kwargs): if metadata: return { 'properties': {'img_test_key': 'img_test_value'} } else: return {} self.stubs.Set(self.compute_api.image_api, 'get', image_api_get) block_device_mapping = [{ 'boot_index': 0, 'source_type': 'image', 'destination_type': 'local', 'image_id': "fake-image", 'delete_on_termination': True, }] image_meta = self.compute_api._get_bdm_image_metadata( self.context, block_device_mapping, legacy_bdm=False) if metadata: self.assertEqual('img_test_value', image_meta['properties']['img_test_key']) else: self.assertEqual(image_meta, {}) def test_boot_image_no_metadata(self): self.test_boot_image_metadata(metadata=False) def test_poll_bandwidth_usage_not_implemented(self): ctxt = context.get_admin_context() self.mox.StubOutWithMock(self.compute.driver, 'get_all_bw_counters') self.mox.StubOutWithMock(utils, 'last_completed_audit_period') self.mox.StubOutWithMock(time, 'time') self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host') # Following methods will be called utils.last_completed_audit_period().AndReturn((0, 0)) time.time().AndReturn(10) # Note - time called two more times from Log time.time().AndReturn(20) time.time().AndReturn(21) objects.InstanceList.get_by_host(ctxt, 'fake-mini', use_slave=True).AndReturn([]) self.compute.driver.get_all_bw_counters([]).AndRaise( NotImplementedError) self.mox.ReplayAll() self.flags(bandwidth_poll_interval=1) self.compute._poll_bandwidth_usage(ctxt) # A second call won't call the stubs again as the bandwidth # poll is now disabled 
        self.compute._poll_bandwidth_usage(ctxt)
        self.mox.UnsetStubs()

    @mock.patch.object(objects.InstanceList, 'get_by_host')
    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host):
        # _get_host_volume_bdms should look up the instances on this host
        # and return only the volume-backed BDMs, grouped per instance.
        fake_instance = mock.Mock(uuid=uuids.volume_instance)
        mock_get_by_host.return_value = [fake_instance]

        volume_bdm = mock.Mock(id=1, is_volume=True)
        # The non-volume BDM must be filtered out of the result.
        not_volume_bdm = mock.Mock(id=2, is_volume=False)
        mock_get_by_inst.return_value = [volume_bdm, not_volume_bdm]

        expected_host_bdms = [{'instance': fake_instance,
                               'instance_bdms': [volume_bdm]}]

        got_host_bdms = self.compute._get_host_volume_bdms('fake-context')
        mock_get_by_host.assert_called_once_with('fake-context',
                                                 self.compute.host,
                                                 use_slave=False)
        mock_get_by_inst.assert_called_once_with('fake-context',
                                                 uuids.volume_instance,
                                                 use_slave=False)
        self.assertEqual(expected_host_bdms, got_host_bdms)

    def test_poll_volume_usage_disabled(self):
        # With volume_usage_poll_interval=0 the periodic task is disabled,
        # so _poll_volume_usage must return without touching any of the
        # stubbed collaborators.
        ctxt = 'MockContext'
        self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
        self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
        # None of the mocks should be called.
        self.mox.ReplayAll()

        self.flags(volume_usage_poll_interval=0)
        self.compute._poll_volume_usage(ctxt)
        self.mox.UnsetStubs()

    def test_poll_volume_usage_returns_no_vols(self):
        # When no volume BDMs exist on the host, the driver usage query
        # must be skipped entirely (get_all_volume_usage is stubbed but
        # never recorded as expected below).
        ctxt = 'MockContext'
        self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'get_all_volume_usage')
        # Following methods are called.
        self.compute._get_host_volume_bdms(ctxt,
                                           use_slave=True).AndReturn([])
        self.mox.ReplayAll()

        self.flags(volume_usage_poll_interval=10)
        self.compute._poll_volume_usage(ctxt)
        self.mox.UnsetStubs()

    def test_poll_volume_usage_with_data(self):
        # When the host reports volume BDMs, the driver usage data must be
        # pushed into the volume usage cache.
        ctxt = 'MockContext'
        self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
        self.mox.StubOutWithMock(self.compute, '_update_volume_usage_cache')
        self.stubs.Set(self.compute.driver, 'get_all_volume_usage',
                       lambda x, y: [3, 4])
        # All the mocks are called
        self.compute._get_host_volume_bdms(ctxt,
                                           use_slave=True).AndReturn([1, 2])
        self.compute._update_volume_usage_cache(ctxt, [3, 4])
        self.mox.ReplayAll()
        self.flags(volume_usage_poll_interval=10)
        self.compute._poll_volume_usage(ctxt)
        self.mox.UnsetStubs()

    def test_detach_volume_usage(self):
        # Test that detaching a volume updates the volume usage cache
        # table correctly.
        instance = self._create_fake_instance_obj()
        bdm = objects.BlockDeviceMapping(context=self.context,
                                         id=1, device_name='/dev/vdb',
                                         connection_info='{}',
                                         instance_uuid=instance['uuid'],
                                         source_type='volume',
                                         destination_type='volume',
                                         no_device=False,
                                         disk_bus='foo',
                                         device_type='disk',
                                         volume_size=1,
                                         volume_id=uuids.volume_id)
        host_volume_bdms = {'id': 1, 'device_name': '/dev/vdb',
                            'connection_info': '{}',
                            'instance_uuid': instance['uuid'],
                            'volume_id': uuids.volume_id}
        self.mox.StubOutWithMock(objects.BlockDeviceMapping,
                                 'get_by_volume_and_instance')
        self.mox.StubOutWithMock(self.compute.driver, 'block_stats')
        self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'get_all_volume_usage')
        self.mox.StubOutWithMock(self.compute.driver, 'instance_exists')

        # The following methods will be called
        objects.BlockDeviceMapping.get_by_volume_and_instance(
            self.context, uuids.volume_id, instance.uuid).AndReturn(
                bdm.obj_clone())
        # block_stats returns [reads, read_bytes, writes, write_bytes, errs]
        self.compute.driver.block_stats(instance, 'vdb').\
            AndReturn([1, 30, 1, 20, None])
        self.compute._get_host_volume_bdms(self.context,
                                           use_slave=True).AndReturn(
                                               host_volume_bdms)
        self.compute.driver.get_all_volume_usage(
                self.context, host_volume_bdms).AndReturn(
                        [{'volume': uuids.volume_id,
                          'rd_req': 1,
                          'rd_bytes': 10,
                          'wr_req': 1,
                          'wr_bytes': 5,
                          'instance': instance}])

        self.compute.driver.instance_exists(mox.IgnoreArg()).AndReturn(True)

        self.mox.ReplayAll()

        def fake_get_volume_encryption_metadata(self, context, volume_id):
            # Pretend the volume is not encrypted.
            return {}
        self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
                       fake_get_volume_encryption_metadata)

        self.compute.attach_volume(self.context, instance, bdm)

        # Poll volume usage & then detach the volume. This will update the
        # total fields in the volume usage cache.
        self.flags(volume_usage_poll_interval=10)
        self.compute._poll_volume_usage(self.context)
        # Check that a volume.usage and volume.attach notification was sent
        self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))

        self.compute.detach_volume(self.context, uuids.volume_id, instance)

        # Check that volume.attach, 2 volume.usage, and volume.detach
        # notifications were sent
        self.assertEqual(4, len(fake_notifier.NOTIFICATIONS))
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual('compute.instance.volume.attach', msg.event_type)
        msg = fake_notifier.NOTIFICATIONS[2]
        self.assertEqual('volume.usage', msg.event_type)
        payload = msg.payload
        self.assertEqual(instance['uuid'], payload['instance_id'])
        self.assertEqual('fake', payload['user_id'])
        self.assertEqual('fake', payload['tenant_id'])
        self.assertEqual(1, payload['reads'])
        self.assertEqual(30, payload['read_bytes'])
        self.assertEqual(1, payload['writes'])
        self.assertEqual(20, payload['write_bytes'])
        self.assertIsNone(payload['availability_zone'])
        msg = fake_notifier.NOTIFICATIONS[3]
        self.assertEqual('compute.instance.volume.detach', msg.event_type)

        # Check the database for the volume usage record: after detach the
        # "curr" counters are folded into the "tot" counters.
        volume_usages = db.vol_get_usage_by_time(self.context, 0)
        self.assertEqual(1, len(volume_usages))
        volume_usage = volume_usages[0]
        self.assertEqual(0, volume_usage['curr_reads'])
        self.assertEqual(0,
                         volume_usage['curr_read_bytes'])
        self.assertEqual(0, volume_usage['curr_writes'])
        self.assertEqual(0, volume_usage['curr_write_bytes'])
        self.assertEqual(1, volume_usage['tot_reads'])
        self.assertEqual(30, volume_usage['tot_read_bytes'])
        self.assertEqual(1, volume_usage['tot_writes'])
        self.assertEqual(20, volume_usage['tot_write_bytes'])

    def test_prepare_image_mapping(self):
        # _prepare_image_mapping should translate EC2-style 'virtual'
        # device mappings (swap/ephemeralN) into new-style BDM dicts,
        # sized from the flavor; 'ami' and 'root' entries produce nothing.
        swap_size = 1
        ephemeral_size = 1
        instance_type = {'swap': swap_size,
                         'ephemeral_gb': ephemeral_size}
        mappings = [
                {'virtual': 'ami', 'device': 'sda1'},
                {'virtual': 'root', 'device': '/dev/sda1'},

                {'virtual': 'swap', 'device': 'sdb4'},

                {'virtual': 'ephemeral0', 'device': 'sdc1'},
                {'virtual': 'ephemeral1', 'device': 'sdc2'},
        ]

        preped_bdm = self.compute_api._prepare_image_mapping(
            instance_type, mappings)

        expected_result = [
            {
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': swap_size
            },
            {
                'device_name': '/dev/sdc1',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': CONF.default_ephemeral_format,
                'boot_index': -1,
                'volume_size': ephemeral_size
            },
            {
                'device_name': '/dev/sdc2',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': CONF.default_ephemeral_format,
                'boot_index': -1,
                'volume_size': ephemeral_size
            }
        ]

        # Only assert on the keys we care about; the result may carry
        # extra populated fields.
        for expected, got in zip(expected_result, preped_bdm):
            self.assertThat(expected, matchers.IsSubDictOf(got))

    def test_validate_bdm(self):
        # Cinder lookups are stubbed so every volume/snapshot appears to
        # exist with size 4 and attaching always succeeds.
        def fake_get(self, context, res_id):
            return {'id': res_id, 'size': 4}

        def fake_check_attach(*args, **kwargs):
            pass

        self.stubs.Set(cinder.API, 'get', fake_get)
        self.stubs.Set(cinder.API, 'get_snapshot', fake_get)
        self.stubs.Set(cinder.API, 'check_attach',
                       fake_check_attach)

        volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
        snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555'
        image_id = '77777777-aaaa-bbbb-cccc-555555555555'

        instance = self._create_fake_instance_obj()
        instance_type = {'swap': 1, 'ephemeral_gb': 2}
        mappings = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1
            }, anon=True),
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sda1',
                'source_type': 'volume',
                'destination_type': 'volume',
                'device_type': 'disk',
                'volume_id': volume_id,
                'guest_format': None,
                'boot_index': 1,
            }, anon=True),
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sda2',
                'source_type': 'snapshot',
                'destination_type': 'volume',
                'snapshot_id': snapshot_id,
                'device_type': 'disk',
                'guest_format': None,
                'volume_size': 6,
                'boot_index': 0,
            }, anon=True),
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sda3',
                'source_type': 'image',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': 2,
                'volume_size': 1
            }, anon=True)
        ]
        mappings = block_device_obj.block_device_make_list_from_dicts(
                self.context, mappings)

        # Make sure it passes at first
        self.compute_api._validate_bdm(self.context, instance,
                                       instance_type, mappings)
        # Validation fills in missing sizes from Cinder (volume size 4 via
        # the fake get, 6 came from the BDM itself).
        self.assertEqual(4, mappings[1].volume_size)
        self.assertEqual(6, mappings[2].volume_size)

        # Boot sequence: duplicate boot indexes are rejected.
        mappings[2].boot_index = 2
        self.assertRaises(exception.InvalidBDMBootSequence,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings)
        mappings[2].boot_index = 0

        # number of local block_devices
        self.flags(max_local_block_devices=1)
        self.assertRaises(exception.InvalidBDMLocalsLimit,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings)
        ephemerals = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/vdb',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': -1,
                'volume_size': 1
            }, anon=True),
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/vdc',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': -1,
                'volume_size': 1
            }, anon=True)
        ]
        ephemerals = block_device_obj.block_device_make_list_from_dicts(
                self.context, ephemerals)

        self.flags(max_local_block_devices=4)

        # More ephemerals are OK as long as they are not over the size limit
        mappings_ = mappings[:]
        mappings_.objects.extend(ephemerals)
        self.compute_api._validate_bdm(self.context, instance,
                                       instance_type, mappings_)

        # Ephemerals over the size limit (flavor allows 2 GB total)
        ephemerals[0].volume_size = 3
        mappings_ = mappings[:]
        mappings_.objects.extend(ephemerals)
        self.assertRaises(exception.InvalidBDMEphemeralSize,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings_)

        # Swap over the size limit (flavor allows 1)
        mappings[0].volume_size = 3
        self.assertRaises(exception.InvalidBDMSwapSize,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings)
        mappings[0].volume_size = 1

        additional_swap = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/vdb',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1
            }, anon=True)
        ]
        additional_swap = block_device_obj.block_device_make_list_from_dicts(
                self.context, additional_swap)

        # More than one swap
        mappings_ = mappings[:]
        mappings_.objects.extend(additional_swap)
        self.assertRaises(exception.InvalidBDMFormat,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings_)

        # An image-backed volume BDM without a size is invalid.
        image_no_size = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sda4',
                'source_type': 'image',
                'image_id': image_id,
                'destination_type': 'volume',
                'boot_index': -1,
                'volume_size': None,
            }, anon=True)
        ]
        image_no_size = block_device_obj.block_device_make_list_from_dicts(
                self.context, image_no_size)
        mappings_ = mappings[:]
        mappings_.objects.extend(image_no_size)
        self.assertRaises(exception.InvalidBDM,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings_)

        # blank device without a specified size fails
        blank_no_size = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sda4',
                'source_type': 'blank',
                'destination_type': 'volume',
                'boot_index': -1,
                'volume_size': None,
            }, anon=True)
        ]
        blank_no_size = block_device_obj.block_device_make_list_from_dicts(
                self.context, blank_no_size)
        mappings_ = mappings[:]
        mappings_.objects.extend(blank_no_size)
        self.assertRaises(exception.InvalidBDM,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings_)

    def test_validate_bdm_media_service_exceptions(self):
        instance_type = {'swap': 1, 'ephemeral_gb': 1}
        bdms = [fake_block_device.FakeDbBlockDeviceDict({
                     'id': 1,
                     'no_device': None,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'snapshot_id': None,
                     'volume_id': uuids.volume_id,
                     'device_name': 'vda',
                     'boot_index': 0,
                     'delete_on_termination': False}, anon=True)]
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, bdms)

        # First we test a list of invalid status values that should result
        # in an InvalidVolume exception being raised.
        status_values = (
            # First two check that the status is 'available'.
            ('creating', 'detached'),
            ('error', 'detached'),
            # Checks that the attach_status is 'detached'.
            ('available', 'attached')
        )

        for status, attach_status in status_values:
            if attach_status == 'attached':
                # Attached volumes also report their attachments.
                def fake_volume_get(self, ctxt, volume_id):
                    return {'id': volume_id,
                            'status': status,
                            'attach_status': attach_status,
                            'multiattach': False,
                            'attachments': {}}

            else:
                def fake_volume_get(self, ctxt, volume_id):
                    return {'id': volume_id,
                            'status': status,
                            'attach_status': attach_status,
                            'multiattach': False}
            self.stubs.Set(cinder.API, 'get', fake_volume_get)
            self.assertRaises(exception.InvalidVolume,
                              self.compute_api._validate_bdm,
                              self.context, self.instance,
                              instance_type, bdms)

        # Now we test a 404 case that results in InvalidBDMVolume.
        def fake_volume_get_not_found(self, context, volume_id):
            raise exception.VolumeNotFound(volume_id)

        self.stubs.Set(cinder.API, 'get', fake_volume_get_not_found)
        self.assertRaises(exception.InvalidBDMVolume,
                          self.compute_api._validate_bdm,
                          self.context, self.instance,
                          instance_type, bdms)

        # Check that the volume status is 'available' and attach_status is
        # 'detached' and accept the request if so
        def fake_volume_get_ok(self, context, volume_id):
            return {'id': volume_id, 'status': 'available',
                    'attach_status': 'detached', 'multiattach': False}
        self.stubs.Set(cinder.API, 'get', fake_volume_get_ok)

        self.compute_api._validate_bdm(self.context, self.instance,
                                       instance_type, bdms)

    def test_volume_snapshot_create(self):
        # Through RPC the NotImplementedError (fake driver has no snapshot
        # support) is wrapped as messaging.ExpectedException; unwrapped via
        # ExceptionHelper the original exception surfaces.
        self.assertRaises(messaging.ExpectedException,
                          self.compute.volume_snapshot_create,
                          self.context, self.instance_object, 'fake_id', {})

        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(NotImplementedError,
                          self.compute.volume_snapshot_create,
                          self.context, self.instance_object, 'fake_id', {})

    def test_volume_snapshot_delete(self):
        # Same wrapping behaviour as test_volume_snapshot_create, but for
        # the delete path.
        self.assertRaises(messaging.ExpectedException,
                          self.compute.volume_snapshot_delete,
                          self.context, self.instance_object, 'fake_id',
                          'fake_id2', {})

        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(NotImplementedError,
                          self.compute.volume_snapshot_delete,
                          self.context, self.instance_object, 'fake_id',
                          'fake_id2', {})

    @mock.patch.object(cinder.API, 'create',
                       side_effect=exception.OverQuota(overs='volumes'))
    def test_prep_block_device_over_quota_failure(self, mock_create):
        # A Cinder OverQuota raised while creating a boot volume must
        # surface from _prep_block_device as VolumeLimitExceeded.
        instance = self._create_fake_instance_obj()
        bdms = [
            block_device.BlockDeviceDict({
                'boot_index': 0,
                'guest_format': None,
                'connection_info': None,
                'device_type': u'disk',
                'source_type': 'image',
                'destination_type': 'volume',
                'volume_size': 1,
                'image_id': 1,
                'device_name': '/dev/vdb',
            })]
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, bdms)
        self.assertRaises(exception.VolumeLimitExceeded,
compute_manager.ComputeManager()._prep_block_device, self.context, instance, bdms) self.assertTrue(mock_create.called) @mock.patch.object(nova.virt.block_device, 'get_swap') @mock.patch.object(nova.virt.block_device, 'convert_blanks') @mock.patch.object(nova.virt.block_device, 'convert_images') @mock.patch.object(nova.virt.block_device, 'convert_snapshots') @mock.patch.object(nova.virt.block_device, 'convert_volumes') @mock.patch.object(nova.virt.block_device, 'convert_ephemerals') @mock.patch.object(nova.virt.block_device, 'convert_swap') @mock.patch.object(nova.virt.block_device, 'attach_block_devices') def test_prep_block_device_with_blanks(self, attach_block_devices, convert_swap, convert_ephemerals, convert_volumes, convert_snapshots, convert_images, convert_blanks, get_swap): instance = self._create_fake_instance_obj() instance['root_device_name'] = '/dev/vda' root_volume = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'instance_uuid': uuids.block_device_instance, 'source_type': 'image', 'destination_type': 'volume', 'image_id': 'fake-image-id-1', 'volume_size': 1, 'boot_index': 0})) blank_volume1 = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'instance_uuid': uuids.block_device_instance, 'source_type': 'blank', 'destination_type': 'volume', 'volume_size': 1, 'boot_index': 1})) blank_volume2 = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'instance_uuid': uuids.block_device_instance, 'source_type': 'blank', 'destination_type': 'volume', 'volume_size': 1, 'boot_index': 2})) bdms = [blank_volume1, blank_volume2, root_volume] def fake_attach_block_devices(bdm, *args, **kwargs): return bdm convert_swap.return_value = [] convert_ephemerals.return_value = [] convert_volumes.return_value = [blank_volume1, blank_volume2] convert_snapshots.return_value = [] convert_images.return_value = [root_volume] convert_blanks.return_value = [] attach_block_devices.side_effect = 
fake_attach_block_devices get_swap.return_value = [] expected_block_device_info = { 'root_device_name': '/dev/vda', 'swap': [], 'ephemerals': [], 'block_device_mapping': bdms } manager = compute_manager.ComputeManager() manager.use_legacy_block_device_info = False block_device_info = manager._prep_block_device(self.context, instance, bdms) convert_swap.assert_called_once_with(bdms) convert_ephemerals.assert_called_once_with(bdms) bdm_args = tuple(bdms) convert_volumes.assert_called_once_with(bdm_args) convert_snapshots.assert_called_once_with(bdm_args) convert_images.assert_called_once_with(bdm_args) convert_blanks.assert_called_once_with(bdm_args) self.assertEqual(expected_block_device_info, block_device_info) self.assertEqual(1, attach_block_devices.call_count) get_swap.assert_called_once_with([]) class ComputeTestCase(BaseTestCase): def setUp(self): super(ComputeTestCase, self).setUp() self.useFixture(fixtures.SpawnIsSynchronousFixture()) def test_wrap_instance_fault(self): inst = {"uuid": uuids.instance} called = {'fault_added': False} def did_it_add_fault(*args): called['fault_added'] = True self.stubs.Set(compute_utils, 'add_instance_fault_from_exc', did_it_add_fault) @compute_manager.wrap_instance_fault def failer(self2, context, instance): raise NotImplementedError() self.assertRaises(NotImplementedError, failer, self.compute, self.context, instance=inst) self.assertTrue(called['fault_added']) def test_wrap_instance_fault_instance_in_args(self): inst = {"uuid": uuids.instance} called = {'fault_added': False} def did_it_add_fault(*args): called['fault_added'] = True self.stubs.Set(compute_utils, 'add_instance_fault_from_exc', did_it_add_fault) @compute_manager.wrap_instance_fault def failer(self2, context, instance): raise NotImplementedError() self.assertRaises(NotImplementedError, failer, self.compute, self.context, inst) self.assertTrue(called['fault_added']) def test_wrap_instance_fault_no_instance(self): inst = {"uuid": uuids.instance} called = 
{'fault_added': False} def did_it_add_fault(*args): called['fault_added'] = True self.stubs.Set(compute_utils, 'add_instance_fault_from_exc', did_it_add_fault) @compute_manager.wrap_instance_fault def failer(self2, context, instance): raise exception.InstanceNotFound(instance_id=instance['uuid']) self.assertRaises(exception.InstanceNotFound, failer, self.compute, self.context, inst) self.assertFalse(called['fault_added']) @mock.patch.object(objects.InstanceActionEvent, 'event_start') @mock.patch.object(objects.InstanceActionEvent, 'event_finish_with_failure') def test_wrap_instance_event(self, mock_finish, mock_start): inst = {"uuid": uuids.instance} @compute_manager.wrap_instance_event def fake_event(self, context, instance): pass fake_event(self.compute, self.context, instance=inst) self.assertTrue(mock_start.called) self.assertTrue(mock_finish.called) @mock.patch.object(objects.InstanceActionEvent, 'event_start') @mock.patch.object(objects.InstanceActionEvent, 'event_finish_with_failure') def test_wrap_instance_event_return(self, mock_finish, mock_start): inst = {"uuid": uuids.instance} @compute_manager.wrap_instance_event def fake_event(self, context, instance): return True retval = fake_event(self.compute, self.context, instance=inst) self.assertTrue(retval) self.assertTrue(mock_start.called) self.assertTrue(mock_finish.called) @mock.patch.object(objects.InstanceActionEvent, 'event_start') @mock.patch.object(objects.InstanceActionEvent, 'event_finish_with_failure') def test_wrap_instance_event_log_exception(self, mock_finish, mock_start): inst = {"uuid": uuids.instance} @compute_manager.wrap_instance_event def fake_event(self2, context, instance): raise exception.NovaException() self.assertRaises(exception.NovaException, fake_event, self.compute, self.context, instance=inst) self.assertTrue(mock_start.called) self.assertTrue(mock_finish.called) args, kwargs = mock_finish.call_args self.assertIsInstance(kwargs['exc_val'], exception.NovaException) def 
test_object_compat(self): db_inst = fake_instance.fake_db_instance() @compute_manager.object_compat def test_fn(_self, context, instance): self.assertIsInstance(instance, objects.Instance) self.assertEqual(instance.uuid, db_inst['uuid']) self.assertEqual(instance.metadata, db_inst['metadata']) self.assertEqual(instance.system_metadata, db_inst['system_metadata']) test_fn(None, self.context, instance=db_inst) def test_object_compat_no_metas(self): # Tests that we don't try to set metadata/system_metadata on the # instance object using fields that aren't in the db object. db_inst = fake_instance.fake_db_instance() db_inst.pop('metadata', None) db_inst.pop('system_metadata', None) @compute_manager.object_compat def test_fn(_self, context, instance): self.assertIsInstance(instance, objects.Instance) self.assertEqual(instance.uuid, db_inst['uuid']) self.assertNotIn('metadata', instance) self.assertNotIn('system_metadata', instance) test_fn(None, self.context, instance=db_inst) def test_object_compat_more_positional_args(self): db_inst = fake_instance.fake_db_instance() @compute_manager.object_compat def test_fn(_self, context, instance, pos_arg_1, pos_arg_2): self.assertIsInstance(instance, objects.Instance) self.assertEqual(instance.uuid, db_inst['uuid']) self.assertEqual(instance.metadata, db_inst['metadata']) self.assertEqual(instance.system_metadata, db_inst['system_metadata']) self.assertEqual(pos_arg_1, 'fake_pos_arg1') self.assertEqual(pos_arg_2, 'fake_pos_arg2') test_fn(None, self.context, db_inst, 'fake_pos_arg1', 'fake_pos_arg2') def test_create_instance_with_img_ref_associates_config_drive(self): # Make sure create associates a config drive. 
instance = self._create_fake_instance_obj( params={'config_drive': '1234', }) try: self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instances = db.instance_get_all(self.context) instance = instances[0] self.assertTrue(instance['config_drive']) finally: db.instance_destroy(self.context, instance['uuid']) def test_create_instance_associates_config_drive(self): # Make sure create associates a config drive. instance = self._create_fake_instance_obj( params={'config_drive': '1234', }) try: self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instances = db.instance_get_all(self.context) instance = instances[0] self.assertTrue(instance['config_drive']) finally: db.instance_destroy(self.context, instance['uuid']) def test_create_instance_unlimited_memory(self): # Default of memory limit=None is unlimited. self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) params = {"memory_mb": 999999999999} filter_properties = {'limits': {'memory_mb': None}} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used']) def test_create_instance_unlimited_disk(self): self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) params = {"root_gb": 999999999999, "ephemeral_gb": 99999999999} filter_properties = {'limits': {'disk_gb': None}} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) def test_create_multiple_instances_then_starve(self): self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) limits = {'memory_mb': 4096, 
'disk_gb': 1000} params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[], limits=limits) self.assertEqual(1024, self.rt.compute_node['memory_mb_used']) self.assertEqual(256, self.rt.compute_node['local_gb_used']) params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[], limits=limits) self.assertEqual(3072, self.rt.compute_node['memory_mb_used']) self.assertEqual(768, self.rt.compute_node['local_gb_used']) params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[], limits=limits) self.assertEqual(3072, self.rt.compute_node['memory_mb_used']) self.assertEqual(768, self.rt.compute_node['local_gb_used']) def test_create_multiple_instance_with_neutron_port(self): instance_type = flavors.get_default_flavor() def fake_is_neutron(): return True self.stubs.Set(utils, 'is_neutron', fake_is_neutron) requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=uuids.port_instance)]) self.assertRaises(exception.MultiplePortsNotApplicable, self.compute_api.create, self.context, instance_type=instance_type, image_href=None, max_count=2, requested_networks=requested_networks) def test_create_instance_with_oversubscribed_ram(self): # Test passing of oversubscribed ram policy from the scheduler. 
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) # get total memory as reported by virt driver: resources = self.compute.driver.get_available_resource(NODENAME) total_mem_mb = resources['memory_mb'] oversub_limit_mb = total_mem_mb * 1.5 instance_mb = int(total_mem_mb * 1.45) # build an instance, specifying an amount of memory that exceeds # total_mem_mb, but is less than the oversubscribed limit: params = {"memory_mb": instance_mb, "root_gb": 128, "ephemeral_gb": 128} instance = self._create_fake_instance_obj(params) limits = {'memory_mb': oversub_limit_mb} filter_properties = {'limits': limits} self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used']) def test_create_instance_with_oversubscribed_ram_fail(self): """Test passing of oversubscribed ram policy from the scheduler, but with insufficient memory. """ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) # get total memory as reported by virt driver: resources = self.compute.driver.get_available_resource(NODENAME) total_mem_mb = resources['memory_mb'] oversub_limit_mb = total_mem_mb * 1.5 instance_mb = int(total_mem_mb * 1.55) # build an instance, specifying an amount of memory that exceeds # both total_mem_mb and the oversubscribed limit: params = {"memory_mb": instance_mb, "root_gb": 128, "ephemeral_gb": 128} instance = self._create_fake_instance_obj(params) filter_properties = {'limits': {'memory_mb': oversub_limit_mb}} self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) def test_create_instance_with_oversubscribed_cpu(self): # Test passing of oversubscribed cpu policy from the scheduler. 
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())
        limits = {'vcpu': 3}
        filter_properties = {'limits': limits}

        # get total vcpus as reported by virt driver (the fake driver used
        # in these tests reports a single vcpu, asserted below):
        resources = self.compute.driver.get_available_resource(NODENAME)
        self.assertEqual(1, resources['vcpus'])

        # build an instance, specifying an amount of memory that exceeds
        # total_mem_mb, but is less than the oversubscribed limit:
        params = {"memory_mb": 10, "root_gb": 1,
                  "ephemeral_gb": 1, "vcpus": 2}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                filter_properties, block_device_mapping=[])
        self.assertEqual(2, self.rt.compute_node['vcpus_used'])

        # create one more instance:
        params = {"memory_mb": 10, "root_gb": 1,
                  "ephemeral_gb": 1, "vcpus": 1}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                filter_properties, block_device_mapping=[])
        self.assertEqual(3, self.rt.compute_node['vcpus_used'])

        # delete the instance:
        instance['vm_state'] = vm_states.DELETED
        self.rt.update_usage(self.context, instance=instance)
        self.assertEqual(2, self.rt.compute_node['vcpus_used'])

        # now oversubscribe vcpus and fail:
        params = {"memory_mb": 10, "root_gb": 1,
                  "ephemeral_gb": 1, "vcpus": 2}
        instance = self._create_fake_instance_obj(params)
        limits = {'vcpu': 3}
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[], limits=limits)
        self.assertEqual(vm_states.ERROR, instance.vm_state)

    def test_create_instance_with_oversubscribed_disk(self):
        # Test passing of oversubscribed disk policy from the scheduler.
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())

        # get total disk as reported by virt driver:
        resources = self.compute.driver.get_available_resource(NODENAME)
        total_disk_gb = resources['local_gb']

        oversub_limit_gb = total_disk_gb * 1.5
        instance_gb = int(total_disk_gb * 1.45)

        # build an instance, specifying an amount of disk that exceeds
        # total_disk_gb, but is less than the oversubscribed limit:
        params = {"root_gb": instance_gb, "memory_mb": 10}
        instance = self._create_fake_instance_obj(params)

        limits = {'disk_gb': oversub_limit_gb}
        filter_properties = {'limits': limits}
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                filter_properties, block_device_mapping=[])

        self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used'])

    def test_create_instance_with_oversubscribed_disk_fail(self):
        """Test passing of oversubscribed disk policy from the scheduler, but
        with insufficient disk.
        """
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())

        # get total disk as reported by virt driver:
        resources = self.compute.driver.get_available_resource(NODENAME)
        total_disk_gb = resources['local_gb']

        oversub_limit_gb = total_disk_gb * 1.5
        # 1.55x exceeds the 1.5x oversubscription limit, so the build must
        # fail:
        instance_gb = int(total_disk_gb * 1.55)

        # build an instance, specifying an amount of disk that exceeds
        # the oversubscribed limit:
        params = {"root_gb": instance_gb, "memory_mb": 10}
        instance = self._create_fake_instance_obj(params)

        limits = {'disk_gb': oversub_limit_gb}
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[], limits=limits)
        self.assertEqual(vm_states.ERROR, instance.vm_state)

    def test_create_instance_without_node_param(self):
        # When no node is supplied, the manager fills it in from the
        # locally-known node name.
        instance = self._create_fake_instance_obj({'node': None})

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        instances = db.instance_get_all(self.context)
        instance = instances[0]

        self.assertEqual(NODENAME, instance['node'])

    def test_create_instance_no_image(self):
        # Create instance with no image provided.
        params = {'image_ref': ''}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        self._assert_state({'vm_state': vm_states.ACTIVE,
                            'task_state': None})

    @testtools.skipIf(test_utils.is_osx(),
                      'IPv6 pretty-printing broken on OSX, see bug 1409135')
    def test_default_access_ip(self):
        self.flags(default_access_ip_network_name='test1')
        fake_network.unset_stub_network_methods(self)
        instance = self._create_fake_instance_obj()

        orig_update = self.compute._instance_update

        # Make sure the access_ip_* updates happen in the same DB
        # update as the set to ACTIVE.
        def _instance_update(ctxt, instance_uuid, **kwargs):
            if kwargs.get('vm_state', None) == vm_states.ACTIVE:
                self.assertEqual(kwargs['access_ip_v4'], '192.168.1.100')
                self.assertEqual(kwargs['access_ip_v6'], '2001:db8:0:1::1')
            return orig_update(ctxt, instance_uuid, **kwargs)

        self.stubs.Set(self.compute, '_instance_update', _instance_update)

        try:
            self.compute.build_and_run_instance(self.context, instance, {},
                    {}, {}, block_device_mapping=[])
            instances = db.instance_get_all(self.context)
            instance = instances[0]

            self.assertEqual(instance['access_ip_v4'], '192.168.1.100')
            # NOTE(review): this v6 address differs from the one asserted in
            # _instance_update above — presumably the stubbed network layer
            # updates it after the initial ACTIVE update; confirm.
            self.assertEqual(instance['access_ip_v6'],
                             '2001:db8:0:1:dcad:beff:feef:1')
        finally:
            db.instance_destroy(self.context, instance['uuid'])

    def test_no_default_access_ip(self):
        # Without default_access_ip_network_name set, no access IPs are
        # recorded on the instance.
        instance = self._create_fake_instance_obj()

        try:
            self.compute.build_and_run_instance(self.context, instance, {},
                    {}, {}, block_device_mapping=[])
            instances = db.instance_get_all(self.context)
            instance = instances[0]

            self.assertFalse(instance['access_ip_v4'])
            self.assertFalse(instance['access_ip_v6'])
        finally:
            db.instance_destroy(self.context, instance['uuid'])

    def test_fail_to_schedule_persists(self):
        # check the persistence of the ERROR(scheduling) state.
        params = {'vm_state': vm_states.ERROR,
                  'task_state': task_states.SCHEDULING}
        self._create_fake_instance_obj(params=params)
        # check state is failed even after the periodic poll
        self.compute.periodic_tasks(context.get_admin_context())
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': task_states.SCHEDULING})

    def test_run_instance_setup_block_device_mapping_fail(self):
        """block device mapping failure test.

        Make sure that when there is a block device mapping problem,
        the instance goes to ERROR state, cleaning the task state
        """
        def fake(*args, **kwargs):
            raise exception.InvalidBDM()
        self.stubs.Set(nova.compute.manager.ComputeManager,
                       '_prep_block_device', fake)
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(
            self.context, instance=instance, image={}, request_spec={},
            block_device_mapping=[], filter_properties={},
            requested_networks=[], injected_files=None, admin_password=None,
            node=None)
        # check state is failed even after the periodic poll
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': None})
        self.compute.periodic_tasks(context.get_admin_context())
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': None})

    @mock.patch('nova.compute.manager.ComputeManager._prep_block_device',
                side_effect=exception.OverQuota(overs='volumes'))
    def test_setup_block_device_over_quota_fail(self, mock_prep_block_dev):
        """block device mapping over quota failure test.

        Make sure when we're over volume quota according to Cinder client,
        the appropriate exception is raised and the instance goes to ERROR
        state, cleaning the task state.
        """
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(
            self.context, instance=instance, request_spec={},
            filter_properties={}, requested_networks=[],
            injected_files=None, admin_password=None,
            node=None, block_device_mapping=[], image={})
        # check state is failed even after the periodic poll
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': None})
        self.compute.periodic_tasks(context.get_admin_context())
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': None})
        self.assertTrue(mock_prep_block_dev.called)

    def test_run_instance_spawn_fail(self):
        """spawn failure test.

        Make sure that when there is a spawning problem,
        the instance goes to ERROR state, cleaning the task state.
        """
        def fake(*args, **kwargs):
            raise test.TestingException()
        self.stubs.Set(self.compute.driver, 'spawn', fake)
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(
            self.context, instance=instance, request_spec={},
            filter_properties={}, requested_networks=[],
            injected_files=None, admin_password=None,
            block_device_mapping=[], image={}, node=None)
        # check state is failed even after the periodic poll
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': None})
        self.compute.periodic_tasks(context.get_admin_context())
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': None})

    def test_run_instance_dealloc_network_instance_not_found(self):
        """spawn network deallocate test.

        Make sure that when an instance is not found during spawn that
        the network is deallocated
        """
        instance = self._create_fake_instance_obj()

        def fake(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id="fake")
        self.stubs.Set(self.compute.driver, 'spawn', fake)
        # Expect exactly one _deallocate_network call via mox.
        self.mox.StubOutWithMock(self.compute, '_deallocate_network')
        self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

    def test_run_instance_bails_on_missing_instance(self):
        # Make sure that run_instance() will quickly ignore a deleted instance
        instance = self._create_fake_instance_obj()
        with mock.patch.object(instance, 'save') as mock_save:
            mock_save.side_effect = exception.InstanceNotFound(instance_id=1)
            self.compute.build_and_run_instance(self.context, instance, {},
                    {}, {}, block_device_mapping=[])
            self.assertTrue(mock_save.called)

    def test_run_instance_bails_on_deleting_instance(self):
        # Make sure that run_instance() will quickly ignore a deleting instance
        instance = self._create_fake_instance_obj()
        with mock.patch.object(instance, 'save') as mock_save:
            mock_save.side_effect = exception.UnexpectedDeletingTaskStateError(
                instance_uuid=instance['uuid'],
                expected={'task_state': 'bar'},
                actual={'task_state': 'foo'})
            self.compute.build_and_run_instance(self.context, instance, {},
                    {}, {}, block_device_mapping=[])
            self.assertTrue(mock_save.called)

    def test_can_terminate_on_error_state(self):
        # Make sure that the instance can be terminated in ERROR state.
        # check failed to schedule --> terminate
        params = {'vm_state': vm_states.ERROR}
        instance = self._create_fake_instance_obj(params=params)
        self.compute.terminate_instance(self.context, instance, [], [])
        self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                          self.context, instance['uuid'])
        # Double check it's not there for admins, either.
        self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                          self.context.elevated(), instance['uuid'])

    def test_run_terminate(self):
        # Make sure it is possible to run and terminate instance.
        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        instances = db.instance_get_all(self.context)
        LOG.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

        self.compute.terminate_instance(self.context, instance, [], [])

        instances = db.instance_get_all(self.context)
        LOG.info("After terminating instances: %s", instances)
        self.assertEqual(len(instances), 0)

        # The soft-deleted row is still visible to an admin reading deleted
        # records, and must carry the DELETED vm_state.
        admin_deleted_context = context.get_admin_context(
            read_deleted="only")
        instance = db.instance_get_by_uuid(admin_deleted_context,
                                           instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.DELETED)
        self.assertIsNone(instance['task_state'])

    def test_run_terminate_with_vol_attached(self):
        """Make sure it is possible to run and terminate instance with volume
        attached
        """
        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        instances = db.instance_get_all(self.context)
        LOG.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

        # Stub out the Cinder and compute RPC interactions so no real
        # volume service is needed.
        def fake_check_attach(*args, **kwargs):
            pass

        def fake_reserve_volume(*args, **kwargs):
            pass

        def fake_volume_get(self, context, volume_id):
            return {'id': volume_id,
                    'attach_status': 'attached',
                    'attachments': {instance.uuid: {
                                        'attachment_id': 'abc123'
                                        }
                                    }
                    }

        def fake_terminate_connection(self, context, volume_id, connector):
            pass

        def fake_detach(self, context, volume_id, instance_uuid):
            pass

        bdms = []

        def fake_rpc_reserve_block_device_name(self, context, instance,
                                               device, volume_id, **kwargs):
            bdm = objects.BlockDeviceMapping(
                        **{'context': context,
                           'source_type': 'volume',
                           'destination_type': 'volume',
                           'volume_id': uuids.volume_id,
                           'instance_uuid': instance['uuid'],
                           'device_name': '/dev/vdc'})
            bdm.create()
            bdms.append(bdm)
            return bdm

        self.stubs.Set(cinder.API, 'get', fake_volume_get)
        self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
        self.stubs.Set(cinder.API, 'reserve_volume', fake_reserve_volume)
        self.stubs.Set(cinder.API, 'terminate_connection',
                       fake_terminate_connection)
        self.stubs.Set(cinder.API, 'detach', fake_detach)
        self.stubs.Set(compute_rpcapi.ComputeAPI,
                       'reserve_block_device_name',
                       fake_rpc_reserve_block_device_name)

        self.compute_api.attach_volume(self.context, instance, 1,
                                       '/dev/vdc')

        self.compute.terminate_instance(self.context, instance, bdms, [])

        instances = db.instance_get_all(self.context)
        LOG.info("After terminating instances: %s", instances)
        self.assertEqual(len(instances), 0)
        # Terminating the instance must also clean up its BDM rows.
        bdms = db.block_device_mapping_get_all_by_instance(self.context,
                                                           instance['uuid'])
        self.assertEqual(len(bdms), 0)

    def test_run_terminate_no_image(self):
        """Make sure instance started without image (from volume)
        can be terminated without issues
        """
        params = {'image_ref': ''}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        self._assert_state({'vm_state': vm_states.ACTIVE,
                            'task_state': None})

        self.compute.terminate_instance(self.context, instance, [], [])
        instances = db.instance_get_all(self.context)
        self.assertEqual(len(instances), 0)

    def test_terminate_no_network(self):
        # This is as reported in LP bug 1008875
        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        instances = db.instance_get_all(self.context)
        LOG.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

        self.mox.ReplayAll()

        self.compute.terminate_instance(self.context, instance, [], [])

        instances = db.instance_get_all(self.context)
        LOG.info("After terminating instances: %s", instances)
        self.assertEqual(len(instances), 0)

    def test_run_terminate_timestamps(self):
        # Make sure timestamps are set for launched and destroyed.
        instance = self._create_fake_instance_obj()
        instance['launched_at'] = None
        self.assertIsNone(instance['launched_at'])
        self.assertIsNone(instance['deleted_at'])
        launch = timeutils.utcnow()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        instance.refresh()
        # launched_at is set during the build, so it must be after 'launch'.
        self.assertTrue(instance['launched_at'].replace(tzinfo=None) > launch)
        self.assertIsNone(instance['deleted_at'])
        terminate = timeutils.utcnow()
        self.compute.terminate_instance(self.context, instance, [], [])

        with utils.temporary_mutation(self.context, read_deleted='only'):
            instance = db.instance_get_by_uuid(self.context,
                                               instance['uuid'])
        self.assertTrue(instance['launched_at'].replace(
            tzinfo=None) < terminate)
        self.assertTrue(instance['deleted_at'].replace(
            tzinfo=None) > terminate)

    def test_run_terminate_deallocate_net_failure_sets_error_state(self):
        # A network deallocation failure during terminate must leave the
        # instance in ERROR state.
        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        instances = db.instance_get_all(self.context)
        LOG.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

        def _fake_deallocate_network(*args, **kwargs):
            raise test.TestingException()

        self.stubs.Set(self.compute, '_deallocate_network',
                _fake_deallocate_network)

        self.assertRaises(test.TestingException,
                          self.compute.terminate_instance,
                          self.context, instance, [], [])

        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.ERROR)

    def test_stop(self):
        # Ensure instance can be stopped.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.POWERING_OFF})
        inst_uuid = instance['uuid']
        extra = ['system_metadata', 'metadata']
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                inst_uuid,
                                                expected_attrs=extra)
        self.compute.stop_instance(self.context, instance=inst_obj,
                                   clean_shutdown=True)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_start(self):
        # Ensure instance can be started.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.POWERING_OFF})
        extra = ['system_metadata', 'metadata']
        inst_uuid = instance['uuid']
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                inst_uuid,
                                                expected_attrs=extra)
        self.compute.stop_instance(self.context, instance=inst_obj,
                                   clean_shutdown=True)
        inst_obj.task_state = task_states.POWERING_ON
        inst_obj.save()
        self.compute.start_instance(self.context, instance=inst_obj)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_start_shelved_instance(self):
        # Ensure shelved instance can be started.
        self.deleted_image_id = None

        def fake_delete(self_, ctxt, image_id):
            self.deleted_image_id = image_id

        fake_image.stub_out_image_service(self)
        self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)

        instance = self._create_fake_instance_obj()
        image = {'id': 'fake_id'}
        # Adding shelved information to instance system metadata.
        shelved_time = timeutils.utcnow().isoformat()
        instance.system_metadata['shelved_at'] = shelved_time
        instance.system_metadata['shelved_image_id'] = image['id']
        instance.system_metadata['shelved_host'] = 'fake-mini'
        instance.save()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.POWERING_OFF,
                            "vm_state": vm_states.SHELVED})
        extra = ['system_metadata', 'metadata']
        inst_uuid = instance['uuid']
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                inst_uuid,
                                                expected_attrs=extra)
        self.compute.stop_instance(self.context, instance=inst_obj,
                                   clean_shutdown=True)
        inst_obj.task_state = task_states.POWERING_ON
        inst_obj.save()
        self.compute.start_instance(self.context, instance=inst_obj)
        # Starting a shelved instance must delete the shelved image and
        # strip the shelved_* keys from system metadata.
        self.assertEqual(image['id'], self.deleted_image_id)
        self.assertNotIn('shelved_at', inst_obj.system_metadata)
        self.assertNotIn('shelved_image_id', inst_obj.system_metadata)
        self.assertNotIn('shelved_host', inst_obj.system_metadata)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_stop_start_no_image(self):
        # Ensure a volume-backed instance (no image_ref) can be stopped and
        # started.
        params = {'image_ref': ''}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.POWERING_OFF})
        extra = ['system_metadata', 'metadata']
        inst_uuid = instance['uuid']
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                inst_uuid,
                                                expected_attrs=extra)
        self.compute.stop_instance(self.context, instance=inst_obj,
                                   clean_shutdown=True)
        inst_obj.task_state = task_states.POWERING_ON
        inst_obj.save()
        self.compute.start_instance(self.context, instance=inst_obj)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rescue(self):
        # Ensure instance can be rescued and unrescued.
        called = {'rescued': False,
                  'unrescued': False}

        def fake_rescue(self, context, instance_ref, network_info,
                        image_meta, rescue_password):
            called['rescued'] = True

        self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)

        def fake_unrescue(self, instance_ref, network_info):
            called['unrescued'] = True

        self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
                       fake_unrescue)

        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        instance.task_state = task_states.RESCUING
        instance.save()
        self.compute.rescue_instance(self.context, instance, None, None, True)
        self.assertTrue(called['rescued'])
        instance.task_state = task_states.UNRESCUING
        instance.save()
        self.compute.unrescue_instance(self.context, instance)
        self.assertTrue(called['unrescued'])

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rescue_notifications(self):
        # Ensure notifications on instance rescue.
        def fake_rescue(self, context, instance_ref, network_info,
                        image_meta, rescue_password):
            pass
        self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        fake_notifier.NOTIFICATIONS = []
        instance.task_state = task_states.RESCUING
        instance.save()
        self.compute.rescue_instance(self.context, instance, None, True, True)

        expected_notifications = ['compute.instance.rescue.start',
                                  'compute.instance.exists',
                                  'compute.instance.rescue.end']
        self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
                         expected_notifications)
        for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
            self.assertEqual(msg.event_type, expected_notifications[n])
            self.assertEqual(msg.priority, 'INFO')
            payload = msg.payload
            self.assertEqual(payload['tenant_id'], self.project_id)
            self.assertEqual(payload['user_id'], self.user_id)
            self.assertEqual(payload['instance_id'], instance.uuid)
            self.assertEqual(payload['instance_type'], 'm1.tiny')
            type_id = flavors.get_flavor_by_name('m1.tiny')['id']
            self.assertEqual(str(payload['instance_type_id']), str(type_id))
            self.assertIn('display_name', payload)
            self.assertIn('created_at', payload)
            self.assertIn('launched_at', payload)
            image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
            self.assertEqual(payload['image_ref_url'], image_ref_url)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertIn('rescue_image_name', msg.payload)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_unrescue_notifications(self):
        # Ensure notifications on instance unrescue.
        def fake_unrescue(self, instance_ref, network_info):
            pass
        self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
                       fake_unrescue)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        fake_notifier.NOTIFICATIONS = []
        instance.task_state = task_states.UNRESCUING
        instance.save()
        self.compute.unrescue_instance(self.context, instance)

        expected_notifications = ['compute.instance.unrescue.start',
                                  'compute.instance.unrescue.end']
        self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
                         expected_notifications)
        for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
            self.assertEqual(msg.event_type, expected_notifications[n])
            self.assertEqual(msg.priority, 'INFO')
            payload = msg.payload
            self.assertEqual(payload['tenant_id'], self.project_id)
            self.assertEqual(payload['user_id'], self.user_id)
            self.assertEqual(payload['instance_id'], instance.uuid)
            self.assertEqual(payload['instance_type'], 'm1.tiny')
            type_id = flavors.get_flavor_by_name('m1.tiny')['id']
            self.assertEqual(str(payload['instance_type_id']), str(type_id))
            self.assertIn('display_name', payload)
            self.assertIn('created_at', payload)
            self.assertIn('launched_at', payload)
            image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
            self.assertEqual(payload['image_ref_url'], image_ref_url)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rescue_handle_err(self):
        # If the driver fails to rescue, instance state should go to ERROR
        # and the exception should be converted to InstanceNotRescuable
        inst_obj = self._create_fake_instance_obj()
        self.mox.StubOutWithMock(self.compute, '_get_rescue_image')
        self.mox.StubOutWithMock(nova.virt.fake.FakeDriver, 'rescue')

        self.compute._get_rescue_image(
            mox.IgnoreArg(), inst_obj, mox.IgnoreArg()).AndReturn(
                objects.ImageMeta.from_dict({}))
        nova.virt.fake.FakeDriver.rescue(
            mox.IgnoreArg(), inst_obj, [], mox.IgnoreArg(), 'password'
            ).AndRaise(RuntimeError("Try again later"))

        self.mox.ReplayAll()

        expected_message = ('Instance %s cannot be rescued: '
                            'Driver Error: Try again later' % inst_obj.uuid)

        with testtools.ExpectedException(
                exception.InstanceNotRescuable, expected_message):
            self.compute.rescue_instance(
                self.context, instance=inst_obj,
                rescue_password='password', rescue_image_ref=None,
                clean_shutdown=True)

        self.assertEqual(vm_states.ERROR, inst_obj.vm_state)

    @mock.patch.object(image_api.API, "get")
    @mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
    def test_rescue_with_image_specified(self, mock_rescue,
                                         mock_image_get):
        # An explicitly requested rescue image is fetched and handed to the
        # driver.
        image_ref = uuids.image_instance
        rescue_image_meta = {}
        params = {"task_state": task_states.RESCUING}
        instance = self._create_fake_instance_obj(params=params)

        ctxt = context.get_admin_context()
        mock_context = mock.Mock()
        mock_context.elevated.return_value = ctxt

        mock_image_get.return_value = rescue_image_meta

        self.compute.rescue_instance(mock_context, instance=instance,
                    rescue_password="password", rescue_image_ref=image_ref,
                    clean_shutdown=True)

        mock_image_get.assert_called_with(ctxt, image_ref)
        mock_rescue.assert_called_with(ctxt, instance, [],
                                       test.MatchType(objects.ImageMeta),
                                       'password')
        self.compute.terminate_instance(ctxt, instance, [], [])

    @mock.patch.object(image_api.API, "get")
    @mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
    def test_rescue_with_base_image_when_image_not_specified(self,
            mock_rescue, mock_image_get):
        # With no rescue image given, the instance's base image is used.
        image_ref = "image-ref"
        system_meta = {"image_base_image_ref": image_ref}
        rescue_image_meta = {}
        params = {"task_state": task_states.RESCUING,
                  "system_metadata": system_meta}
        instance = self._create_fake_instance_obj(params=params)

        ctxt = context.get_admin_context()
        mock_context = mock.Mock()
        mock_context.elevated.return_value = ctxt

        mock_image_get.return_value = rescue_image_meta

        self.compute.rescue_instance(mock_context, instance=instance,
                                     rescue_password="password",
                                     rescue_image_ref=None,
                                     clean_shutdown=True)

        mock_image_get.assert_called_with(ctxt, image_ref)
        mock_rescue.assert_called_with(ctxt, instance, [],
                                       test.MatchType(objects.ImageMeta),
                                       'password')
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_power_on(self):
        # Ensure instance can be powered on.

        called = {'power_on': False}

        def fake_driver_power_on(self, context, instance, network_info,
                                 block_device_info):
            called['power_on'] = True

        self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on',
                       fake_driver_power_on)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        extra = ['system_metadata', 'metadata']
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                instance['uuid'],
                                                expected_attrs=extra)
        inst_obj.task_state = task_states.POWERING_ON
        inst_obj.save()
        self.compute.start_instance(self.context, instance=inst_obj)
        self.assertTrue(called['power_on'])
        self.compute.terminate_instance(self.context, inst_obj, [], [])

    def test_power_off(self):
        # Ensure instance can be powered off.
        called = {'power_off': False}

        def fake_driver_power_off(self, instance,
                                  shutdown_timeout, shutdown_attempts):
            called['power_off'] = True

        self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
                       fake_driver_power_off)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        extra = ['system_metadata', 'metadata']
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                instance['uuid'],
                                                expected_attrs=extra)
        inst_obj.task_state = task_states.POWERING_OFF
        inst_obj.save()
        self.compute.stop_instance(self.context, instance=inst_obj,
                                   clean_shutdown=True)
        self.assertTrue(called['power_off'])
        self.compute.terminate_instance(self.context, inst_obj, [], [])

    def test_pause(self):
        # Ensure instance can be paused and unpaused.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        instance.task_state = task_states.PAUSING
        instance.save()
        fake_notifier.NOTIFICATIONS = []
        self.compute.pause_instance(self.context, instance=instance)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'compute.instance.pause.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'compute.instance.pause.end')
        instance.task_state = task_states.UNPAUSING
        instance.save()
        fake_notifier.NOTIFICATIONS = []
        self.compute.unpause_instance(self.context, instance=instance)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'compute.instance.unpause.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'compute.instance.unpause.end')
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_suspend(self):
        # ensure instance can be suspended and resumed.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        instance.task_state = task_states.SUSPENDING
        instance.save()
        self.compute.suspend_instance(self.context, instance)
        instance.task_state = task_states.RESUMING
        instance.save()
        self.compute.resume_instance(self.context, instance)

        # NOTE(review): notifications 0-1 are presumably the create.*
        # events from the build above; only the suspend pair is checked here.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 6)

        msg = fake_notifier.NOTIFICATIONS[2]
        self.assertEqual(msg.event_type,
                         'compute.instance.suspend.start')
        msg = fake_notifier.NOTIFICATIONS[3]
        self.assertEqual(msg.event_type,
                         'compute.instance.suspend.end')

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_suspend_error(self):
        # Ensure vm_state is ERROR when suspend error occurs.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        with mock.patch.object(self.compute.driver, 'suspend',
                               side_effect=test.TestingException):
            self.assertRaises(test.TestingException,
                              self.compute.suspend_instance,
                              self.context,
                              instance=instance)
            instance = db.instance_get_by_uuid(self.context, instance.uuid)
            self.assertEqual(vm_states.ERROR, instance.vm_state)

    def test_suspend_not_implemented(self):
        # Ensure expected exception is raised and the vm_state of instance
        # restore to original value if suspend is not implemented by driver
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        with mock.patch.object(self.compute.driver, 'suspend',
                               side_effect=NotImplementedError(
                                   'suspend test')):
            self.assertRaises(NotImplementedError,
                              self.compute.suspend_instance,
                              self.context,
                              instance=instance)
            instance = db.instance_get_by_uuid(self.context, instance.uuid)
            self.assertEqual(vm_states.ACTIVE, instance.vm_state)

    def test_suspend_rescued(self):
        # ensure rescued instance can be suspended and resumed.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        instance.vm_state = vm_states.RESCUED
        instance.task_state = task_states.SUSPENDING
        instance.save()
        self.compute.suspend_instance(self.context, instance)
        self.assertEqual(instance.vm_state, vm_states.SUSPENDED)

        # Resuming must restore the pre-suspend RESCUED state.
        instance.task_state = task_states.RESUMING
        instance.save()
        self.compute.resume_instance(self.context, instance)
        self.assertEqual(instance.vm_state, vm_states.RESCUED)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_resume_notifications(self):
        # ensure instance can be suspended and resumed.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        instance.task_state = task_states.SUSPENDING
        instance.save()
        self.compute.suspend_instance(self.context, instance)
        instance.task_state = task_states.RESUMING
        instance.save()
        self.compute.resume_instance(self.context, instance)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 6)
        msg = fake_notifier.NOTIFICATIONS[4]
        self.assertEqual(msg.event_type,
                         'compute.instance.resume.start')
        msg = fake_notifier.NOTIFICATIONS[5]
        self.assertEqual(msg.event_type,
                         'compute.instance.resume.end')
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_resume_no_old_state(self):
        # ensure a suspended instance with no old_vm_state is resumed to the
        # ACTIVE state
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        instance.vm_state = vm_states.SUSPENDED
        instance.task_state = task_states.RESUMING
        instance.save()
        self.compute.resume_instance(self.context, instance)
        self.assertEqual(instance.vm_state, vm_states.ACTIVE)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_resume_error(self):
        # Ensure vm_state is ERROR when resume error occurs.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        instance.task_state = task_states.SUSPENDING
        instance.save()
        self.compute.suspend_instance(self.context, instance)
        instance.task_state = task_states.RESUMING
        instance.save()
        with mock.patch.object(self.compute.driver, 'resume',
                               side_effect=test.TestingException):
            self.assertRaises(test.TestingException,
                              self.compute.resume_instance,
                              self.context, instance)

        instance = db.instance_get_by_uuid(self.context, instance.uuid)
        self.assertEqual(vm_states.ERROR, instance.vm_state)

    def test_rebuild(self):
        # Ensure instance can be rebuilt.
        instance = self._create_fake_instance_obj()
        image_ref = instance['image_ref']
        sys_metadata = db.instance_system_metadata_get(self.context,
                        instance['uuid'])
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.REBUILDING})
        self.compute.rebuild_instance(self.context, instance,
                                      image_ref, image_ref,
                                      injected_files=[],
                                      new_pass="new_password",
                                      orig_sys_metadata=sys_metadata,
                                      bdms=[], recreate=False,
                                      on_shared_storage=False)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rebuild_driver(self):
        # Make sure virt drivers can override default rebuild
        called = {'rebuild': False}

        def fake(**kwargs):
            # Mimic the task_state transitions the real rebuild path
            # performs so the manager's expected_task_state checks pass.
            instance = kwargs['instance']
            instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
            instance.save(expected_task_state=[task_states.REBUILDING])
            instance.task_state = task_states.REBUILD_SPAWNING
            instance.save(
                expected_task_state=[
                    task_states.REBUILD_BLOCK_DEVICE_MAPPING])
            called['rebuild'] = True

        self.stubs.Set(self.compute.driver, 'rebuild', fake)
        instance = self._create_fake_instance_obj()
        image_ref = instance['image_ref']
        sys_metadata = db.instance_system_metadata_get(self.context,
                        instance['uuid'])
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.REBUILDING})
        self.compute.rebuild_instance(self.context, instance,
                                      image_ref, image_ref,
                                      injected_files=[],
                                      new_pass="new_password",
                                      orig_sys_metadata=sys_metadata,
                                      bdms=[], recreate=False,
                                      on_shared_storage=False)
        self.assertTrue(called['rebuild'])
        self.compute.terminate_instance(self.context, instance, [], [])

    @mock.patch('nova.compute.manager.ComputeManager._detach_volume')
    def test_rebuild_driver_with_volumes(self, mock_detach):
        bdms = block_device_obj.block_device_make_list(self.context,
                [fake_block_device.FakeDbBlockDeviceDict({
                    'id': 3,
                    'volume_id': uuids.volume_id,
                    'instance_uuid': uuids.block_device_instance,
                    'device_name': '/dev/vda',
                    'connection_info': '{"driver_volume_type": "rbd"}',
                    'source_type': 'image',
                    'destination_type': 'volume',
                    'image_id': 'fake-image-id-1',
                    'boot_index': 0
                })])

        # Make sure virt drivers can override default rebuild
        called = {'rebuild': False}

        def fake(**kwargs):
            instance = kwargs['instance']
            instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
            instance.save(expected_task_state=[task_states.REBUILDING])
            instance.task_state = task_states.REBUILD_SPAWNING
            instance.save(
                expected_task_state=[
                    task_states.REBUILD_BLOCK_DEVICE_MAPPING])
            called['rebuild'] = True
            func = kwargs['detach_block_devices']
            # Have the fake driver call the function to detach block devices
            func(self.context, bdms)
            # Verify volumes to be detached without destroying
            mock_detach.assert_called_once_with(self.context,
                                                bdms[0].volume_id,
                                                instance, destroy_bdm=False)

        self.stubs.Set(self.compute.driver, 'rebuild', fake)
        instance = self._create_fake_instance_obj()
        image_ref = instance['image_ref']
        sys_metadata = db.instance_system_metadata_get(self.context,
                        instance['uuid'])
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.REBUILDING})
        self.compute.rebuild_instance(self.context, instance,
                                      image_ref, image_ref,
                                      injected_files=[],
                                      new_pass="new_password",
                                      orig_sys_metadata=sys_metadata,
                                      bdms=bdms, recreate=False,
                                      on_shared_storage=False)
        self.assertTrue(called['rebuild'])
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rebuild_no_image(self):
        # Ensure instance can be rebuilt when started with no image.
        params = {'image_ref': ''}
        instance = self._create_fake_instance_obj(params)
        sys_metadata = db.instance_system_metadata_get(self.context,
                        instance['uuid'])
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.REBUILDING})
        self.compute.rebuild_instance(self.context, instance,
                                      '', '', injected_files=[],
                                      new_pass="new_password",
                                      orig_sys_metadata=sys_metadata,
                                      bdms=[], recreate=False,
                                      on_shared_storage=False)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rebuild_launched_at_time(self):
        # Ensure launched_at is updated to the (advanced) rebuild time.
        old_time = datetime.datetime(2012, 4, 1)
        cur_time = datetime.datetime(2012, 12, 21, 12, 21)
        time_fixture = self.useFixture(utils_fixture.TimeFixture(old_time))
        instance = self._create_fake_instance_obj()
        image_ref = instance['image_ref']

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        time_fixture.advance_time_delta(cur_time - old_time)
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.REBUILDING})
        self.compute.rebuild_instance(self.context, instance,
                                      image_ref, image_ref,
                                      injected_files=[],
                                      new_pass="new_password",
                                      orig_sys_metadata={},
                                      bdms=[], recreate=False,
                                      on_shared_storage=False)
        instance.refresh()
        self.assertEqual(cur_time,
                         instance['launched_at'].replace(tzinfo=None))
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rebuild_with_injected_files(self):
        # Ensure instance can be rebuilt with injected files.
        injected_files = [
            (b'/a/b/c', base64.b64encode(b'foobarbaz')),
        ]

        # The manager is expected to base64-decode file contents before
        # handing them to the driver's spawn().
        self.decoded_files = [
            (b'/a/b/c', b'foobarbaz'),
        ]

        def _spawn(context, instance, image_meta, injected_files,
                   admin_password, network_info, block_device_info):
            self.assertEqual(self.decoded_files, injected_files)

        self.stubs.Set(self.compute.driver, 'spawn', _spawn)
        instance = self._create_fake_instance_obj()
        image_ref = instance['image_ref']
        sys_metadata = db.instance_system_metadata_get(self.context,
                        instance['uuid'])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.REBUILDING})
        self.compute.rebuild_instance(self.context, instance,
                                      image_ref, image_ref,
                                      injected_files=injected_files,
                                      new_pass="new_password",
                                      orig_sys_metadata=sys_metadata,
                                      bdms=[], recreate=False,
                                      on_shared_storage=False)
        self.compute.terminate_instance(self.context, instance, [], [])

    def _test_reboot(self, soft, test_delete=False, test_unrescue=False,
                     fail_reboot=False, fail_running=False):
        reboot_type = soft and 'SOFT' or 'HARD'
        task_pending = (soft and task_states.REBOOT_PENDING or
task_states.REBOOT_PENDING_HARD) task_started = (soft and task_states.REBOOT_STARTED or task_states.REBOOT_STARTED_HARD) expected_task = (soft and task_states.REBOOTING or task_states.REBOOTING_HARD) expected_tasks = (soft and (task_states.REBOOTING, task_states.REBOOT_PENDING, task_states.REBOOT_STARTED) or (task_states.REBOOTING_HARD, task_states.REBOOT_PENDING_HARD, task_states.REBOOT_STARTED_HARD)) # This is a true unit test, so we don't need the network stubs. fake_network.unset_stub_network_methods(self) self.mox.StubOutWithMock(self.compute, '_get_instance_block_device_info') self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute, '_instance_update') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.compute.driver, 'reboot') # FIXME(comstud): I don't feel like the context needs to # be elevated at all. Hopefully remove elevated from # reboot_instance and remove the stub here in a future patch. # econtext would just become self.context below then. 
        # --- Fixture setup for the reboot scenario ---
        # reboot_instance works on an elevated context, so build the
        # instance objects against it (see FIXME above re: elevation).
        econtext = self.context.elevated()
        # Fake DB row for the instance entering reboot; task_state is the
        # soft/hard REBOOTING value chosen above.
        db_instance = fake_instance.fake_db_instance(
            **dict(uuid=uuids.db_instance,
                   power_state=power_state.NOSTATE,
                   vm_state=vm_states.ACTIVE,
                   task_state=expected_task,
                   launched_at=timeutils.utcnow()))
        instance = objects.Instance._from_db_object(econtext,
                                                    objects.Instance(),
                                                    db_instance)
        # Rows returned from the two instance_update_and_get_original
        # calls recorded below.  power_state=10003 is an arbitrary
        # sentinel value, distinct from the fake power states below.
        updated_dbinstance1 = fake_instance.fake_db_instance(
            **dict(uuid=uuids.db_instance_1,
                   power_state=10003,
                   vm_state=vm_states.ACTIVE,
                   task_state=expected_task,
                   instance_type=flavors.get_default_flavor(),
                   launched_at=timeutils.utcnow()))
        updated_dbinstance2 = fake_instance.fake_db_instance(
            **dict(uuid=uuids.db_instance_2,
                   power_state=10003,
                   vm_state=vm_states.ACTIVE,
                   instance_type=flavors.get_default_flavor(),
                   task_state=expected_task,
                   launched_at=timeutils.utcnow()))
        if test_unrescue:
            # Simulate a reboot that is also an unrescue.
            instance.vm_state = vm_states.RESCUED
        # Clear tracked changes so the manager sees a "clean" object.
        instance.obj_reset_changes()

        fake_nw_model = network_model.NetworkInfo()

        # Opaque token passed through to driver.reboot; the manager
        # never inspects it, so a plain string suffices.
        fake_block_dev_info = 'fake_block_dev_info'
        # Sentinel power states: 10001 = state before reboot,
        # RUNNING = successful reboot, 10002 = failed-reboot state.
        fake_power_state1 = 10001
        fake_power_state2 = power_state.RUNNING
        fake_power_state3 = 10002

        # Beginning of calls we expect.
        # --- mox record phase: expected call sequence for reboot ---
        # NOTE: mox verifies both the order and the arguments of these
        # calls when ReplayAll/VerifyAll run, so do not reorder them.
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.context.elevated().AndReturn(econtext)
        self.compute._get_instance_block_device_info(
            econtext, instance).AndReturn(fake_block_dev_info)
        self.compute.network_api.get_instance_nw_info(
            econtext, instance).AndReturn(fake_nw_model)
        self.compute._notify_about_instance_usage(econtext,
                                                  instance,
                                                  'reboot.start')
        self.compute._get_power_state(econtext,
                instance).AndReturn(fake_power_state1)
        # First DB update: record REBOOT_PENDING(_HARD) plus the current
        # power state before the driver is invoked.
        db.instance_update_and_get_original(econtext, instance['uuid'],
                                            {'task_state': task_pending,
                                             'expected_task_state':
                                                 expected_tasks,
                                             'power_state':
                                                 fake_power_state1},
                                            columns_to_join=[
                                                'system_metadata',
                                                'extra',
                                                'extra.flavor']
                                            ).AndReturn((None,
                                                         updated_dbinstance1))
        expected_nw_info = fake_nw_model
        # Second DB update: transition to REBOOT_STARTED(_HARD).
        db.instance_update_and_get_original(econtext,
                                            updated_dbinstance1['uuid'],
                                            {'task_state': task_started,
                                             'expected_task_state':
                                                 task_pending},
                                            columns_to_join=[
                                                'system_metadata']
                                            ).AndReturn((None,
                                                         updated_dbinstance1))

        # Annoying. driver.reboot is wrapped in a try/except, and
        # doesn't re-raise.  It eats exception generated by mox if
        # this is called with the wrong args, so we have to hack
        # around it: capture the actual call args here and compare
        # them against expected_call_info at the end of the test.
        reboot_call_info = {}
        expected_call_info = {
            'args': (econtext, instance, expected_nw_info,
                     reboot_type),
            'kwargs': {'block_device_info': fake_block_dev_info}}
        fault = exception.InstanceNotFound(instance_id='instance-0000')

        def fake_reboot(*args, **kwargs):
            # Record how driver.reboot was actually invoked.
            reboot_call_info['args'] = args
            reboot_call_info['kwargs'] = kwargs

            # NOTE(sirp): Since `bad_volumes_callback` is a function defined
            # within `reboot_instance`, we don't have access to its value and
            # can't stub it out, thus we skip that comparison.
kwargs.pop('bad_volumes_callback') if fail_reboot: raise fault self.stubs.Set(self.compute.driver, 'reboot', fake_reboot) # Power state should be updated again if not fail_reboot or fail_running: new_power_state = fake_power_state2 self.compute._get_power_state(econtext, instance).AndReturn(fake_power_state2) else: new_power_state = fake_power_state3 self.compute._get_power_state(econtext, instance).AndReturn(fake_power_state3) if test_delete: fault = exception.InstanceNotFound( instance_id=instance['uuid']) db.instance_update_and_get_original( econtext, updated_dbinstance1['uuid'], {'power_state': new_power_state, 'task_state': None, 'vm_state': vm_states.ACTIVE}, columns_to_join=['system_metadata'], ).AndRaise(fault) self.compute._notify_about_instance_usage( econtext, instance, 'reboot.end') elif fail_reboot and not fail_running: db.instance_update_and_get_original( econtext, updated_dbinstance1['uuid'], {'vm_state': vm_states.ERROR}, columns_to_join=['system_metadata'], ).AndRaise(fault) else: db.instance_update_and_get_original( econtext, updated_dbinstance1['uuid'], {'power_state': new_power_state, 'task_state': None, 'vm_state': vm_states.ACTIVE}, columns_to_join=['system_metadata'], ).AndReturn((None, updated_dbinstance2)) if fail_running: self.compute._notify_about_instance_usage(econtext, instance, 'reboot.error', fault=fault) self.compute._notify_about_instance_usage( econtext, instance, 'reboot.end') self.mox.ReplayAll() if not fail_reboot or fail_running: self.compute.reboot_instance(self.context, instance=instance, block_device_info=None, reboot_type=reboot_type) else: self.assertRaises(exception.InstanceNotFound, self.compute.reboot_instance, self.context, instance=instance, block_device_info=None, reboot_type=reboot_type) self.assertEqual(expected_call_info, reboot_call_info) def test_reboot_soft(self): self._test_reboot(True) def test_reboot_soft_and_delete(self): self._test_reboot(True, True) def test_reboot_soft_and_rescued(self): 
self._test_reboot(True, False, True) def test_reboot_soft_and_delete_and_rescued(self): self._test_reboot(True, True, True) def test_reboot_hard(self): self._test_reboot(False) def test_reboot_hard_and_delete(self): self._test_reboot(False, True) def test_reboot_hard_and_rescued(self): self._test_reboot(False, False, True) def test_reboot_hard_and_delete_and_rescued(self): self._test_reboot(False, True, True) @mock.patch.object(jsonutils, 'to_primitive') def test_reboot_fail(self, mock_to_primitive): self._test_reboot(False, fail_reboot=True) def test_reboot_fail_running(self): self._test_reboot(False, fail_reboot=True, fail_running=True) def test_get_instance_block_device_info_source_image(self): bdms = block_device_obj.block_device_make_list(self.context, [fake_block_device.FakeDbBlockDeviceDict({ 'id': 3, 'volume_id': uuids.volume_id, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vda', 'connection_info': '{"driver_volume_type": "rbd"}', 'source_type': 'image', 'destination_type': 'volume', 'image_id': 'fake-image-id-1', 'boot_index': 0 })]) with (mock.patch.object( objects.BlockDeviceMappingList, 'get_by_instance_uuid', return_value=bdms) ) as mock_get_by_instance: block_device_info = ( self.compute._get_instance_block_device_info( self.context, self._create_fake_instance_obj()) ) expected = { 'swap': None, 'ephemerals': [], 'root_device_name': None, 'block_device_mapping': [{ 'connection_info': { 'driver_volume_type': 'rbd' }, 'mount_device': '/dev/vda', 'delete_on_termination': False }] } self.assertTrue(mock_get_by_instance.called) self.assertEqual(block_device_info, expected) def test_get_instance_block_device_info_passed_bdms(self): bdms = block_device_obj.block_device_make_list(self.context, [fake_block_device.FakeDbBlockDeviceDict({ 'id': 3, 'volume_id': uuids.volume_id, 'device_name': '/dev/vdd', 'connection_info': '{"driver_volume_type": "rbd"}', 'source_type': 'volume', 'destination_type': 'volume'}) ]) with (mock.patch.object( 
objects.BlockDeviceMappingList, 'get_by_instance_uuid')) as mock_get_by_instance: block_device_info = ( self.compute._get_instance_block_device_info( self.context, self._create_fake_instance_obj(), bdms=bdms) ) expected = { 'swap': None, 'ephemerals': [], 'root_device_name': None, 'block_device_mapping': [{ 'connection_info': { 'driver_volume_type': 'rbd' }, 'mount_device': '/dev/vdd', 'delete_on_termination': False }] } self.assertFalse(mock_get_by_instance.called) self.assertEqual(block_device_info, expected) def test_get_instance_block_device_info_swap_and_ephemerals(self): instance = self._create_fake_instance_obj() ephemeral0 = fake_block_device.FakeDbBlockDeviceDict({ 'id': 1, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vdb', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'disk_bus': 'virtio', 'delete_on_termination': True, 'guest_format': None, 'volume_size': 1, 'boot_index': -1 }) ephemeral1 = fake_block_device.FakeDbBlockDeviceDict({ 'id': 2, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vdc', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'disk_bus': 'virtio', 'delete_on_termination': True, 'guest_format': None, 'volume_size': 2, 'boot_index': -1 }) swap = fake_block_device.FakeDbBlockDeviceDict({ 'id': 3, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vdd', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'disk_bus': 'virtio', 'delete_on_termination': True, 'guest_format': 'swap', 'volume_size': 1, 'boot_index': -1 }) bdms = block_device_obj.block_device_make_list(self.context, [swap, ephemeral0, ephemeral1]) with ( mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid', return_value=bdms) ) as mock_get_by_instance_uuid: expected_block_device_info = { 'swap': {'device_name': '/dev/vdd', 'swap_size': 1}, 'ephemerals': [{'device_name': '/dev/vdb', 'num': 0, 'size': 1, 'virtual_name': 
'ephemeral0'}, {'device_name': '/dev/vdc', 'num': 1, 'size': 2, 'virtual_name': 'ephemeral1'}], 'block_device_mapping': [], 'root_device_name': None } block_device_info = ( self.compute._get_instance_block_device_info( self.context, instance) ) mock_get_by_instance_uuid.assert_called_once_with(self.context, instance['uuid']) self.assertEqual(expected_block_device_info, block_device_info) def test_inject_network_info(self): # Ensure we can inject network info. called = {'inject': False} def fake_driver_inject_network(self, instance, network_info): called['inject'] = True self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_network_info', fake_driver_inject_network) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.compute.inject_network_info(self.context, instance=instance) self.assertTrue(called['inject']) self.compute.terminate_instance(self.context, instance, [], []) def test_reset_network(self): # Ensure we can reset networking on an instance. called = {'count': 0} def fake_driver_reset_network(self, instance): called['count'] += 1 self.stubs.Set(nova.virt.fake.FakeDriver, 'reset_network', fake_driver_reset_network) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.compute.reset_network(self.context, instance) self.assertEqual(called['count'], 1) self.compute.terminate_instance(self.context, instance, [], []) def _get_snapshotting_instance(self): # Ensure instance can be snapshotted. 
        # Build and run an instance, then put it into the state the
        # snapshot RPC expects (IMAGE_SNAPSHOT_PENDING).
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])
        instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
        instance.save()
        return instance

    def test_snapshot(self):
        """Snapshotting a running instance succeeds."""
        inst_obj = self._get_snapshotting_instance()
        self.compute.snapshot_instance(self.context, image_id='fakesnap',
                                       instance=inst_obj)

    def test_snapshot_no_image(self):
        """Snapshotting an instance booted without an image succeeds."""
        inst_obj = self._get_snapshotting_instance()
        inst_obj.image_ref = ''
        inst_obj.save()
        self.compute.snapshot_instance(self.context, image_id='fakesnap',
                                       instance=inst_obj)

    def _test_snapshot_fails(self, raise_during_cleanup, method,
                             expected_state=True):
        """Common helper: fail the driver snapshot and verify cleanup.

        :param raise_during_cleanup: make the image delete (cleanup) raise
            too, to verify the original error still propagates
        :param method: 'snapshot' or 'backup' - which compute API to drive
        :param expected_state: expected value of fake_image_delete_called
            after the failure
        """
        def fake_snapshot(*args, **kwargs):
            raise test.TestingException()

        self.fake_image_delete_called = False

        def fake_delete(self_, context, image_id):
            # Track that cleanup deleted the partial snapshot image.
            self.fake_image_delete_called = True
            if raise_during_cleanup:
                raise Exception()

        self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
        fake_image.stub_out_image_service(self)
        self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)

        inst_obj = self._get_snapshotting_instance()
        if method == 'snapshot':
            self.assertRaises(test.TestingException,
                              self.compute.snapshot_instance,
                              self.context, image_id='fakesnap',
                              instance=inst_obj)
        else:
            self.assertRaises(test.TestingException,
                              self.compute.backup_instance,
                              self.context, image_id='fakesnap',
                              instance=inst_obj, backup_type='fake',
                              rotation=1)

        self.assertEqual(expected_state, self.fake_image_delete_called)
        # Task state must be reset even though the snapshot failed.
        self._assert_state({'task_state': None})

    @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
    def test_backup_fails(self, mock_rotate):
        self._test_snapshot_fails(False, 'backup')

    @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
    def test_backup_fails_cleanup_ignores_exception(self, mock_rotate):
        self._test_snapshot_fails(True, 'backup')

    @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
@mock.patch.object(nova.compute.manager.ComputeManager, '_do_snapshot_instance') def test_backup_fails_rotate_backup(self, mock_snap, mock_rotate): mock_rotate.side_effect = test.TestingException() self._test_snapshot_fails(True, 'backup', False) def test_snapshot_fails(self): self._test_snapshot_fails(False, 'snapshot') def test_snapshot_fails_cleanup_ignores_exception(self): self._test_snapshot_fails(True, 'snapshot') def _test_snapshot_deletes_image_on_failure(self, status, exc): self.fake_image_delete_called = False def fake_show(self_, context, image_id, **kwargs): self.assertEqual('fakesnap', image_id) image = {'id': image_id, 'status': status} return image self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) def fake_delete(self_, context, image_id): self.fake_image_delete_called = True self.assertEqual('fakesnap', image_id) self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete) def fake_snapshot(*args, **kwargs): raise exc self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot) fake_image.stub_out_image_service(self) inst_obj = self._get_snapshotting_instance() self.compute.snapshot_instance(self.context, image_id='fakesnap', instance=inst_obj) def test_snapshot_fails_with_glance_error(self): image_not_found = exception.ImageNotFound(image_id='fakesnap') self._test_snapshot_deletes_image_on_failure('error', image_not_found) self.assertFalse(self.fake_image_delete_called) self._assert_state({'task_state': None}) def test_snapshot_fails_with_task_state_error(self): deleting_state_error = exception.UnexpectedDeletingTaskStateError( instance_uuid=uuids.instance, expected={'task_state': task_states.IMAGE_SNAPSHOT}, actual={'task_state': task_states.DELETING}) self._test_snapshot_deletes_image_on_failure( 'error', deleting_state_error) self.assertTrue(self.fake_image_delete_called) self._test_snapshot_deletes_image_on_failure( 'active', deleting_state_error) self.assertFalse(self.fake_image_delete_called) def 
test_snapshot_fails_with_instance_not_found(self): instance_not_found = exception.InstanceNotFound(instance_id='uuid') self._test_snapshot_deletes_image_on_failure( 'error', instance_not_found) self.assertTrue(self.fake_image_delete_called) self._test_snapshot_deletes_image_on_failure( 'active', instance_not_found) self.assertFalse(self.fake_image_delete_called) def test_snapshot_handles_cases_when_instance_is_deleted(self): inst_obj = self._get_snapshotting_instance() inst_obj.task_state = task_states.DELETING inst_obj.save() self.compute.snapshot_instance(self.context, image_id='fakesnap', instance=inst_obj) def test_snapshot_handles_cases_when_instance_is_not_found(self): inst_obj = self._get_snapshotting_instance() inst_obj2 = objects.Instance.get_by_uuid(self.context, inst_obj.uuid) inst_obj2.destroy() self.compute.snapshot_instance(self.context, image_id='fakesnap', instance=inst_obj) def _assert_state(self, state_dict): """Assert state of VM is equal to state passed as parameter.""" instances = db.instance_get_all(self.context) self.assertEqual(len(instances), 1) if 'vm_state' in state_dict: self.assertEqual(state_dict['vm_state'], instances[0]['vm_state']) if 'task_state' in state_dict: self.assertEqual(state_dict['task_state'], instances[0]['task_state']) if 'power_state' in state_dict: self.assertEqual(state_dict['power_state'], instances[0]['power_state']) def test_console_output(self): # Make sure we can get console output from instance. instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) output = self.compute.get_console_output(self.context, instance=instance, tail_length=None) self.assertEqual(output, b'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE') self.compute.terminate_instance(self.context, instance, [], []) def test_console_output_bytes(self): # Make sure we can get console output from instance. 
        # Stub get_console_output itself and verify bytes pass through
        # unchanged.
        instance = self._create_fake_instance_obj()

        with mock.patch.object(self.compute,
                               'get_console_output') as mock_console_output:
            mock_console_output.return_value = b'Hello.'

            output = self.compute.get_console_output(self.context,
                                                     instance=instance,
                                                     tail_length=None)
            self.assertEqual(output, b'Hello.')

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_console_output_tail(self):
        # Make sure we can get console output from instance.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])

        # tail_length=2 should return only the last two lines of the
        # fake driver's canned console output.
        output = self.compute.get_console_output(self.context,
                                                 instance=instance,
                                                 tail_length=2)
        self.assertEqual(output, b'ANOTHER\nLAST LINE')
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_console_output_not_implemented(self):
        """Driver NotImplementedError surfaces as an expected RPC error."""
        def fake_not_implemented(*args, **kwargs):
            raise NotImplementedError()

        self.stubs.Set(self.compute.driver,
                       'get_console_output', fake_not_implemented)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])

        # Over RPC the error arrives wrapped as an ExpectedException.
        self.assertRaises(messaging.ExpectedException,
                          self.compute.get_console_output, self.context,
                          instance, 0)

        # ExceptionHelper unwraps ExpectedException so we can assert on
        # the underlying exception type.
        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(NotImplementedError,
                          self.compute.get_console_output, self.context,
                          instance, 0)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_console_output_instance_not_found(self):
        """Driver InstanceNotFound surfaces as an expected RPC error."""
        def fake_not_found(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake-instance')

        self.stubs.Set(self.compute.driver,
                       'get_console_output', fake_not_found)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])

        self.assertRaises(messaging.ExpectedException,
                          self.compute.get_console_output, self.context,
                          instance, 0)

        # Unwrap ExpectedException for the follow-up assertion.
        self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.InstanceNotFound, self.compute.get_console_output, self.context, instance, 0) self.compute.terminate_instance(self.context, instance, [], []) def test_novnc_vnc_console(self): # Make sure we can a vnc console for an instance. self.flags(enabled=True, group='vnc') self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) # Try with the full instance console = self.compute.get_vnc_console(self.context, 'novnc', instance=instance) self.assertTrue(console) self.compute.terminate_instance(self.context, instance, [], []) def test_validate_console_port_vnc(self): self.flags(enabled=True, group='vnc') self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() def fake_driver_get_console(*args, **kwargs): return ctype.ConsoleVNC(host="fake_host", port=5900) self.stubs.Set(self.compute.driver, "get_vnc_console", fake_driver_get_console) self.assertTrue(self.compute.validate_console_port( context=self.context, instance=instance, port=5900, console_type="novnc")) def test_validate_console_port_spice(self): self.flags(enabled=True, group='vnc') self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() def fake_driver_get_console(*args, **kwargs): return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88) self.stubs.Set(self.compute.driver, "get_spice_console", fake_driver_get_console) self.assertTrue(self.compute.validate_console_port( context=self.context, instance=instance, port=5900, console_type="spice-html5")) def test_validate_console_port_rdp(self): self.flags(enabled=True, group='rdp') instance = self._create_fake_instance_obj() def fake_driver_get_console(*args, **kwargs): return ctype.ConsoleRDP(host="fake_host", port=5900) self.stubs.Set(self.compute.driver, "get_rdp_console", fake_driver_get_console) self.assertTrue(self.compute.validate_console_port( 
context=self.context, instance=instance, port=5900, console_type="rdp-html5")) def test_validate_console_port_mks(self): self.flags(enabled=True, group='mks') instance = self._create_fake_instance_obj() with mock.patch.object( self.compute.driver, 'get_mks_console') as mock_getmks: mock_getmks.return_value = ctype.ConsoleMKS(host="fake_host", port=5900) result = self.compute.validate_console_port(context=self.context, instance=instance, port=5900, console_type="webmks") self.assertTrue(result) def test_validate_console_port_wrong_port(self): self.flags(enabled=True, group='vnc') self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() def fake_driver_get_console(*args, **kwargs): return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88) self.stubs.Set(self.compute.driver, "get_vnc_console", fake_driver_get_console) self.assertFalse(self.compute.validate_console_port( context=self.context, instance=instance, port="wrongport", console_type="spice-html5")) def test_xvpvnc_vnc_console(self): # Make sure we can a vnc console for an instance. self.flags(enabled=True, group='vnc') self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) console = self.compute.get_vnc_console(self.context, 'xvpvnc', instance=instance) self.assertTrue(console) self.compute.terminate_instance(self.context, instance, [], []) def test_invalid_vnc_console_type(self): # Raise useful error if console type is an unrecognised string. 
        self.flags(enabled=True, group='vnc')
        self.flags(enabled=False, group='spice')

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])

        # Over RPC the error arrives wrapped as an ExpectedException.
        self.assertRaises(messaging.ExpectedException,
                          self.compute.get_vnc_console,
                          self.context, 'invalid', instance=instance)

        # ExceptionHelper unwraps ExpectedException so we can assert on
        # the underlying exception type.
        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(exception.ConsoleTypeInvalid,
                          self.compute.get_vnc_console,
                          self.context, 'invalid', instance=instance)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_missing_vnc_console_type(self):
        # Raise useful error if console type is None.
        self.flags(enabled=True, group='vnc')
        self.flags(enabled=False, group='spice')

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])

        self.assertRaises(messaging.ExpectedException,
                          self.compute.get_vnc_console,
                          self.context, None, instance=instance)

        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(exception.ConsoleTypeInvalid,
                          self.compute.get_vnc_console,
                          self.context, None, instance=instance)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_get_vnc_console_not_implemented(self):
        # Driver without VNC support raises NotImplementedError,
        # wrapped as an ExpectedException over RPC.
        self.stubs.Set(self.compute.driver, 'get_vnc_console',
                       fake_not_implemented)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])

        self.assertRaises(messaging.ExpectedException,
                          self.compute.get_vnc_console,
                          self.context, 'novnc', instance=instance)

        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(NotImplementedError,
                          self.compute.get_vnc_console,
                          self.context, 'novnc', instance=instance)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_spicehtml5_spice_console(self):
        # Make sure we can get a spice console for an instance.
        self.flags(enabled=False, group='vnc')
        self.flags(enabled=True, group='spice')

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])

        # Try with the full instance
        console = self.compute.get_spice_console(self.context, 'spice-html5',
                                                 instance=instance)
        self.assertTrue(console)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_invalid_spice_console_type(self):
        # Raise useful error if console type is an unrecognised string.
        self.flags(enabled=False, group='vnc')
        self.flags(enabled=True, group='spice')

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])

        # Over RPC the error arrives wrapped as an ExpectedException.
        self.assertRaises(messaging.ExpectedException,
                          self.compute.get_spice_console,
                          self.context, 'invalid', instance=instance)

        # ExceptionHelper unwraps ExpectedException so we can assert on
        # the underlying exception type.
        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(exception.ConsoleTypeInvalid,
                          self.compute.get_spice_console,
                          self.context, 'invalid', instance=instance)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_get_spice_console_not_implemented(self):
        # Driver without SPICE support raises NotImplementedError,
        # wrapped as an ExpectedException over RPC.
        self.stubs.Set(self.compute.driver, 'get_spice_console',
                       fake_not_implemented)
        self.flags(enabled=False, group='vnc')
        self.flags(enabled=True, group='spice')

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])

        self.assertRaises(messaging.ExpectedException,
                          self.compute.get_spice_console,
                          self.context, 'spice-html5', instance=instance)

        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(NotImplementedError,
                          self.compute.get_spice_console,
                          self.context, 'spice-html5', instance=instance)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_missing_spice_console_type(self):
        # Raise useful error if console type is None.
        self.flags(enabled=False, group='vnc')
        self.flags(enabled=True, group='spice')
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])

        # Over RPC the error arrives wrapped as an ExpectedException.
        self.assertRaises(messaging.ExpectedException,
                          self.compute.get_spice_console,
                          self.context, None, instance=instance)

        # ExceptionHelper unwraps ExpectedException so we can assert on
        # the underlying exception type.
        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(exception.ConsoleTypeInvalid,
                          self.compute.get_spice_console,
                          self.context, None, instance=instance)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rdphtml5_rdp_console(self):
        # Make sure we can get a rdp console for an instance.
        self.flags(enabled=False, group='vnc')
        self.flags(enabled=True, group='rdp')

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])

        # Try with the full instance
        console = self.compute.get_rdp_console(self.context, 'rdp-html5',
                                               instance=instance)
        self.assertTrue(console)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_invalid_rdp_console_type(self):
        # Raise useful error if console type is an unrecognised string.
        self.flags(enabled=False, group='vnc')
        self.flags(enabled=True, group='rdp')

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])

        self.assertRaises(messaging.ExpectedException,
                          self.compute.get_rdp_console,
                          self.context, 'invalid', instance=instance)

        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(exception.ConsoleTypeInvalid,
                          self.compute.get_rdp_console,
                          self.context, 'invalid', instance=instance)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_missing_rdp_console_type(self):
        # Raise useful error if console type is None.
        self.flags(enabled=False, group='vnc')
        self.flags(enabled=True, group='rdp')

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])
self.assertRaises(messaging.ExpectedException, self.compute.get_rdp_console, self.context, None, instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_rdp_console, self.context, None, instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_vnc_console_instance_not_ready(self): self.flags(enabled=True, group='vnc') self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) def fake_driver_get_console(*args, **kwargs): raise exception.InstanceNotFound(instance_id=instance['uuid']) self.stubs.Set(self.compute.driver, "get_vnc_console", fake_driver_get_console) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.InstanceNotReady, self.compute.get_vnc_console, self.context, 'novnc', instance=instance) def test_spice_console_instance_not_ready(self): self.flags(enabled=False, group='vnc') self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) def fake_driver_get_console(*args, **kwargs): raise exception.InstanceNotFound(instance_id=instance['uuid']) self.stubs.Set(self.compute.driver, "get_spice_console", fake_driver_get_console) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.InstanceNotReady, self.compute.get_spice_console, self.context, 'spice-html5', instance=instance) def test_rdp_console_instance_not_ready(self): self.flags(enabled=False, group='vnc') self.flags(enabled=True, group='rdp') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) def fake_driver_get_console(*args, **kwargs): raise exception.InstanceNotFound(instance_id=instance['uuid']) self.stubs.Set(self.compute.driver, "get_rdp_console", fake_driver_get_console) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.InstanceNotReady, 
self.compute.get_rdp_console, self.context, 'rdp-html5', instance=instance) def test_vnc_console_disabled(self): self.flags(enabled=False, group='vnc') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeUnavailable, self.compute.get_vnc_console, self.context, 'novnc', instance=instance) def test_spice_console_disabled(self): self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeUnavailable, self.compute.get_spice_console, self.context, 'spice-html5', instance=instance) def test_rdp_console_disabled(self): self.flags(enabled=False, group='rdp') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeUnavailable, self.compute.get_rdp_console, self.context, 'rdp-html5', instance=instance) def test_diagnostics(self): # Make sure we can get diagnostics for an instance. expected_diagnostic = {'cpu0_time': 17300000000, 'memory': 524288, 'vda_errors': -1, 'vda_read': 262144, 'vda_read_req': 112, 'vda_write': 5778432, 'vda_write_req': 488, 'vnet1_rx': 2070139, 'vnet1_rx_drop': 0, 'vnet1_rx_errors': 0, 'vnet1_rx_packets': 26701, 'vnet1_tx': 140208, 'vnet1_tx_drop': 0, 'vnet1_tx_errors': 0, 'vnet1_tx_packets': 662, } instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) diagnostics = self.compute.get_diagnostics(self.context, instance=instance) self.assertEqual(diagnostics, expected_diagnostic) self.compute.terminate_instance(self.context, instance, [], []) def test_instance_diagnostics(self): # Make sure we can get diagnostics for an instance. 
        # Body of test_instance_diagnostics (def is on the previous line):
        # validates the standardized v3 diagnostics structure from the fake
        # driver.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        diagnostics = self.compute.get_instance_diagnostics(self.context,
                instance=instance)
        expected = {'config_drive': True,
                    'cpu_details': [{'time': 17300000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': 'fake-disk-id',
                                      'read_bytes': 262144,
                                      'read_requests': 112,
                                      'write_bytes': 5778432,
                                      'write_requests': 488}],
                    'driver': 'fake',
                    'hypervisor_os': 'fake-os',
                    'memory_details': {'maximum': 524288, 'used': 0},
                    'nic_details': [{'mac_address': '01:23:45:67:89:ab',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 2070139,
                                     'rx_packets': 26701,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 140208,
                                     'tx_packets': 662}],
                    'state': 'running',
                    'uptime': 46664,
                    'version': '1.0'}
        self.assertEqual(expected, diagnostics)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_add_fixed_ip_usage_notification(self):
        # Adding a fixed IP should emit exactly two usage notifications
        # (start/end); network side effects are stubbed out.
        def dummy(*args, **kwargs):
            pass

        self.stubs.Set(network_api.API, 'add_fixed_ip_to_instance',
                       dummy)
        self.stubs.Set(nova.compute.manager.ComputeManager,
                       'inject_network_info', dummy)
        self.stubs.Set(nova.compute.manager.ComputeManager,
                       'reset_network', dummy)

        instance = self._create_fake_instance_obj()

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        self.compute.add_fixed_ip_to_instance(self.context, network_id=1,
                                              instance=instance)

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_remove_fixed_ip_usage_notification(self):
        # Mirror of the add_fixed_ip test above for the removal path.
        def dummy(*args, **kwargs):
            pass

        self.stubs.Set(network_api.API, 'remove_fixed_ip_from_instance',
                       dummy)
        self.stubs.Set(nova.compute.manager.ComputeManager,
                       'inject_network_info', dummy)
        self.stubs.Set(nova.compute.manager.ComputeManager,
                       'reset_network', dummy)

        instance = self._create_fake_instance_obj()

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        self.compute.remove_fixed_ip_from_instance(self.context, 1,
                                                   instance=instance)

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_run_instance_usage_notification(self, request_spec=None):
        # Ensure run instance generates appropriate usage notification.
        # NOTE(review): also reused as a helper by the image/volume-meta
        # tests below, which pass a custom request_spec.
        request_spec = request_spec or {}
        instance = self._create_fake_instance_obj()
        expected_image_name = request_spec.get('image', {}).get('name', '')
        self.compute.build_and_run_instance(self.context, instance,
                                            request_spec=request_spec,
                                            filter_properties={},
                                            image={'name':
                                                   expected_image_name},
                                            block_device_mapping=[])
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        instance.refresh()
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type, 'compute.instance.create.start')
        # The last event is the one with the sugar in it.
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.priority, 'INFO')
        self.assertEqual(msg.event_type, 'compute.instance.create.end')
        payload = msg.payload
        self.assertEqual(payload['tenant_id'], self.project_id)
        self.assertEqual(payload['user_id'], self.user_id)
        self.assertEqual(payload['instance_id'], instance['uuid'])
        self.assertEqual(payload['instance_type'], 'm1.tiny')
        type_id = flavors.get_flavor_by_name('m1.tiny')['id']
        self.assertEqual(str(payload['instance_type_id']), str(type_id))
        flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
        self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
        self.assertEqual(payload['state'], 'active')
        self.assertIn('display_name', payload)
        self.assertIn('created_at', payload)
        self.assertIn('launched_at', payload)
        self.assertIn('fixed_ips', payload)
        self.assertTrue(payload['launched_at'])
        image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
        self.assertEqual(payload['image_ref_url'], image_ref_url)
        self.assertEqual('Success', payload['message'])
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_run_instance_image_usage_notification(self):
        request_spec = {'image': {'name':
        'fake_name', 'key': 'value'}}
        self.test_run_instance_usage_notification(request_spec=request_spec)

    def test_run_instance_usage_notification_volume_meta(self):
        # Volume's image metadata won't contain the image name
        request_spec = {'image': {'key': 'value'}}
        self.test_run_instance_usage_notification(request_spec=request_spec)

    def test_run_instance_end_notification_on_abort(self):
        # Test that an error notif is sent if the build is aborted
        instance = self._create_fake_instance_obj()
        instance_uuid = instance['uuid']

        def build_inst_abort(*args, **kwargs):
            raise exception.BuildAbortException(reason="already deleted",
                    instance_uuid=instance_uuid)

        self.stubs.Set(self.compute.driver, 'spawn',
                       build_inst_abort)

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type, 'compute.instance.create.start')
        # The error notification is always last, whatever else was emitted.
        msg = fake_notifier.NOTIFICATIONS[-1]

        self.assertEqual(msg.event_type, 'compute.instance.create.error')
        self.assertEqual('ERROR', msg.priority)
        payload = msg.payload
        message = payload['message']
        self.assertNotEqual(-1, message.find("already deleted"))

    def test_run_instance_error_notification_on_reschedule(self):
        # Test that error notif is sent if the build got rescheduled
        instance = self._create_fake_instance_obj()
        instance_uuid = instance['uuid']

        def build_inst_fail(*args, **kwargs):
            raise exception.RescheduledException(instance_uuid=instance_uuid,
                    reason="something bad happened")

        self.stubs.Set(self.compute.driver, 'spawn',
                       build_inst_fail)

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type, 'compute.instance.create.start')
        msg = fake_notifier.NOTIFICATIONS[-1]

        self.assertEqual(msg.event_type, 'compute.instance.create.error')
        self.assertEqual('ERROR', msg.priority)
        payload = msg.payload
        message = payload['message']
        self.assertNotEqual(-1, message.find("something bad happened"))

    def test_run_instance_error_notification_on_failure(self):
        # Test that error notif is sent if build fails hard
        instance = self._create_fake_instance_obj()

        def build_inst_fail(*args, **kwargs):
            raise test.TestingException("i'm dying")

        self.stubs.Set(self.compute.driver, 'spawn',
                       build_inst_fail)

        self.compute.build_and_run_instance(
                self.context, instance, {}, {}, {}, block_device_mapping=[])

        self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type, 'compute.instance.create.start')
        msg = fake_notifier.NOTIFICATIONS[-1]

        self.assertEqual(msg.event_type, 'compute.instance.create.error')
        self.assertEqual('ERROR', msg.priority)
        payload = msg.payload
        message = payload['message']
        self.assertNotEqual(-1, message.find("i'm dying"))

    def test_terminate_usage_notification(self):
        # Ensure terminate_instance generates correct usage notification.
        # Body of test_terminate_usage_notification (def on previous line).
        # Freezes time at old_time, then advances to cur_time before the
        # delete so terminated_at/deleted_at are deterministic.
        old_time = datetime.datetime(2012, 4, 1)
        cur_time = datetime.datetime(2012, 12, 21, 12, 21)
        time_fixture = self.useFixture(utils_fixture.TimeFixture(old_time))
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        fake_notifier.NOTIFICATIONS = []
        time_fixture.advance_time_delta(cur_time - old_time)
        self.compute.terminate_instance(self.context, instance, [], [])

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4)

        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.priority, 'INFO')
        self.assertEqual(msg.event_type, 'compute.instance.delete.start')
        msg1 = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg1.event_type, 'compute.instance.shutdown.start')
        msg1 = fake_notifier.NOTIFICATIONS[2]
        self.assertEqual(msg1.event_type, 'compute.instance.shutdown.end')
        msg1 = fake_notifier.NOTIFICATIONS[3]
        self.assertEqual(msg1.event_type, 'compute.instance.delete.end')
        payload = msg1.payload
        self.assertEqual(payload['tenant_id'], self.project_id)
        self.assertEqual(payload['user_id'], self.user_id)
        self.assertEqual(payload['instance_id'], instance['uuid'])
        self.assertEqual(payload['instance_type'], 'm1.tiny')
        type_id = flavors.get_flavor_by_name('m1.tiny')['id']
        self.assertEqual(str(payload['instance_type_id']), str(type_id))
        flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
        self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
        self.assertIn('display_name', payload)
        self.assertIn('created_at', payload)
        self.assertIn('launched_at', payload)
        self.assertIn('terminated_at', payload)
        self.assertIn('deleted_at', payload)
        self.assertEqual(payload['terminated_at'], utils.strtime(cur_time))
        self.assertEqual(payload['deleted_at'], utils.strtime(cur_time))
        image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
        self.assertEqual(payload['image_ref_url'], image_ref_url)

    def test_run_instance_queries_macs(self):
        # run_instance should ask the driver for node mac addresses and pass
        # that to the network_api in use.
        fake_network.unset_stub_network_methods(self)
        instance = self._create_fake_instance_obj()

        macs = set(['01:23:45:67:89:ab'])

        self.mox.StubOutWithMock(self.compute.network_api,
                                 "allocate_for_instance")
        self.compute.network_api.allocate_for_instance(
            self.context, instance, vpn=False,
            requested_networks=None, macs=macs,
            security_groups=[], dhcp_options=None,
            bind_host_id=self.compute.host).AndReturn(
                fake_network.fake_get_instance_nw_info(self, 1, 1))

        self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
        self.compute.driver.macs_for_instance(
            mox.IsA(instance_obj.Instance)).AndReturn(macs)
        self.mox.ReplayAll()

        self.compute._build_networks_for_instance(self.context, instance,
                requested_networks=None, security_groups=None)

    def _create_server_group(self, policies, instance_host):
        # Helper: create an InstanceGroup with one member on instance_host,
        # verify the servergroup.create notification, and return the group.
        group_instance = self._create_fake_instance_obj(
                params=dict(host=instance_host))

        instance_group = objects.InstanceGroup(self.context)
        instance_group.user_id = self.user_id
        instance_group.project_id = self.project_id
        instance_group.name = 'messi'
        instance_group.uuid = str(uuid.uuid4())
        instance_group.members = [group_instance.uuid]
        instance_group.policies = policies
        fake_notifier.NOTIFICATIONS = []
        instance_group.create()
        self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(instance_group.name, msg.payload['name'])
        self.assertEqual(instance_group.members, msg.payload['members'])
        self.assertEqual(instance_group.policies, msg.payload['policies'])
        self.assertEqual(instance_group.project_id,
                         msg.payload['project_id'])
        self.assertEqual(instance_group.uuid, msg.payload['uuid'])
        self.assertEqual('servergroup.create', msg.event_type)
        return instance_group

    def test_instance_set_to_error_on_uncaught_exception(self):
        # Test that instance is set to error state when exception is raised.
        # Body of test_instance_set_to_error_on_uncaught_exception (def on
        # previous line): a RemoteError from allocate_for_instance must put
        # the instance into ERROR.
        instance = self._create_fake_instance_obj()

        fake_network.unset_stub_network_methods(self)

        @mock.patch.object(self.compute.network_api,
                           'allocate_for_instance',
                           side_effect=messaging.RemoteError())
        @mock.patch.object(self.compute.network_api,
                           'deallocate_for_instance')
        def _do_test(mock_deallocate, mock_allocate):
            self.compute.build_and_run_instance(self.context, instance, {},
                    {}, {}, block_device_mapping=[])

            instance.refresh()
            self.assertEqual(vm_states.ERROR, instance.vm_state)

            self.compute.terminate_instance(self.context, instance, [], [])

        _do_test()

    def test_delete_instance_keeps_net_on_power_off_fail(self):
        # A power-off failure during destroy must NOT deallocate networking.
        self.mox.StubOutWithMock(self.compute.driver, 'destroy')
        self.mox.StubOutWithMock(self.compute, '_deallocate_network')
        exp = exception.InstancePowerOffFailure(reason='')
        self.compute.driver.destroy(mox.IgnoreArg(),
                                    mox.IgnoreArg(),
                                    mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndRaise(exp)
        # mox will detect if _deallocate_network gets called unexpectedly
        self.mox.ReplayAll()
        instance = self._create_fake_instance_obj()
        self.assertRaises(exception.InstancePowerOffFailure,
                          self.compute._delete_instance,
                          self.context,
                          instance,
                          [],
                          self.none_quotas)

    def test_delete_instance_loses_net_on_other_fail(self):
        # Any other destroy failure still deallocates networking; mox
        # verifies _deallocate_network is called exactly once.
        self.mox.StubOutWithMock(self.compute.driver, 'destroy')
        self.mox.StubOutWithMock(self.compute, '_deallocate_network')
        exp = test.TestingException()
        self.compute.driver.destroy(mox.IgnoreArg(),
                                    mox.IgnoreArg(),
                                    mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndRaise(exp)
        self.compute._deallocate_network(mox.IgnoreArg(),
                                         mox.IgnoreArg(),
                                         mox.IgnoreArg())
        self.mox.ReplayAll()
        instance = self._create_fake_instance_obj()
        self.assertRaises(test.TestingException,
                          self.compute._delete_instance,
                          self.context,
                          instance,
                          [],
                          self.none_quotas)

    def test_delete_instance_deletes_console_auth_tokens(self):
        # Deleting an instance purges its consoleauth tokens.
        instance = self._create_fake_instance_obj()
        self.flags(enabled=True, group='vnc')

        self.tokens_deleted = False

        def fake_delete_tokens(*args, **kwargs):
            self.tokens_deleted = True

        cauth_rpcapi = self.compute.consoleauth_rpcapi
        self.stubs.Set(cauth_rpcapi, 'delete_tokens_for_instance',
                       fake_delete_tokens)

        self.compute._delete_instance(self.context, instance, [],
                                      self.none_quotas)

        self.assertTrue(self.tokens_deleted)

    def test_delete_instance_deletes_console_auth_tokens_cells(self):
        # Same as above, but with cells enabled the tokens are purged via
        # the cells rpcapi instead of consoleauth directly.
        instance = self._create_fake_instance_obj()
        self.flags(enabled=True, group='vnc')
        self.flags(enable=True, group='cells')

        self.tokens_deleted = False

        def fake_delete_tokens(*args, **kwargs):
            self.tokens_deleted = True

        cells_rpcapi = self.compute.cells_rpcapi
        self.stubs.Set(cells_rpcapi, 'consoleauth_delete_tokens',
                       fake_delete_tokens)

        self.compute._delete_instance(self.context, instance, [],
                                      self.none_quotas)

        self.assertTrue(self.tokens_deleted)

    def test_delete_instance_changes_power_state(self):
        """Test that the power state is NOSTATE after deleting an instance."""
        instance = self._create_fake_instance_obj()
        self.compute._delete_instance(self.context, instance, [],
                                      self.none_quotas)
        self.assertEqual(power_state.NOSTATE, instance.power_state)

    def test_instance_termination_exception_sets_error(self):
        """Test that we handle InstanceTerminationFailure
        which is propagated up from the underlying driver.
        """
        instance = self._create_fake_instance_obj()

        def fake_delete_instance(context, instance, bdms,
                                 reservations=None):
            raise exception.InstanceTerminationFailure(reason='')

        self.stubs.Set(self.compute, '_delete_instance',
                       fake_delete_instance)
        self.assertRaises(exception.InstanceTerminationFailure,
                          self.compute.terminate_instance,
                          self.context,
                          instance, [], [])
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.ERROR)

    def test_network_is_deallocated_on_spawn_failure(self):
        # When a spawn fails the network must be deallocated.
        # Body of test_network_is_deallocated_on_spawn_failure (def on
        # previous line): _prep_block_device raising RemoteError stands in
        # for a spawn failure.
        instance = self._create_fake_instance_obj()

        self.mox.StubOutWithMock(self.compute, "_prep_block_device")
        self.compute._prep_block_device(
                mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg()).AndRaise(messaging.RemoteError('', '', ''))
        self.mox.ReplayAll()

        self.compute.build_and_run_instance(
                self.context, instance, {}, {}, {}, block_device_mapping=[])
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_lock(self):
        # FIXME(comstud): This test is such crap. This is testing
        # compute API lock functionality in a test class for the compute
        # manager by running an instance. Hello? We should just have
        # unit tests in test_compute_api that test the check_instance_lock
        # decorator and make sure that appropriate compute_api methods
        # have the decorator.
        instance = self._create_fake_instance_obj()
        instance_uuid = instance['uuid']
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        non_admin_context = context.RequestContext(None,
                                                   None,
                                                   is_admin=False)

        def check_task_state(task_state):
            instance = db.instance_get_by_uuid(self.context, instance_uuid)
            self.assertEqual(instance['task_state'], task_state)

        instance.refresh()

        # should fail with locked nonadmin context
        self.compute_api.lock(self.context, instance)
        self.assertRaises(exception.InstanceIsLocked,
                          self.compute_api.reboot,
                          non_admin_context, instance, 'SOFT')
        check_task_state(None)

        # should fail with invalid task state
        self.compute_api.unlock(self.context, instance)
        instance.task_state = task_states.REBOOTING
        instance.save()
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.reboot,
                          non_admin_context, instance, 'SOFT')
        check_task_state(task_states.REBOOTING)

        # should succeed with admin context
        instance.task_state = None
        instance.save()
        self.compute_api.reboot(self.context, instance, 'SOFT')
        check_task_state(task_states.REBOOTING)
        self.compute.terminate_instance(self.context, instance, [], [])

    def _check_locked_by(self, instance_uuid, locked_by):
        # Assert the instance's locked/locked_by DB columns match locked_by
        # (None means unlocked) and return the fetched instance.
        instance = db.instance_get_by_uuid(self.context, instance_uuid)
        self.assertEqual(instance['locked'], locked_by is not None)
        self.assertEqual(instance['locked_by'], locked_by)
        return instance

    def test_override_owner_lock(self):
        # FIXME(comstud): This test is such crap. This is testing
        # compute API lock functionality in a test class for the compute
        # manager by running an instance. Hello? We should just have
        # unit tests in test_compute_api that test the check_instance_lock
        # decorator and make sure that appropriate compute_api methods
        # have the decorator.
        admin_context = context.RequestContext('admin-user',
                                               'admin-project',
                                               is_admin=True)

        instance = self._create_fake_instance_obj()
        instance_uuid = instance['uuid']
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        # Ensure that an admin can override the owner lock
        self.compute_api.lock(self.context, instance)
        self._check_locked_by(instance_uuid, 'owner')
        self.compute_api.unlock(admin_context, instance)
        self._check_locked_by(instance_uuid, None)

    def test_upgrade_owner_lock(self):
        # FIXME(comstud): This test is such crap. This is testing
        # compute API lock functionality in a test class for the compute
        # manager by running an instance. Hello? We should just have
        # unit tests in test_compute_api that test the check_instance_lock
        # decorator and make sure that appropriate compute_api methods
        # have the decorator.
        admin_context = context.RequestContext('admin-user',
                                               'admin-project',
                                               is_admin=True)

        instance = self._create_fake_instance_obj()
        instance_uuid = instance['uuid']
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        # Ensure that an admin can upgrade the lock and that
        # the owner can no longer unlock
        self.compute_api.lock(self.context, instance)
        self.compute_api.lock(admin_context, instance)
        self._check_locked_by(instance_uuid, 'admin')
        instance.refresh()
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.compute_api.unlock,
                          self.context, instance)
        self._check_locked_by(instance_uuid, 'admin')
        self.compute_api.unlock(admin_context, instance)
        self._check_locked_by(instance_uuid, None)

    def _test_state_revert(self, instance, operation, pre_task_state,
                           kwargs=None, vm_state=None):
        # Helper: run a manager operation that is forced to fail (elevated()
        # and the usage notifier both raise) and verify task_state reverts
        # to None, optionally checking the resulting vm_state too.
        if kwargs is None:
            kwargs = {}

        # The API would have set task_state, so do that here to test
        # that the state gets reverted on failure
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": pre_task_state})

        orig_elevated = self.context.elevated
        orig_notify = self.compute._notify_about_instance_usage

        def _get_an_exception(*args, **kwargs):
            raise test.TestingException()

        self.stubs.Set(self.context, 'elevated', _get_an_exception)
        self.stubs.Set(self.compute,
                       '_notify_about_instance_usage', _get_an_exception)

        func = getattr(self.compute, operation)

        self.assertRaises(test.TestingException,
                func, self.context, instance=instance, **kwargs)
        # self.context.elevated() is called in tearDown()
        self.stubs.Set(self.context, 'elevated', orig_elevated)
        self.stubs.Set(self.compute,
                       '_notify_about_instance_usage', orig_notify)

        # Fetch the instance's task_state and make sure it reverted to None.
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        if vm_state:
            self.assertEqual(instance.vm_state, vm_state)
        self.assertIsNone(instance["task_state"])

    def test_state_revert(self):
        # ensure that task_state is reverted after a failed operation.
        # Body of test_state_revert (def on previous line): drives
        # _test_state_revert over every compute manager operation that sets
        # a task_state. Each action tuple is (operation, pre_task_state[,
        # kwargs[, expected vm_state]]).
        migration = objects.Migration(context=self.context.elevated())
        migration.instance_uuid = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
        migration.new_instance_type_id = '1'
        instance_type = objects.Flavor()

        actions = [
            ("reboot_instance", task_states.REBOOTING,
                                {'block_device_info': [],
                                 'reboot_type': 'SOFT'}),
            ("stop_instance", task_states.POWERING_OFF,
                              {'clean_shutdown': True}),
            ("start_instance", task_states.POWERING_ON),
            ("terminate_instance", task_states.DELETING,
                                   {'bdms': [],
                                    'reservations': []},
                                   vm_states.ERROR),
            ("soft_delete_instance", task_states.SOFT_DELETING,
                                     {'reservations': []}),
            ("restore_instance", task_states.RESTORING),
            ("rebuild_instance", task_states.REBUILDING,
                                 {'orig_image_ref': None,
                                  'image_ref': None,
                                  'injected_files': [],
                                  'new_pass': '',
                                  'orig_sys_metadata': {},
                                  'bdms': [],
                                  'recreate': False,
                                  'on_shared_storage': False}),
            ("set_admin_password", task_states.UPDATING_PASSWORD,
                                   {'new_pass': None}),
            ("rescue_instance", task_states.RESCUING,
                                {'rescue_password': None,
                                 'rescue_image_ref': None,
                                 'clean_shutdown': True}),
            ("unrescue_instance", task_states.UNRESCUING),
            ("revert_resize", task_states.RESIZE_REVERTING,
                              {'migration': migration,
                               'reservations': []}),
            ("prep_resize", task_states.RESIZE_PREP,
                            {'image': {},
                             'instance_type': instance_type,
                             'reservations': [],
                             'request_spec': {},
                             'filter_properties': {},
                             'node': None,
                             'clean_shutdown': True}),
            ("resize_instance", task_states.RESIZE_PREP,
                                {'migration': migration,
                                 'image': {},
                                 'reservations': [],
                                 'instance_type': {},
                                 'clean_shutdown': True}),
            ("pause_instance", task_states.PAUSING),
            ("unpause_instance", task_states.UNPAUSING),
            ("suspend_instance", task_states.SUSPENDING),
            ("resume_instance", task_states.RESUMING),
            ]

        self._stub_out_resize_network_methods()
        instance = self._create_fake_instance_obj()
        for operation in actions:
            if 'revert_resize' in operation:
                migration.source_compute = 'fake-mini'

            # migration.save() must also fail so resize operations revert.
            def fake_migration_save(*args, **kwargs):
                raise test.TestingException()

            self.stubs.Set(migration, 'save', fake_migration_save)
            self._test_state_revert(instance, *operation)

    def _ensure_quota_reservations_committed(self, instance):
        """Mock up commit of quota reservations."""
        reservations = list('fake_res')
        self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
        nova.quota.QUOTAS.commit(mox.IgnoreArg(), reservations,
                                 project_id=instance['project_id'],
                                 user_id=instance['user_id'])
        self.mox.ReplayAll()
        return reservations

    def _ensure_quota_reservations_rolledback(self, instance):
        """Mock up rollback of quota reservations."""
        reservations = list('fake_res')
        self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
        nova.quota.QUOTAS.rollback(mox.IgnoreArg(), reservations,
                                   project_id=instance['project_id'],
                                   user_id=instance['user_id'])
        self.mox.ReplayAll()
        return reservations

    def test_quotas_successful_delete(self):
        # A clean delete commits the quota reservations.
        instance = self._create_fake_instance_obj()
        resvs = self._ensure_quota_reservations_committed(instance)
        self.compute.terminate_instance(self.context, instance,
                                        bdms=[], reservations=resvs)

    def test_quotas_failed_delete(self):
        # A failed delete rolls the quota reservations back.
        instance = self._create_fake_instance_obj()

        def fake_shutdown_instance(*args, **kwargs):
            raise test.TestingException()

        self.stubs.Set(self.compute, '_shutdown_instance',
                       fake_shutdown_instance)

        resvs = self._ensure_quota_reservations_rolledback(instance)
        self.assertRaises(test.TestingException,
                          self.compute.terminate_instance,
                          self.context, instance,
                          bdms=[], reservations=resvs)

    def test_quotas_successful_soft_delete(self):
        # A clean soft delete commits the quota reservations.
        instance = self._create_fake_instance_obj(
            params=dict(task_state=task_states.SOFT_DELETING))
        resvs = self._ensure_quota_reservations_committed(instance)
        self.compute.soft_delete_instance(self.context, instance,
                                          reservations=resvs)

    def test_quotas_failed_soft_delete(self):
        # A failed soft delete rolls the quota reservations back.
        instance = self._create_fake_instance_obj(
            params=dict(task_state=task_states.SOFT_DELETING))

        def fake_soft_delete(*args, **kwargs):
            raise test.TestingException()

        self.stubs.Set(self.compute.driver, 'soft_delete',
                       fake_soft_delete)

        resvs = self._ensure_quota_reservations_rolledback(instance)
        self.assertRaises(test.TestingException,
                          self.compute.soft_delete_instance,
                          self.context, instance,
                          reservations=resvs)

    def test_quotas_destroy_of_soft_deleted_instance(self):
        instance = self._create_fake_instance_obj(
            params=dict(vm_state=vm_states.SOFT_DELETED))
        # Termination should be successful, but quota reservations
        # rolled back because the instance was in SOFT_DELETED state.
        resvs = self._ensure_quota_reservations_rolledback(instance)
        self.compute.terminate_instance(self.context, instance,
                                        bdms=[], reservations=resvs)

    def _stub_out_resize_network_methods(self):
        # No-op the network API calls made during resize so resize tests
        # don't need real networking.
        def fake(cls, ctxt, instance, *args, **kwargs):
            pass

        self.stubs.Set(network_api.API, 'setup_networks_on_host', fake)
        self.stubs.Set(network_api.API, 'migrate_instance_start', fake)
        self.stubs.Set(network_api.API, 'migrate_instance_finish', fake)

    def _test_finish_resize(self, power_on, resize_instance=True):
        # Contrived test to ensure finish_resize doesn't raise anything and
        # also tests resize from ACTIVE or STOPPED state which determines
        # if the resized instance is powered on or not.
        # Body of _test_finish_resize (def on previous line). Heavy mox
        # record/replay sequence -- statement order here mirrors the call
        # order inside ComputeManager.finish_resize and must not change.
        vm_state = None
        if power_on:
            vm_state = vm_states.ACTIVE
        else:
            vm_state = vm_states.STOPPED
        params = {'vm_state': vm_state}
        instance = self._create_fake_instance_obj(params)
        image = {}
        disk_info = 'fake-disk-info'
        instance_type = flavors.get_default_flavor()

        if not resize_instance:
            # Make the "new" flavor match m1.tiny's sizes so the driver is
            # told this is not a resize of the disk.
            old_instance_type = flavors.get_flavor_by_name('m1.tiny')
            instance_type['root_gb'] = old_instance_type['root_gb']
            instance_type['swap'] = old_instance_type['swap']
            instance_type['ephemeral_gb'] = old_instance_type['ephemeral_gb']

        instance.task_state = task_states.RESIZE_PREP
        instance.save()

        self.compute.prep_resize(self.context, instance=instance,
                                 instance_type=instance_type,
                                 image={}, reservations=[], request_spec={},
                                 filter_properties={}, node=None,
                                 clean_shutdown=True)
        instance.task_state = task_states.RESIZE_MIGRATED
        instance.save()

        # NOTE(mriedem): make sure prep_resize set old_vm_state correctly
        sys_meta = instance.system_metadata
        self.assertIn('old_vm_state', sys_meta)
        if power_on:
            self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
        else:
            self.assertEqual(vm_states.STOPPED, sys_meta['old_vm_state'])
        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')

        orig_mig_save = migration.save
        orig_inst_save = instance.save
        network_api = self.compute.network_api

        self.mox.StubOutWithMock(network_api, 'setup_networks_on_host')
        self.mox.StubOutWithMock(network_api,
                                 'migrate_instance_finish')
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'get_instance_nw_info')
        self.mox.StubOutWithMock(self.compute,
                                 '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute.driver, 'finish_migration')
        self.mox.StubOutWithMock(self.compute,
                                 '_get_instance_block_device_info')
        self.mox.StubOutWithMock(migration, 'save')
        self.mox.StubOutWithMock(instance, 'save')

        def _mig_save():
            # Side effect for migration.save(): check ordering invariants,
            # then perform the real save.
            self.assertEqual(migration.status, 'finished')
            self.assertEqual(vm_state, instance.vm_state)
            self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
            self.assertTrue(migration._context.is_admin)
            orig_mig_save()

        def _instance_save0(expected_task_state=None):
            # First instance.save(): flavor swapped in, task state advanced.
            self.assertEqual(task_states.RESIZE_MIGRATED,
                             expected_task_state)
            self.assertEqual(instance_type['id'],
                             instance.instance_type_id)
            self.assertEqual(task_states.RESIZE_FINISH,
                             instance.task_state)
            orig_inst_save(expected_task_state=expected_task_state)

        def _instance_save1(expected_task_state=None):
            # Second instance.save(): resize is done, task state cleared.
            self.assertEqual(task_states.RESIZE_FINISH,
                             expected_task_state)
            self.assertEqual(vm_states.RESIZED, instance.vm_state)
            self.assertIsNone(instance.task_state)
            self.assertIn('launched_at', instance.obj_what_changed())
            orig_inst_save(expected_task_state=expected_task_state)

        network_api.setup_networks_on_host(self.context, instance,
                                           'fake-mini')
        network_api.migrate_instance_finish(self.context,
                                            mox.IsA(objects.Instance),
                                            mox.IsA(dict))

        self.compute.network_api.get_instance_nw_info(
                self.context, instance).AndReturn('fake-nwinfo1')

        # First save to update old/current flavor and task state
        exp_kwargs = dict(expected_task_state=task_states.RESIZE_MIGRATED)
        instance.save(**exp_kwargs).WithSideEffects(_instance_save0)

        self.compute._notify_about_instance_usage(
                self.context, instance, 'finish_resize.start',
                network_info='fake-nwinfo1')

        self.compute._get_instance_block_device_info(
                self.context, instance,
                refresh_conn_info=True).AndReturn('fake-bdminfo')

        # nova.conf sets the default flavor to m1.small and the test
        # sets the default flavor to m1.tiny so they should be different
        # which makes this a resize
        self.compute.driver.finish_migration(self.context, migration,
                                             instance, disk_info,
                                             'fake-nwinfo1',
                                             mox.IsA(objects.ImageMeta),
                                             resize_instance,
                                             'fake-bdminfo', power_on)
        # Ensure instance status updates is after the migration finish
        migration.save().WithSideEffects(_mig_save)
        exp_kwargs = dict(expected_task_state=task_states.RESIZE_FINISH)
        instance.save(**exp_kwargs).WithSideEffects(_instance_save1)
        self.compute._notify_about_instance_usage(
                self.context, instance, 'finish_resize.end',
                network_info='fake-nwinfo1')
        # NOTE(comstud): This actually does the mox.ReplayAll()
        reservations = self._ensure_quota_reservations_committed(instance)

        self.compute.finish_resize(self.context,
                migration=migration,
                disk_info=disk_info, image=image, instance=instance,
                reservations=reservations)

    def test_finish_resize_from_active(self):
        self._test_finish_resize(power_on=True)

    def test_finish_resize_from_stopped(self):
        self._test_finish_resize(power_on=False)

    def test_finish_resize_without_resize_instance(self):
        self._test_finish_resize(power_on=True, resize_instance=False)

    def test_finish_resize_with_volumes(self):
        """Contrived test to ensure finish_resize doesn't raise anything."""
        # create instance
        instance = self._create_fake_instance_obj()

        # create volume
        volume = {'instance_uuid': None,
                  'device_name': None,
                  'id': 'fake',
                  'size': 200,
                  'attach_status': 'detached'}
        bdm = objects.BlockDeviceMapping(
                **{'context': self.context,
                   'source_type': 'volume',
                   'destination_type': 'volume',
                   'volume_id': uuids.volume_id,
                   'instance_uuid': instance['uuid'],
                   'device_name': '/dev/vdc'})
        bdm.create()

        # stub out volume attach
        def fake_volume_get(self, context, volume_id):
            return volume
        self.stubs.Set(cinder.API, "get", fake_volume_get)

        def fake_volume_check_attach(self, context, volume_id, instance):
            pass
        self.stubs.Set(cinder.API, "check_attach", fake_volume_check_attach)

        def fake_get_volume_encryption_metadata(self, context, volume_id):
            return {}
        self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
                       fake_get_volume_encryption_metadata)

        orig_connection_data = {
            'target_discovered': True,
            'target_iqn': 'iqn.2010-10.org.openstack:%s.1' %
                          uuids.volume_id,
            'target_portal': '127.0.0.0.1:3260',
            'volume_id': uuids.volume_id,
        }
        connection_info = {
            'driver_volume_type': 'iscsi',
            'data': orig_connection_data,
        }

        def fake_init_conn(self, context, volume_id, session):
            return connection_info
        self.stubs.Set(cinder.API, "initialize_connection",
fake_init_conn) def fake_attach(self, context, volume_id, instance_uuid, device_name, mode='rw'): volume['instance_uuid'] = instance_uuid volume['device_name'] = device_name self.stubs.Set(cinder.API, "attach", fake_attach) # stub out virt driver attach def fake_get_volume_connector(*args, **kwargs): return {} self.stubs.Set(self.compute.driver, 'get_volume_connector', fake_get_volume_connector) def fake_attach_volume(*args, **kwargs): pass self.stubs.Set(self.compute.driver, 'attach_volume', fake_attach_volume) # attach volume to instance self.compute.attach_volume(self.context, instance, bdm) # assert volume attached correctly self.assertEqual(volume['device_name'], '/dev/vdc') disk_info = db.block_device_mapping_get_all_by_instance( self.context, instance.uuid) self.assertEqual(len(disk_info), 1) for bdm in disk_info: self.assertEqual(bdm['device_name'], volume['device_name']) self.assertEqual(bdm['connection_info'], jsonutils.dumps(connection_info)) # begin resize instance_type = flavors.get_default_flavor() instance.task_state = task_states.RESIZE_PREP instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=[], request_spec={}, filter_properties={}, node=None, clean_shutdown=True) # fake out detach for prep_resize (and later terminate) def fake_terminate_connection(self, context, volume, connector): connection_info['data'] = None self.stubs.Set(cinder.API, "terminate_connection", fake_terminate_connection) self._stub_out_resize_network_methods() migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), instance.uuid, 'pre-migrating') self.compute.resize_instance(self.context, instance=instance, migration=migration, image={}, reservations=[], instance_type=jsonutils.to_primitive(instance_type), clean_shutdown=True) # assert bdm is unchanged disk_info = db.block_device_mapping_get_all_by_instance( self.context, instance.uuid) self.assertEqual(len(disk_info), 1) for bdm 
in disk_info: self.assertEqual(bdm['device_name'], volume['device_name']) cached_connection_info = jsonutils.loads(bdm['connection_info']) self.assertEqual(cached_connection_info['data'], orig_connection_data) # but connection was terminated self.assertIsNone(connection_info['data']) # stub out virt driver finish_migration def fake(*args, **kwargs): pass self.stubs.Set(self.compute.driver, 'finish_migration', fake) instance.task_state = task_states.RESIZE_MIGRATED instance.save() reservations = self._ensure_quota_reservations_committed(instance) # new initialize connection new_connection_data = dict(orig_connection_data) new_iqn = 'iqn.2010-10.org.openstack:%s.2' % uuids.volume_id, new_connection_data['target_iqn'] = new_iqn def fake_init_conn_with_data(self, context, volume, session): connection_info['data'] = new_connection_data return connection_info self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn_with_data) self.compute.finish_resize(self.context, migration=migration, disk_info={}, image={}, instance=instance, reservations=reservations) # assert volume attached correctly disk_info = db.block_device_mapping_get_all_by_instance( self.context, instance['uuid']) self.assertEqual(len(disk_info), 1) for bdm in disk_info: self.assertEqual(bdm['connection_info'], jsonutils.dumps(connection_info)) # stub out detach def fake_detach(self, context, volume_uuid): volume['device_path'] = None volume['instance_uuid'] = None self.stubs.Set(cinder.API, "detach", fake_detach) # clean up self.compute.terminate_instance(self.context, instance, [], []) def test_finish_resize_handles_error(self): # Make sure we don't leave the instance in RESIZE on error. 
        def throw_up(*args, **kwargs):
            raise test.TestingException()

        self.stubs.Set(self.compute.driver, 'finish_migration', throw_up)

        self._stub_out_resize_network_methods()

        old_flavor_name = 'm1.tiny'
        instance = self._create_fake_instance_obj(type_name=old_flavor_name)

        reservations = self._ensure_quota_reservations_rolledback(instance)

        instance_type = flavors.get_flavor_by_name('m1.small')

        self.compute.prep_resize(self.context, instance=instance,
                                 instance_type=instance_type,
                                 image={}, reservations=reservations,
                                 request_spec={}, filter_properties={},
                                 node=None, clean_shutdown=True)

        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')

        instance.refresh()
        instance.task_state = task_states.RESIZE_MIGRATED
        instance.save()

        # finish_migration blows up, so finish_resize must error out the
        # instance rather than leaving it in a RESIZE task state.
        self.assertRaises(test.TestingException, self.compute.finish_resize,
                          self.context,
                          migration=migration,
                          disk_info={}, image={}, instance=instance,
                          reservations=reservations)
        instance.refresh()
        self.assertEqual(vm_states.ERROR, instance.vm_state)

        # The error path must also have reverted the instance back to the
        # old flavor.
        old_flavor = flavors.get_flavor_by_name(old_flavor_name)
        self.assertEqual(old_flavor['memory_mb'], instance.memory_mb)
        self.assertEqual(old_flavor['vcpus'], instance.vcpus)
        self.assertEqual(old_flavor['root_gb'], instance.root_gb)
        self.assertEqual(old_flavor['ephemeral_gb'], instance.ephemeral_gb)
        self.assertEqual(old_flavor['id'], instance.instance_type_id)
        self.assertNotEqual(instance_type['id'], instance.instance_type_id)

    def test_set_instance_info(self):
        # _set_instance_info should copy all sizing fields from the new
        # flavor onto the instance.
        old_flavor_name = 'm1.tiny'
        new_flavor_name = 'm1.small'
        instance = self._create_fake_instance_obj(type_name=old_flavor_name)
        new_flavor = flavors.get_flavor_by_name(new_flavor_name)

        self.compute._set_instance_info(instance, new_flavor.obj_clone())

        self.assertEqual(new_flavor['memory_mb'], instance.memory_mb)
        self.assertEqual(new_flavor['vcpus'], instance.vcpus)
        self.assertEqual(new_flavor['root_gb'], instance.root_gb)
        self.assertEqual(new_flavor['ephemeral_gb'], instance.ephemeral_gb)
        self.assertEqual(new_flavor['id'], instance.instance_type_id)

    def test_rebuild_instance_notification(self):
        # Ensure notifications on instance migrate/resize.
        old_time = datetime.datetime(2012, 4, 1)
        cur_time = datetime.datetime(2012, 12, 21, 12, 21)
        time_fixture = self.useFixture(utils_fixture.TimeFixture(old_time))
        inst_ref = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, inst_ref, {}, {},
                                            {}, block_device_mapping=[])
        time_fixture.advance_time_delta(cur_time - old_time)

        fake_notifier.NOTIFICATIONS = []
        instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])

        orig_sys_metadata = db.instance_system_metadata_get(self.context,
                inst_ref['uuid'])
        image_ref = instance["image_ref"]
        new_image_ref = image_ref + '-new_image_ref'
        db.instance_update(self.context, inst_ref['uuid'],
                           {'image_ref': new_image_ref})

        password = "new_password"

        inst_ref.task_state = task_states.REBUILDING
        inst_ref.save()
        self.compute.rebuild_instance(self.context,
                                      inst_ref,
                                      image_ref, new_image_ref,
                                      injected_files=[],
                                      new_pass=password,
                                      orig_sys_metadata=orig_sys_metadata,
                                      bdms=[], recreate=False,
                                      on_shared_storage=False)

        inst_ref.refresh()

        image_ref_url = glance.generate_image_url(image_ref)
        new_image_ref_url = glance.generate_image_url(new_image_ref)

        # Expect exists + rebuild.start + rebuild.end, in that order.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'compute.instance.exists')
        self.assertEqual(msg.payload['image_ref_url'], image_ref_url)
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'compute.instance.rebuild.start')
        self.assertEqual(msg.payload['image_ref_url'], new_image_ref_url)
        self.assertEqual(msg.payload['image_name'], 'fake_name')
        msg = fake_notifier.NOTIFICATIONS[2]
        self.assertEqual(msg.event_type,
                         'compute.instance.rebuild.end')
        self.assertEqual(msg.priority, 'INFO')
        payload = msg.payload
        self.assertEqual(payload['image_name'], 'fake_name')
        self.assertEqual(payload['tenant_id'], self.project_id)
        self.assertEqual(payload['user_id'], self.user_id)
        self.assertEqual(payload['instance_id'], inst_ref['uuid'])
        self.assertEqual(payload['instance_type'], 'm1.tiny')
        type_id = flavors.get_flavor_by_name('m1.tiny')['id']
        self.assertEqual(str(payload['instance_type_id']), str(type_id))
        flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
        self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
        self.assertIn('display_name', payload)
        self.assertIn('created_at', payload)
        self.assertIn('launched_at', payload)
        self.assertEqual(payload['launched_at'], utils.strtime(cur_time))
        self.assertEqual(payload['image_ref_url'], new_image_ref_url)
        self.compute.terminate_instance(self.context, inst_ref, [], [])

    def test_finish_resize_instance_notification(self):
        # Ensure notifications on instance migrate/resize.
        old_time = datetime.datetime(2012, 4, 1)
        cur_time = datetime.datetime(2012, 12, 21, 12, 21)
        time_fixture = self.useFixture(utils_fixture.TimeFixture(old_time))
        instance = self._create_fake_instance_obj()
        new_type = flavors.get_flavor_by_name('m1.small')
        new_type_id = new_type['id']
        flavor_id = new_type['flavorid']
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        instance.host = 'foo'
        instance.task_state = task_states.RESIZE_PREP
        instance.save()

        self.compute.prep_resize(self.context, instance=instance,
                instance_type=new_type, image={}, reservations=[],
                request_spec={}, filter_properties={}, node=None,
                clean_shutdown=True)

        self._stub_out_resize_network_methods()

        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')
        self.compute.resize_instance(self.context, instance=instance,
                migration=migration, image={}, instance_type=new_type,
                reservations=[], clean_shutdown=True)
        time_fixture.advance_time_delta(cur_time - old_time)
        # Only interested in the notifications emitted by finish_resize.
        fake_notifier.NOTIFICATIONS = []

        self.compute.finish_resize(self.context,
                migration=migration, reservations=[],
                disk_info={}, image={}, instance=instance)

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'compute.instance.finish_resize.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'compute.instance.finish_resize.end')
        self.assertEqual(msg.priority, 'INFO')
        payload = msg.payload
        self.assertEqual(payload['tenant_id'], self.project_id)
        self.assertEqual(payload['user_id'], self.user_id)
        self.assertEqual(payload['instance_id'], instance.uuid)
        self.assertEqual(payload['instance_type'], 'm1.small')
        self.assertEqual(str(payload['instance_type_id']), str(new_type_id))
        self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
        self.assertIn('display_name', payload)
        self.assertIn('created_at', payload)
        self.assertIn('launched_at', payload)
        self.assertEqual(payload['launched_at'], utils.strtime(cur_time))
        image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
        self.assertEqual(payload['image_ref_url'], image_ref_url)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_resize_instance_notification(self):
        # Ensure notifications on instance migrate/resize.
        old_time = datetime.datetime(2012, 4, 1)
        cur_time = datetime.datetime(2012, 12, 21, 12, 21)
        time_fixture = self.useFixture(utils_fixture.TimeFixture(old_time))
        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        time_fixture.advance_time_delta(cur_time - old_time)
        fake_notifier.NOTIFICATIONS = []

        instance.host = 'foo'
        instance.task_state = task_states.RESIZE_PREP
        instance.save()

        instance_type = flavors.get_default_flavor()
        self.compute.prep_resize(self.context, instance=instance,
                instance_type=instance_type, image={}, reservations=[],
                request_spec={}, filter_properties={}, node=None,
                clean_shutdown=True)
        # NOTE(review): return value unused — presumably this call is relied
        # on to raise if prep_resize did not create the migration record;
        # confirm before removing.
        db.migration_get_by_instance_and_status(self.context.elevated(),
                                                instance.uuid,
                                                'pre-migrating')

        # Expect exists + resize.prep.start + resize.prep.end, in order.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'compute.instance.exists')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'compute.instance.resize.prep.start')
        msg = fake_notifier.NOTIFICATIONS[2]
        self.assertEqual(msg.event_type,
                         'compute.instance.resize.prep.end')
        self.assertEqual(msg.priority, 'INFO')
        payload = msg.payload
        self.assertEqual(payload['tenant_id'], self.project_id)
        self.assertEqual(payload['user_id'], self.user_id)
        self.assertEqual(payload['instance_id'], instance.uuid)
        self.assertEqual(payload['instance_type'], 'm1.tiny')
        type_id = flavors.get_flavor_by_name('m1.tiny')['id']
        self.assertEqual(str(payload['instance_type_id']), str(type_id))
        flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
        self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
        self.assertIn('display_name', payload)
        self.assertIn('created_at', payload)
        self.assertIn('launched_at', payload)
        image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
        self.assertEqual(payload['image_ref_url'], image_ref_url)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_prep_resize_instance_migration_error_on_none_host(self):
        """Ensure prep_resize raises a migration error if destination host
        is not defined
        """
        instance = self._create_fake_instance_obj()

        reservations = self._ensure_quota_reservations_rolledback(instance)

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        instance.host = None
        instance.save()
        instance_type = flavors.get_default_flavor()

        self.assertRaises(exception.MigrationError, self.compute.prep_resize,
                          self.context, instance=instance,
                          instance_type=instance_type, image={},
                          reservations=reservations, request_spec={},
                          filter_properties={}, node=None,
                          clean_shutdown=True)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_resize_instance_driver_error(self):
        # Ensure instance status set to Error on resize error.

        def throw_up(*args, **kwargs):
            raise test.TestingException()

        self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
                       throw_up)

        instance = self._create_fake_instance_obj()
        instance_type = flavors.get_default_flavor()

        reservations = self._ensure_quota_reservations_rolledback(instance)

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        instance.host = 'foo'
        instance.save()
        self.compute.prep_resize(self.context, instance=instance,
                                 instance_type=instance_type, image={},
                                 reservations=reservations, request_spec={},
                                 filter_properties={}, node=None,
                                 clean_shutdown=True)
        instance.task_state = task_states.RESIZE_PREP
        instance.save()
        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')

        # verify
        self.assertRaises(test.TestingException, self.compute.resize_instance,
                          self.context, instance=instance,
                          migration=migration, image={},
                          reservations=reservations,
                          instance_type=jsonutils.to_primitive(instance_type),
                          clean_shutdown=True)
        # NOTE(comstud): error path doesn't use objects, so our object
        # is not updated.  Refresh and compare against the DB.
        instance.refresh()
        self.assertEqual(instance.vm_state, vm_states.ERROR)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_resize_instance_driver_rollback(self):
        # Ensure instance status set to Running after rollback.

        def throw_up(*args, **kwargs):
            # InstanceFaultRollback wraps the real error; the manager is
            # expected to roll back and re-raise the inner exception.
            raise exception.InstanceFaultRollback(test.TestingException())

        self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
                       throw_up)

        instance = self._create_fake_instance_obj()
        instance_type = flavors.get_default_flavor()
        reservations = self._ensure_quota_reservations_rolledback(instance)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        instance.host = 'foo'
        instance.save()
        self.compute.prep_resize(self.context, instance=instance,
                                 instance_type=instance_type, image={},
                                 reservations=reservations, request_spec={},
                                 filter_properties={}, node=None,
                                 clean_shutdown=True)
        instance.task_state = task_states.RESIZE_PREP
        instance.save()

        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')

        self.assertRaises(test.TestingException, self.compute.resize_instance,
                          self.context, instance=instance,
                          migration=migration, image={},
                          reservations=reservations,
                          instance_type=jsonutils.to_primitive(instance_type),
                          clean_shutdown=True)
        # NOTE(comstud): error path doesn't use objects, so our object
        # is not updated.  Refresh and compare against the DB.
        instance.refresh()
        self.assertEqual(instance.vm_state, vm_states.ACTIVE)
        self.assertIsNone(instance.task_state)
        self.compute.terminate_instance(self.context, instance, [], [])

    def _test_resize_instance(self, clean_shutdown=True):
        # Ensure instance can be migrated/resized.
        instance = self._create_fake_instance_obj()
        instance_type = flavors.get_default_flavor()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        instance.host = 'foo'
        instance.save()
        self.compute.prep_resize(self.context, instance=instance,
                instance_type=instance_type, image={}, reservations=[],
                request_spec={}, filter_properties={}, node=None,
                clean_shutdown=True)

        # verify 'old_vm_state' was set on system_metadata
        instance.refresh()
        sys_meta = instance.system_metadata
        self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])

        self._stub_out_resize_network_methods()

        instance.task_state = task_states.RESIZE_PREP
        instance.save()

        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')

        with test.nested(
            mock.patch.object(objects.BlockDeviceMappingList,
                'get_by_instance_uuid', return_value='fake_bdms'),
            mock.patch.object(
                self.compute, '_get_instance_block_device_info',
                return_value='fake_bdinfo'),
            mock.patch.object(self.compute, '_terminate_volume_connections'),
            mock.patch.object(self.compute, '_get_power_off_values',
                return_value=(1, 2))
        ) as (mock_get_by_inst_uuid, mock_get_instance_vol_bdinfo,
                mock_terminate_vol_conn, mock_get_power_off_values):
            self.compute.resize_instance(self.context, instance=instance,
                    migration=migration, image={}, reservations=[],
                    instance_type=jsonutils.to_primitive(instance_type),
                    clean_shutdown=clean_shutdown)
            mock_get_instance_vol_bdinfo.assert_called_once_with(
                    self.context, instance, bdms='fake_bdms')
            mock_terminate_vol_conn.assert_called_once_with(self.context,
                    instance, 'fake_bdms')
            mock_get_power_off_values.assert_called_once_with(self.context,
                    instance, clean_shutdown)
            self.assertEqual(migration.dest_compute, instance.host)
            self.compute.terminate_instance(self.context, instance, [], [])

    def test_resize_instance(self):
        self._test_resize_instance()

    def test_resize_instance_forced_shutdown(self):
        self._test_resize_instance(clean_shutdown=False)

    def _test_confirm_resize(self, power_on, numa_topology=None):
        # Common test case method for confirm_resize
        def fake(*args, **kwargs):
            pass

        def fake_confirm_migration_driver(*args, **kwargs):
            # Confirm the instance uses the new type in finish_resize
            self.assertEqual('3', instance.flavor.flavorid)

        old_vm_state = None
        p_state = None
        # Get the vm/power state for the original instance, depending on
        # whether we are simulating a resize from ACTIVE or STOPPED.
        if power_on:
            old_vm_state = vm_states.ACTIVE
            p_state = power_state.RUNNING
        else:
            old_vm_state = vm_states.STOPPED
            p_state = power_state.SHUTDOWN
        params = {'vm_state': old_vm_state, 'power_state': p_state}
        instance = self._create_fake_instance_obj(params)

        self.flags(allow_resize_to_same_host=True)
        self.stubs.Set(self.compute.driver, 'finish_migration', fake)
        self.stubs.Set(self.compute.driver, 'confirm_migration',
                       fake_confirm_migration_driver)

        self._stub_out_resize_network_methods()

        reservations = self._ensure_quota_reservations_committed(instance)

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        # Confirm the instance size before the resize starts
        instance.refresh()
        instance_type_ref = db.flavor_get(self.context,
                                          instance.instance_type_id)
        self.assertEqual(instance_type_ref['flavorid'], '1')

        instance.vm_state = old_vm_state
        instance.power_state = p_state
        instance.numa_topology = numa_topology
        instance.save()

        new_instance_type_ref = flavors.get_flavor_by_flavor_id(3)
        self.compute.prep_resize(self.context,
                instance=instance,
                instance_type=new_instance_type_ref,
                image={}, reservations=reservations, request_spec={},
                filter_properties={}, node=None, clean_shutdown=True)

        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')
        migration_context = objects.MigrationContext.get_by_instance_uuid(
            self.context.elevated(), instance.uuid)
        self.assertIsInstance(migration_context.old_numa_topology,
                              numa_topology.__class__)
        self.assertIsNone(migration_context.new_numa_topology)

        # NOTE(mriedem): ensure prep_resize set old_vm_state in
        # system_metadata
        sys_meta = instance.system_metadata
        self.assertEqual(old_vm_state, sys_meta['old_vm_state'])

        instance.task_state = task_states.RESIZE_PREP
        instance.save()

        self.compute.resize_instance(self.context, instance=instance,
                                     migration=migration,
                                     image={},
                                     reservations=[],
                                     instance_type=new_instance_type_ref,
                                     clean_shutdown=True)
        self.compute.finish_resize(self.context,
                    migration=migration, reservations=[],
                    disk_info={}, image={}, instance=instance)

        # Prove that the instance size is now the new size
        instance_type_ref = db.flavor_get(self.context,
                                          instance.instance_type_id)
        self.assertEqual(instance_type_ref['flavorid'], '3')
        # Prove that the NUMA topology has also been updated to that of
        # the new flavor - meaning None
        self.assertIsNone(instance.numa_topology)

        # Finally, confirm the resize and verify the new flavor is applied
        instance.task_state = None
        instance.save()

        self.compute.confirm_resize(self.context, instance=instance,
                                    reservations=reservations,
                                    migration=migration)

        instance.refresh()

        instance_type_ref = db.flavor_get(self.context,
                                          instance.instance_type_id)
        self.assertEqual(instance_type_ref['flavorid'], '3')
        self.assertEqual('fake-mini', migration.source_compute)
        self.assertEqual(old_vm_state, instance.vm_state)
        self.assertIsNone(instance.task_state)
        self.assertIsNone(instance.migration_context)
        self.assertEqual(p_state, instance.power_state)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_confirm_resize_from_active(self):
        self._test_confirm_resize(power_on=True)

    def test_confirm_resize_from_stopped(self):
        self._test_confirm_resize(power_on=False)

    def test_confirm_resize_with_migration_context(self):
        numa_topology = (
            test_instance_numa_topology.get_fake_obj_numa_topology(
                self.context))
        self._test_confirm_resize(power_on=True, numa_topology=numa_topology)

    def _test_finish_revert_resize(self, power_on,
                                   remove_old_vm_state=False,
                                   numa_topology=None):
        """Convenience method that does most of the work for the
        test_finish_revert_resize tests.

        :param power_on -- True if testing resize from ACTIVE state, False
        if testing resize from STOPPED state.
        :param remove_old_vm_state -- True if testing a case where the
        'old_vm_state' system_metadata is not present when the
        finish_revert_resize method is called.
        """
        def fake(*args, **kwargs):
            pass

        def fake_finish_revert_migration_driver(*args, **kwargs):
            # Confirm the instance uses the old type in finish_revert_resize
            inst = args[1]
            self.assertEqual('1', inst.flavor.flavorid)

        old_vm_state = None
        if power_on:
            old_vm_state = vm_states.ACTIVE
        else:
            old_vm_state = vm_states.STOPPED
        params = {'vm_state': old_vm_state}
        instance = self._create_fake_instance_obj(params)

        self.stubs.Set(self.compute.driver, 'finish_migration', fake)
        self.stubs.Set(self.compute.driver, 'finish_revert_migration',
                       fake_finish_revert_migration_driver)

        self._stub_out_resize_network_methods()

        reservations = self._ensure_quota_reservations_committed(instance)

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        instance.refresh()
        instance_type_ref = db.flavor_get(self.context,
                                          instance.instance_type_id)
        self.assertEqual(instance_type_ref['flavorid'], '1')

        old_vm_state = instance['vm_state']

        instance.host = 'foo'
        instance.vm_state = old_vm_state
        instance.numa_topology = numa_topology
        instance.save()

        new_instance_type_ref = flavors.get_flavor_by_flavor_id(3)
        self.compute.prep_resize(self.context,
                instance=instance,
                instance_type=new_instance_type_ref,
                image={}, reservations=reservations, request_spec={},
                filter_properties={}, node=None, clean_shutdown=True)

        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')
        migration_context = objects.MigrationContext.get_by_instance_uuid(
            self.context.elevated(), instance.uuid)
        self.assertIsInstance(migration_context.old_numa_topology,
                              numa_topology.__class__)

        # NOTE(mriedem): ensure prep_resize set old_vm_state in
        # system_metadata
        sys_meta = instance.system_metadata
        self.assertEqual(old_vm_state, sys_meta['old_vm_state'])

        instance.task_state = task_states.RESIZE_PREP
        instance.save()

        self.compute.resize_instance(self.context, instance=instance,
                                     migration=migration,
                                     image={},
                                     reservations=[],
                                     instance_type=new_instance_type_ref,
                                     clean_shutdown=True)
        self.compute.finish_resize(self.context,
                    migration=migration, reservations=[],
                    disk_info={}, image={}, instance=instance)

        # Prove that the instance size is now the new size
        instance_type_ref = flavors.get_flavor_by_flavor_id(3)
        self.assertEqual(instance_type_ref['flavorid'], '3')
        # Prove that the NUMA topology has also been updated to that of
        # the new flavor - meaning None
        self.assertIsNone(instance.numa_topology)

        instance.task_state = task_states.RESIZE_REVERTING
        instance.save()

        self.compute.revert_resize(self.context,
                migration=migration, instance=instance,
                reservations=reservations)

        instance.refresh()
        if remove_old_vm_state:
            # need to wipe out the old_vm_state from system_metadata
            # before calling finish_revert_resize
            sys_meta = instance.system_metadata
            sys_meta.pop('old_vm_state')
            # Have to reset for save() to work
            instance.system_metadata = sys_meta
            instance.save()

        self.compute.finish_revert_resize(self.context,
                migration=migration,
                instance=instance, reservations=reservations)

        self.assertIsNone(instance.task_state)

        # The revert must restore the original flavor, host and NUMA
        # topology.
        instance_type_ref = db.flavor_get(self.context,
                                          instance['instance_type_id'])
        self.assertEqual(instance_type_ref['flavorid'], '1')
        self.assertEqual(instance.host, migration.source_compute)
        self.assertEqual(migration.dest_compute, migration.source_compute)
        self.assertIsInstance(instance.numa_topology,
                              numa_topology.__class__)

        if remove_old_vm_state:
            self.assertEqual(vm_states.ACTIVE, instance.vm_state)
        else:
            self.assertEqual(old_vm_state, instance.vm_state)

    def test_finish_revert_resize_from_active(self):
        self._test_finish_revert_resize(power_on=True)

    def test_finish_revert_resize_from_stopped(self):
        self._test_finish_revert_resize(power_on=False)

    def test_finish_revert_resize_from_stopped_remove_old_vm_state(self):
        # in this case we resize from STOPPED but end up with ACTIVE
        # because the old_vm_state value is not present in
        # finish_revert_resize
        self._test_finish_revert_resize(power_on=False,
                                        remove_old_vm_state=True)

    def test_finish_revert_resize_migration_context(self):
        numa_topology = (
            test_instance_numa_topology.get_fake_obj_numa_topology(
                self.context))
        self._test_finish_revert_resize(power_on=True,
                                        numa_topology=numa_topology)

    def test_get_by_flavor_id(self):
        flavor_type = flavors.get_flavor_by_flavor_id(1)
        self.assertEqual(flavor_type['name'], 'm1.tiny')

    def test_resize_instance_handles_migration_error(self):
        # Ensure vm_state is ERROR when error occurs.

        def raise_migration_failure(*args):
            raise test.TestingException()
        self.stubs.Set(self.compute.driver,
                'migrate_disk_and_power_off',
                raise_migration_failure)

        instance = self._create_fake_instance_obj()

        reservations = self._ensure_quota_reservations_rolledback(instance)

        instance_type = flavors.get_default_flavor()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        instance.host = 'foo'
        instance.save()
        self.compute.prep_resize(self.context, instance=instance,
                                 instance_type=instance_type,
                                 image={}, reservations=reservations,
                                 request_spec={}, filter_properties={},
                                 node=None, clean_shutdown=True)
        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')
        instance.task_state = task_states.RESIZE_PREP
        instance.save()

        self.assertRaises(test.TestingException, self.compute.resize_instance,
                          self.context, instance=instance,
                          migration=migration, image={},
                          reservations=reservations,
                          instance_type=jsonutils.to_primitive(instance_type),
                          clean_shutdown=True)
        # NOTE(comstud): error path doesn't use objects, so our object
        # is not updated.  Refresh and compare against the DB.
instance.refresh() self.assertEqual(instance.vm_state, vm_states.ERROR) self.compute.terminate_instance(self.context, instance, [], []) def test_pre_live_migration_instance_has_no_fixed_ip(self): # Confirm that no exception is raised if there is no fixed ip on # pre_live_migration instance = self._create_fake_instance_obj() c = context.get_admin_context() self.mox.ReplayAll() self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance), {'block_device_mapping': []}, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) def test_pre_live_migration_works_correctly(self): # Confirm setup_compute_volume is called when volume is mounted. def stupid(*args, **kwargs): return fake_network.fake_get_instance_nw_info(self) self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', stupid) # creating instance testdata instance = self._create_fake_instance_obj({'host': 'dummy'}) c = context.get_admin_context() nw_info = fake_network.fake_get_instance_nw_info(self) # creating mocks self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration') self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance), {'swap': None, 'ephemerals': [], 'root_device_name': None, 'block_device_mapping': []}, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.StubOutWithMock(self.compute.driver, 'ensure_filtering_rules_for_instance') self.compute.driver.ensure_filtering_rules_for_instance( mox.IsA(instance), nw_info) self.mox.StubOutWithMock(self.compute.network_api, 'setup_networks_on_host') self.compute.network_api.setup_networks_on_host(c, instance, self.compute.host) fake_notifier.NOTIFICATIONS = [] # start test self.mox.ReplayAll() migrate_data = {'is_shared_instance_path': False} ret = self.compute.pre_live_migration(c, instance=instance, block_migration=False, disk=None, migrate_data=migrate_data) self.assertIsNone(ret) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 
'compute.instance.live_migration.pre.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'compute.instance.live_migration.pre.end') # cleanup db.instance_destroy(c, instance['uuid']) @mock.patch('nova.objects.Migration.save') def test_live_migration_exception_rolls_back(self, mock_save): # Confirm exception when pre_live_migration fails. c = context.get_admin_context() instance = self._create_fake_instance_obj( {'host': 'src_host', 'task_state': task_states.MIGRATING}) updated_instance = self._create_fake_instance_obj( {'host': 'fake-dest-host'}) dest_host = updated_instance['host'] fake_bdms = [ objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( {'volume_id': uuids.volume_id_1, 'source_type': 'volume', 'destination_type': 'volume'})), objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( {'volume_id': uuids.volume_id_2, 'source_type': 'volume', 'destination_type': 'volume'})) ] migrate_data = migrate_data_obj.XenapiLiveMigrateData( block_migration=True) # creating mocks self.mox.StubOutWithMock(self.compute.driver, 'get_instance_disk_info') self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'pre_live_migration') self.mox.StubOutWithMock(objects.BlockDeviceMappingList, 'get_by_instance_uuid') self.mox.StubOutWithMock(self.compute.network_api, 'setup_networks_on_host') self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'remove_volume_connection') self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'rollback_live_migration_at_destination') block_device_info = { 'swap': None, 'ephemerals': [], 'block_device_mapping': [], 'root_device_name': None} self.compute.driver.get_instance_disk_info( instance, block_device_info=block_device_info).AndReturn('fake_disk') self.compute.compute_rpcapi.pre_live_migration(c, instance, True, 'fake_disk', dest_host, migrate_data).AndRaise(test.TestingException()) self.compute.network_api.setup_networks_on_host(c, instance, self.compute.host) 
objects.BlockDeviceMappingList.get_by_instance_uuid(c, instance.uuid).MultipleTimes().AndReturn(fake_bdms) self.compute.compute_rpcapi.remove_volume_connection( c, uuids.volume_id_1, instance, dest_host) self.compute.compute_rpcapi.remove_volume_connection( c, uuids.volume_id_2, instance, dest_host) self.compute.compute_rpcapi.rollback_live_migration_at_destination( c, instance, dest_host, destroy_disks=True, migrate_data=mox.IsA(migrate_data_obj.LiveMigrateData)) # start test self.mox.ReplayAll() migration = objects.Migration() self.assertRaises(test.TestingException, self.compute.live_migration, c, dest=dest_host, block_migration=True, instance=instance, migration=migration, migrate_data=migrate_data) instance.refresh() self.assertEqual('src_host', instance.host) self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) self.assertEqual('failed', migration.status) @mock.patch.object(compute_utils, 'EventReporter') @mock.patch('nova.objects.Migration.save') def test_live_migration_works_correctly(self, mock_save, event_mock): # Confirm live_migration() works as expected correctly. 
# creating instance testdata c = context.get_admin_context() instance = self._create_fake_instance_obj(context=c) instance.host = self.compute.host dest = 'desthost' migrate_data = migrate_data_obj.LibvirtLiveMigrateData( is_shared_instance_path=False, is_shared_block_storage=False) self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'pre_live_migration') self.compute.compute_rpcapi.pre_live_migration( c, instance, False, None, dest, migrate_data).AndReturn( migrate_data) self.mox.StubOutWithMock(self.compute.network_api, 'migrate_instance_start') migration = {'source_compute': instance['host'], 'dest_compute': dest} self.compute.network_api.migrate_instance_start(c, instance, migration) self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'post_live_migration_at_destination') self.compute.compute_rpcapi.post_live_migration_at_destination( c, instance, False, dest) self.mox.StubOutWithMock(self.compute.network_api, 'setup_networks_on_host') self.mox.StubOutWithMock(self.compute.instance_events, 'clear_events_for_instance') self.compute.instance_events.clear_events_for_instance( mox.IgnoreArg()) # start test self.mox.ReplayAll() migration = objects.Migration() ret = self.compute.live_migration(c, dest=dest, instance=instance, block_migration=False, migration=migration, migrate_data=migrate_data) self.assertIsNone(ret) event_mock.assert_called_with( c, 'compute_live_migration', instance.uuid) # cleanup instance.destroy() self.assertEqual('completed', migration.status) def test_post_live_migration_no_shared_storage_working_correctly(self): """Confirm post_live_migration() works correctly as expected for non shared storage migration. """ # Create stubs result = {} # No share storage live migration don't need to destroy at source # server because instance has been migrated to destination, but a # cleanup for block device and network are needed. 
        def fakecleanup(*args, **kwargs):
            # Flag that driver.cleanup() was invoked at the source.
            result['cleanup'] = True

        self.stubs.Set(self.compute.driver, 'cleanup', fakecleanup)

        dest = 'desthost'
        srchost = self.compute.host

        # creating testdata
        c = context.get_admin_context()
        instance = self._create_fake_instance_obj({
            'host': srchost,
            'state_description': 'migrating',
            'state': power_state.PAUSED,
            'task_state': task_states.MIGRATING,
            'power_state': power_state.PAUSED})

        # creating mocks
        self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
        self.compute.driver.unfilter_instance(instance, [])

        self.mox.StubOutWithMock(self.compute.network_api,
                                 'migrate_instance_start')
        migration = {'source_compute': srchost, 'dest_compute': dest, }
        self.compute.network_api.migrate_instance_start(c, instance,
                                                        migration)

        self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                                 'post_live_migration_at_destination')
        self.compute.compute_rpcapi.post_live_migration_at_destination(
            c, instance, False, dest)

        self.mox.StubOutWithMock(self.compute.network_api,
                                 'setup_networks_on_host')
        self.mox.StubOutWithMock(self.compute.instance_events,
                                 'clear_events_for_instance')
        self.compute.instance_events.clear_events_for_instance(
            mox.IgnoreArg())

        # start test
        self.mox.ReplayAll()
        migrate_data = objects.LibvirtLiveMigrateData(
            is_shared_instance_path=False,
            is_shared_block_storage=False,
            block_migration=False)
        self.compute._post_live_migration(c, instance, dest,
                                          migrate_data=migrate_data)
        # driver.cleanup() must have run because storage was not shared.
        self.assertIn('cleanup', result)
        self.assertTrue(result['cleanup'])

    def test_post_live_migration_working_correctly(self):
        # Confirm post_live_migration() works as expected correctly.
        dest = 'desthost'
        srchost = self.compute.host

        # creating testdata
        c = context.get_admin_context()
        instance = self._create_fake_instance_obj({
            'host': srchost,
            'state_description': 'migrating',
            'state': power_state.PAUSED}, context=c)

        instance.update({'task_state': task_states.MIGRATING,
                         'power_state': power_state.PAUSED})
        instance.save()

        migration_obj = objects.Migration()
        migrate_data = migrate_data_obj.LiveMigrateData(
            migration=migration_obj)

        # creating mocks
        with test.nested(
            mock.patch.object(self.compute.driver, 'post_live_migration'),
            mock.patch.object(self.compute.driver, 'unfilter_instance'),
            mock.patch.object(self.compute.network_api,
                              'migrate_instance_start'),
            mock.patch.object(self.compute.compute_rpcapi,
                              'post_live_migration_at_destination'),
            mock.patch.object(self.compute.driver,
                              'post_live_migration_at_source'),
            mock.patch.object(self.compute.network_api,
                              'setup_networks_on_host'),
            mock.patch.object(self.compute.instance_events,
                              'clear_events_for_instance'),
            mock.patch.object(self.compute, 'update_available_resource'),
            mock.patch.object(migration_obj, 'save'),
        ) as (
            post_live_migration, unfilter_instance,
            migrate_instance_start, post_live_migration_at_destination,
            post_live_migration_at_source, setup_networks_on_host,
            clear_events, update_available_resource, mig_save
        ):
            self.compute._post_live_migration(c, instance, dest,
                                              migrate_data=migrate_data)

            # Verify each post-migration step was invoked with the
            # expected arguments, and that the migration object ends up
            # 'completed' and saved.
            post_live_migration.assert_has_calls([
                mock.call(c, instance, {'swap': None, 'ephemerals': [],
                                        'root_device_name': None,
                                        'block_device_mapping': []},
                          migrate_data)])
            unfilter_instance.assert_has_calls([mock.call(instance, [])])
            migration = {'source_compute': srchost,
                         'dest_compute': dest, }
            migrate_instance_start.assert_has_calls([
                mock.call(c, instance, migration)])
            post_live_migration_at_destination.assert_has_calls([
                mock.call(c, instance, False, dest)])
            post_live_migration_at_source.assert_has_calls(
                [mock.call(c, instance, [])])
            clear_events.assert_called_once_with(instance)
            update_available_resource.assert_has_calls([mock.call(c)])
            self.assertEqual('completed', migration_obj.status)
            mig_save.assert_called_once_with()

    def test_post_live_migration_terminate_volume_connections(self):
        # Volume-backed BDMs must have their Cinder connections terminated
        # at the source after a live migration; local BDMs must not.
        c = context.get_admin_context()
        instance = self._create_fake_instance_obj({
            'host': self.compute.host,
            'state_description': 'migrating',
            'state': power_state.PAUSED}, context=c)
        instance.update({'task_state': task_states.MIGRATING,
                         'power_state': power_state.PAUSED})
        instance.save()

        # One local (blank) BDM and one volume BDM.
        bdms = block_device_obj.block_device_make_list(c,
                [fake_block_device.FakeDbBlockDeviceDict({
                    'source_type': 'blank', 'guest_format': None,
                    'destination_type': 'local'}),
                 fake_block_device.FakeDbBlockDeviceDict({
                    'source_type': 'volume', 'destination_type': 'volume',
                    'volume_id': uuids.volume_id}),
                ])

        with test.nested(
            mock.patch.object(self.compute.network_api,
                              'migrate_instance_start'),
            mock.patch.object(self.compute.compute_rpcapi,
                              'post_live_migration_at_destination'),
            mock.patch.object(self.compute.network_api,
                              'setup_networks_on_host'),
            mock.patch.object(self.compute.instance_events,
                              'clear_events_for_instance'),
            mock.patch.object(self.compute,
                              '_get_instance_block_device_info'),
            mock.patch.object(objects.BlockDeviceMappingList,
                              'get_by_instance_uuid'),
            mock.patch.object(self.compute.driver, 'get_volume_connector'),
            mock.patch.object(cinder.API, 'terminate_connection')
        ) as (
            migrate_instance_start, post_live_migration_at_destination,
            setup_networks_on_host, clear_events_for_instance,
            get_instance_volume_block_device_info, get_by_instance_uuid,
            get_volume_connector, terminate_connection
        ):
            get_by_instance_uuid.return_value = bdms
            get_volume_connector.return_value = 'fake-connector'

            self.compute._post_live_migration(c, instance, 'dest_host')

            # Only the volume BDM triggers terminate_connection.
            terminate_connection.assert_called_once_with(
                c, uuids.volume_id, 'fake-connector')

    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
    def test_rollback_live_migration(self, mock_bdms):
        c = context.get_admin_context()
        instance = mock.MagicMock()
        migration = mock.MagicMock()
        migrate_data = {'migration': migration}

        mock_bdms.return_value = []

        @mock.patch.object(self.compute, '_live_migration_cleanup_flags')
        @mock.patch.object(self.compute, 'network_api')
        def _test(mock_nw_api, mock_lmcf):
            # No disk/instance-dir cleanup needed on rollback.
            mock_lmcf.return_value = False, False
            self.compute._rollback_live_migration(c, instance, 'foo',
                                                  False,
                                                  migrate_data=migrate_data)
            mock_nw_api.setup_networks_on_host.assert_called_once_with(
                c, instance, self.compute.host)
        _test()

        # Default rollback marks the migration 'error' and saves it.
        self.assertEqual('error', migration.status)
        self.assertEqual(0, instance.progress)
        migration.save.assert_called_once_with()

    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
    def test_rollback_live_migration_set_migration_status(self, mock_bdms):
        # Same as above, but an explicit migration_status must be honored.
        c = context.get_admin_context()
        instance = mock.MagicMock()
        migration = mock.MagicMock()
        migrate_data = {'migration': migration}

        mock_bdms.return_value = []

        @mock.patch.object(self.compute, '_live_migration_cleanup_flags')
        @mock.patch.object(self.compute, 'network_api')
        def _test(mock_nw_api, mock_lmcf):
            mock_lmcf.return_value = False, False
            self.compute._rollback_live_migration(c, instance, 'foo',
                                                  False,
                                                  migrate_data=migrate_data,
                                                  migration_status='fake')
            mock_nw_api.setup_networks_on_host.assert_called_once_with(
                c, instance, self.compute.host)
        _test()

        self.assertEqual('fake', migration.status)
        migration.save.assert_called_once_with()

    def test_rollback_live_migration_at_destination_correctly(self):
        # creating instance testdata
        c = context.get_admin_context()
        instance = self._create_fake_instance_obj({'host': 'dummy'})

        fake_notifier.NOTIFICATIONS = []

        self.mox.StubOutWithMock(self.compute.network_api,
                                 'setup_networks_on_host')
        self.compute.network_api.setup_networks_on_host(c, instance,
                                                        self.compute.host,
                                                        teardown=True)
        self.mox.StubOutWithMock(self.compute.driver,
                                 'rollback_live_migration_at_destination')
        self.compute.driver.rollback_live_migration_at_destination(
            c, instance, [], {'swap': None,
                              'ephemerals': [],
                              'root_device_name': None,
                              'block_device_mapping': []},
            destroy_disks=True, migrate_data=None)

        # start test
        self.mox.ReplayAll()
        ret = self.compute.rollback_live_migration_at_destination(
            c, instance=instance, destroy_disks=True, migrate_data=None)
        self.assertIsNone(ret)
        # The rollback must emit start/end notifications.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(
            msg.event_type,
            'compute.instance.live_migration.rollback.dest.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(
            msg.event_type,
            'compute.instance.live_migration.rollback.dest.end')

    @mock.patch('nova.network.api.API.setup_networks_on_host',
                side_effect=test.TestingException)
    @mock.patch('nova.virt.driver.ComputeDriver.'
                'rollback_live_migration_at_destination')
    @mock.patch('nova.objects.migrate_data.LiveMigrateData.'
                'detect_implementation')
    def test_rollback_live_migration_at_destination_network_fails(
            self, mock_detect, mock_rollback, net_mock):
        # Even when network teardown raises, the driver-level rollback
        # must still be attempted before the exception propagates.
        c = context.get_admin_context()
        instance = self._create_fake_instance_obj()
        self.assertRaises(
            test.TestingException,
            self.compute.rollback_live_migration_at_destination,
            c, instance, destroy_disks=True, migrate_data={})
        mock_rollback.assert_called_once_with(
            c, instance, mock.ANY, mock.ANY,
            destroy_disks=True,
            migrate_data=mock_detect.return_value)

    def test_run_kill_vm(self):
        # Detect when a vm is terminated behind the scenes.
        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        instances = db.instance_get_all(self.context)
        LOG.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

        instance_uuid = instances[0]['uuid']
        # Remove the VM out-of-band, behind the compute manager's back.
        self.compute.driver._test_remove_vm(instance_uuid)

        # Force the compute manager to do its periodic poll
        ctxt = context.get_admin_context()
        self.compute._sync_power_states(ctxt)

        instances = db.instance_get_all(self.context)
        LOG.info("After force-killing instances: %s", instances)
        # The DB record survives; only its task_state is cleared.
        self.assertEqual(len(instances), 1)
        self.assertIsNone(instances[0]['task_state'])

    def _fill_fault(self, values):
        # Return a complete fake instance-fault DB record: the given
        # values plus the NULLable bookkeeping columns.
        extra = {x: None for x in ['created_at',
                                   'deleted_at',
                                   'updated_at',
                                   'deleted']}
        extra['id'] = 1
        extra['details'] = ''
        extra.update(values)
        return extra

    def test_add_instance_fault(self):
        instance = self._create_fake_instance_obj()
        exc_info = None

        def fake_db_fault_create(ctxt, values):
            # The traceback from exc_info must land in 'details'.
            self.assertIn('raise NotImplementedError', values['details'])
            del values['details']

            expected = {
                'code': 500,
                'message': 'test',
                'instance_uuid': instance['uuid'],
                'host': self.compute.host
            }
            self.assertEqual(expected, values)
            return self._fill_fault(expected)

        try:
            raise NotImplementedError('test')
        except NotImplementedError:
            exc_info = sys.exc_info()

        self.stub_out('nova.db.instance_fault_create', fake_db_fault_create)

        ctxt = context.get_admin_context()
        compute_utils.add_instance_fault_from_exc(
            ctxt, instance, NotImplementedError('test'), exc_info)

    def test_add_instance_fault_with_remote_error(self):
        instance = self._create_fake_instance_obj()
        exc_info = None
        raised_exc = None

        def fake_db_fault_create(ctxt, values):
            # NOTE(review): these 'global' statements reference module
            # globals that are never defined; they were presumably meant
            # to rebind the enclosing locals above — confirm before
            # relying on exc_info/raised_exc inside this stub.
            global exc_info
            global raised_exc

            self.assertIn('raise messaging.RemoteError', values['details'])
            del values['details']

            expected = {
                'code': 500,
                'instance_uuid': instance['uuid'],
                'message': 'Remote error: test My Test Message\nNone.',
                'host': self.compute.host
            }
            self.assertEqual(expected, values)
            return self._fill_fault(expected)

        try:
            raise messaging.RemoteError('test', 'My Test Message')
        except messaging.RemoteError as exc:
            raised_exc = exc
            exc_info = sys.exc_info()

        self.stub_out('nova.db.instance_fault_create', fake_db_fault_create)

        ctxt = context.get_admin_context()
        compute_utils.add_instance_fault_from_exc(
            ctxt, instance, raised_exc, exc_info)

    def test_add_instance_fault_user_error(self):
        # A 4xx user error records its message but no traceback details.
        instance = self._create_fake_instance_obj()
        exc_info = None

        def fake_db_fault_create(ctxt, values):

            expected = {
                'code': 400,
                'message': 'fake details',
                'details': '',
                'instance_uuid': instance['uuid'],
                'host': self.compute.host
            }
            self.assertEqual(expected, values)
            return self._fill_fault(expected)

        user_exc = exception.Invalid('fake details', code=400)

        try:
            raise user_exc
        except exception.Invalid:
            exc_info = sys.exc_info()

        self.stub_out('nova.db.instance_fault_create', fake_db_fault_create)

        ctxt = context.get_admin_context()
        compute_utils.add_instance_fault_from_exc(
            ctxt, instance, user_exc, exc_info)

    def test_add_instance_fault_no_exc_info(self):
        # Without exc_info there is no traceback, so details stay empty.
        instance = self._create_fake_instance_obj()

        def fake_db_fault_create(ctxt, values):
            expected = {
                'code': 500,
                'message': 'test',
                'details': '',
                'instance_uuid': instance['uuid'],
                'host': self.compute.host
            }
            self.assertEqual(expected, values)
            return self._fill_fault(expected)

        self.stub_out('nova.db.instance_fault_create', fake_db_fault_create)

        ctxt = context.get_admin_context()
        compute_utils.add_instance_fault_from_exc(
            ctxt, instance, NotImplementedError('test'))

    def test_add_instance_fault_long_message(self):
        # Fault messages are truncated to the 255-char DB column width.
        instance = self._create_fake_instance_obj()

        message = 300 * 'a'

        def fake_db_fault_create(ctxt, values):
            expected = {
                'code': 500,
                'message': message[:255],
                'details': '',
                'instance_uuid': instance['uuid'],
                'host': self.compute.host
            }
            self.assertEqual(expected, values)
            return self._fill_fault(expected)

        self.stub_out('nova.db.instance_fault_create', fake_db_fault_create)

        ctxt = context.get_admin_context()
        compute_utils.add_instance_fault_from_exc(
            ctxt, instance, NotImplementedError(message))

    def test_add_instance_fault_with_message(self):
        # An explicit fault_message overrides the exception's message.
        instance = self._create_fake_instance_obj()
        exc_info = None

        def fake_db_fault_create(ctxt, values):
            self.assertIn('raise NotImplementedError', values['details'])
            del values['details']

            expected = {
                'code': 500,
                'message': 'hoge',
                'instance_uuid': instance['uuid'],
                'host': self.compute.host
            }
            self.assertEqual(expected, values)
            return self._fill_fault(expected)

        try:
            raise NotImplementedError('test')
        except NotImplementedError:
            exc_info = sys.exc_info()

        self.stub_out('nova.db.instance_fault_create', fake_db_fault_create)

        ctxt = context.get_admin_context()
        compute_utils.add_instance_fault_from_exc(
            ctxt, instance, NotImplementedError('test'), exc_info,
            fault_message='hoge')

    def _test_cleanup_running(self, action):
        # Common fixture: two instances deleted over an hour ago that are
        # still running on the hypervisor, with the configured cleanup
        # action; returns (context, instance1, instance2).
        admin_context = context.get_admin_context()
        deleted_at = (timeutils.utcnow() -
                      datetime.timedelta(hours=1, minutes=5))
        instance1 = self._create_fake_instance_obj({"deleted_at": deleted_at,
                                                    "deleted": True})
        instance2 = self._create_fake_instance_obj({"deleted_at": deleted_at,
                                                    "deleted": True})

        self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
        self.compute._get_instances_on_driver(
            admin_context,
            {'deleted': True,
             'soft_deleted': False,
             'host': self.compute.host}).AndReturn([instance1, instance2])
        self.flags(running_deleted_instance_timeout=3600,
                   running_deleted_instance_action=action)

        return admin_context, instance1, instance2

    def test_cleanup_running_deleted_instances_reap(self):
        ctxt, inst1, inst2 = self._test_cleanup_running('reap')
        bdms = block_device_obj.block_device_make_list(ctxt, [])

        self.mox.StubOutWithMock(self.compute, "_shutdown_instance")
        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                                 "get_by_instance_uuid")
        # Simulate an error and make sure cleanup proceeds with next instance.
        # inst1's shutdown raises; inst2 must still be processed.
        self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\
            AndRaise(test.TestingException)
        objects.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, inst1.uuid, use_slave=True).AndReturn(bdms)
        objects.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, inst2.uuid, use_slave=True).AndReturn(bdms)
        self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\
            AndReturn(None)

        self.mox.StubOutWithMock(self.compute, "_cleanup_volumes")
        self.compute._cleanup_volumes(ctxt, inst1['uuid'], bdms).\
            AndReturn(None)

        self.mox.ReplayAll()
        self.compute._cleanup_running_deleted_instances(ctxt)

    def test_cleanup_running_deleted_instances_shutdown(self):
        # 'shutdown' action: mark unbootable and power off, don't reap.
        ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')

        self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
        self.mox.StubOutWithMock(self.compute.driver, 'power_off')

        self.compute.driver.set_bootable(inst1, False)
        self.compute.driver.power_off(inst1)
        self.compute.driver.set_bootable(inst2, False)
        self.compute.driver.power_off(inst2)

        self.mox.ReplayAll()
        self.compute._cleanup_running_deleted_instances(ctxt)

    def test_cleanup_running_deleted_instances_shutdown_notimpl(self):
        # Drivers without set_bootable just log a warning and carry on.
        ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')

        self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
        self.mox.StubOutWithMock(self.compute.driver, 'power_off')

        self.compute.driver.set_bootable(inst1, False).AndRaise(
            NotImplementedError)
        compute_manager.LOG.warn(mox.IgnoreArg())
        self.compute.driver.power_off(inst1)
        self.compute.driver.set_bootable(inst2, False).AndRaise(
            NotImplementedError)
        compute_manager.LOG.warn(mox.IgnoreArg())
        self.compute.driver.power_off(inst2)

        self.mox.ReplayAll()
        self.compute._cleanup_running_deleted_instances(ctxt)

    def test_cleanup_running_deleted_instances_shutdown_error(self):
        # A power_off failure on one instance must not stop the loop.
        ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')

        self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
        self.mox.StubOutWithMock(self.compute.driver, 'power_off')
        self.mox.StubOutWithMock(compute_manager.LOG, 'exception')
        e = test.TestingException('bad')

        self.compute.driver.set_bootable(inst1, False)
        self.compute.driver.power_off(inst1).AndRaise(e)
        compute_manager.LOG.warn(mox.IgnoreArg())
        self.compute.driver.set_bootable(inst2, False)
        self.compute.driver.power_off(inst2).AndRaise(e)
        compute_manager.LOG.warn(mox.IgnoreArg())
        self.mox.ReplayAll()
        self.compute._cleanup_running_deleted_instances(ctxt)

    def test_running_deleted_instances(self):
        # _running_deleted_instances returns driver-visible instances that
        # are deleted in the DB and older than the configured timeout.
        admin_context = context.get_admin_context()
        self.compute.host = 'host'

        instance = self._create_fake_instance_obj()
        instance.deleted = True
        now = timeutils.utcnow()
        instance.deleted_at = now

        self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
        self.compute._get_instances_on_driver(
            admin_context,
            {'deleted': True,
             'soft_deleted': False,
             'host': self.compute.host}).AndReturn([instance])

        self.mox.StubOutWithMock(timeutils, 'is_older_than')
        timeutils.is_older_than(
            now, CONF.running_deleted_instance_timeout).AndReturn(True)

        self.mox.ReplayAll()
        val = self.compute._running_deleted_instances(admin_context)
        self.assertEqual(val, [instance])

    def _heal_instance_info_cache(self,
                                  _get_instance_nw_info_raise=False,
                                  _get_instance_nw_info_raise_cache=False):
        # Update on every call for the test
        self.flags(heal_instance_info_cache_interval=-1)
        ctxt = context.get_admin_context()

        instance_map = {}
        instances = []
        for x in range(8):
            inst_uuid = getattr(uuids, 'db_instance_%i' % x)
            instance_map[inst_uuid] = fake_instance.fake_db_instance(
                uuid=inst_uuid, host=CONF.host, created_at=None)
            # These won't be in our instance since they're not requested
            instances.append(instance_map[inst_uuid])

        call_info = {'get_all_by_host': 0, 'get_by_uuid': 0,
                     'get_nw_info': 0, 'expected_instance': None}

        def fake_instance_get_all_by_host(context, host,
                                          columns_to_join, use_slave=False):
            call_info['get_all_by_host'] += 1
            self.assertEqual([], columns_to_join)
            return instances[:]

        def fake_instance_get_by_uuid(context,
                                      instance_uuid, columns_to_join,
                                      use_slave=False):
            if instance_uuid not in instance_map:
                raise exception.InstanceNotFound(instance_id=instance_uuid)
            call_info['get_by_uuid'] += 1
            self.assertEqual(['system_metadata', 'info_cache', 'extra',
                              'extra.flavor'],
                             columns_to_join)
            return instance_map[instance_uuid]

        # NOTE(comstud): Override the stub in setUp()
        def fake_get_instance_nw_info(context, instance, use_slave=False):
            # Note that this exception gets caught in compute/manager
            # and is ignored.  However, the below increment of
            # 'get_nw_info' won't happen, and you'll get an assert
            # failure checking it below.
            self.assertEqual(call_info['expected_instance']['uuid'],
                             instance['uuid'])
            call_info['get_nw_info'] += 1
            if _get_instance_nw_info_raise:
                raise exception.InstanceNotFound(
                    instance_id=instance['uuid'])
            if _get_instance_nw_info_raise_cache:
                raise exception.InstanceInfoCacheNotFound(
                    instance_uuid=instance['uuid'])

        self.stub_out('nova.db.instance_get_all_by_host',
                      fake_instance_get_all_by_host)
        self.stub_out('nova.db.instance_get_by_uuid',
                      fake_instance_get_by_uuid)
        self.stubs.Set(self.compute.network_api,
                       'get_instance_nw_info',
                       fake_get_instance_nw_info)

        # Make an instance appear to be still Building
        instances[0]['vm_state'] = vm_states.BUILDING
        # Make an instance appear to be Deleting
        instances[1]['task_state'] = task_states.DELETING
        # '0', '1' should be skipped..
        call_info['expected_instance'] = instances[2]
        self.compute._heal_instance_info_cache(ctxt)
        self.assertEqual(1, call_info['get_all_by_host'])
        self.assertEqual(0, call_info['get_by_uuid'])
        self.assertEqual(1, call_info['get_nw_info'])

        call_info['expected_instance'] = instances[3]
        self.compute._heal_instance_info_cache(ctxt)
        self.assertEqual(1, call_info['get_all_by_host'])
        self.assertEqual(1, call_info['get_by_uuid'])
        self.assertEqual(2, call_info['get_nw_info'])

        # Make an instance switch hosts
        instances[4]['host'] = 'not-me'
        # Make an instance disappear
        instance_map.pop(instances[5]['uuid'])
        # Make an instance switch to be Deleting
        instances[6]['task_state'] = task_states.DELETING
        # '4', '5', and '6' should be skipped..
        call_info['expected_instance'] = instances[7]
        self.compute._heal_instance_info_cache(ctxt)
        self.assertEqual(1, call_info['get_all_by_host'])
        self.assertEqual(4, call_info['get_by_uuid'])
        self.assertEqual(3, call_info['get_nw_info'])

        # Should be no more left.
        self.assertEqual(0, len(self.compute._instance_uuids_to_heal))

        # This should cause a DB query now, so get a list of instances
        # where none can be processed to make sure we handle that case
        # cleanly.  Use just '0' (Building) and '1' (Deleting)
        instances = instances[0:2]
        self.compute._heal_instance_info_cache(ctxt)
        # Should have called the list once more
        self.assertEqual(2, call_info['get_all_by_host'])
        # Stays the same because we remove invalid entries from the list
        self.assertEqual(4, call_info['get_by_uuid'])
        # Stays the same because we didn't find anything to process
        self.assertEqual(3, call_info['get_nw_info'])

    def test_heal_instance_info_cache(self):
        self._heal_instance_info_cache()

    def test_heal_instance_info_cache_with_instance_exception(self):
        self._heal_instance_info_cache(_get_instance_nw_info_raise=True)

    def test_heal_instance_info_cache_with_info_cache_exception(self):
        self._heal_instance_info_cache(
            _get_instance_nw_info_raise_cache=True)

    @mock.patch('nova.objects.InstanceList.get_by_filters')
    @mock.patch('nova.compute.api.API.unrescue')
    def test_poll_rescued_instances(self, unrescue, get):
        # Rescued instances older than rescue_timeout get unrescued;
        # the third (recent) instance must be left alone.
        timed_out_time = timeutils.utcnow() - datetime.timedelta(minutes=5)
        not_timed_out_time = timeutils.utcnow()
        instances = [objects.Instance(uuid=uuids.pool_instance_1,
                                      vm_state=vm_states.RESCUED,
                                      launched_at=timed_out_time),
                     objects.Instance(uuid=uuids.pool_instance_2,
                                      vm_state=vm_states.RESCUED,
                                      launched_at=timed_out_time),
                     objects.Instance(uuid=uuids.pool_instance_3,
                                      vm_state=vm_states.RESCUED,
                                      launched_at=not_timed_out_time)]
        unrescued_instances = {uuids.pool_instance_1: False,
                               uuids.pool_instance_2: False}

        def fake_instance_get_all_by_filters(context, filters,
                                             expected_attrs=None,
                                             use_slave=False):
            self.assertEqual(["system_metadata"], expected_attrs)
            return instances

        get.side_effect = fake_instance_get_all_by_filters

        def fake_unrescue(context, instance):
            unrescued_instances[instance['uuid']] = True

        unrescue.side_effect = fake_unrescue

        self.flags(rescue_timeout=60)
        ctxt = context.get_admin_context()

        self.compute._poll_rescued_instances(ctxt)

        for instance in unrescued_instances.values():
            self.assertTrue(instance)
    @mock.patch('nova.objects.InstanceList.get_by_filters')
    def test_poll_rebooting_instances(self, get):
        # Instances stuck in any rebooting task state past reboot_timeout
        # are handed to the driver's poll_rebooting_instances.
        reboot_timeout = 60
        updated_at = timeutils.utcnow() - datetime.timedelta(minutes=5)
        to_poll = [objects.Instance(uuid=uuids.pool_instance_1,
                                    task_state=task_states.REBOOTING,
                                    updated_at=updated_at),
                   objects.Instance(uuid=uuids.pool_instance_2,
                                    task_state=task_states.REBOOT_STARTED,
                                    updated_at=updated_at),
                   objects.Instance(uuid=uuids.pool_instance_3,
                                    task_state=task_states.REBOOT_PENDING,
                                    updated_at=updated_at)]
        self.flags(reboot_timeout=reboot_timeout)
        get.return_value = to_poll
        ctxt = context.get_admin_context()

        with (mock.patch.object(
            self.compute.driver, 'poll_rebooting_instances'
        )) as mock_poll:
            self.compute._poll_rebooting_instances(ctxt)
            mock_poll.assert_called_with(reboot_timeout, to_poll)

        filters = {'host': 'fake-mini',
                   'task_state': [task_states.REBOOTING,
                                  task_states.REBOOT_STARTED,
                                  task_states.REBOOT_PENDING]}
        get.assert_called_once_with(ctxt, filters,
                                    expected_attrs=[], use_slave=True)

    def test_poll_unconfirmed_resizes(self):
        # One unconfirmed migration per instance below; only the RESIZED/
        # task_state=None instance should get auto-confirmed, the rest end
        # in 'error' or are left untouched (None) per their state.
        instances = [
            fake_instance.fake_db_instance(
                uuid=uuids.migration_instance_1,
                vm_state=vm_states.RESIZED,
                task_state=None),
            fake_instance.fake_db_instance(
                uuid=uuids.migration_instance_none),
            fake_instance.fake_db_instance(
                uuid=uuids.migration_instance_2,
                vm_state=vm_states.ERROR,
                task_state=None),
            fake_instance.fake_db_instance(
                uuid=uuids.migration_instance_3,
                vm_state=vm_states.ACTIVE,
                task_state=task_states.REBOOTING),
            fake_instance.fake_db_instance(
                uuid=uuids.migration_instance_4,
                vm_state=vm_states.RESIZED,
                task_state=None),
            fake_instance.fake_db_instance(
                uuid=uuids.migration_instance_5,
                vm_state=vm_states.ACTIVE,
                task_state=None),
            # The expected migration result will be None instead of error
            # since _poll_unconfirmed_resizes will not change it
            # when the instance vm state is RESIZED and task state
            # is deleting, see bug 1301696 for more detail
            fake_instance.fake_db_instance(
                uuid=uuids.migration_instance_6,
                vm_state=vm_states.RESIZED,
                task_state='deleting'),
            fake_instance.fake_db_instance(
                uuid=uuids.migration_instance_7,
                vm_state=vm_states.RESIZED,
                task_state='soft-deleting'),
            fake_instance.fake_db_instance(
                uuid=uuids.migration_instance_8,
                vm_state=vm_states.ACTIVE,
                task_state='resize_finish')]
        expected_migration_status = {
            uuids.migration_instance_1: 'confirmed',
            uuids.migration_instance_none: 'error',
            uuids.migration_instance_2: 'error',
            uuids.migration_instance_3: 'error',
            uuids.migration_instance_4: None,
            uuids.migration_instance_5: 'error',
            uuids.migration_instance_6: None,
            uuids.migration_instance_7: None,
            uuids.migration_instance_8: None}
        migrations = []
        for i, instance in enumerate(instances, start=1):
            fake_mig = test_migration.fake_db_migration()
            fake_mig.update({'id': i,
                             'instance_uuid': instance['uuid'],
                             'status': None})
            migrations.append(fake_mig)

        def fake_instance_get_by_uuid(context, instance_uuid,
                                      columns_to_join=None,
                                      use_slave=False):
            self.assertIn('metadata', columns_to_join)
            self.assertIn('system_metadata', columns_to_join)
            # raise InstanceNotFound exception for non-existing instance
            # represented by UUID: uuids.migration_instance_none
            if instance_uuid == uuids.db_instance_nonexist:
                raise exception.InstanceNotFound(instance_id=instance_uuid)
            for instance in instances:
                if instance['uuid'] == instance_uuid:
                    return instance

        def fake_migration_get_unconfirmed_by_dest_compute(
                context, resize_confirm_window, dest_compute,
                use_slave=False):
            self.assertEqual(dest_compute, CONF.host)
            return migrations

        def fake_migration_update(context, mid, updates):
            for migration in migrations:
                if migration['id'] == mid:
                    migration.update(updates)
                    return migration

        def fake_confirm_resize(context, instance, migration=None):
            # raise exception for uuids.migration_instance_4 to check
            # migration status does not get set to 'error' on
            # confirm_resize failure.
            if instance['uuid'] == uuids.migration_instance_4:
                raise test.TestingException('bomb')
            self.assertIsNotNone(migration)
            for migration2 in migrations:
                if (migration2['instance_uuid'] ==
                        migration['instance_uuid']):
                    migration2['status'] = 'confirmed'

        self.stub_out('nova.db.instance_get_by_uuid',
                      fake_instance_get_by_uuid)
        self.stub_out('nova.db.migration_get_unconfirmed_by_dest_compute',
                      fake_migration_get_unconfirmed_by_dest_compute)
        self.stub_out('nova.db.migration_update', fake_migration_update)
        self.stubs.Set(self.compute.compute_api, 'confirm_resize',
                       fake_confirm_resize)

        def fetch_instance_migration_status(instance_uuid):
            for migration in migrations:
                if migration['instance_uuid'] == instance_uuid:
                    return migration['status']

        self.flags(resize_confirm_window=60)
        ctxt = context.get_admin_context()

        self.compute._poll_unconfirmed_resizes(ctxt)

        for instance_uuid, status in six.iteritems(
                expected_migration_status):
            self.assertEqual(status,
                             fetch_instance_migration_status(instance_uuid))

    def test_instance_build_timeout_mixed_instances(self):
        # Tests that instances which failed to build within the configured
        # instance_build_timeout value are set to error state.
        self.flags(instance_build_timeout=30)
        ctxt = context.get_admin_context()
        created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)

        filters = {'vm_state': vm_states.BUILDING, 'host': CONF.host}
        # these are the ones that are expired
        old_instances = []
        for x in range(4):
            instance = {'uuid': str(uuid.uuid4()),
                        'created_at': created_at}
            instance.update(filters)
            old_instances.append(fake_instance.fake_db_instance(**instance))

        # not expired
        instances = list(old_instances)  # copy the contents of old_instances
        new_instance = {
            'uuid': str(uuid.uuid4()),
            'created_at': timeutils.utcnow(),
        }
        sort_key = 'created_at'
        sort_dir = 'desc'
        new_instance.update(filters)
        instances.append(fake_instance.fake_db_instance(**new_instance))

        # creating mocks
        with test.nested(
            mock.patch.object(self.compute.db.sqlalchemy.api,
                              'instance_get_all_by_filters',
                              return_value=instances),
            mock.patch.object(objects.Instance, 'save'),
        ) as (
            instance_get_all_by_filters,
            conductor_instance_update
        ):
            # run the code
            self.compute._check_instance_build_time(ctxt)
            # check our assertions
            instance_get_all_by_filters.assert_called_once_with(
                ctxt, filters, sort_key, sort_dir, marker=None,
                columns_to_join=[], limit=None)
            # Only the four expired instances should have been saved.
            self.assertThat(conductor_instance_update.mock_calls,
                            testtools_matchers.HasLength(len(old_instances)))
            for inst in old_instances:
                conductor_instance_update.assert_has_calls([
                    mock.call()])

    def test_get_resource_tracker_fail(self):
        # An unknown nodename must raise rather than silently create a
        # resource tracker.
        self.assertRaises(exception.NovaException,
                          self.compute._get_resource_tracker,
                          'invalidnodename')

    @mock.patch.object(objects.Instance, 'save')
    def test_instance_update_host_check(self, mock_save):
        # make sure rt usage doesn't happen if the host or node is different
        def fail_get(nodename):
            raise test.TestingException("wrong host/node")
        self.stubs.Set(self.compute, '_get_resource_tracker', fail_get)

        instance = self._create_fake_instance_obj({'host': 'someotherhost'})
        self.compute._instance_update(self.context, instance, vcpus=4)

        instance = self._create_fake_instance_obj({'node': 'someothernode'})
        self.compute._instance_update(self.context, instance, vcpus=4)

        params = {'host': 'someotherhost', 'node': 'someothernode'}
        instance = self._create_fake_instance_obj(params)
        self.compute._instance_update(self.context, instance, vcpus=4)

    @mock.patch('nova.objects.MigrationList.get_by_filters')
    @mock.patch('nova.objects.Migration.save')
    def test_destroy_evacuated_instance_on_shared_storage(self, mock_save,
                                                          mock_get):
        # With shared storage, the evacuated instance is destroyed locally
        # WITHOUT destroying its disks (destroy_disks=False).
        fake_context = context.get_admin_context()

        # instances in central db
        instances = [
            # those are still related to this host
            self._create_fake_instance_obj(
                {'host': self.compute.host}),
            self._create_fake_instance_obj(
                {'host': self.compute.host}),
            self._create_fake_instance_obj(
                {'host': self.compute.host})
        ]

        # those are already been evacuated to other host
        evacuated_instance = self._create_fake_instance_obj(
            {'host': 'otherhost'})
        migration = objects.Migration(
            instance_uuid=evacuated_instance.uuid)
        mock_get.return_value = [migration]
        instances.append(evacuated_instance)

        self.mox.StubOutWithMock(self.compute,
                                 '_get_instances_on_driver')
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'get_instance_nw_info')
        self.mox.StubOutWithMock(self.compute,
                                 '_get_instance_block_device_info')
        self.mox.StubOutWithMock(self.compute,
                                 '_is_instance_storage_shared')
        self.mox.StubOutWithMock(self.compute.driver, 'destroy')

        self.compute._get_instances_on_driver(
            fake_context, {'deleted': False}).AndReturn(instances)
        self.compute.network_api.get_instance_nw_info(
            fake_context, evacuated_instance).AndReturn(
                'fake_network_info')
        self.compute._get_instance_block_device_info(
            fake_context, evacuated_instance).AndReturn('fake_bdi')
        self.compute._is_instance_storage_shared(
            fake_context, evacuated_instance).AndReturn(True)
        self.compute.driver.destroy(fake_context, evacuated_instance,
                                    'fake_network_info',
                                    'fake_bdi', False)

        self.mox.ReplayAll()
        self.compute._destroy_evacuated_instances(fake_context)
        mock_get.assert_called_once_with(
            fake_context,
            {'source_compute': self.compute.host,
             'status': ['accepted', 'done'],
             'migration_type': 'evacuation'})

    @mock.patch('nova.objects.MigrationList.get_by_filters')
    @mock.patch('nova.objects.Migration.save')
    def test_destroy_evacuated_instance_with_disks(self, mock_save,
                                                   mock_get):
        # Without shared storage the local disks ARE destroyed
        # (destroy_disks=True), after the shared-storage check round-trip.
        fake_context = context.get_admin_context()

        # instances in central db
        instances = [
            # those are still related to this host
            self._create_fake_instance_obj(
                {'host': self.compute.host}),
            self._create_fake_instance_obj(
                {'host': self.compute.host}),
            self._create_fake_instance_obj(
                {'host': self.compute.host})
        ]

        # those are already been evacuated to other host
        evacuated_instance = self._create_fake_instance_obj(
            {'host': 'otherhost'})
        migration = objects.Migration(
            instance_uuid=evacuated_instance.uuid)
        mock_get.return_value = [migration]
        instances.append(evacuated_instance)

        self.mox.StubOutWithMock(self.compute,
                                 '_get_instances_on_driver')
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'get_instance_nw_info')
        self.mox.StubOutWithMock(self.compute,
                                 '_get_instance_block_device_info')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_instance_shared_storage_local')
        self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                                 'check_instance_shared_storage')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_instance_shared_storage_cleanup')
        self.mox.StubOutWithMock(self.compute.driver, 'destroy')

        self.compute._get_instances_on_driver(
            fake_context, {'deleted': False}).AndReturn(instances)
        self.compute.network_api.get_instance_nw_info(
            fake_context, evacuated_instance).AndReturn(
                'fake_network_info')
        self.compute._get_instance_block_device_info(
            fake_context, evacuated_instance).AndReturn('fake_bdi')
        self.compute.driver.check_instance_shared_storage_local(
            fake_context, evacuated_instance).AndReturn(
                {'filename': 'tmpfilename'})
        self.compute.compute_rpcapi.check_instance_shared_storage(
            fake_context, evacuated_instance,
            {'filename': 'tmpfilename'},
            host=None).AndReturn(False)
        self.compute.driver.check_instance_shared_storage_cleanup(
            fake_context, {'filename': 'tmpfilename'})
        self.compute.driver.destroy(fake_context, evacuated_instance,
                                    'fake_network_info',
                                    'fake_bdi', True)

        self.mox.ReplayAll()
        self.compute._destroy_evacuated_instances(fake_context)

    @mock.patch('nova.objects.MigrationList.get_by_filters')
    @mock.patch('nova.objects.Migration.save')
    def test_destroy_evacuated_instance_not_implemented(self, mock_save,
                                                        mock_get):
        # (Method continues beyond this chunk of the file.)
        fake_context = context.get_admin_context()

        # instances in central db
        instances = [
            # those are still related to this host
            self._create_fake_instance_obj(
                {'host': self.compute.host}),
            self._create_fake_instance_obj(
                {'host': self.compute.host}),
            self._create_fake_instance_obj(
                {'host': self.compute.host})
        ]

        # those are already been evacuated to other host
        evacuated_instance = self._create_fake_instance_obj(
            {'host': 'otherhost'})
        migration = objects.Migration(
            instance_uuid=evacuated_instance.uuid)
        mock_get.return_value = [migration]
        instances.append(evacuated_instance)

        self.mox.StubOutWithMock(self.compute,
                                 '_get_instances_on_driver')
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'get_instance_nw_info')
        self.mox.StubOutWithMock(self.compute,
                                 '_get_instance_block_device_info')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_instance_shared_storage_local')
        self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                                 'check_instance_shared_storage')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_instance_shared_storage_cleanup')
        self.mox.StubOutWithMock(self.compute.driver, 'destroy')

        self.compute._get_instances_on_driver(
            fake_context, {'deleted': False}).AndReturn(instances)
        self.compute.network_api.get_instance_nw_info(
            fake_context, evacuated_instance).AndReturn(
                'fake_network_info')
        self.compute._get_instance_block_device_info(
            fake_context, evacuated_instance).AndReturn('fake_bdi')
        self.compute.driver.check_instance_shared_storage_local(
            fake_context,
evacuated_instance).AndRaise(NotImplementedError()) self.compute.driver.destroy(fake_context, evacuated_instance, 'fake_network_info', 'fake_bdi', True) self.mox.ReplayAll() self.compute._destroy_evacuated_instances(fake_context) def test_complete_partial_deletion(self): admin_context = context.get_admin_context() instance = objects.Instance() instance.id = 1 instance.uuid = uuids.instance instance.vm_state = vm_states.DELETED instance.task_state = None instance.system_metadata = {'fake_key': 'fake_value'} instance.vcpus = 1 instance.memory_mb = 1 instance.project_id = 'fake-prj' instance.user_id = 'fake-user' instance.deleted = False def fake_destroy(): instance.deleted = True self.stubs.Set(instance, 'destroy', fake_destroy) self.stub_out('nova.db.block_device_mapping_get_all_by_instance', lambda *a, **k: None) self.stubs.Set(self.compute, '_complete_deletion', lambda *a, **k: None) self.stubs.Set(objects.Quotas, 'reserve', lambda *a, **k: None) self.compute._complete_partial_deletion(admin_context, instance) self.assertNotEqual(0, instance.deleted) def test_terminate_instance_updates_tracker(self): rt = self.compute._get_resource_tracker(NODENAME) admin_context = context.get_admin_context() self.assertEqual(0, rt.compute_node.vcpus_used) instance = self._create_fake_instance_obj() instance.vcpus = 1 rt.instance_claim(admin_context, instance) self.assertEqual(1, rt.compute_node.vcpus_used) self.compute.terminate_instance(admin_context, instance, [], []) self.assertEqual(0, rt.compute_node.vcpus_used) @mock.patch('nova.compute.manager.ComputeManager' '._notify_about_instance_usage') @mock.patch('nova.objects.Quotas.reserve') # NOTE(cdent): At least in this test destroy() on the instance sets it # state back to active, meaning the resource tracker won't # update properly. 
@mock.patch('nova.objects.Instance.destroy') def test_init_deleted_instance_updates_tracker(self, noop1, noop2, noop3): rt = self.compute._get_resource_tracker(NODENAME) admin_context = context.get_admin_context() self.assertEqual(0, rt.compute_node.vcpus_used) instance = self._create_fake_instance_obj() instance.vcpus = 1 self.assertEqual(0, rt.compute_node.vcpus_used) rt.instance_claim(admin_context, instance) self.compute._init_instance(admin_context, instance) self.assertEqual(1, rt.compute_node.vcpus_used) instance.vm_state = vm_states.DELETED self.compute._init_instance(admin_context, instance) self.assertEqual(0, rt.compute_node.vcpus_used) def test_init_instance_for_partial_deletion(self): admin_context = context.get_admin_context() instance = objects.Instance(admin_context) instance.id = 1 instance.vm_state = vm_states.DELETED instance.deleted = False instance.host = self.compute.host def fake_partial_deletion(context, instance): instance['deleted'] = instance['id'] self.stubs.Set(self.compute, '_complete_partial_deletion', fake_partial_deletion) self.compute._init_instance(admin_context, instance) self.assertNotEqual(0, instance['deleted']) def test_partial_deletion_raise_exception(self): admin_context = context.get_admin_context() instance = objects.Instance(admin_context) instance.uuid = str(uuid.uuid4()) instance.vm_state = vm_states.DELETED instance.deleted = False instance.host = self.compute.host self.mox.StubOutWithMock(self.compute, '_complete_partial_deletion') self.compute._complete_partial_deletion( admin_context, instance).AndRaise(ValueError) self.mox.ReplayAll() self.compute._init_instance(admin_context, instance) def test_add_remove_fixed_ip_updates_instance_updated_at(self): def _noop(*args, **kwargs): pass self.stubs.Set(self.compute.network_api, 'add_fixed_ip_to_instance', _noop) self.stubs.Set(self.compute.network_api, 'remove_fixed_ip_from_instance', _noop) instance = self._create_fake_instance_obj() updated_at_1 = 
instance['updated_at'] self.compute.add_fixed_ip_to_instance(self.context, 'fake', instance) updated_at_2 = db.instance_get_by_uuid(self.context, instance['uuid'])['updated_at'] self.compute.remove_fixed_ip_from_instance(self.context, 'fake', instance) updated_at_3 = db.instance_get_by_uuid(self.context, instance['uuid'])['updated_at'] updated_ats = (updated_at_1, updated_at_2, updated_at_3) self.assertEqual(len(updated_ats), len(set(updated_ats))) def test_no_pending_deletes_for_soft_deleted_instances(self): self.flags(reclaim_instance_interval=0) ctxt = context.get_admin_context() instance = self._create_fake_instance_obj( params={'host': CONF.host, 'vm_state': vm_states.SOFT_DELETED, 'deleted_at': timeutils.utcnow()}) self.compute._run_pending_deletes(ctxt) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertFalse(instance['cleaned']) def test_reclaim_queued_deletes(self): self.flags(reclaim_instance_interval=3600) ctxt = context.get_admin_context() # Active self._create_fake_instance_obj(params={'host': CONF.host}) # Deleted not old enough self._create_fake_instance_obj(params={'host': CONF.host, 'vm_state': vm_states.SOFT_DELETED, 'deleted_at': timeutils.utcnow()}) # Deleted old enough (only this one should be reclaimed) deleted_at = (timeutils.utcnow() - datetime.timedelta(hours=1, minutes=5)) self._create_fake_instance_obj( params={'host': CONF.host, 'vm_state': vm_states.SOFT_DELETED, 'deleted_at': deleted_at}) # Restoring # NOTE(hanlind): This specifically tests for a race condition # where restoring a previously soft deleted instance sets # deleted_at back to None, causing reclaim to think it can be # deleted, see LP #1186243. 
        # Restoring: a SOFT_DELETED instance whose task_state is RESTORING
        # must NOT be reclaimed, even though it would otherwise qualify.
        self._create_fake_instance_obj(
            params={'host': CONF.host,
                    'vm_state': vm_states.SOFT_DELETED,
                    'task_state': task_states.RESTORING})

        self.mox.StubOutWithMock(self.compute, '_delete_instance')
        # Expect exactly one delete call: only the old-enough soft-deleted
        # instance created above should be reclaimed.
        self.compute._delete_instance(
            ctxt, mox.IsA(objects.Instance), [],
            mox.IsA(objects.Quotas))

        self.mox.ReplayAll()

        self.compute._reclaim_queued_deletes(ctxt)

    def test_reclaim_queued_deletes_continue_on_error(self):
        # Verify that reclaim continues on error.
        self.flags(reclaim_instance_interval=3600)
        ctxt = context.get_admin_context()

        deleted_at = (timeutils.utcnow() -
                      datetime.timedelta(hours=1, minutes=5))
        # Two instances, both old enough to be reclaimed.
        instance1 = self._create_fake_instance_obj(
            params={'host': CONF.host,
                    'vm_state': vm_states.SOFT_DELETED,
                    'deleted_at': deleted_at})
        instance2 = self._create_fake_instance_obj(
            params={'host': CONF.host,
                    'vm_state': vm_states.SOFT_DELETED,
                    'deleted_at': deleted_at})
        instances = []
        instances.append(instance1)
        instances.append(instance2)

        self.mox.StubOutWithMock(objects.InstanceList, 'get_by_filters')
        self.mox.StubOutWithMock(self.compute, '_deleted_old_enough')
        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                                 'get_by_instance_uuid')
        self.mox.StubOutWithMock(self.compute, '_delete_instance')

        objects.InstanceList.get_by_filters(
            ctxt, mox.IgnoreArg(),
            expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
            use_slave=True).AndReturn(instances)

        # The first instance delete fails.
        self.compute._deleted_old_enough(instance1, 3600).AndReturn(True)
        objects.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, instance1.uuid).AndReturn([])
        self.compute._delete_instance(ctxt, instance1, [],
                                      self.none_quotas).AndRaise(
                                          test.TestingException)

        # The second instance delete that follows.
self.compute._deleted_old_enough(instance2, 3600).AndReturn(True) objects.BlockDeviceMappingList.get_by_instance_uuid( ctxt, instance2.uuid).AndReturn([]) self.compute._delete_instance(ctxt, instance2, [], self.none_quotas) self.mox.ReplayAll() self.compute._reclaim_queued_deletes(ctxt) def test_sync_power_states(self): ctxt = self.context.elevated() self._create_fake_instance_obj({'host': self.compute.host}) self._create_fake_instance_obj({'host': self.compute.host}) self._create_fake_instance_obj({'host': self.compute.host}) self.mox.StubOutWithMock(self.compute.driver, 'get_info') self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state') # Check to make sure task continues on error. self.compute.driver.get_info(mox.IgnoreArg()).AndRaise( exception.InstanceNotFound(instance_id=uuids.instance)) self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(), power_state.NOSTATE).AndRaise( exception.InstanceNotFound(instance_id=uuids.instance)) self.compute.driver.get_info(mox.IgnoreArg()).AndReturn( hardware.InstanceInfo(state=power_state.RUNNING)) self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(), power_state.RUNNING, use_slave=True) self.compute.driver.get_info(mox.IgnoreArg()).AndReturn( hardware.InstanceInfo(state=power_state.SHUTDOWN)) self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(), power_state.SHUTDOWN, use_slave=True) self.mox.ReplayAll() self.compute._sync_power_states(ctxt) def _test_lifecycle_event(self, lifecycle_event, vm_power_state, is_actual_state=True): instance = self._create_fake_instance_obj() uuid = instance['uuid'] self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state') self.mox.StubOutWithMock(self.compute, '_get_power_state') actual_state = (vm_power_state if vm_power_state is not None and is_actual_state else power_state.NOSTATE) self.compute._get_power_state( mox.IgnoreArg(), mox.ContainsKeyValue('uuid', uuid)).AndReturn(actual_state) if actual_state == vm_power_state: 
            # The power-state sync is only expected when the state reported
            # by the hypervisor matches the state implied by the event.
            self.compute._sync_instance_power_state(
                mox.IgnoreArg(),
                mox.ContainsKeyValue('uuid', uuid),
                vm_power_state)
        self.mox.ReplayAll()
        self.compute.handle_events(event.LifecycleEvent(uuid,
                                                        lifecycle_event))
        self.mox.VerifyAll()
        # Unset the stubs so this helper can be called repeatedly from a
        # single test method (see test_lifecycle_events below).
        self.mox.UnsetStubs()

    def test_lifecycle_events(self):
        # Exercise the lifecycle-event handler for each known event type,
        # plus an unknown event (-1) that maps to no power state.
        self._test_lifecycle_event(event.EVENT_LIFECYCLE_STOPPED,
                                   power_state.SHUTDOWN)
        self._test_lifecycle_event(event.EVENT_LIFECYCLE_STOPPED,
                                   power_state.SHUTDOWN,
                                   is_actual_state=False)
        self._test_lifecycle_event(event.EVENT_LIFECYCLE_STARTED,
                                   power_state.RUNNING)
        self._test_lifecycle_event(event.EVENT_LIFECYCLE_PAUSED,
                                   power_state.PAUSED)
        self._test_lifecycle_event(event.EVENT_LIFECYCLE_RESUMED,
                                   power_state.RUNNING)
        self._test_lifecycle_event(-1, None)

    def test_lifecycle_event_non_existent_instance(self):
        # No error raised for non-existent instance because of inherent race
        # between database updates and hypervisor events. See bug #1180501.
        event_instance = event.LifecycleEvent('does-not-exist',
                                              event.EVENT_LIFECYCLE_STOPPED)
        self.compute.handle_events(event_instance)

    @mock.patch.object(objects.Migration, 'get_by_id')
    @mock.patch.object(objects.Quotas, 'rollback')
    def test_confirm_resize_roll_back_quota_migration_not_found(self,
            mock_rollback, mock_get_by_id):
        # Quota reservations must be rolled back when confirm_resize cannot
        # find the migration record.
        instance = self._create_fake_instance_obj()

        migration = objects.Migration()
        migration.instance_uuid = instance.uuid
        migration.status = 'finished'
        migration.id = 0

        mock_get_by_id.side_effect = exception.MigrationNotFound(
            migration_id=0)
        self.compute.confirm_resize(self.context, instance=instance,
                                    migration=migration, reservations=[])
        self.assertTrue(mock_rollback.called)

    @mock.patch.object(instance_obj.Instance, 'get_by_uuid')
    @mock.patch.object(objects.Quotas, 'rollback')
    def test_confirm_resize_roll_back_quota_instance_not_found(self,
            mock_rollback, mock_get_by_id):
        # Quota reservations must be rolled back when the instance being
        # confirmed has already gone away.
        instance = self._create_fake_instance_obj()

        migration = objects.Migration()
        migration.instance_uuid = instance.uuid
        migration.status = 'finished'
        migration.id = 0
mock_get_by_id.side_effect = exception.InstanceNotFound( instance_id=instance.uuid) self.compute.confirm_resize(self.context, instance=instance, migration=migration, reservations=[]) self.assertTrue(mock_rollback.called) @mock.patch.object(objects.Migration, 'get_by_id') @mock.patch.object(objects.Quotas, 'rollback') def test_confirm_resize_roll_back_quota_status_confirmed(self, mock_rollback, mock_get_by_id): instance = self._create_fake_instance_obj() migration = objects.Migration() migration.instance_uuid = instance.uuid migration.status = 'confirmed' migration.id = 0 mock_get_by_id.return_value = migration self.compute.confirm_resize(self.context, instance=instance, migration=migration, reservations=[]) self.assertTrue(mock_rollback.called) @mock.patch.object(objects.Migration, 'get_by_id') @mock.patch.object(objects.Quotas, 'rollback') def test_confirm_resize_roll_back_quota_status_dummy(self, mock_rollback, mock_get_by_id): instance = self._create_fake_instance_obj() migration = objects.Migration() migration.instance_uuid = instance.uuid migration.status = 'dummy' migration.id = 0 mock_get_by_id.return_value = migration self.compute.confirm_resize(self.context, instance=instance, migration=migration, reservations=[]) self.assertTrue(mock_rollback.called) def test_allow_confirm_resize_on_instance_in_deleting_task_state(self): instance = self._create_fake_instance_obj() old_type = instance.flavor new_type = flavors.get_flavor_by_flavor_id('4') instance.flavor = new_type instance.old_flavor = old_type instance.new_flavor = new_type fake_rt = self.mox.CreateMockAnything() def fake_drop_move_claim(*args, **kwargs): pass def fake_get_resource_tracker(self): return fake_rt def fake_setup_networks_on_host(self, *args, **kwargs): pass self.stubs.Set(fake_rt, 'drop_move_claim', fake_drop_move_claim) self.stubs.Set(self.compute, '_get_resource_tracker', fake_get_resource_tracker) self.stubs.Set(self.compute.network_api, 'setup_networks_on_host', 
fake_setup_networks_on_host) migration = objects.Migration(context=self.context.elevated()) migration.instance_uuid = instance.uuid migration.status = 'finished' migration.migration_type = 'resize' migration.create() instance.task_state = task_states.DELETING instance.vm_state = vm_states.RESIZED instance.system_metadata = {} instance.save() self.compute.confirm_resize(self.context, instance=instance, migration=migration, reservations=[]) instance.refresh() self.assertEqual(vm_states.ACTIVE, instance['vm_state']) def _get_instance_and_bdm_for_dev_defaults_tests(self): instance = self._create_fake_instance_obj( params={'root_device_name': '/dev/vda'}) block_device_mapping = block_device_obj.block_device_make_list( self.context, [fake_block_device.FakeDbBlockDeviceDict( {'id': 3, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vda', 'source_type': 'volume', 'destination_type': 'volume', 'image_id': 'fake-image-id-1', 'boot_index': 0})]) return instance, block_device_mapping def test_default_block_device_names_empty_instance_root_dev(self): instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests() instance.root_device_name = None self.mox.StubOutWithMock(objects.Instance, 'save') self.mox.StubOutWithMock(self.compute, '_default_device_names_for_instance') self.compute._default_device_names_for_instance(instance, '/dev/vda', [], [], [bdm for bdm in bdms]) self.mox.ReplayAll() self.compute._default_block_device_names(self.context, instance, {}, bdms) self.assertEqual('/dev/vda', instance.root_device_name) def test_default_block_device_names_empty_root_device(self): instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests() bdms[0]['device_name'] = None self.mox.StubOutWithMock(self.compute, '_default_device_names_for_instance') self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save') bdms[0].save().AndReturn(None) self.compute._default_device_names_for_instance(instance, '/dev/vda', [], [], [bdm for bdm in bdms]) 
self.mox.ReplayAll() self.compute._default_block_device_names(self.context, instance, {}, bdms) def test_default_block_device_names_no_root_device(self): instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests() instance.root_device_name = None bdms[0]['device_name'] = None self.mox.StubOutWithMock(objects.Instance, 'save') self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save') self.mox.StubOutWithMock(self.compute, '_default_root_device_name') self.mox.StubOutWithMock(self.compute, '_default_device_names_for_instance') self.compute._default_root_device_name(instance, mox.IgnoreArg(), bdms[0]).AndReturn('/dev/vda') bdms[0].save().AndReturn(None) self.compute._default_device_names_for_instance(instance, '/dev/vda', [], [], [bdm for bdm in bdms]) self.mox.ReplayAll() self.compute._default_block_device_names(self.context, instance, {}, bdms) self.assertEqual('/dev/vda', instance.root_device_name) def test_default_block_device_names_with_blank_volumes(self): instance = self._create_fake_instance_obj() image_meta = {} root_volume = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'id': 1, 'instance_uuid': uuids.block_device_instance, 'source_type': 'volume', 'destination_type': 'volume', 'image_id': 'fake-image-id-1', 'boot_index': 0})) blank_volume1 = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'id': 2, 'instance_uuid': uuids.block_device_instance, 'source_type': 'blank', 'destination_type': 'volume', 'boot_index': -1})) blank_volume2 = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'id': 3, 'instance_uuid': uuids.block_device_instance, 'source_type': 'blank', 'destination_type': 'volume', 'boot_index': -1})) ephemeral = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'id': 4, 'instance_uuid': uuids.block_device_instance, 'source_type': 'blank', 'destination_type': 'local'})) swap = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 
'id': 5, 'instance_uuid': uuids.block_device_instance, 'source_type': 'blank', 'destination_type': 'local', 'guest_format': 'swap' })) bdms = block_device_obj.block_device_make_list( self.context, [root_volume, blank_volume1, blank_volume2, ephemeral, swap]) with test.nested( mock.patch.object(self.compute, '_default_root_device_name', return_value='/dev/vda'), mock.patch.object(objects.BlockDeviceMapping, 'save'), mock.patch.object(self.compute, '_default_device_names_for_instance') ) as (default_root_device, object_save, default_device_names): self.compute._default_block_device_names(self.context, instance, image_meta, bdms) default_root_device.assert_called_once_with(instance, image_meta, bdms[0]) self.assertEqual('/dev/vda', instance.root_device_name) self.assertTrue(object_save.called) default_device_names.assert_called_once_with(instance, '/dev/vda', [bdms[-2]], [bdms[-1]], [bdm for bdm in bdms[:-2]]) def test_reserve_block_device_name(self): instance = self._create_fake_instance_obj( params={'root_device_name': '/dev/vda'}) bdm = objects.BlockDeviceMapping( **{'context': self.context, 'source_type': 'image', 'destination_type': 'local', 'image_id': uuids.image_instance, 'device_name': '/dev/vda', 'instance_uuid': instance.uuid}) bdm.create() self.compute.reserve_block_device_name(self.context, instance, '/dev/vdb', uuids.block_device_instance, 'virtio', 'disk') bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, instance.uuid) bdms = list(bdms) self.assertEqual(len(bdms), 2) bdms.sort(key=operator.attrgetter('device_name')) vol_bdm = bdms[1] self.assertEqual(vol_bdm.source_type, 'volume') self.assertIsNone(vol_bdm.boot_index) self.assertIsNone(vol_bdm.guest_format) self.assertEqual(vol_bdm.destination_type, 'volume') self.assertEqual(vol_bdm.device_name, '/dev/vdb') self.assertEqual(vol_bdm.volume_id, uuids.block_device_instance) self.assertEqual(vol_bdm.disk_bus, 'virtio') self.assertEqual(vol_bdm.device_type, 'disk') def 
test_reserve_block_device_name_with_iso_instance(self): instance = self._create_fake_instance_obj( params={'root_device_name': '/dev/hda'}) bdm = objects.BlockDeviceMapping( context=self.context, **{'source_type': 'image', 'destination_type': 'local', 'image_id': 'fake-image-id', 'device_name': '/dev/hda', 'instance_uuid': instance.uuid}) bdm.create() self.compute.reserve_block_device_name(self.context, instance, '/dev/vdb', uuids.block_device_instance, 'ide', 'disk') bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, instance.uuid) bdms = list(bdms) self.assertEqual(2, len(bdms)) bdms.sort(key=operator.attrgetter('device_name')) vol_bdm = bdms[1] self.assertEqual('volume', vol_bdm.source_type) self.assertEqual('volume', vol_bdm.destination_type) self.assertEqual('/dev/hdb', vol_bdm.device_name) self.assertEqual(uuids.block_device_instance, vol_bdm.volume_id) self.assertEqual('ide', vol_bdm.disk_bus) self.assertEqual('disk', vol_bdm.device_type) @mock.patch.object(cinder.API, 'get_snapshot') def test_quiesce(self, mock_snapshot_get): # ensure instance can be quiesced and unquiesced instance = self._create_fake_instance_obj() mapping = [{'source_type': 'snapshot', 'snapshot_id': 'fake-id1'}, {'source_type': 'snapshot', 'snapshot_id': 'fake-id2'}] # unquiesce should wait until volume snapshots are completed mock_snapshot_get.side_effect = [{'status': 'creating'}, {'status': 'available'}] * 2 self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.compute.quiesce_instance(self.context, instance) self.compute.unquiesce_instance(self.context, instance, mapping) self.compute.terminate_instance(self.context, instance, [], []) mock_snapshot_get.assert_any_call(mock.ANY, 'fake-id1') mock_snapshot_get.assert_any_call(mock.ANY, 'fake-id2') self.assertEqual(4, mock_snapshot_get.call_count) def test_instance_fault_message_no_rescheduled_details_without_retry(self): """This test simulates a spawn failure with 
no retry data. If driver spawn raises an exception and there is no retry data available, the instance fault message should not contain any details about rescheduling. The fault message field is limited in size and a long message about rescheduling displaces the original error message. """ class TestException(Exception): pass instance = self._create_fake_instance_obj() with mock.patch.object(self.compute.driver, 'spawn') as mock_spawn: mock_spawn.side_effect = TestException('Preserve this') self.compute.build_and_run_instance( self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertEqual('Preserve this', instance.fault.message) class ComputeAPITestCase(BaseTestCase): def setUp(self): def fake_get_nw_info(cls, ctxt, instance): self.assertTrue(ctxt.is_admin) return fake_network.fake_get_instance_nw_info(self, 1, 1) super(ComputeAPITestCase, self).setUp() self.useFixture(fixtures.SpawnIsSynchronousFixture()) self.stubs.Set(network_api.API, 'get_instance_nw_info', fake_get_nw_info) self.security_group_api = ( openstack_driver.get_openstack_security_group_driver()) self.compute_api = compute.API( security_group_api=self.security_group_api) self.fake_image = { 'id': 'f9000000-0000-0000-0000-000000000000', 'name': 'fake_name', 'status': 'active', 'properties': {'kernel_id': 'fake_kernel_id', 'ramdisk_id': 'fake_ramdisk_id'}, } def fake_show(obj, context, image_id, **kwargs): if image_id: return self.fake_image else: raise exception.ImageNotFound(image_id=image_id) self.fake_show = fake_show # Mock out build_instances and rebuild_instance since nothing in these # tests should need those to actually run. We do this to avoid # possible races with other tests that actually test those methods # and mock things out within them, like conductor tests. 
self.build_instances_mock = mock.Mock(autospec=True) self.compute_api.compute_task_api.build_instances = \ self.build_instances_mock self.rebuild_instance_mock = mock.Mock(autospec=True) self.compute_api.compute_task_api.rebuild_instance = \ self.rebuild_instance_mock def _run_instance(self, params=None): instance = self._create_fake_instance_obj(params, services=True) instance_uuid = instance['uuid'] self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.refresh() self.assertIsNone(instance['task_state']) return instance, instance_uuid def test_create_with_too_little_ram(self): # Test an instance type with too little memory. inst_type = flavors.get_default_flavor() inst_type['memory_mb'] = 1 self.fake_image['min_ram'] = 2 self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.assertRaises(exception.FlavorMemoryTooSmall, self.compute_api.create, self.context, inst_type, self.fake_image['id']) # Now increase the inst_type memory and make sure all is fine. inst_type['memory_mb'] = 2 (refs, resv_id) = self.compute_api.create(self.context, inst_type, self.fake_image['id']) def test_create_with_too_little_disk(self): # Test an instance type with too little disk space. inst_type = flavors.get_default_flavor() inst_type['root_gb'] = 1 self.fake_image['min_disk'] = 2 self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.assertRaises(exception.FlavorDiskSmallerThanMinDisk, self.compute_api.create, self.context, inst_type, self.fake_image['id']) # Now increase the inst_type disk space and make sure all is fine. inst_type['root_gb'] = 2 (refs, resv_id) = self.compute_api.create(self.context, inst_type, self.fake_image['id']) def test_create_with_too_large_image(self): # Test an instance type with too little disk space. 
        inst_type = flavors.get_default_flavor()
        inst_type['root_gb'] = 1
        # One byte over the 1 GB flavor root disk: must be rejected.
        self.fake_image['size'] = '1073741825'

        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        self.assertRaises(exception.FlavorDiskSmallerThanImage,
            self.compute_api.create, self.context,
            inst_type, self.fake_image['id'])

        # Reduce image to 1 GB limit and ensure it works
        self.fake_image['size'] = '1073741824'
        (refs, resv_id) = self.compute_api.create(self.context,
                inst_type, self.fake_image['id'])

    def test_create_just_enough_ram_and_disk(self):
        # Test an instance type with just enough ram and disk space.

        inst_type = flavors.get_default_flavor()
        inst_type['root_gb'] = 2
        inst_type['memory_mb'] = 2

        self.fake_image['min_ram'] = 2
        self.fake_image['min_disk'] = 2
        self.fake_image['name'] = 'fake_name'
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        # Boundary case: values exactly equal to the image minimums must
        # be accepted.
        (refs, resv_id) = self.compute_api.create(self.context,
                inst_type, self.fake_image['id'])

    def test_create_with_no_ram_and_disk_reqs(self):
        # Test an instance type with no min_ram or min_disk.
        inst_type = flavors.get_default_flavor()
        inst_type['root_gb'] = 1
        inst_type['memory_mb'] = 1

        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        # With no image minimums set, even minimal flavor values succeed.
        (refs, resv_id) = self.compute_api.create(self.context,
                inst_type, self.fake_image['id'])

    def test_create_bdm_from_flavor(self):
        # The flavor's swap/ephemeral sizes must be materialized as block
        # device mappings on the created instance.
        instance_type_params = {
            'flavorid': 'test', 'name': 'test',
            'swap': 1024, 'ephemeral_gb': 1, 'root_gb': 1,
        }
        self._create_instance_type(params=instance_type_params)
        inst_type = flavors.get_flavor_by_name('test')

        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
        (refs, resv_id) = self.compute_api.create(self.context, inst_type,
                                                  self.fake_image['id'])

        instance_uuid = refs[0]['uuid']
        bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
            self.context, instance_uuid)

        ephemeral = list(filter(block_device.new_format_is_ephemeral, bdms))
        self.assertEqual(1, len(ephemeral))
        swap = list(filter(block_device.new_format_is_swap, bdms))
        self.assertEqual(1, len(swap))

        # Sizes come straight from the flavor definition above.
        self.assertEqual(1024, swap[0].volume_size)
        self.assertEqual(1, ephemeral[0].volume_size)

    def test_create_with_deleted_image(self):
        # If we're given a deleted image by glance, we should not be able to
        # build from it
        inst_type = flavors.get_default_flavor()
        self.fake_image['name'] = 'fake_name'
        self.fake_image['status'] = 'DELETED'
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        expected_message = (
            exception.ImageNotActive.msg_fmt % {'image_id':
                                                self.fake_image['id']})
        with testtools.ExpectedException(exception.ImageNotActive,
                                         expected_message):
            self.compute_api.create(self.context, inst_type,
                                    self.fake_image['id'])

    @mock.patch('nova.virt.hardware.numa_get_constraints')
    def test_create_with_numa_topology(self, numa_constraints_mock):
        inst_type = flavors.get_default_flavor()
        # Two-cell topology forced onto the instance via the mocked
        # numa_get_constraints below.
        numa_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([1, 2]), memory=512),
                   objects.InstanceNUMACell(
                id=1, cpuset=set([3, 4]), memory=512)])
        numa_constraints_mock.return_value = numa_topology

        instances, resv_id = self.compute_api.create(self.context, inst_type,
                                                     self.fake_image['id'])

        numa_constraints_mock.assert_called_once_with(
            inst_type, test.MatchType(objects.ImageMeta))
        # Compare primitives: the instance carries its own topology object,
        # so identity cannot be asserted, only equal content.
        self.assertEqual(
            numa_topology.cells[0].obj_to_primitive(),
            instances[0].numa_topology.cells[0].obj_to_primitive())
        self.assertEqual(
            numa_topology.cells[1].obj_to_primitive(),
            instances[0].numa_topology.cells[1].obj_to_primitive())

    def test_create_instance_defaults_display_name(self):
        # Verify that a default display_name is generated when none (or an
        # explicit None) is supplied at create time.
        cases = [dict(), dict(display_name=None)]
        for instance in cases:
            (ref, resv_id) = self.compute_api.create(self.context,
                flavors.get_default_flavor(),
                'f5000000-0000-0000-0000-000000000000', **instance)
            self.assertIsNotNone(ref[0]['display_name'])

    def test_create_instance_sets_system_metadata(self):
        # Make sure image properties are copied into system metadata.
        (ref, resv_id) = self.compute_api.create(
                self.context,
                instance_type=flavors.get_default_flavor(),
                image_href='f5000000-0000-0000-0000-000000000000')

        sys_metadata = db.instance_system_metadata_get(self.context,
                ref[0]['uuid'])

        image_props = {'image_kernel_id': 'fake_kernel_id',
                       'image_ramdisk_id': 'fake_ramdisk_id',
                       'image_something_else': 'meow', }
        for key, value in six.iteritems(image_props):
            self.assertIn(key, sys_metadata)
            self.assertEqual(value, sys_metadata[key])

    def test_create_saves_flavor(self):
        instance_type = flavors.get_default_flavor()
        (ref, resv_id) = self.compute_api.create(
                self.context,
                instance_type=instance_type,
                image_href=uuids.image_href_id)
        instance = objects.Instance.get_by_uuid(self.context, ref[0]['uuid'])
        # The full flavor is stored on the instance object; the legacy
        # instance_type_id system-metadata key must not be written.
        self.assertEqual(instance_type.flavorid, instance.flavor.flavorid)
        self.assertNotIn('instance_type_id', instance.system_metadata)

    def test_create_instance_associates_security_groups(self):
        # Make sure create associates security groups.
        group = self._create_group()
        (ref, resv_id) = self.compute_api.create(
            self.context,
            instance_type=flavors.get_default_flavor(),
            image_href=uuids.image_href_id,
            security_group=['testgroup'])

        groups_for_instance = db.security_group_get_by_instance(
            self.context, ref[0]['uuid'])
        self.assertEqual(1, len(groups_for_instance))
        self.assertEqual(group.id, groups_for_instance[0].id)
        group_with_instances = db.security_group_get(self.context,
                                      group.id,
                                      columns_to_join=['instances'])
        self.assertEqual(1, len(group_with_instances.instances))

    def test_create_instance_with_invalid_security_group_raises(self):
        # A nonexistent security group must abort the create before any
        # instance record is written.
        instance_type = flavors.get_default_flavor()

        pre_build_len = len(db.instance_get_all(self.context))
        self.assertRaises(exception.SecurityGroupNotFoundForProject,
                          self.compute_api.create,
                          self.context,
                          instance_type=instance_type,
                          image_href=None,
                          security_group=['this_is_a_fake_sec_group'])
        self.assertEqual(pre_build_len,
                         len(db.instance_get_all(self.context)))

    def test_create_with_large_user_data(self):
        # Test an instance type with too much user data.
        inst_type = flavors.get_default_flavor()
        self.fake_image['min_ram'] = 2
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        self.assertRaises(exception.InstanceUserDataTooLarge,
            self.compute_api.create, self.context, inst_type,
            self.fake_image['id'], user_data=(b'1' * 65536))

    def test_create_with_malformed_user_data(self):
        # Test an instance type with malformed user data.
        inst_type = flavors.get_default_flavor()
        self.fake_image['min_ram'] = 2
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        self.assertRaises(exception.InstanceUserDataMalformed,
            self.compute_api.create, self.context, inst_type,
            self.fake_image['id'], user_data=b'banana')

    def test_create_with_base64_user_data(self):
        # Test an instance type with an acceptable amount of user data.
        inst_type = flavors.get_default_flavor()
        self.fake_image['min_ram'] = 2
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        # NOTE(mikal): a string of length 48510 encodes to 65532 characters of
        # base64
        (refs, resv_id) = self.compute_api.create(
            self.context, inst_type, self.fake_image['id'],
            user_data=base64.encodestring(b'1' * 48510))

    def test_populate_instance_for_create(self):
        # _populate_instance_for_create must seed the build-time state
        # (vm/task state, launch index, base image ref) on the instance.
        base_options = {'image_ref': self.fake_image['id'],
                        'system_metadata': {'fake': 'value'},
                        'uuid': uuids.instance}
        instance = objects.Instance()
        instance.update(base_options)
        inst_type = flavors.get_flavor_by_name("m1.tiny")
        instance = self.compute_api._populate_instance_for_create(
                                    self.context,
                                    instance,
                                    self.fake_image,
                                    1,
                                    security_groups=objects.SecurityGroupList(),
                                    instance_type=inst_type)
        self.assertEqual(str(base_options['image_ref']),
                         instance['system_metadata']['image_base_image_ref'])
        self.assertEqual(vm_states.BUILDING, instance['vm_state'])
        self.assertEqual(task_states.SCHEDULING, instance['task_state'])
        self.assertEqual(1, instance['launch_index'])
        self.assertIsNotNone(instance.get('uuid'))
        self.assertEqual([], instance.security_groups.objects)

    def test_default_hostname_generator(self):
        # Hostnames are derived from display_name (sanitized), falling back
        # to 'server-<uuid>' when no display name is given.
        fake_uuids = [str(uuid.uuid4()) for x in range(4)]

        orig_populate = self.compute_api._populate_instance_for_create

        def _fake_populate(context, base_options, *args, **kwargs):
            # Force a known uuid so the expected fallback hostname is
            # predictable.
            base_options['uuid'] = fake_uuids.pop(0)
            return orig_populate(context, base_options, *args, **kwargs)

        self.stubs.Set(self.compute_api,
                '_populate_instance_for_create',
                _fake_populate)
        cases = [(None, 'server-%s' % fake_uuids[0]),
                 ('Hello, Server!', 'hello-server'),
                 ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
                 ('hello_server', 'hello-server')]
        for display_name, hostname in cases:
            (ref, resv_id) = self.compute_api.create(self.context,
                flavors.get_default_flavor(),
                image_href=uuids.image_href_id,
                display_name=display_name)

            self.assertEqual(ref[0]['hostname'], hostname)

    def test_instance_create_adds_to_instance_group(self):
        # Creating with a 'group' scheduler hint adds the new instance to
        # that server group's members.
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        group = objects.InstanceGroup(self.context)
        group.uuid = str(uuid.uuid4())
        group.project_id = self.context.project_id
        group.user_id = self.context.user_id
        group.create()

        inst_type = flavors.get_default_flavor()
        (refs, resv_id) = self.compute_api.create(
            self.context, inst_type, self.fake_image['id'],
            scheduler_hints={'group': group.uuid})

        group = objects.InstanceGroup.get_by_uuid(self.context, group.uuid)
        self.assertIn(refs[0]['uuid'], group.members)

    def test_instance_create_with_group_name_fails(self):
        # A non-UUID group hint is rejected as invalid input.
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
        inst_type = flavors.get_default_flavor()
        self.assertRaises(
            exception.InvalidInput, self.compute_api.create, self.context,
            inst_type, self.fake_image['id'],
            scheduler_hints={'group': 'non-uuid'})

    def test_instance_create_with_group_uuid_fails_group_not_exist(self):
        # A well-formed but unknown group UUID raises InstanceGroupNotFound.
        self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
                      self.fake_show)
        inst_type = flavors.get_default_flavor()
        self.assertRaises(
            exception.InstanceGroupNotFound, self.compute_api.create,
            self.context, inst_type, self.fake_image['id'],
            scheduler_hints={'group': '5b674f73-c8cf-40ef-9965-3b6fe4b304b1'})

    def test_destroy_instance_disassociates_security_groups(self):
        # Make sure destroying disassociates security groups.
        group = self._create_group()

        (ref, resv_id) = self.compute_api.create(
            self.context,
            instance_type=flavors.get_default_flavor(),
            image_href=uuids.image_href_id,
            security_group=['testgroup'])

        db.instance_destroy(self.context, ref[0]['uuid'])
        group = db.security_group_get(self.context, group['id'],
                                      columns_to_join=['instances'])
        self.assertEqual(0, len(group['instances']))

    def test_destroy_security_group_disassociates_instances(self):
        # Make sure destroying security groups disassociates instances.
        group = self._create_group()

        (ref, resv_id) = self.compute_api.create(
            self.context,
            instance_type=flavors.get_default_flavor(),
            image_href=uuids.image_href_id,
            security_group=['testgroup'])

        db.security_group_destroy(self.context, group['id'])
        # The destroyed group is only visible in a read_deleted context.
        admin_deleted_context = context.get_admin_context(
                read_deleted="only")
        group = db.security_group_get(admin_deleted_context, group['id'],
                                      columns_to_join=['instances'])
        self.assertEqual(0, len(group['instances']))

    def _test_rebuild(self, vm_state):
        # Helper: build an instance, put it in vm_state, rebuild it, and
        # verify the API-side effects (image_ref update, task state, and
        # system metadata reset).
        instance = self._create_fake_instance_obj()
        instance_uuid = instance['uuid']
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        instance = objects.Instance.get_by_uuid(self.context,
                                                instance_uuid)
        self.assertIsNone(instance.task_state)
        # Set some image metadata that should get wiped out and reset
        # as well as some other metadata that should be preserved.
        instance.system_metadata.update({
            'image_kernel_id': 'old-data',
            'image_ramdisk_id': 'old_data',
            'image_something_else': 'old-data',
            'image_should_remove': 'bye-bye',
            'preserved': 'preserve this!'})
        instance.save()

        # Make sure Compute API updates the image_ref before casting to
        # compute manager.
        info = {'image_ref': None, 'clean': False}

        def fake_rpc_rebuild(context, **kwargs):
            # Capture what the API hands to the conductor: the new image_ref
            # and whether 'progress' was left unmodified on the instance.
            info['image_ref'] = kwargs['instance'].image_ref
            info['clean'] = ('progress' not in
                             kwargs['instance'].obj_what_changed())

        self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
                       fake_rpc_rebuild)

        image_ref = instance["image_ref"] + '-new_image_ref'
        password = "new_password"

        instance.vm_state = vm_state
        instance.save()

        self.compute_api.rebuild(self.context, instance, image_ref, password)
        self.assertEqual(info['image_ref'], image_ref)
        self.assertTrue(info['clean'])

        instance.refresh()
        self.assertEqual(instance.task_state, task_states.REBUILDING)
        # Image-derived system metadata is reset from the new image while
        # non-image keys are preserved; ignore instance_type_* legacy keys.
        sys_meta = {k: v for k, v in instance.system_metadata.items()
                    if not k.startswith('instance_type')}
        self.assertEqual(sys_meta,
                {'image_kernel_id': 'fake_kernel_id',
                 'image_min_disk': '1',
                 'image_ramdisk_id': 'fake_ramdisk_id',
                 'image_something_else': 'meow',
                 'preserved': 'preserve this!'})

    def test_rebuild(self):
        self._test_rebuild(vm_state=vm_states.ACTIVE)

    def test_rebuild_in_error_state(self):
        self._test_rebuild(vm_state=vm_states.ERROR)

    def test_rebuild_in_error_not_launched(self):
        # An errored instance that never launched cannot be rebuilt.
        instance = self._create_fake_instance_obj(params={'image_ref': ''})
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        db.instance_update(self.context, instance['uuid'],
                           {"vm_state": vm_states.ERROR,
                            "launched_at": None})

        instance = db.instance_get_by_uuid(self.context, instance['uuid'])

        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.rebuild,
                          self.context,
                          instance,
                          instance['image_ref'],
                          "new password")

    def test_rebuild_no_image(self):
        # Rebuild with an empty image ref is allowed (volume-backed style).
        instance = self._create_fake_instance_obj(params={'image_ref': ''})
        instance_uuid = instance.uuid
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
        self.compute_api.rebuild(self.context, instance, '', 'new_password')

        instance = db.instance_get_by_uuid(self.context,
                                           instance_uuid)
        self.assertEqual(instance['task_state'], task_states.REBUILDING)

    def test_rebuild_with_deleted_image(self):
        # If we're given a deleted image by glance, we should not be able to
        # rebuild from it
        instance = self._create_fake_instance_obj(params={'image_ref': '1'})
        self.fake_image['name'] = 'fake_name'
        self.fake_image['status'] = 'DELETED'
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        expected_message = (
            exception.ImageNotActive.msg_fmt % {'image_id':
            self.fake_image['id']})
        with testtools.ExpectedException(exception.ImageNotActive,
                                         expected_message):
            self.compute_api.rebuild(self.context, instance,
                                     self.fake_image['id'], 'new_password')

    def test_rebuild_with_too_little_ram(self):
        # Rebuild is rejected when the image's min_ram exceeds the flavor.
        instance = self._create_fake_instance_obj(params={'image_ref': '1'})
        instance.flavor.memory_mb = 64
        instance.flavor.root_gb = 1

        self.fake_image['min_ram'] = 128
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        self.assertRaises(exception.FlavorMemoryTooSmall,
            self.compute_api.rebuild, self.context,
            instance, self.fake_image['id'], 'new_password')

        # Reduce image memory requirements and make sure it works
        self.fake_image['min_ram'] = 64

        self.compute_api.rebuild(self.context,
                instance, self.fake_image['id'], 'new_password')

    def test_rebuild_with_too_little_disk(self):
        # Rebuild is rejected when the image's min_disk exceeds the flavor.
        instance = self._create_fake_instance_obj(params={'image_ref': '1'})

        def fake_extract_flavor(_inst, prefix=''):
            if prefix == '':
                f = objects.Flavor(**test_flavor.fake_flavor)
                f.memory_mb = 64
                f.root_gb = 1
                return f
            else:
                raise KeyError()

        self.stubs.Set(flavors, 'extract_flavor',
                       fake_extract_flavor)

        self.fake_image['min_disk'] = 2
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        self.assertRaises(exception.FlavorDiskSmallerThanMinDisk,
            self.compute_api.rebuild, self.context,
            instance, self.fake_image['id'], 'new_password')

        # Reduce image disk requirements and make sure it works
        self.fake_image['min_disk'] = 1

        self.compute_api.rebuild(self.context,
                instance, self.fake_image['id'], 'new_password')

    def test_rebuild_with_just_enough_ram_and_disk(self):
        # Rebuild succeeds when image requirements exactly equal the flavor.
        instance = self._create_fake_instance_obj(params={'image_ref': '1'})

        def fake_extract_flavor(_inst, prefix=''):
            if prefix == '':
                f = objects.Flavor(**test_flavor.fake_flavor)
                f.memory_mb = 64
                f.root_gb = 1
                return f
            else:
                raise KeyError()

        self.stubs.Set(flavors, 'extract_flavor',
                       fake_extract_flavor)

        self.fake_image['min_ram'] = 64
        self.fake_image['min_disk'] = 1
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        self.compute_api.rebuild(self.context,
                instance, self.fake_image['id'], 'new_password')

    def test_rebuild_with_no_ram_and_disk_reqs(self):
        # Rebuild succeeds when the image declares no min_ram/min_disk.
        instance = self._create_fake_instance_obj(params={'image_ref': '1'})

        def fake_extract_flavor(_inst, prefix=''):
            if prefix == '':
                f = objects.Flavor(**test_flavor.fake_flavor)
                f.memory_mb = 64
                f.root_gb = 1
                return f
            else:
                raise KeyError()

        self.stubs.Set(flavors, 'extract_flavor', fake_extract_flavor)
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        self.compute_api.rebuild(self.context,
                instance, self.fake_image['id'], 'new_password')

    def test_rebuild_with_too_large_image(self):
        # Rebuild is rejected when the raw image size exceeds the flavor's
        # root disk.
        instance = self._create_fake_instance_obj(params={'image_ref': '1'})

        def fake_extract_flavor(_inst, prefix=''):
            if prefix == '':
                f = objects.Flavor(**test_flavor.fake_flavor)
                f.memory_mb = 64
                f.root_gb = 1
                return f
            else:
                raise KeyError()

        self.stubs.Set(flavors, 'extract_flavor',
                       fake_extract_flavor)

        self.fake_image['size'] = '1073741825'
        self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

        self.assertRaises(exception.FlavorDiskSmallerThanImage,
            self.compute_api.rebuild, self.context,
            instance, self.fake_image['id'], 'new_password')

        # Reduce image to 1 GB limit and ensure it works
        self.fake_image['size'] = '1073741824'
        self.compute_api.rebuild(self.context,
                instance, self.fake_image['id'], 'new_password')

    def test_hostname_create(self):
        # Ensure instance hostname is set during creation.
        inst_type = flavors.get_flavor_by_name('m1.tiny')
        (instances, _) = self.compute_api.create(self.context,
                                                 inst_type,
                                                 image_href=uuids.image_href_id,
                                                 display_name='test host')

        # Spaces in display_name are turned into dashes.
        self.assertEqual('test-host', instances[0]['hostname'])

    def _fake_rescue_block_devices(self, instance, status="in-use"):
        # Helper: build a single volume-backed root BDM plus the matching
        # fake cinder volume dict for rescue tests.
        fake_bdms = block_device_obj.block_device_make_list(self.context,
                    [fake_block_device.FakeDbBlockDeviceDict(
                     {'device_name': '/dev/vda',
                     'source_type': 'volume',
                     'boot_index': 0,
                     'destination_type': 'volume',
                     'volume_id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66'})])

        volume = {'id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66',
                  'state': 'active', 'instance_uuid': instance['uuid']}

        return fake_bdms, volume

    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    @mock.patch.object(cinder.API, 'get')
    def test_rescue_volume_backed_no_image(self, mock_get_vol,
                                           mock_get_bdms):
        # Instance started without an image
        params = {'image_ref': ''}
        volume_backed_inst_1 = self._create_fake_instance_obj(params=params)
        bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_1)

        mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"}
        mock_get_bdms.return_value = bdms

        with mock.patch.object(self.compute, '_prep_block_device'):
            self.compute.build_and_run_instance(self.context,
                                                volume_backed_inst_1, {}, {},
                                                {}, block_device_mapping=[])

        # Volume-backed instances cannot be rescued.
        self.assertRaises(exception.InstanceNotRescuable,
                          self.compute_api.rescue, self.context,
                          volume_backed_inst_1)

    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    @mock.patch.object(cinder.API, 'get')
    def test_rescue_volume_backed_placeholder_image(self, mock_get_vol,
                                                    mock_get_bdms):
        # Instance started with a placeholder image (for metadata)
        volume_backed_inst_2 = self._create_fake_instance_obj(
                {'image_ref': 'my_placeholder_img',
                 'root_device_name': '/dev/vda'})
        bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_2)

        mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"}
        mock_get_bdms.return_value = bdms

        with mock.patch.object(self.compute, '_prep_block_device'):
            self.compute.build_and_run_instance(self.context,
                                                volume_backed_inst_2, {}, {},
                                                {}, block_device_mapping=[])

        self.assertRaises(exception.InstanceNotRescuable,
                          self.compute_api.rescue, self.context,
                          volume_backed_inst_2)

    def test_get(self):
        # Test get instance.
        exp_instance = self._create_fake_instance_obj()
        instance = self.compute_api.get(self.context, exp_instance.uuid,
                                        want_objects=True)
        self.assertEqual(exp_instance.id, instance.id)

    def test_get_with_admin_context(self):
        # Test get instance.
        c = context.get_admin_context()
        exp_instance = self._create_fake_instance_obj()
        instance = self.compute_api.get(c, exp_instance['uuid'],
                                        want_objects=True)
        self.assertEqual(exp_instance.id, instance.id)

    def test_get_all_by_name_regexp(self):
        # Test searching instances by name (display_name).
        c = context.get_admin_context()
        instance1 = self._create_fake_instance_obj({'display_name': 'woot'})
        instance2 = self._create_fake_instance_obj({
                'display_name': 'woo'})
        instance3 = self._create_fake_instance_obj({
                'display_name': 'not-woot'})

        instances = self.compute_api.get_all(c,
                search_opts={'name': '^woo.*'})
        self.assertEqual(len(instances), 2)
        instance_uuids = [instance['uuid'] for instance in instances]
        self.assertIn(instance1['uuid'], instance_uuids)
        self.assertIn(instance2['uuid'], instance_uuids)

        instances = self.compute_api.get_all(c,
                search_opts={'name': '^woot.*'})
        instance_uuids = [instance['uuid'] for instance in instances]
        self.assertEqual(len(instances), 1)
        self.assertIn(instance1['uuid'], instance_uuids)

        instances = self.compute_api.get_all(c,
                search_opts={'name': '.*oot.*'})
        self.assertEqual(len(instances), 2)
        instance_uuids = [instance['uuid'] for instance in instances]
        self.assertIn(instance1['uuid'], instance_uuids)
        self.assertIn(instance3['uuid'], instance_uuids)

        instances = self.compute_api.get_all(c,
                search_opts={'name': '^n.*'})
        self.assertEqual(len(instances), 1)
        instance_uuids = [instance['uuid'] for instance in instances]
        self.assertIn(instance3['uuid'], instance_uuids)

        instances = self.compute_api.get_all(c,
                search_opts={'name': 'noth.*'})
        self.assertEqual(len(instances), 0)

    def test_get_all_by_multiple_options_at_once(self):
        # Test searching by multiple options at once.
        c = context.get_admin_context()

        def fake_network_info(ip):
            # Minimal serialized network info with a single fixed IP.
            info = [{
                'address': 'aa:bb:cc:dd:ee:ff',
                'id': 1,
                'network': {
                    'bridge': 'br0',
                    'id': 1,
                    'label': 'private',
                    'subnets': [{
                        'cidr': '192.168.0.0/24',
                        'ips': [{
                            'address': ip,
                            'type': 'fixed',
                        }]
                    }]
                }
            }]
            return jsonutils.dumps(info)

        instance1 = self._create_fake_instance_obj({
                'display_name': 'woot',
                'uuid': '00000000-0000-0000-0000-000000000010',
                'info_cache': objects.InstanceInfoCache(
                    network_info=fake_network_info('192.168.0.1'))})
        self._create_fake_instance_obj({  # instance2
                'display_name': 'woo',
                'uuid': '00000000-0000-0000-0000-000000000020',
                'info_cache': objects.InstanceInfoCache(
                    network_info=fake_network_info('192.168.0.2'))})
        instance3 = self._create_fake_instance_obj({
                'display_name': 'not-woot',
                'uuid': '00000000-0000-0000-0000-000000000030',
                'info_cache': objects.InstanceInfoCache(
                    network_info=fake_network_info('192.168.0.3'))})

        # ip ends up matching 2nd octet here.. so all 3 match ip
        # but 'name' only matches one
        instances = self.compute_api.get_all(c,
                search_opts={'ip': '.*\.1', 'name': 'not.*'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance3['uuid'])

        # ip ends up matching any ip with a '1' in the last octet..
        # so instance 1 and 3..
        # but name should only match #1
        # but 'name' only matches one
        instances = self.compute_api.get_all(c,
                search_opts={'ip': '.*\.1$', 'name': '^woo.*'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance1['uuid'])

        # same as above but no match on name (name matches instance1
        # but the ip query doesn't
        instances = self.compute_api.get_all(c,
                search_opts={'ip': '.*\.2$', 'name': '^woot.*'})
        self.assertEqual(len(instances), 0)

        # ip matches all 3... ipv6 matches #2+#3...name matches #3
        instances = self.compute_api.get_all(c,
                search_opts={'ip': '.*\.1',
                             'name': 'not.*',
                             'ip6': '^.*12.*34.*'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance3['uuid'])

    def test_get_all_by_image(self):
        # Test searching instances by image.
        c = context.get_admin_context()
        instance1 = self._create_fake_instance_obj({'image_ref': '1234'})
        instance2 = self._create_fake_instance_obj({'image_ref': '4567'})
        instance3 = self._create_fake_instance_obj({'image_ref': '4567'})

        instances = self.compute_api.get_all(c, search_opts={'image': '123'})
        self.assertEqual(len(instances), 0)

        instances = self.compute_api.get_all(c, search_opts={'image': '1234'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance1['uuid'])

        instances = self.compute_api.get_all(c, search_opts={'image': '4567'})
        self.assertEqual(len(instances), 2)
        instance_uuids = [instance['uuid'] for instance in instances]
        self.assertIn(instance2['uuid'], instance_uuids)
        self.assertIn(instance3['uuid'], instance_uuids)

        # Test passing a list as search arg
        instances = self.compute_api.get_all(c,
                search_opts={'image': ['1234', '4567']})
        self.assertEqual(len(instances), 3)

    def test_get_all_by_flavor(self):
        # Test searching instances by flavor.
        c = context.get_admin_context()
        flavor_dict = {f.flavorid: f for f in objects.FlavorList.get_all(c)}

        instance1 = self._create_fake_instance_obj(
            {'instance_type_id': flavor_dict['1'].id})
        instance2 = self._create_fake_instance_obj(
            {'instance_type_id': flavor_dict['2'].id})
        instance3 = self._create_fake_instance_obj(
            {'instance_type_id': flavor_dict['2'].id})

        instances = self.compute_api.get_all(c,
                search_opts={'flavor': 5})
        self.assertEqual(len(instances), 0)

        # ensure unknown filter maps to an exception
        self.assertRaises(exception.FlavorNotFound,
                          self.compute_api.get_all, c,
                          search_opts={'flavor': 99})

        instances = self.compute_api.get_all(c, search_opts={'flavor': 1})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['id'], instance1['id'])

        instances = self.compute_api.get_all(c, search_opts={'flavor': 2})
        self.assertEqual(len(instances), 2)
        instance_uuids = [instance['uuid'] for instance in instances]
        self.assertIn(instance2['uuid'], instance_uuids)
        self.assertIn(instance3['uuid'], instance_uuids)

    def test_get_all_by_state(self):
        # Test searching instances by state.
        c = context.get_admin_context()
        instance1 = self._create_fake_instance_obj({
            'power_state': power_state.SHUTDOWN,
        })
        instance2 = self._create_fake_instance_obj({
            'power_state': power_state.RUNNING,
        })
        instance3 = self._create_fake_instance_obj({
            'power_state': power_state.RUNNING,
        })

        instances = self.compute_api.get_all(c,
                search_opts={'power_state': power_state.SUSPENDED})
        self.assertEqual(len(instances), 0)

        instances = self.compute_api.get_all(c,
                search_opts={'power_state': power_state.SHUTDOWN})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance1['uuid'])

        instances = self.compute_api.get_all(c,
                search_opts={'power_state': power_state.RUNNING})
        self.assertEqual(len(instances), 2)
        instance_uuids = [instance['uuid'] for instance in instances]
        self.assertIn(instance2['uuid'], instance_uuids)
        self.assertIn(instance3['uuid'], instance_uuids)

        # Test passing a list as search arg
        instances = self.compute_api.get_all(c,
                search_opts={'power_state': [power_state.SHUTDOWN,
                        power_state.RUNNING]})
        self.assertEqual(len(instances), 3)

    def test_get_all_by_metadata(self):
        # Test searching instances by metadata.
        c = context.get_admin_context()

        self._create_fake_instance_obj()  # instance0
        self._create_fake_instance_obj({  # instance1
                'metadata': {'key1': 'value1'}})
        instance2 = self._create_fake_instance_obj({
                'metadata': {'key2': 'value2'}})
        instance3 = self._create_fake_instance_obj({
                'metadata': {'key3': 'value3'}})
        instance4 = self._create_fake_instance_obj({
                'metadata': {'key3': 'value3',
                             'key4': 'value4'}})

        # get all instances
        instances = self.compute_api.get_all(c,
                search_opts={'metadata': u"{}"})
        self.assertEqual(len(instances), 5)

        # wrong key/value combination
        instances = self.compute_api.get_all(c,
                search_opts={'metadata': u'{"key1": "value3"}'})
        self.assertEqual(len(instances), 0)

        # non-existing keys
        instances = self.compute_api.get_all(c,
                search_opts={'metadata': u'{"key5": "value1"}'})
        self.assertEqual(len(instances), 0)

        # find existing instance
        instances = self.compute_api.get_all(c,
                search_opts={'metadata': u'{"key2": "value2"}'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance2['uuid'])

        instances = self.compute_api.get_all(c,
                search_opts={'metadata': u'{"key3": "value3"}'})
        self.assertEqual(len(instances), 2)
        instance_uuids = [instance['uuid'] for instance in instances]
        self.assertIn(instance3['uuid'], instance_uuids)
        self.assertIn(instance4['uuid'], instance_uuids)

        # multiple criteria as a dict
        instances = self.compute_api.get_all(c,
            search_opts={'metadata': u'{"key3": "value3","key4": "value4"}'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance4['uuid'])

        # multiple criteria as a list
        instances = self.compute_api.get_all(c,
            search_opts=
                {'metadata': u'[{"key4": "value4"},{"key3": "value3"}]'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance4['uuid'])

    def test_get_all_by_system_metadata(self):
        # Test searching instances by system metadata.
        c = context.get_admin_context()

        instance1 = self._create_fake_instance_obj({
                'system_metadata': {'key1': 'value1'}})

        # find existing instance
        instances = self.compute_api.get_all(c,
                search_opts={'system_metadata': u'{"key1": "value1"}'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance1['uuid'])

    def test_all_instance_metadata(self):
        # get_all_instance_metadata is scoped to the caller's project unless
        # the context is admin.
        self._create_fake_instance_obj({'metadata': {'key1': 'value1'},
                                        'user_id': 'user1',
                                        'project_id': 'project1'})

        self._create_fake_instance_obj({'metadata': {'key2': 'value2'},
                                        'user_id': 'user2',
                                        'project_id': 'project2'})

        _context = self.context
        _context.user_id = 'user1'
        _context.project_id = 'project1'
        metadata = self.compute_api.get_all_instance_metadata(_context,
                                                              search_filts=[])
        self.assertEqual(1, len(metadata))
        self.assertEqual(metadata[0]['key'], 'key1')

        _context.user_id = 'user2'
        _context.project_id = 'project2'
        metadata = self.compute_api.get_all_instance_metadata(_context,
                                                              search_filts=[])
        self.assertEqual(1, len(metadata))
        self.assertEqual(metadata[0]['key'], 'key2')

        # Admin context sees both projects' metadata.
        _context = context.get_admin_context()
        metadata = self.compute_api.get_all_instance_metadata(_context,
                                                              search_filts=[])
        self.assertEqual(2, len(metadata))

    def test_instance_metadata(self):
        # Exercise get/update/delete of instance metadata, verifying both
        # the RPC diff sent to the compute manager and the notifications.
        meta_changes = [None]
        self.flags(notify_on_state_change='vm_state')

        def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
                                          instance_uuid=None):
            # Record the metadata diff that would be cast over RPC.
            meta_changes[0] = diff
        self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
                       fake_change_instance_metadata)

        _context = context.get_admin_context()
        instance = self._create_fake_instance_obj({'metadata':
                                                       {'key1': 'value1'}})

        metadata = self.compute_api.get_instance_metadata(_context, instance)
        self.assertEqual(metadata, {'key1': 'value1'})

        self.compute_api.update_instance_metadata(_context, instance,
                                                  {'key2': 'value2'})
        metadata = self.compute_api.get_instance_metadata(_context, instance)
        self.assertEqual(metadata, {'key1': 'value1', 'key2': 'value2'})
        self.assertEqual(meta_changes, [{'key2': ['+', 'value2']}])

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
        msg = fake_notifier.NOTIFICATIONS[0]
        payload = msg.payload
        self.assertIn('metadata', payload)
        self.assertEqual(payload['metadata'], metadata)

        # delete=True replaces the whole metadata dict instead of merging.
        new_metadata = {'key2': 'bah', 'key3': 'value3'}
        self.compute_api.update_instance_metadata(_context, instance,
                                                  new_metadata, delete=True)
        metadata = self.compute_api.get_instance_metadata(_context, instance)
        self.assertEqual(metadata, new_metadata)
        self.assertEqual(meta_changes, [{
                    'key1': ['-'],
                    'key2': ['+', 'bah'],
                    'key3': ['+', 'value3'],
                    }])

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[1]
        payload = msg.payload
        self.assertIn('metadata', payload)
        self.assertEqual(payload['metadata'], metadata)

        self.compute_api.delete_instance_metadata(_context, instance, 'key2')
        metadata = self.compute_api.get_instance_metadata(_context, instance)
        self.assertEqual(metadata, {'key3': 'value3'})
        self.assertEqual(meta_changes, [{'key2': ['-']}])

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
        msg = fake_notifier.NOTIFICATIONS[2]
        payload = msg.payload
        self.assertIn('metadata', payload)
        self.assertEqual(payload['metadata'], {'key3': 'value3'})

    def test_disallow_metadata_changes_during_building(self):
        # Metadata cannot be changed while the instance is still BUILDING.
        def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
                                          instance_uuid=None):
            pass
        self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
                       fake_change_instance_metadata)

        instance = self._create_fake_instance_obj(
            {'vm_state': vm_states.BUILDING})

        self.assertRaises(exception.InstanceInvalidState,
                self.compute_api.delete_instance_metadata, self.context,
                instance, "key")

        self.assertRaises(exception.InstanceInvalidState,
                self.compute_api.update_instance_metadata, self.context,
                instance, "key")

    @staticmethod
    def _parse_db_block_device_mapping(bdm_ref):
        # Reduce a DB BDM row to only the interesting, truthy attributes.
        attr_list = ('delete_on_termination', 'device_name', 'no_device',
                     'virtual_name', 'volume_id', 'volume_size',
                     'snapshot_id')
        bdm = {}
        for attr in attr_list:
            val = bdm_ref.get(attr, None)
            if val:
                bdm[attr] = val

        return bdm

    def test_create_block_device_mapping(self):
        # Verify that image-defined mappings and API block_device_mapping
        # entries are merged/overridden correctly when BDMs are created.
        def _compare_bdm_object(obj1, obj2, extra_keys=()):
            for key in (('device_name', 'source_type', 'destination_type')
                        + extra_keys):
                self.assertEqual(getattr(obj1, key), getattr(obj2, key))

        swap_size = ephemeral_size = 1
        instance_type = {'swap': swap_size, 'ephemeral_gb': ephemeral_size}
        instance = self._create_fake_instance_obj()
        mappings = [
                {'virtual': 'ami', 'device': 'sda1'},
                {'virtual': 'root', 'device': '/dev/sda1'},

                {'virtual': 'swap', 'device': 'sdb4'},
                {'virtual': 'swap', 'device': 'sdb3'},
                {'virtual': 'swap', 'device': 'sdb2'},
                {'virtual': 'swap', 'device': 'sdb1'},

                {'virtual': 'ephemeral0', 'device': 'sdc1'},
                {'virtual': 'ephemeral1', 'device': 'sdc2'},
                {'virtual': 'ephemeral2', 'device': 'sdc3'}]
        block_device_mapping = [
                # root
                {'device_name': '/dev/sda1',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
                 'delete_on_termination': False},

                # overwrite swap
                {'device_name': '/dev/sdb2',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
                 'delete_on_termination': False},
                {'device_name': '/dev/sdb3',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
                {'device_name': '/dev/sdb4', 'no_device': True},

                # overwrite ephemeral
                {'device_name': '/dev/sdc1',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
                 'delete_on_termination': False},
                {'device_name': '/dev/sdc2',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
                 'delete_on_termination': False},
                {'device_name': '/dev/sdc3',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
                {'device_name': '/dev/sdc4', 'no_device': True},

                # volume
                {'device_name': '/dev/sdd1',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
                 'delete_on_termination': False},
                {'device_name': '/dev/sdd2',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
                {'device_name': '/dev/sdd3',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
                {'device_name': '/dev/sdd4', 'no_device': True}]
        # First pass: only the image mappings (swap/ephemeral from flavor).
        image_mapping = self.compute_api._prepare_image_mapping(
            instance_type, mappings)
        image_mapping = block_device_obj.block_device_make_list_from_dicts(
                self.context, image_mapping)
        self.compute_api._create_block_device_mapping(
            instance_type, instance['uuid'], image_mapping)

        bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
            self.context, instance['uuid'])

        expected_result = [
            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': 'swap', 'device_name': '/dev/sdb1',
             'volume_size': swap_size, 'delete_on_termination': True},
            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': CONF.default_ephemeral_format,
             'device_name': '/dev/sdc3', 'delete_on_termination': True},
            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': CONF.default_ephemeral_format,
             'device_name': '/dev/sdc1', 'delete_on_termination': True},
            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': CONF.default_ephemeral_format,
             'device_name': '/dev/sdc2', 'delete_on_termination': True},
            ]
        expected_result = block_device_obj.block_device_make_list_from_dicts(
                self.context,
                map(fake_block_device.AnonFakeDbBlockDeviceDict,
                    expected_result))
        # Sort both lists so the element-wise comparison lines up.
        bdms.sort(key=operator.attrgetter('device_name'))
        expected_result.sort(key=operator.attrgetter('device_name'))
        self.assertEqual(len(bdms), len(expected_result))
        for expected, got in zip(expected_result, bdms):
            _compare_bdm_object(
                expected, got,
                extra_keys=('guest_format', 'delete_on_termination'))

        # Second pass: API-provided block_device_mapping overrides the
        # image-defined swap/ephemeral devices.
        block_device_mapping = (
            block_device_obj.block_device_make_list_from_dicts(
                self.context,
                map(fake_block_device.AnonFakeDbBlockDeviceDict,
                    block_device_mapping)))
        self.compute_api._create_block_device_mapping(
            flavors.get_default_flavor(), instance['uuid'],
            block_device_mapping)

        bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
                self.context, instance['uuid'])
        expected_result = [
            {'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
               'device_name': '/dev/sda1', 'source_type': 'snapshot',
               'destination_type': 'volume'},

            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': 'swap', 'device_name': '/dev/sdb1',
             'volume_size': swap_size, 'delete_on_termination': True},
            {'device_name': '/dev/sdb2',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
             'delete_on_termination': False},
            {'device_name': '/dev/sdb3',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
            {'device_name': '/dev/sdb4', 'no_device': True},

            {'device_name': '/dev/sdc1',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
             'delete_on_termination': False},
            {'device_name': '/dev/sdc2',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
             'delete_on_termination': False},
            {'device_name': '/dev/sdc3',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
            {'no_device': True, 'device_name': '/dev/sdc4'},

            {'device_name': '/dev/sdd1',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
             'delete_on_termination': False},
            {'device_name': '/dev/sdd2',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
            {'device_name': '/dev/sdd3', 'source_type': 'snapshot',
               'destination_type': 'volume',
               'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
            {'no_device': True, 'device_name': '/dev/sdd4'}]
        expected_result = block_device_obj.block_device_make_list_from_dicts(
                self.context,
                map(fake_block_device.AnonFakeDbBlockDeviceDict,
                    expected_result))
        bdms.sort(key=operator.itemgetter('device_name'))
        expected_result.sort(key=operator.itemgetter('device_name'))
        self.assertEqual(len(bdms), len(expected_result))
        for expected, got in zip(expected_result, bdms):
            _compare_bdm_object(
                expected, got,
                extra_keys=('snapshot_id', 'delete_on_termination'))

    def _test_check_and_transform_bdm(self, bdms, expected_bdms,
                                      image_bdms=None, base_options=None,
                                      legacy_bdms=False,
                                      legacy_image_bdms=False):
        # Helper: run _check_and_transform_bdm with optional image-defined
        # BDMs (v2 unless legacy_image_bdms) and compare field-for-field.
        image_bdms = image_bdms or []
        image_meta = {}
        if image_bdms:
            image_meta = {'properties': {'block_device_mapping': image_bdms}}
            if not legacy_image_bdms:
                image_meta['properties']['bdm_v2'] = True
        base_options = base_options or {'root_device_name': 'vda',
                                        'image_ref': FAKE_IMAGE_REF}
        transformed_bdm = self.compute_api._check_and_transform_bdm(
            self.context, base_options, {}, image_meta, 1, 1, bdms,
            legacy_bdms)
        for expected, got in zip(expected_bdms, transformed_bdm):
            self.assertEqual(dict(expected.items()), dict(got.items()))

    def test_check_and_transform_legacy_bdm_no_image_bdms(self):
        # A legacy BDM is converted to the new format and becomes the boot
        # device (boot_index 0).
        legacy_bdms = [
            {'device_name': '/dev/vda',
             'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
             'delete_on_termination': False}]
        expected_bdms = [block_device.BlockDeviceDict.from_legacy(
            legacy_bdms[0])]
        expected_bdms[0]['boot_index'] = 0
        expected_bdms = block_device_obj.block_device_make_list_from_dicts(
                self.context, expected_bdms)
        self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
                                           legacy_bdms=True)

    def test_check_and_transform_legacy_bdm_legacy_image_bdms(self):
        image_bdms = [
{'device_name': '/dev/vdb', 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444', 'delete_on_termination': False}] expected_bdms = [ block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]), block_device.BlockDeviceDict.from_legacy(image_bdms[0])] expected_bdms[0]['boot_index'] = -1 expected_bdms[1]['boot_index'] = 0 expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, expected_bdms) self._test_check_and_transform_bdm(legacy_bdms, expected_bdms, image_bdms=image_bdms, legacy_bdms=True, legacy_image_bdms=True) def test_check_and_transform_legacy_bdm_image_bdms(self): legacy_bdms = [ {'device_name': '/dev/vdb', 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444', 'delete_on_termination': False}] image_bdms = [block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444', 'boot_index': 0})] expected_bdms = [ block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]), image_bdms[0]] expected_bdms[0]['boot_index'] = -1 expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, expected_bdms) self._test_check_and_transform_bdm(legacy_bdms, expected_bdms, image_bdms=image_bdms, legacy_bdms=True) def test_check_and_transform_bdm_no_image_bdms(self): bdms = [block_device.BlockDeviceDict({'source_type': 'image', 'destination_type': 'local', 'image_id': FAKE_IMAGE_REF, 'boot_index': 0})] expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, bdms) self._test_check_and_transform_bdm(bdms, expected_bdms) def test_check_and_transform_bdm_image_bdms(self): bdms = [block_device.BlockDeviceDict({'source_type': 'image', 'destination_type': 'local', 'image_id': FAKE_IMAGE_REF, 'boot_index': 0})] image_bdms = [block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444'})] expected_bdms = bdms + image_bdms expected_bdms = 
block_device_obj.block_device_make_list_from_dicts( self.context, expected_bdms) self._test_check_and_transform_bdm(bdms, expected_bdms, image_bdms=image_bdms) def test_check_and_transform_bdm_image_bdms_w_overrides(self): bdms = [block_device.BlockDeviceDict({'source_type': 'image', 'destination_type': 'local', 'image_id': FAKE_IMAGE_REF, 'boot_index': 0}), block_device.BlockDeviceDict({'device_name': 'vdb', 'no_device': True})] image_bdms = [block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444', 'device_name': '/dev/vdb'})] expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, bdms) self._test_check_and_transform_bdm(bdms, expected_bdms, image_bdms=image_bdms) def test_check_and_transform_bdm_image_bdms_w_overrides_complex(self): bdms = [block_device.BlockDeviceDict({'source_type': 'image', 'destination_type': 'local', 'image_id': FAKE_IMAGE_REF, 'boot_index': 0}), block_device.BlockDeviceDict({'device_name': 'vdb', 'no_device': True}), block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '11111111-aaaa-bbbb-cccc-222222222222', 'device_name': 'vdc'})] image_bdms = [ block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444', 'device_name': '/dev/vdb'}), block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '55555555-aaaa-bbbb-cccc-666666666666', 'device_name': '/dev/vdc'}), block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '77777777-aaaa-bbbb-cccc-8888888888888', 'device_name': '/dev/vdd'})] expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, bdms + [image_bdms[2]]) self._test_check_and_transform_bdm(bdms, expected_bdms, image_bdms=image_bdms) def test_check_and_transform_bdm_legacy_image_bdms(self): bdms = 
[block_device.BlockDeviceDict({'source_type': 'image', 'destination_type': 'local', 'image_id': FAKE_IMAGE_REF, 'boot_index': 0})] image_bdms = [{'device_name': '/dev/vda', 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333', 'delete_on_termination': False}] expected_bdms = [block_device.BlockDeviceDict.from_legacy( image_bdms[0])] expected_bdms[0]['boot_index'] = 0 expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, expected_bdms) self._test_check_and_transform_bdm(bdms, expected_bdms, image_bdms=image_bdms, legacy_image_bdms=True) def test_check_and_transform_image(self): base_options = {'root_device_name': 'vdb', 'image_ref': FAKE_IMAGE_REF} fake_legacy_bdms = [ {'device_name': '/dev/vda', 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333', 'delete_on_termination': False}] image_meta = {'properties': {'block_device_mapping': [ {'device_name': '/dev/vda', 'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333', 'boot_index': 0}]}} # We get an image BDM transformed_bdm = self.compute_api._check_and_transform_bdm( self.context, base_options, {}, {}, 1, 1, fake_legacy_bdms, True) self.assertEqual(len(transformed_bdm), 2) # No image BDM created if image already defines a root BDM base_options['root_device_name'] = 'vda' base_options['image_ref'] = None transformed_bdm = self.compute_api._check_and_transform_bdm( self.context, base_options, {}, image_meta, 1, 1, [], True) self.assertEqual(len(transformed_bdm), 1) # No image BDM created transformed_bdm = self.compute_api._check_and_transform_bdm( self.context, base_options, {}, {}, 1, 1, fake_legacy_bdms, True) self.assertEqual(len(transformed_bdm), 1) # Volumes with multiple instances fails self.assertRaises(exception.InvalidRequest, self.compute_api._check_and_transform_bdm, self.context, base_options, {}, {}, 1, 2, fake_legacy_bdms, True) # Volume backed so no image_ref in base_options # v2 bdms contains a root image to volume mapping # image_meta contains a snapshot as the image # is 
created by nova image-create from a volume backed server # see bug 1381598 fake_v2_bdms = [{'boot_index': 0, 'connection_info': None, 'delete_on_termination': None, 'destination_type': u'volume', 'image_id': FAKE_IMAGE_REF, 'source_type': u'image', 'volume_id': None, 'volume_size': 1}] base_options['image_ref'] = None transformed_bdm = self.compute_api._check_and_transform_bdm( self.context, base_options, {}, image_meta, 1, 1, fake_v2_bdms, False) self.assertEqual(len(transformed_bdm), 1) # Image BDM overrides mappings base_options['image_ref'] = FAKE_IMAGE_REF image_meta = { 'properties': { 'mappings': [ {'virtual': 'ephemeral0', 'device': 'vdb'}], 'bdm_v2': True, 'block_device_mapping': [ {'device_name': '/dev/vdb', 'source_type': 'blank', 'destination_type': 'volume', 'volume_size': 1}]}} transformed_bdm = self.compute_api._check_and_transform_bdm( self.context, base_options, {}, image_meta, 1, 1, [], False) self.assertEqual(1, len(transformed_bdm)) self.assertEqual('volume', transformed_bdm[0]['destination_type']) self.assertEqual('/dev/vdb', transformed_bdm[0]['device_name']) def test_volume_size(self): ephemeral_size = 2 swap_size = 3 volume_size = 5 swap_bdm = {'source_type': 'blank', 'guest_format': 'swap', 'destination_type': 'local'} ephemeral_bdm = {'source_type': 'blank', 'guest_format': None, 'destination_type': 'local'} volume_bdm = {'source_type': 'volume', 'volume_size': volume_size, 'destination_type': 'volume'} blank_bdm = {'source_type': 'blank', 'destination_type': 'volume'} inst_type = {'ephemeral_gb': ephemeral_size, 'swap': swap_size} self.assertEqual( self.compute_api._volume_size(inst_type, ephemeral_bdm), ephemeral_size) ephemeral_bdm['volume_size'] = 42 self.assertEqual( self.compute_api._volume_size(inst_type, ephemeral_bdm), 42) self.assertEqual( self.compute_api._volume_size(inst_type, swap_bdm), swap_size) swap_bdm['volume_size'] = 42 self.assertEqual( self.compute_api._volume_size(inst_type, swap_bdm), 42) self.assertEqual( 
self.compute_api._volume_size(inst_type, volume_bdm), volume_size) self.assertIsNone( self.compute_api._volume_size(inst_type, blank_bdm)) def test_is_volume_backed_instance_no_bdm_no_image(self): ctxt = self.context instance = self._create_fake_instance_obj({'image_ref': ''}) self.assertTrue( self.compute_api.is_volume_backed_instance(ctxt, instance, None)) def test_is_volume_backed_instance_empty_bdm_with_image(self): ctxt = self.context instance = self._create_fake_instance_obj({ 'root_device_name': 'vda', 'image_ref': FAKE_IMAGE_REF }) self.assertFalse( self.compute_api.is_volume_backed_instance( ctxt, instance, block_device_obj.block_device_make_list(ctxt, []))) def test_is_volume_backed_instance_bdm_volume_no_image(self): ctxt = self.context instance = self._create_fake_instance_obj({ 'root_device_name': 'vda', 'image_ref': '' }) bdms = block_device_obj.block_device_make_list(ctxt, [fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'device_name': '/dev/vda', 'volume_id': uuids.volume_id, 'instance_uuid': 'f8000000-0000-0000-0000-000000000000', 'boot_index': 0, 'destination_type': 'volume'})]) self.assertTrue( self.compute_api.is_volume_backed_instance(ctxt, instance, bdms)) def test_is_volume_backed_instance_bdm_local_no_image(self): # if the root device is local the instance is not volume backed, even # if no image_ref is set. 
ctxt = self.context instance = self._create_fake_instance_obj({ 'root_device_name': 'vda', 'image_ref': '' }) bdms = block_device_obj.block_device_make_list(ctxt, [fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'device_name': '/dev/vda', 'volume_id': uuids.volume_id, 'destination_type': 'local', 'instance_uuid': 'f8000000-0000-0000-0000-000000000000', 'boot_index': 0, 'snapshot_id': None}), fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'device_name': '/dev/vdb', 'instance_uuid': 'f8000000-0000-0000-0000-000000000000', 'boot_index': 1, 'destination_type': 'volume', 'volume_id': 'c2ec2156-d75e-11e2-985b-5254009297d6', 'snapshot_id': None})]) self.assertFalse( self.compute_api.is_volume_backed_instance(ctxt, instance, bdms)) def test_is_volume_backed_instance_bdm_volume_with_image(self): ctxt = self.context instance = self._create_fake_instance_obj({ 'root_device_name': 'vda', 'image_ref': FAKE_IMAGE_REF }) bdms = block_device_obj.block_device_make_list(ctxt, [fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'device_name': '/dev/vda', 'volume_id': uuids.volume_id, 'boot_index': 0, 'destination_type': 'volume'})]) self.assertTrue( self.compute_api.is_volume_backed_instance(ctxt, instance, bdms)) def test_is_volume_backed_instance_bdm_snapshot(self): ctxt = self.context instance = self._create_fake_instance_obj({'root_device_name': 'vda'}) bdms = block_device_obj.block_device_make_list(ctxt, [fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'device_name': '/dev/vda', 'snapshot_id': 'de8836ac-d75e-11e2-8271-5254009297d6', 'instance_uuid': 'f8000000-0000-0000-0000-000000000000', 'destination_type': 'volume', 'boot_index': 0, 'volume_id': None})]) self.assertTrue( self.compute_api.is_volume_backed_instance(ctxt, instance, bdms)) @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') def test_is_volume_backed_instance_empty_bdm_by_uuid(self, mock_bdms): ctxt = self.context 
instance = self._create_fake_instance_obj() mock_bdms.return_value = \ block_device_obj.block_device_make_list(ctxt, []) self.assertFalse( self.compute_api.is_volume_backed_instance(ctxt, instance, None)) mock_bdms.assert_called_with(ctxt, instance.uuid) def test_reservation_id_one_instance(self): """Verify building an instance has a reservation_id that matches return value from create. """ (refs, resv_id) = self.compute_api.create(self.context, flavors.get_default_flavor(), image_href=uuids.image_href_id) self.assertEqual(len(refs), 1) self.assertEqual(refs[0]['reservation_id'], resv_id) def test_reservation_ids_two_instances(self): """Verify building 2 instances at once results in a reservation_id being returned equal to reservation id set in both instances. """ (refs, resv_id) = self.compute_api.create(self.context, flavors.get_default_flavor(), image_href=uuids.image_href_id, min_count=2, max_count=2) self.assertEqual(len(refs), 2) self.assertIsNotNone(resv_id) for instance in refs: self.assertEqual(instance['reservation_id'], resv_id) def test_multi_instance_display_name_template(self): self.flags(multi_instance_display_name_template='%(name)s') (refs, resv_id) = self.compute_api.create(self.context, flavors.get_default_flavor(), image_href=uuids.image_href_id, min_count=2, max_count=2, display_name='x') self.assertEqual(refs[0]['display_name'], 'x') self.assertEqual(refs[0]['hostname'], 'x') self.assertEqual(refs[1]['display_name'], 'x') self.assertEqual(refs[1]['hostname'], 'x') self.flags(multi_instance_display_name_template='%(name)s-%(count)d') self._multi_instance_display_name_default() self.flags(multi_instance_display_name_template='%(name)s-%(uuid)s') (refs, resv_id) = self.compute_api.create(self.context, flavors.get_default_flavor(), image_href=uuids.image_href_id, min_count=2, max_count=2, display_name='x') self.assertEqual(refs[0]['display_name'], 'x-%s' % refs[0]['uuid']) self.assertEqual(refs[0]['hostname'], 'x-%s' % refs[0]['uuid']) 
self.assertEqual(refs[1]['display_name'], 'x-%s' % refs[1]['uuid']) self.assertEqual(refs[1]['hostname'], 'x-%s' % refs[1]['uuid']) def test_multi_instance_display_name_default(self): self._multi_instance_display_name_default() def _multi_instance_display_name_default(self): (refs, resv_id) = self.compute_api.create(self.context, flavors.get_default_flavor(), image_href=uuids.image_href_id, min_count=2, max_count=2, display_name='x') self.assertEqual(refs[0]['display_name'], 'x-1') self.assertEqual(refs[0]['hostname'], 'x-1') self.assertEqual(refs[1]['display_name'], 'x-2') self.assertEqual(refs[1]['hostname'], 'x-2') def test_instance_architecture(self): # Test the instance architecture. i_ref = self._create_fake_instance_obj() self.assertEqual(i_ref['architecture'], arch.X86_64) def test_instance_unknown_architecture(self): # Test if the architecture is unknown. instance = self._create_fake_instance_obj( params={'architecture': ''}) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertNotEqual(instance['architecture'], 'Unknown') def test_instance_name_template(self): # Test the instance_name template. self.flags(instance_name_template='instance-%d') i_ref = self._create_fake_instance_obj() self.assertEqual(i_ref['name'], 'instance-%d' % i_ref['id']) self.flags(instance_name_template='instance-%(uuid)s') i_ref = self._create_fake_instance_obj() self.assertEqual(i_ref['name'], 'instance-%s' % i_ref['uuid']) self.flags(instance_name_template='%(id)d-%(uuid)s') i_ref = self._create_fake_instance_obj() self.assertEqual(i_ref['name'], '%d-%s' % (i_ref['id'], i_ref['uuid'])) # not allowed.. 
default is uuid self.flags(instance_name_template='%(name)s') i_ref = self._create_fake_instance_obj() self.assertEqual(i_ref['name'], i_ref['uuid']) def test_add_remove_fixed_ip(self): instance = self._create_fake_instance_obj(params={'host': CONF.host}) self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance', lambda *a, **kw: None) self.compute_api.add_fixed_ip(self.context, instance, '1') self.compute_api.remove_fixed_ip(self.context, instance, '192.168.1.1') self.compute_api.delete(self.context, instance) def test_attach_volume_invalid(self): instance = fake_instance.fake_instance_obj(None, **{ 'locked': False, 'vm_state': vm_states.ACTIVE, 'task_state': None, 'launched_at': timeutils.utcnow()}) self.assertRaises(exception.InvalidDevicePath, self.compute_api.attach_volume, self.context, instance, None, '/invalid') def test_check_dev_name_assign_dev_name(self): instance = self._create_fake_instance_obj() bdms = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'instance_uuid': instance.uuid, 'volume_id': 'vol-id', 'source_type': 'volume', 'destination_type': 'volume', 'device_name': None, 'boot_index': None, 'disk_bus': None, 'device_type': None }))] self.compute._check_dev_name(bdms, instance) self.assertIsNotNone(bdms[0].device_name) @mock.patch.object(compute_manager.ComputeManager, '_get_device_name_for_instance') def test_check_dev_name_skip_bdms_with_dev_name(self, mock_get_dev_name): instance = self._create_fake_instance_obj() bdms = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'instance_uuid': instance.uuid, 'volume_id': 'vol-id', 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vda', 'boot_index': None, 'disk_bus': None, 'device_type': None }))] self.compute._check_dev_name(bdms, instance) self.assertFalse(mock_get_dev_name.called) def test_no_attach_volume_in_rescue_state(self): def fake(*args, **kwargs): pass def fake_volume_get(self, context, volume_id): 
return {'id': volume_id} self.stubs.Set(cinder.API, 'get', fake_volume_get) self.stubs.Set(cinder.API, 'check_attach', fake) self.stubs.Set(cinder.API, 'reserve_volume', fake) instance = fake_instance.fake_instance_obj(None, **{ 'uuid': 'f3000000-0000-0000-0000-000000000000', 'locked': False, 'vm_state': vm_states.RESCUED}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.attach_volume, self.context, instance, None, '/dev/vdb') def test_no_attach_volume_in_suspended_state(self): instance = fake_instance.fake_instance_obj(None, **{ 'uuid': 'f3000000-0000-0000-0000-000000000000', 'locked': False, 'vm_state': vm_states.SUSPENDED}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.attach_volume, self.context, instance, {'id': 'fake-volume-id'}, '/dev/vdb') def test_no_detach_volume_in_rescue_state(self): # Ensure volume can be detached from instance params = {'vm_state': vm_states.RESCUED} instance = self._create_fake_instance_obj(params=params) volume = {'id': 1, 'attach_status': 'attached', 'instance_uuid': instance['uuid']} self.assertRaises(exception.InstanceInvalidState, self.compute_api.detach_volume, self.context, instance, volume) @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(cinder.API, 'get') def test_no_rescue_in_volume_state_attaching(self, mock_get_vol, mock_get_bdms): # Make sure a VM cannot be rescued while volume is being attached instance = self._create_fake_instance_obj() bdms, volume = self._fake_rescue_block_devices(instance) mock_get_vol.return_value = {'id': volume['id'], 'status': "attaching"} mock_get_bdms.return_value = bdms self.assertRaises(exception.InvalidVolume, self.compute_api.rescue, self.context, instance) def test_vnc_console(self): # Make sure we can a vnc console for an instance. 
fake_instance = self._fake_instance( {'uuid': 'f3000000-0000-0000-0000-000000000000', 'host': 'fake_compute_host'}) fake_console_type = "novnc" fake_connect_info = {'token': 'fake_token', 'console_type': fake_console_type, 'host': 'fake_console_host', 'port': 'fake_console_port', 'internal_access_path': 'fake_access_path', 'instance_uuid': fake_instance.uuid, 'access_url': 'fake_console_url'} rpcapi = compute_rpcapi.ComputeAPI self.mox.StubOutWithMock(rpcapi, 'get_vnc_console') rpcapi.get_vnc_console( self.context, instance=fake_instance, console_type=fake_console_type).AndReturn(fake_connect_info) self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi, 'authorize_console') self.compute_api.consoleauth_rpcapi.authorize_console( self.context, 'fake_token', fake_console_type, 'fake_console_host', 'fake_console_port', 'fake_access_path', 'f3000000-0000-0000-0000-000000000000', access_url='fake_console_url') self.mox.ReplayAll() console = self.compute_api.get_vnc_console(self.context, fake_instance, fake_console_type) self.assertEqual(console, {'url': 'fake_console_url'}) def test_get_vnc_console_no_host(self): instance = self._create_fake_instance_obj(params={'host': ''}) self.assertRaises(exception.InstanceNotReady, self.compute_api.get_vnc_console, self.context, instance, 'novnc') def test_spice_console(self): # Make sure we can a spice console for an instance. 
fake_instance = self._fake_instance( {'uuid': 'f3000000-0000-0000-0000-000000000000', 'host': 'fake_compute_host'}) fake_console_type = "spice-html5" fake_connect_info = {'token': 'fake_token', 'console_type': fake_console_type, 'host': 'fake_console_host', 'port': 'fake_console_port', 'internal_access_path': 'fake_access_path', 'instance_uuid': fake_instance.uuid, 'access_url': 'fake_console_url'} rpcapi = compute_rpcapi.ComputeAPI self.mox.StubOutWithMock(rpcapi, 'get_spice_console') rpcapi.get_spice_console( self.context, instance=fake_instance, console_type=fake_console_type).AndReturn(fake_connect_info) self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi, 'authorize_console') self.compute_api.consoleauth_rpcapi.authorize_console( self.context, 'fake_token', fake_console_type, 'fake_console_host', 'fake_console_port', 'fake_access_path', 'f3000000-0000-0000-0000-000000000000', access_url='fake_console_url') self.mox.ReplayAll() console = self.compute_api.get_spice_console(self.context, fake_instance, fake_console_type) self.assertEqual(console, {'url': 'fake_console_url'}) def test_get_spice_console_no_host(self): instance = self._create_fake_instance_obj(params={'host': ''}) self.assertRaises(exception.InstanceNotReady, self.compute_api.get_spice_console, self.context, instance, 'spice') def test_rdp_console(self): # Make sure we can a rdp console for an instance. 
fake_instance = self._fake_instance({ 'uuid': 'f3000000-0000-0000-0000-000000000000', 'host': 'fake_compute_host'}) fake_console_type = "rdp-html5" fake_connect_info = {'token': 'fake_token', 'console_type': fake_console_type, 'host': 'fake_console_host', 'port': 'fake_console_port', 'internal_access_path': 'fake_access_path', 'instance_uuid': fake_instance.uuid, 'access_url': 'fake_console_url'} rpcapi = compute_rpcapi.ComputeAPI self.mox.StubOutWithMock(rpcapi, 'get_rdp_console') rpcapi.get_rdp_console( self.context, instance=fake_instance, console_type=fake_console_type).AndReturn(fake_connect_info) self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi, 'authorize_console') self.compute_api.consoleauth_rpcapi.authorize_console( self.context, 'fake_token', fake_console_type, 'fake_console_host', 'fake_console_port', 'fake_access_path', 'f3000000-0000-0000-0000-000000000000', access_url='fake_console_url') self.mox.ReplayAll() console = self.compute_api.get_rdp_console(self.context, fake_instance, fake_console_type) self.assertEqual(console, {'url': 'fake_console_url'}) def test_get_rdp_console_no_host(self): instance = self._create_fake_instance_obj(params={'host': ''}) self.assertRaises(exception.InstanceNotReady, self.compute_api.get_rdp_console, self.context, instance, 'rdp') def test_serial_console(self): # Make sure we can get a serial proxy url for an instance. 
fake_instance = self._fake_instance({ 'uuid': 'f3000000-0000-0000-0000-000000000000', 'host': 'fake_compute_host'}) fake_console_type = 'serial' fake_connect_info = {'token': 'fake_token', 'console_type': fake_console_type, 'host': 'fake_serial_host', 'port': 'fake_tcp_port', 'internal_access_path': 'fake_access_path', 'instance_uuid': fake_instance.uuid, 'access_url': 'fake_access_url'} rpcapi = compute_rpcapi.ComputeAPI with test.nested( mock.patch.object(rpcapi, 'get_serial_console', return_value=fake_connect_info), mock.patch.object(self.compute_api.consoleauth_rpcapi, 'authorize_console') ) as (mock_get_serial_console, mock_authorize_console): self.compute_api.consoleauth_rpcapi.authorize_console( self.context, 'fake_token', fake_console_type, 'fake_serial_host', 'fake_tcp_port', 'fake_access_path', 'f3000000-0000-0000-0000-000000000000', access_url='fake_access_url') console = self.compute_api.get_serial_console(self.context, fake_instance, fake_console_type) self.assertEqual(console, {'url': 'fake_access_url'}) def test_get_serial_console_no_host(self): # Make sure an exception is raised when instance is not Active. 
instance = self._create_fake_instance_obj(params={'host': ''}) self.assertRaises(exception.InstanceNotReady, self.compute_api.get_serial_console, self.context, instance, 'serial') def test_mks_console(self): fake_instance = self._fake_instance({ 'uuid': 'f3000000-0000-0000-0000-000000000000', 'host': 'fake_compute_host'}) fake_console_type = 'webmks' fake_connect_info = {'token': 'fake_token', 'console_type': fake_console_type, 'host': 'fake_mks_host', 'port': 'fake_tcp_port', 'internal_access_path': 'fake_access_path', 'instance_uuid': fake_instance.uuid, 'access_url': 'fake_access_url'} with test.nested( mock.patch.object(self.compute_api.compute_rpcapi, 'get_mks_console', return_value=fake_connect_info), mock.patch.object(self.compute_api.consoleauth_rpcapi, 'authorize_console') ) as (mock_get_mks_console, mock_authorize_console): console = self.compute_api.get_mks_console(self.context, fake_instance, fake_console_type) self.assertEqual(console, {'url': 'fake_access_url'}) def test_get_mks_console_no_host(self): # Make sure an exception is raised when instance is not Active. 
instance = self._create_fake_instance_obj(params={'host': ''}) self.assertRaises(exception.InstanceNotReady, self.compute_api.get_mks_console, self.context, instance, 'mks') def test_console_output(self): fake_instance = self._fake_instance({ 'uuid': 'f3000000-0000-0000-0000-000000000000', 'host': 'fake_compute_host'}) fake_tail_length = 699 fake_console_output = 'fake console output' rpcapi = compute_rpcapi.ComputeAPI self.mox.StubOutWithMock(rpcapi, 'get_console_output') rpcapi.get_console_output( self.context, instance=fake_instance, tail_length=fake_tail_length).AndReturn(fake_console_output) self.mox.ReplayAll() output = self.compute_api.get_console_output(self.context, fake_instance, tail_length=fake_tail_length) self.assertEqual(output, fake_console_output) def test_console_output_no_host(self): instance = self._create_fake_instance_obj(params={'host': ''}) self.assertRaises(exception.InstanceNotReady, self.compute_api.get_console_output, self.context, instance) def test_attach_interface(self): new_type = flavors.get_flavor_by_flavor_id('4') instance = objects.Instance(image_ref=uuids.image_instance, system_metadata={}, flavor=new_type, host='fake-host') self.mox.StubOutWithMock(self.compute.network_api, 'allocate_port_for_instance') nwinfo = [fake_network_cache_model.new_vif()] network_id = nwinfo[0]['network']['id'] port_id = nwinfo[0]['id'] req_ip = '1.2.3.4' self.compute.network_api.allocate_port_for_instance( self.context, instance, port_id, network_id, req_ip, bind_host_id='fake-host' ).AndReturn(nwinfo) self.mox.ReplayAll() vif = self.compute.attach_interface(self.context, instance, network_id, port_id, req_ip) self.assertEqual(vif['id'], network_id) return nwinfo, port_id def test_attach_interface_failed(self): new_type = flavors.get_flavor_by_flavor_id('4') instance = objects.Instance( id=42, uuid=uuids.interface_failed_instance, image_ref='foo', system_metadata={}, flavor=new_type, host='fake-host') nwinfo = [fake_network_cache_model.new_vif()] 
network_id = nwinfo[0]['network']['id'] port_id = nwinfo[0]['id'] req_ip = '1.2.3.4' with test.nested( mock.patch.object(self.compute.driver, 'attach_interface'), mock.patch.object(self.compute.network_api, 'allocate_port_for_instance'), mock.patch.object(self.compute.network_api, 'deallocate_port_for_instance')) as ( mock_attach, mock_allocate, mock_deallocate): mock_allocate.return_value = nwinfo mock_attach.side_effect = exception.NovaException("attach_failed") self.assertRaises(exception.InterfaceAttachFailed, self.compute.attach_interface, self.context, instance, network_id, port_id, req_ip) mock_allocate.assert_called_once_with(self.context, instance, network_id, port_id, req_ip, bind_host_id='fake-host') mock_deallocate.assert_called_once_with(self.context, instance, port_id) def test_detach_interface(self): nwinfo, port_id = self.test_attach_interface() self.stubs.Set(self.compute.network_api, 'deallocate_port_for_instance', lambda a, b, c: []) instance = objects.Instance() instance.info_cache = objects.InstanceInfoCache.new( self.context, uuids.info_cache_instance) instance.info_cache.network_info = network_model.NetworkInfo.hydrate( nwinfo) self.compute.detach_interface(self.context, instance, port_id) self.assertEqual(self.compute.driver._interfaces, {}) def test_detach_interface_failed(self): nwinfo, port_id = self.test_attach_interface() instance = objects.Instance(id=42) instance['uuid'] = uuids.info_cache_instance instance.info_cache = objects.InstanceInfoCache.new( self.context, uuids.info_cache_instance) instance.info_cache.network_info = network_model.NetworkInfo.hydrate( nwinfo) with test.nested( mock.patch.object(self.compute.driver, 'detach_interface', side_effect=exception.NovaException('detach_failed')), mock.patch.object(self.compute.network_api, 'deallocate_port_for_instance')) as ( mock_detach, mock_deallocate): self.assertRaises(exception.InterfaceDetachFailed, self.compute.detach_interface, self.context, instance, port_id) 
            # Port must stay allocated when the driver detach fails.
            self.assertFalse(mock_deallocate.called)

    @mock.patch.object(compute_manager.LOG, 'warning')
    def test_detach_interface_deallocate_port_for_instance_failed(self,
                                                                  warn_mock):
        # Tests that when deallocate_port_for_instance fails we log the
        # failure before exiting compute.detach_interface.
        nwinfo, port_id = self.test_attach_interface()
        instance = objects.Instance(id=42, uuid=uuidutils.generate_uuid())
        instance.info_cache = objects.InstanceInfoCache.new(
            self.context, uuids.info_cache_instance)
        instance.info_cache.network_info = network_model.NetworkInfo.hydrate(
            nwinfo)

        # Sometimes neutron errors slip through the neutronv2 API so we want
        # to make sure we catch those in the compute manager and not just
        # NovaExceptions.
        error = neutron_exceptions.PortNotFoundClient()

        with test.nested(
            mock.patch.object(self.compute.driver, 'detach_interface'),
            mock.patch.object(self.compute.network_api,
                              'deallocate_port_for_instance',
                              side_effect=error),
            mock.patch.object(self.compute, '_instance_update')) as (
            mock_detach, mock_deallocate, mock_instance_update):
            # The original neutron error must be re-raised as-is.
            ex = self.assertRaises(neutron_exceptions.PortNotFoundClient,
                                   self.compute.detach_interface,
                                   self.context, instance, port_id)
            self.assertEqual(error, ex)
            mock_deallocate.assert_called_once_with(
                self.context, instance, port_id)
            self.assertEqual(1, warn_mock.call_count)

    def test_attach_volume(self):
        """attach_volume reserves a device name and the cinder volume, then
        casts to the compute host with the created BDM.
        """
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
            {'source_type': 'volume', 'destination_type': 'volume',
             'volume_id': uuids.volume_id, 'device_name': '/dev/vdb'})
        bdm = block_device_obj.BlockDeviceMapping()._from_db_object(
            self.context, block_device_obj.BlockDeviceMapping(), fake_bdm)
        instance = self._create_fake_instance_obj()
        instance.id = 42
        fake_volume = {'id': 'fake-volume-id'}

        with test.nested(
            mock.patch.object(cinder.API, 'get', return_value=fake_volume),
            mock.patch.object(cinder.API, 'check_attach'),
            mock.patch.object(cinder.API, 'reserve_volume'),
            mock.patch.object(compute_rpcapi.ComputeAPI,
                              'reserve_block_device_name',
                              return_value=bdm),
            mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
        ) as (mock_get, mock_check_attach, mock_reserve_vol,
              mock_reserve_bdm, mock_attach):
            self.compute_api.attach_volume(
                self.context, instance, 'fake-volume-id', '/dev/vdb',
                'ide', 'cdrom')

            mock_reserve_bdm.assert_called_once_with(
                self.context, instance, '/dev/vdb', 'fake-volume-id',
                disk_bus='ide', device_type='cdrom')
            self.assertEqual(mock_get.call_args,
                             mock.call(self.context, 'fake-volume-id'))
            self.assertEqual(mock_check_attach.call_args,
                             mock.call(self.context, fake_volume,
                                       instance=instance))
            mock_reserve_vol.assert_called_once_with(
                self.context, 'fake-volume-id')
            # The BDM passed over RPC (third positional arg) must carry the
            # reserved device name and the volume id.
            a, kw = mock_attach.call_args
            self.assertEqual(a[2].device_name, '/dev/vdb')
            self.assertEqual(a[2].volume_id, uuids.volume_id)

    def test_attach_volume_shelved_offloaded(self):
        """For a shelved-offloaded instance the attach goes straight to
        cinder -- there is no compute host to cast to.
        """
        instance = self._create_fake_instance_obj()

        with test.nested(
             mock.patch.object(compute_api.API,
                               '_check_attach_and_reserve_volume'),
             mock.patch.object(cinder.API, 'attach')
        ) as (mock_attach_and_reserve, mock_attach):
            self.compute_api._attach_volume_shelved_offloaded(
                self.context, instance, 'fake-volume-id', '/dev/vdb',
                'ide', 'cdrom')
            mock_attach_and_reserve.assert_called_once_with(
                self.context, 'fake-volume-id', instance)
            mock_attach.assert_called_once_with(self.context,
                                                'fake-volume-id',
                                                instance.uuid,
                                                '/dev/vdb')
            self.assertTrue(mock_attach.called)

    def test_attach_volume_no_device(self):
        """attach_volume with device=None still reserves and attaches."""
        called = {}

        def fake_check_attach(*args, **kwargs):
            called['fake_check_attach'] = True

        def fake_reserve_volume(*args, **kwargs):
            called['fake_reserve_volume'] = True

        def fake_volume_get(self, context, volume_id):
            called['fake_volume_get'] = True
            return {'id': volume_id}

        def fake_rpc_attach_volume(self, context, instance, bdm):
            called['fake_rpc_attach_volume'] = True

        def fake_rpc_reserve_block_device_name(self, context, instance,
                                               device, volume_id, **kwargs):
            called['fake_rpc_reserve_block_device_name'] = True
            bdm = block_device_obj.BlockDeviceMapping(context=context)
            bdm['device_name'] = '/dev/vdb'
            return bdm

        self.stubs.Set(cinder.API, 'get', fake_volume_get)
        self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
        self.stubs.Set(cinder.API, 'reserve_volume', fake_reserve_volume)
        self.stubs.Set(compute_rpcapi.ComputeAPI,
                       'reserve_block_device_name',
                       fake_rpc_reserve_block_device_name)
        self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
                       fake_rpc_attach_volume)

        instance = self._create_fake_instance_obj()
        self.compute_api.attach_volume(self.context, instance, 1,
                                       device=None)
        self.assertTrue(called.get('fake_check_attach'))
        self.assertTrue(called.get('fake_reserve_volume'))
        self.assertTrue(called.get('fake_volume_get'))
        self.assertTrue(called.get('fake_rpc_reserve_block_device_name'))
        self.assertTrue(called.get('fake_rpc_attach_volume'))

    def test_detach_volume(self):
        # Ensure volume can be detached from instance
        called = {}
        instance = self._create_fake_instance_obj()
        # Set attach_status to 'fake' as nothing is reading the value.
volume = {'id': 1, 'attach_status': 'fake'} def fake_check_detach(*args, **kwargs): called['fake_check_detach'] = True def fake_begin_detaching(*args, **kwargs): called['fake_begin_detaching'] = True def fake_rpc_detach_volume(self, context, **kwargs): called['fake_rpc_detach_volume'] = True self.stubs.Set(cinder.API, 'check_detach', fake_check_detach) self.stubs.Set(cinder.API, 'begin_detaching', fake_begin_detaching) self.stubs.Set(compute_rpcapi.ComputeAPI, 'detach_volume', fake_rpc_detach_volume) self.compute_api.detach_volume(self.context, instance, volume) self.assertTrue(called.get('fake_check_detach')) self.assertTrue(called.get('fake_begin_detaching')) self.assertTrue(called.get('fake_rpc_detach_volume')) @mock.patch.object(compute_api.API, '_check_and_begin_detach') @mock.patch.object(compute_api.API, '_local_cleanup_bdm_volumes') @mock.patch.object(objects.BlockDeviceMapping, 'get_by_volume_id') def test_detach_volume_shelved_offloaded(self, mock_block_dev, mock_local_cleanup, mock_check_begin_detach): mock_block_dev.return_value = [block_device_obj.BlockDeviceMapping( context=context)] instance = self._create_fake_instance_obj() volume = {'id': 1, 'attach_status': 'fake'} self.compute_api._detach_volume_shelved_offloaded(self.context, instance, volume) mock_check_begin_detach.assert_called_once_with(self.context, volume, instance) self.assertTrue(mock_local_cleanup.called) def test_detach_invalid_volume(self): # Ensure exception is raised while detaching an un-attached volume fake_instance = self._fake_instance({ 'uuid': 'f7000000-0000-0000-0000-000000000001', 'locked': False, 'launched_at': timeutils.utcnow(), 'vm_state': vm_states.ACTIVE, 'task_state': None}) volume = {'id': 1, 'attach_status': 'detached', 'status': 'available'} self.assertRaises(exception.InvalidVolume, self.compute_api.detach_volume, self.context, fake_instance, volume) def test_detach_unattached_volume(self): # Ensure exception is raised when volume's idea of attached # instance 
doesn't match. fake_instance = self._fake_instance({ 'uuid': 'f7000000-0000-0000-0000-000000000001', 'locked': False, 'launched_at': timeutils.utcnow(), 'vm_state': vm_states.ACTIVE, 'task_state': None}) volume = {'id': 1, 'attach_status': 'attached', 'status': 'in-use', 'attachments': {'fake_uuid': {'attachment_id': 'fakeid'}}} self.assertRaises(exception.VolumeUnattached, self.compute_api.detach_volume, self.context, fake_instance, volume) def test_detach_suspended_instance_fails(self): fake_instance = self._fake_instance({ 'uuid': 'f7000000-0000-0000-0000-000000000001', 'locked': False, 'launched_at': timeutils.utcnow(), 'vm_state': vm_states.SUSPENDED, 'task_state': None}) # Unused volume = {} self.assertRaises(exception.InstanceInvalidState, self.compute_api.detach_volume, self.context, fake_instance, volume) def test_detach_volume_libvirt_is_down(self): # Ensure rollback during detach if libvirt goes down called = {} instance = self._create_fake_instance_obj() fake_bdm = fake_block_device.FakeDbBlockDeviceDict( {'device_name': '/dev/vdb', 'volume_id': uuids.volume_id, 'source_type': 'snapshot', 'destination_type': 'volume', 'connection_info': '{"test": "test"}'}) def fake_libvirt_driver_instance_exists(_instance): called['fake_libvirt_driver_instance_exists'] = True return False def fake_libvirt_driver_detach_volume_fails(*args, **kwargs): called['fake_libvirt_driver_detach_volume_fails'] = True raise AttributeError() def fake_roll_detaching(*args, **kwargs): called['fake_roll_detaching'] = True self.stubs.Set(cinder.API, 'roll_detaching', fake_roll_detaching) self.stubs.Set(self.compute.driver, "instance_exists", fake_libvirt_driver_instance_exists) self.stubs.Set(self.compute.driver, "detach_volume", fake_libvirt_driver_detach_volume_fails) self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'get_by_volume_and_instance') objects.BlockDeviceMapping.get_by_volume_and_instance( self.context, 1, instance.uuid).\ AndReturn(objects.BlockDeviceMapping( 
context=self.context, **fake_bdm)) self.mox.ReplayAll() self.assertRaises(AttributeError, self.compute.detach_volume, self.context, 1, instance) self.assertTrue(called.get('fake_libvirt_driver_instance_exists')) self.assertTrue(called.get('fake_roll_detaching')) def test_detach_volume_not_found(self): # Ensure that a volume can be detached even when it is removed # from an instance but remaining in bdm. See bug #1367964. instance = self._create_fake_instance_obj() fake_bdm = fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-id', 'device_name': '/dev/vdb', 'connection_info': '{"test": "test"}'}) bdm = objects.BlockDeviceMapping(context=self.context, **fake_bdm) # Stub out fake_volume_get so cinder api does not raise exception # and manager gets to call bdm.destroy() def fake_volume_get(self, context, volume_id): return {'id': volume_id} self.stub_out('nova.volume.cinder.API.get', fake_volume_get) with test.nested( mock.patch.object(self.compute.driver, 'detach_volume', side_effect=exception.DiskNotFound('sdb')), mock.patch.object(objects.BlockDeviceMapping, 'get_by_volume_and_instance', return_value=bdm), mock.patch.object(cinder.API, 'terminate_connection'), mock.patch.object(bdm, 'destroy'), mock.patch.object(self.compute, '_notify_about_instance_usage'), mock.patch.object(self.compute.volume_api, 'detach'), mock.patch.object(self.compute.driver, 'get_volume_connector', return_value='fake-connector') ) as (mock_detach_volume, mock_volume, mock_terminate_connection, mock_destroy, mock_notify, mock_detach, mock_volume_connector): self.compute.detach_volume(self.context, 'fake-id', instance) self.assertTrue(mock_detach_volume.called) mock_terminate_connection.assert_called_once_with(self.context, 'fake-id', 'fake-connector') mock_destroy.assert_called_once_with() mock_detach.assert_called_once_with(mock.ANY, 'fake-id', instance.uuid, None) def test_terminate_with_volumes(self): # Make sure that volumes 
get detached during instance termination. admin = context.get_admin_context() instance = self._create_fake_instance_obj() volume_id = 'fake' values = {'instance_uuid': instance['uuid'], 'device_name': '/dev/vdc', 'delete_on_termination': False, 'volume_id': volume_id, 'destination_type': 'volume' } db.block_device_mapping_create(admin, values) def fake_volume_get(self, context, volume_id): return {'id': volume_id} self.stubs.Set(cinder.API, "get", fake_volume_get) # Stub out and record whether it gets detached result = {"detached": False} def fake_detach(self, context, volume_id_param, instance_uuid): result["detached"] = volume_id_param == volume_id self.stubs.Set(cinder.API, "detach", fake_detach) def fake_terminate_connection(self, context, volume_id, connector): return {} self.stubs.Set(cinder.API, "terminate_connection", fake_terminate_connection) # Kill the instance and check that it was detached bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( admin, instance['uuid']) self.compute.terminate_instance(admin, instance, bdms, []) self.assertTrue(result["detached"]) def test_terminate_deletes_all_bdms(self): admin = context.get_admin_context() instance = self._create_fake_instance_obj() img_bdm = {'context': admin, 'instance_uuid': instance['uuid'], 'device_name': '/dev/vda', 'source_type': 'image', 'destination_type': 'local', 'delete_on_termination': False, 'boot_index': 0, 'image_id': 'fake_image'} vol_bdm = {'context': admin, 'instance_uuid': instance['uuid'], 'device_name': '/dev/vdc', 'source_type': 'volume', 'destination_type': 'volume', 'delete_on_termination': False, 'volume_id': 'fake_vol'} bdms = [] for bdm in img_bdm, vol_bdm: bdm_obj = objects.BlockDeviceMapping(**bdm) bdm_obj.create() bdms.append(bdm_obj) self.stub_out('nova.volume.cinder.API.terminate_connection', mox.MockAnything()) self.stub_out('nova.volume.cinder.API.detach', mox.MockAnything()) def fake_volume_get(self, context, volume_id): return {'id': volume_id} 
        self.stub_out('nova.volume.cinder.API.get', fake_volume_get)

        self.stubs.Set(self.compute, '_prep_block_device',
                       mox.MockAnything())
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        self.compute.terminate_instance(self.context, instance, bdms, [])

        # Both the image and the volume BDM rows must be gone.
        bdms = db.block_device_mapping_get_all_by_instance(admin,
                                                           instance['uuid'])
        self.assertEqual(len(bdms), 0)

    def test_inject_network_info(self):
        """inject_network_info on a running instance completes cleanly."""
        instance = self._create_fake_instance_obj(params={'host': CONF.host})
        self.compute.build_and_run_instance(self.context,
                instance, {}, {}, {}, block_device_mapping=[])
        instance = self.compute_api.get(self.context, instance['uuid'],
                                        want_objects=True)
        self.compute_api.inject_network_info(self.context, instance)

    def test_reset_network(self):
        """reset_network on a running instance completes cleanly."""
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context,
                instance, {}, {}, {}, block_device_mapping=[])
        instance = self.compute_api.get(self.context, instance['uuid'],
                                        want_objects=True)
        self.compute_api.reset_network(self.context, instance)

    def test_lock(self):
        """Locking an instance completes without touching the network."""
        instance = self._create_fake_instance_obj()
        self.stubs.Set(self.compute_api.network_api,
                       'deallocate_for_instance',
                       lambda *a, **kw: None)
        self.compute_api.lock(self.context, instance)

    def test_unlock(self):
        """Unlocking an instance completes without touching the network."""
        instance = self._create_fake_instance_obj()
        self.stubs.Set(self.compute_api.network_api,
                       'deallocate_for_instance',
                       lambda *a, **kw: None)
        self.compute_api.unlock(self.context, instance)

    def test_add_remove_security_group(self):
        """A security group can be added to and removed from an instance."""
        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context,
                instance, {}, {}, {}, block_device_mapping=[])
        instance = self.compute_api.get(self.context, instance.uuid,
                                        want_objects=True)
        security_group_name = self._create_group()['name']

        self.security_group_api.add_to_instance(self.context,
                                                instance,
                                                security_group_name)
        self.security_group_api.remove_from_instance(self.context,
                                                     instance,
                                                     security_group_name)

    def test_get_diagnostics(self):
        """get_diagnostics proxies to the compute RPC API."""
        instance = self._create_fake_instance_obj()

        rpcapi = compute_rpcapi.ComputeAPI
        self.mox.StubOutWithMock(rpcapi, 'get_diagnostics')
        rpcapi.get_diagnostics(self.context, instance=instance)
        self.mox.ReplayAll()

        self.compute_api.get_diagnostics(self.context, instance)

    def test_get_instance_diagnostics(self):
        """get_instance_diagnostics proxies to the compute RPC API."""
        instance = self._create_fake_instance_obj()

        rpcapi = compute_rpcapi.ComputeAPI
        self.mox.StubOutWithMock(rpcapi, 'get_instance_diagnostics')
        rpcapi.get_instance_diagnostics(self.context, instance=instance)
        self.mox.ReplayAll()

        self.compute_api.get_instance_diagnostics(self.context, instance)

    @mock.patch.object(compute_rpcapi.ComputeAPI,
                       'refresh_instance_security_rules')
    def test_refresh_instance_security_rules(self, mock_refresh):
        """Only instances that have a host get their rules refreshed."""
        inst1 = self._create_fake_instance_obj()
        inst2 = self._create_fake_instance_obj({'host': None})

        self.security_group_api._refresh_instance_security_rules(
            self.context, [inst1, inst2])
        # inst2 has no host, so only inst1 is refreshed.
        mock_refresh.assert_called_once_with(self.context, inst1.host, inst1)

    @mock.patch.object(compute_rpcapi.ComputeAPI,
                       'refresh_instance_security_rules')
    def test_refresh_instance_security_rules_empty(self, mock_refresh):
        """An empty instance list results in no RPC calls."""
        self.security_group_api._refresh_instance_security_rules(self.context,
                                                                 [])
        self.assertFalse(mock_refresh.called)

    @mock.patch.object(compute_api.SecurityGroupAPI,
                       '_refresh_instance_security_rules')
    @mock.patch.object(objects.InstanceList,
                       'get_by_grantee_security_group_ids')
    def test_secgroup_refresh(self, mock_get, mock_refresh):
        """Member refresh fans out to instances granted the groups."""
        mock_get.return_value = mock.sentinel.instances

        self.security_group_api.trigger_members_refresh(mock.sentinel.ctxt,
                                                        mock.sentinel.ids)

        mock_get.assert_called_once_with(mock.sentinel.ctxt,
                                         mock.sentinel.ids)
        mock_refresh.assert_called_once_with(mock.sentinel.ctxt,
                                             mock.sentinel.instances)

    @mock.patch.object(compute_api.SecurityGroupAPI,
                       '_refresh_instance_security_rules')
    @mock.patch.object(objects.InstanceList, 'get_by_security_group_id')
    def test_secrule_refresh(self, mock_get, mock_refresh):
        """Rule refresh fans out to instances in the group."""
        mock_get.return_value = mock.sentinel.instances

        self.security_group_api.trigger_rules_refresh(mock.sentinel.ctxt,
                                                      mock.sentinel.id)

        mock_get.assert_called_once_with(mock.sentinel.ctxt, mock.sentinel.id)
        mock_refresh.assert_called_once_with(mock.sentinel.ctxt,
                                             mock.sentinel.instances)

    def test_live_migrate(self):
        """live_migrate records the action, passes the RequestSpec through to
        conductor and leaves the instance in the MIGRATING task state.
        """
        instance, instance_uuid = self._run_instance()

        rpcapi = self.compute_api.compute_task_api
        fake_spec = objects.RequestSpec()

        @mock.patch.object(rpcapi, 'live_migrate_instance')
        @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
        @mock.patch.object(self.compute_api, '_record_action_start')
        def do_test(record_action_start, get_by_instance_uuid,
                    live_migrate_instance):
            get_by_instance_uuid.return_value = fake_spec

            self.compute_api.live_migrate(self.context, instance,
                                          block_migration=True,
                                          disk_over_commit=True,
                                          host_name='fake_dest_host')

            record_action_start.assert_called_once_with(self.context,
                                                        instance,
                                                        'live-migration')
            live_migrate_instance.assert_called_once_with(
                self.context, instance, 'fake_dest_host',
                block_migration=True,
                disk_over_commit=True,
                request_spec=fake_spec)

        do_test()
        instance.refresh()
        self.assertEqual(instance['task_state'], task_states.MIGRATING)

    def test_evacuate(self):
        """Evacuating an instance with a down service rebuilds it on the
        target host and records a migration.
        """
        instance = self._create_fake_instance_obj(services=True)
        self.assertIsNone(instance.task_state)

        ctxt = self.context.elevated()

        fake_spec = objects.RequestSpec()

        def fake_rebuild_instance(*args, **kwargs):
            instance.host = kwargs['host']
            instance.save()

        @mock.patch.object(self.compute_api.compute_task_api,
                           'rebuild_instance')
        @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
        @mock.patch.object(self.compute_api.servicegroup_api, 'service_is_up')
        def do_test(service_is_up, get_by_instance_uuid, rebuild_instance):
            # Evacuate is only allowed when the source service is down.
            service_is_up.return_value = False
            get_by_instance_uuid.return_value = fake_spec
            rebuild_instance.side_effect = fake_rebuild_instance

            self.compute_api.evacuate(ctxt,
                                      instance,
                                      host='fake_dest_host',
                                      on_shared_storage=True,
                                      admin_password=None)
rebuild_instance.assert_called_once_with( ctxt, instance=instance, new_pass=None, injected_files=None, image_ref=None, orig_image_ref=None, orig_sys_metadata=None, bdms=None, recreate=True, on_shared_storage=True, request_spec=fake_spec, host='fake_dest_host') do_test() instance.refresh() self.assertEqual(instance.task_state, task_states.REBUILDING) self.assertEqual(instance.host, 'fake_dest_host') migs = objects.MigrationList.get_by_filters( self.context, {'source_host': 'fake_host'}) self.assertEqual(1, len(migs)) self.assertEqual(self.compute.host, migs[0].source_compute) self.assertEqual('accepted', migs[0].status) self.assertEqual('compute.instance.evacuate', fake_notifier.NOTIFICATIONS[0].event_type) def test_fail_evacuate_from_non_existing_host(self): inst = {} inst['vm_state'] = vm_states.ACTIVE inst['launched_at'] = timeutils.utcnow() inst['image_ref'] = FAKE_IMAGE_REF inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user_id inst['project_id'] = self.project_id inst['host'] = 'fake_host' inst['node'] = NODENAME type_id = flavors.get_flavor_by_name('m1.tiny')['id'] inst['instance_type_id'] = type_id inst['ami_launch_index'] = 0 inst['memory_mb'] = 0 inst['vcpus'] = 0 inst['root_gb'] = 0 inst['ephemeral_gb'] = 0 inst['architecture'] = arch.X86_64 inst['os_type'] = 'Linux' instance = self._create_fake_instance_obj(inst) self.assertIsNone(instance.task_state) self.assertRaises(exception.ComputeHostNotFound, self.compute_api.evacuate, self.context.elevated(), instance, host='fake_dest_host', on_shared_storage=True, admin_password=None) def test_fail_evacuate_from_running_host(self): instance = self._create_fake_instance_obj(services=True) self.assertIsNone(instance.task_state) def fake_service_is_up(*args, **kwargs): return True self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up', fake_service_is_up) self.assertRaises(exception.ComputeServiceInUse, self.compute_api.evacuate, self.context.elevated(), instance, host='fake_dest_host', 
on_shared_storage=True, admin_password=None) def test_fail_evacuate_instance_in_wrong_state(self): states = [vm_states.BUILDING, vm_states.PAUSED, vm_states.SUSPENDED, vm_states.RESCUED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.DELETED] instances = [self._create_fake_instance_obj({'vm_state': state}) for state in states] for instance in instances: self.assertRaises(exception.InstanceInvalidState, self.compute_api.evacuate, self.context, instance, host='fake_dest_host', on_shared_storage=True, admin_password=None) def test_get_migrations(self): migration = test_migration.fake_db_migration() filters = {'host': 'host1'} self.mox.StubOutWithMock(db, "migration_get_all_by_filters") db.migration_get_all_by_filters(self.context, filters).AndReturn([migration]) self.mox.ReplayAll() migrations = self.compute_api.get_migrations(self.context, filters) self.assertEqual(1, len(migrations)) self.assertEqual(migrations[0].id, migration['id']) @mock.patch("nova.db.migration_get_in_progress_by_instance") def test_get_migrations_in_progress_by_instance(self, mock_get): migration = test_migration.fake_db_migration(instance_uuid="1234") mock_get.return_value = [migration] db.migration_get_in_progress_by_instance(self.context, "1234") migrations = self.compute_api.get_migrations_in_progress_by_instance( self.context, "1234") self.assertEqual(1, len(migrations)) self.assertEqual(migrations[0].id, migration['id']) @mock.patch("nova.db.migration_get_by_id_and_instance") def test_get_migration_by_id_and_instance(self, mock_get): migration = test_migration.fake_db_migration(instance_uuid="1234") mock_get.return_value = migration db.migration_get_by_id_and_instance( self.context, migration['id'], uuid) res = self.compute_api.get_migration_by_id_and_instance( self.context, migration['id'], "1234") self.assertEqual(res.id, migration['id']) class ComputeAPIIpFilterTestCase(test.NoDBTestCase): '''Verifies the IP filtering in the compute API.''' def setUp(self): 
super(ComputeAPIIpFilterTestCase, self).setUp() self.compute_api = compute.API() def _get_ip_filtering_instances(self): '''Utility function to get instances for the IP filtering tests.''' info = [{ 'address': 'aa:bb:cc:dd:ee:ff', 'id': 1, 'network': { 'bridge': 'br0', 'id': 1, 'label': 'private', 'subnets': [{ 'cidr': '192.168.0.0/24', 'ips': [{ 'address': '192.168.0.10', 'type': 'fixed' }, { 'address': '192.168.0.11', 'type': 'fixed' }] }] } }, { 'address': 'aa:bb:cc:dd:ee:ff', 'id': 2, 'network': { 'bridge': 'br1', 'id': 2, 'label': 'private', 'subnets': [{ 'cidr': '192.164.0.0/24', 'ips': [{ 'address': '192.164.0.10', 'type': 'fixed' }] }] } }] info1 = objects.InstanceInfoCache(network_info=jsonutils.dumps(info)) inst1 = objects.Instance(id=1, info_cache=info1) info[0]['network']['subnets'][0]['ips'][0]['address'] = '192.168.0.20' info[0]['network']['subnets'][0]['ips'][1]['address'] = '192.168.0.21' info[1]['network']['subnets'][0]['ips'][0]['address'] = '192.164.0.20' info2 = objects.InstanceInfoCache(network_info=jsonutils.dumps(info)) inst2 = objects.Instance(id=2, info_cache=info2) return objects.InstanceList(objects=[inst1, inst2]) def test_ip_filtering_no_matches(self): instances = self._get_ip_filtering_instances() insts = self.compute_api._ip_filter(instances, {'ip': '.*30'}, None) self.assertEqual(0, len(insts)) def test_ip_filtering_one_match(self): instances = self._get_ip_filtering_instances() for val in ('192.168.0.10', '192.168.0.1', '192.164.0.10', '.*10'): insts = self.compute_api._ip_filter(instances, {'ip': val}, None) self.assertEqual([1], [i.id for i in insts]) def test_ip_filtering_one_match_limit(self): instances = self._get_ip_filtering_instances() for limit in (None, 1, 2): insts = self.compute_api._ip_filter(instances, {'ip': '.*10'}, limit) self.assertEqual([1], [i.id for i in insts]) def test_ip_filtering_two_matches(self): instances = self._get_ip_filtering_instances() for val in ('192.16', '192.168', '192.164'): insts = 
self.compute_api._ip_filter(instances, {'ip': val}, None) self.assertEqual([1, 2], [i.id for i in insts]) def test_ip_filtering_two_matches_limit(self): instances = self._get_ip_filtering_instances() # Up to 2 match, based on the passed limit for limit in (None, 1, 2, 3): insts = self.compute_api._ip_filter(instances, {'ip': '192.168.0.*'}, limit) expected_ids = [1, 2] if limit: expected_len = min(limit, len(expected_ids)) expected_ids = expected_ids[:expected_len] self.assertEqual(expected_ids, [inst.id for inst in insts]) def test_ip_filtering_no_limit_to_db(self): c = context.get_admin_context() # Limit is not supplied to the DB when using an IP filter with mock.patch('nova.objects.InstanceList.get_by_filters') as m_get: self.compute_api.get_all(c, search_opts={'ip': '.10'}, limit=1) self.assertEqual(1, m_get.call_count) kwargs = m_get.call_args[1] self.assertIsNone(kwargs['limit']) def test_ip_filtering_pass_limit_to_db(self): c = context.get_admin_context() # No IP filter, verify that the limit is passed with mock.patch('nova.objects.InstanceList.get_by_filters') as m_get: self.compute_api.get_all(c, search_opts={}, limit=1) self.assertEqual(1, m_get.call_count) kwargs = m_get.call_args[1] self.assertEqual(1, kwargs['limit']) def fake_rpc_method(context, method, **kwargs): pass def _create_service_entries(context, values=[['avail_zone1', ['fake_host1', 'fake_host2']], ['avail_zone2', ['fake_host3']]]): for (avail_zone, hosts) in values: for host in hosts: db.service_create(context, {'host': host, 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0}) return values class ComputeAPIAggrTestCase(BaseTestCase): """This is for unit coverage of aggregate-related methods defined in nova.compute.api. 
""" def setUp(self): super(ComputeAPIAggrTestCase, self).setUp() self.api = compute_api.AggregateAPI() self.context = context.get_admin_context() self.stubs.Set(self.api.compute_rpcapi.client, 'call', fake_rpc_method) self.stubs.Set(self.api.compute_rpcapi.client, 'cast', fake_rpc_method) def test_aggregate_no_zone(self): # Ensure we can create an aggregate without an availability zone aggr = self.api.create_aggregate(self.context, 'fake_aggregate', None) self.api.delete_aggregate(self.context, aggr.id) db.aggregate_get(self.context.elevated(read_deleted='yes'), aggr.id) self.assertRaises(exception.AggregateNotFound, self.api.delete_aggregate, self.context, aggr.id) def test_check_az_for_aggregate(self): # Ensure all conflict hosts can be returned values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host1 = values[0][1][0] fake_host2 = values[0][1][1] aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1', fake_zone, fake_host1) aggr1 = self._init_aggregate_with_host(aggr1, None, None, fake_host2) aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None, fake_host2) aggr2 = self._init_aggregate_with_host(aggr2, None, None, fake_host1) metadata = {'availability_zone': 'another_zone'} self.assertRaises(exception.InvalidAggregateActionUpdate, self.api.update_aggregate, self.context, aggr2.id, metadata) def test_update_aggregate(self): # Ensure metadata can be updated. 
        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
                                         'fake_zone')
        fake_notifier.NOTIFICATIONS = []
        self.api.update_aggregate(self.context, aggr.id,
                                  {'name': 'new_fake_aggregate'})
        # A plain property update must not populate the AZ cache.
        self.assertIsNone(availability_zones._get_cache().get('cache'))
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type, 'aggregate.updateprop.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type, 'aggregate.updateprop.end')

    def test_update_aggregate_no_az(self):
        # Ensure metadata without availability zone can be
        # updated, even if the aggregate contains hosts belonging
        # to another availability zone
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        self._init_aggregate_with_host(None, 'fake_aggregate1',
                                       fake_zone, fake_host)
        aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                               fake_host)
        metadata = {'name': 'new_fake_aggregate'}
        fake_notifier.NOTIFICATIONS = []
        self.api.update_aggregate(self.context, aggr2.id, metadata)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type, 'aggregate.updateprop.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type, 'aggregate.updateprop.end')

    def test_update_aggregate_az_change(self):
        # Ensure availability zone can be updated,
        # when the aggregate is the only one with
        # availability zone
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
                                               fake_zone, fake_host)
        self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                       fake_host)
        metadata = {'availability_zone': 'new_fake_zone'}
        fake_notifier.NOTIFICATIONS = []
        self.api.update_aggregate(self.context, aggr1.id, metadata)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.end')

    def test_update_aggregate_az_fails(self):
        # Ensure aggregate's availability zone can't be updated,
        # when aggregate has hosts in other availability zone
        fake_notifier.NOTIFICATIONS = []
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        self._init_aggregate_with_host(None, 'fake_aggregate1',
                                       fake_zone, fake_host)
        aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                               fake_host)
        metadata = {'availability_zone': 'another_zone'}
        self.assertRaises(exception.InvalidAggregateActionUpdate,
                          self.api.update_aggregate,
                          self.context, aggr2.id, metadata)
        # A host that is not in another AZ CAN be given one, though.
        fake_host2 = values[0][1][1]
        aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3',
                                               None, fake_host2)
        metadata = {'availability_zone': fake_zone}
        self.api.update_aggregate(self.context, aggr3.id, metadata)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15)
        msg = fake_notifier.NOTIFICATIONS[13]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.start')
        msg = fake_notifier.NOTIFICATIONS[14]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.end')
        # An empty-string AZ is rejected outright.
        aggr4 = self.api.create_aggregate(self.context, 'fake_aggregate',
                                          None)
        metadata = {'availability_zone': ""}
        self.assertRaises(exception.InvalidAggregateActionUpdate,
                          self.api.update_aggregate,
                          self.context, aggr4.id, metadata)

    def test_update_aggregate_az_fails_with_nova_az(self):
        # Ensure aggregate's availability zone can't be updated,
        # when aggregate has hosts in other availability zone
        fake_notifier.NOTIFICATIONS = []
        values = _create_service_entries(self.context)
        fake_host = values[0][1][0]
        self._init_aggregate_with_host(None, 'fake_aggregate1',
                                       CONF.default_availability_zone,
                                       fake_host)
        aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                               fake_host)
        metadata = {'availability_zone': 'another_zone'}
self.assertRaises(exception.InvalidAggregateActionUpdate, self.api.update_aggregate, self.context, aggr2.id, metadata) def test_update_aggregate_metadata(self): # Ensure metadata can be updated. aggr = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_zone') metadata = {'foo_key1': 'foo_value1', 'foo_key2': 'foo_value2', 'availability_zone': 'fake_zone'} fake_notifier.NOTIFICATIONS = [] availability_zones._get_cache().add('fake_key', 'fake_value') aggr = self.api.update_aggregate_metadata(self.context, aggr.id, metadata) self.assertIsNone(availability_zones._get_cache().get('fake_key')) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.end') fake_notifier.NOTIFICATIONS = [] metadata['foo_key1'] = None expected_payload_meta_data = {'foo_key1': None, 'foo_key2': 'foo_value2', 'availability_zone': 'fake_zone'} expected = self.api.update_aggregate_metadata(self.context, aggr.id, metadata) self.assertEqual(2, len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual('aggregate.updatemetadata.start', msg.event_type) self.assertEqual(expected_payload_meta_data, msg.payload['meta_data']) msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual('aggregate.updatemetadata.end', msg.event_type) self.assertEqual(expected_payload_meta_data, msg.payload['meta_data']) self.assertThat(expected.metadata, matchers.DictMatches({'availability_zone': 'fake_zone', 'foo_key2': 'foo_value2'})) def test_update_aggregate_metadata_no_az(self): # Ensure metadata without availability zone can be # updated,even the aggregate contains hosts belong # to another availability zone values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] self._init_aggregate_with_host(None, 'fake_aggregate1', fake_zone, fake_host) 
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None, fake_host) metadata = {'foo_key2': 'foo_value3'} fake_notifier.NOTIFICATIONS = [] aggr2 = self.api.update_aggregate_metadata(self.context, aggr2.id, metadata) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.end') self.assertThat(aggr2.metadata, matchers.DictMatches({'foo_key2': 'foo_value3'})) def test_update_aggregate_metadata_az_change(self): # Ensure availability zone can be updated, # when the aggregate is the only one with # availability zone values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1', fake_zone, fake_host) self._init_aggregate_with_host(None, 'fake_aggregate2', None, fake_host) metadata = {'availability_zone': 'new_fake_zone'} fake_notifier.NOTIFICATIONS = [] self.api.update_aggregate_metadata(self.context, aggr1.id, metadata) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.end') def test_update_aggregate_az_do_not_replace_existing_metadata(self): # Ensure that that update of the aggregate availability zone # does not replace the aggregate existing metadata aggr = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_zone') metadata = {'foo_key1': 'foo_value1'} aggr = self.api.update_aggregate_metadata(self.context, aggr.id, metadata) metadata = {'availability_zone': 'new_fake_zone'} aggr = self.api.update_aggregate(self.context, aggr.id, metadata) self.assertThat(aggr.metadata, matchers.DictMatches( {'availability_zone': 'new_fake_zone', 'foo_key1': 
'foo_value1'})) def test_update_aggregate_metadata_az_fails(self): # Ensure aggregate's availability zone can't be updated, # when aggregate has hosts in other availability zone fake_notifier.NOTIFICATIONS = [] values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] self._init_aggregate_with_host(None, 'fake_aggregate1', fake_zone, fake_host) aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None, fake_host) metadata = {'availability_zone': 'another_zone'} self.assertRaises(exception.InvalidAggregateActionUpdateMeta, self.api.update_aggregate_metadata, self.context, aggr2.id, metadata) aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3', None, fake_host) metadata = {'availability_zone': fake_zone} self.api.update_aggregate_metadata(self.context, aggr3.id, metadata) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15) msg = fake_notifier.NOTIFICATIONS[13] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.start') msg = fake_notifier.NOTIFICATIONS[14] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.end') aggr4 = self.api.create_aggregate(self.context, 'fake_aggregate', None) metadata = {'availability_zone': ""} self.assertRaises(exception.InvalidAggregateActionUpdateMeta, self.api.update_aggregate_metadata, self.context, aggr4.id, metadata) def test_delete_aggregate(self): # Ensure we can delete an aggregate. 
fake_notifier.NOTIFICATIONS = [] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_zone') self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.create.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.create.end') fake_notifier.NOTIFICATIONS = [] self.api.delete_aggregate(self.context, aggr.id) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.delete.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.delete.end') db.aggregate_get(self.context.elevated(read_deleted='yes'), aggr.id) self.assertRaises(exception.AggregateNotFound, self.api.delete_aggregate, self.context, aggr.id) def test_delete_non_empty_aggregate(self): # Ensure InvalidAggregateAction is raised when non empty aggregate. _create_service_entries(self.context, [['fake_availability_zone', ['fake_host']]]) aggr = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_availability_zone') self.api.add_host_to_aggregate(self.context, aggr.id, 'fake_host') self.assertRaises(exception.InvalidAggregateActionDelete, self.api.delete_aggregate, self.context, aggr.id) def test_add_host_to_aggregate(self): # Ensure we can add a host to an aggregate. 
values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) def fake_add_aggregate_host(*args, **kwargs): hosts = kwargs["aggregate"].hosts self.assertIn(fake_host, hosts) self.stubs.Set(self.api.compute_rpcapi, 'add_aggregate_host', fake_add_aggregate_host) self.mox.StubOutWithMock(availability_zones, 'update_host_availability_zone_cache') availability_zones.update_host_availability_zone_cache(self.context, fake_host) self.mox.ReplayAll() fake_notifier.NOTIFICATIONS = [] aggr = self.api.add_host_to_aggregate(self.context, aggr.id, fake_host) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.addhost.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.addhost.end') self.assertEqual(len(aggr.hosts), 1) def test_add_host_to_aggr_with_no_az(self): values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) aggr = self.api.add_host_to_aggregate(self.context, aggr.id, fake_host) aggr_no_az = self.api.create_aggregate(self.context, 'fake_aggregate2', None) aggr_no_az = self.api.add_host_to_aggregate(self.context, aggr_no_az.id, fake_host) self.assertIn(fake_host, aggr.hosts) self.assertIn(fake_host, aggr_no_az.hosts) def test_add_host_to_multi_az(self): # Ensure we can't add a host to different availability zone values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) aggr = self.api.add_host_to_aggregate(self.context, aggr.id, fake_host) self.assertEqual(len(aggr.hosts), 1) fake_zone2 = "another_zone" aggr2 = self.api.create_aggregate(self.context, 'fake_aggregate2', fake_zone2) 
self.assertRaises(exception.InvalidAggregateActionAdd, self.api.add_host_to_aggregate, self.context, aggr2.id, fake_host) def test_add_host_to_multi_az_with_nova_agg(self): # Ensure we can't add a host if already existing in an agg with AZ set # to default values = _create_service_entries(self.context) fake_host = values[0][1][0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', CONF.default_availability_zone) aggr = self.api.add_host_to_aggregate(self.context, aggr.id, fake_host) self.assertEqual(len(aggr.hosts), 1) fake_zone2 = "another_zone" aggr2 = self.api.create_aggregate(self.context, 'fake_aggregate2', fake_zone2) self.assertRaises(exception.InvalidAggregateActionAdd, self.api.add_host_to_aggregate, self.context, aggr2.id, fake_host) def test_add_host_to_aggregate_multiple(self): # Ensure we can add multiple hosts to an aggregate. values = _create_service_entries(self.context) fake_zone = values[0][0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) for host in values[0][1]: aggr = self.api.add_host_to_aggregate(self.context, aggr.id, host) self.assertEqual(len(aggr.hosts), len(values[0][1])) def test_add_host_to_aggregate_raise_not_found(self): # Ensure ComputeHostNotFound is raised when adding invalid host. aggr = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_zone') fake_notifier.NOTIFICATIONS = [] self.assertRaises(exception.ComputeHostNotFound, self.api.add_host_to_aggregate, self.context, aggr.id, 'invalid_host') self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) self.assertEqual(fake_notifier.NOTIFICATIONS[1].publisher_id, 'compute.fake-mini') def test_remove_host_from_aggregate_active(self): # Ensure we can remove a host from an aggregate. 
values = _create_service_entries(self.context) fake_zone = values[0][0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) for host in values[0][1]: aggr = self.api.add_host_to_aggregate(self.context, aggr.id, host) host_to_remove = values[0][1][0] def fake_remove_aggregate_host(*args, **kwargs): hosts = kwargs["aggregate"].hosts self.assertNotIn(host_to_remove, hosts) self.stubs.Set(self.api.compute_rpcapi, 'remove_aggregate_host', fake_remove_aggregate_host) self.mox.StubOutWithMock(availability_zones, 'update_host_availability_zone_cache') availability_zones.update_host_availability_zone_cache(self.context, host_to_remove) self.mox.ReplayAll() fake_notifier.NOTIFICATIONS = [] expected = self.api.remove_host_from_aggregate(self.context, aggr.id, host_to_remove) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.removehost.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.removehost.end') self.assertEqual(len(aggr.hosts) - 1, len(expected.hosts)) def test_remove_host_from_aggregate_raise_not_found(self): # Ensure ComputeHostNotFound is raised when removing invalid host. 
_create_service_entries(self.context, [['fake_zone', ['fake_host']]]) aggr = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_zone') self.assertRaises(exception.ComputeHostNotFound, self.api.remove_host_from_aggregate, self.context, aggr.id, 'invalid_host') def test_aggregate_list(self): aggregate = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_zone') metadata = {'foo_key1': 'foo_value1', 'foo_key2': 'foo_value2'} meta_aggregate = self.api.create_aggregate(self.context, 'fake_aggregate2', 'fake_zone2') self.api.update_aggregate_metadata(self.context, meta_aggregate.id, metadata) aggregate_list = self.api.get_aggregate_list(self.context) self.assertIn(aggregate.id, map(lambda x: x.id, aggregate_list)) self.assertIn(meta_aggregate.id, map(lambda x: x.id, aggregate_list)) self.assertIn('fake_aggregate', map(lambda x: x.name, aggregate_list)) self.assertIn('fake_aggregate2', map(lambda x: x.name, aggregate_list)) self.assertIn('fake_zone', map(lambda x: x.availability_zone, aggregate_list)) self.assertIn('fake_zone2', map(lambda x: x.availability_zone, aggregate_list)) test_agg_meta = aggregate_list[1].metadata self.assertIn('foo_key1', test_agg_meta) self.assertIn('foo_key2', test_agg_meta) self.assertEqual('foo_value1', test_agg_meta['foo_key1']) self.assertEqual('foo_value2', test_agg_meta['foo_key2']) def test_aggregate_list_with_hosts(self): values = _create_service_entries(self.context) fake_zone = values[0][0] host_aggregate = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) self.api.add_host_to_aggregate(self.context, host_aggregate.id, values[0][1][0]) aggregate_list = self.api.get_aggregate_list(self.context) aggregate = aggregate_list[0] hosts = aggregate.hosts if 'hosts' in aggregate else None self.assertIn(values[0][1][0], hosts) class ComputeAPIAggrCallsSchedulerTestCase(test.NoDBTestCase): """This is for making sure that all Aggregate API methods which are updating the aggregates DB table also notifies 
the Scheduler by using its client. """ def setUp(self): super(ComputeAPIAggrCallsSchedulerTestCase, self).setUp() self.api = compute_api.AggregateAPI() self.context = context.RequestContext('fake', 'fake') @mock.patch.object(scheduler_client.SchedulerClient, 'update_aggregates') def test_create_aggregate(self, update_aggregates): with mock.patch.object(objects.Aggregate, 'create'): agg = self.api.create_aggregate(self.context, 'fake', None) update_aggregates.assert_called_once_with(self.context, [agg]) @mock.patch.object(scheduler_client.SchedulerClient, 'update_aggregates') def test_update_aggregate(self, update_aggregates): self.api.is_safe_to_update_az = mock.Mock() agg = objects.Aggregate() with mock.patch.object(objects.Aggregate, 'get_by_id', return_value=agg): self.api.update_aggregate(self.context, 1, {}) update_aggregates.assert_called_once_with(self.context, [agg]) @mock.patch.object(scheduler_client.SchedulerClient, 'update_aggregates') def test_update_aggregate_metadata(self, update_aggregates): self.api.is_safe_to_update_az = mock.Mock() agg = objects.Aggregate() agg.update_metadata = mock.Mock() with mock.patch.object(objects.Aggregate, 'get_by_id', return_value=agg): self.api.update_aggregate_metadata(self.context, 1, {}) update_aggregates.assert_called_once_with(self.context, [agg]) @mock.patch.object(scheduler_client.SchedulerClient, 'delete_aggregate') def test_delete_aggregate(self, delete_aggregate): self.api.is_safe_to_update_az = mock.Mock() agg = objects.Aggregate(hosts=[]) agg.destroy = mock.Mock() with mock.patch.object(objects.Aggregate, 'get_by_id', return_value=agg): self.api.delete_aggregate(self.context, 1) delete_aggregate.assert_called_once_with(self.context, agg) @mock.patch.object(scheduler_client.SchedulerClient, 'update_aggregates') def test_add_host_to_aggregate(self, update_aggregates): self.api.is_safe_to_update_az = mock.Mock() self.api._update_az_cache_for_host = mock.Mock() agg = objects.Aggregate(name='fake', metadata={}) 
agg.add_host = mock.Mock() with test.nested( mock.patch.object(objects.Service, 'get_by_compute_host'), mock.patch.object(objects.Aggregate, 'get_by_id', return_value=agg)): self.api.add_host_to_aggregate(self.context, 1, 'fakehost') update_aggregates.assert_called_once_with(self.context, [agg]) @mock.patch.object(scheduler_client.SchedulerClient, 'update_aggregates') def test_remove_host_from_aggregate(self, update_aggregates): self.api._update_az_cache_for_host = mock.Mock() agg = objects.Aggregate(name='fake', metadata={}) agg.delete_host = mock.Mock() with test.nested( mock.patch.object(objects.Service, 'get_by_compute_host'), mock.patch.object(objects.Aggregate, 'get_by_id', return_value=agg)): self.api.remove_host_from_aggregate(self.context, 1, 'fakehost') update_aggregates.assert_called_once_with(self.context, [agg]) class ComputeAggrTestCase(BaseTestCase): """This is for unit coverage of aggregate-related methods defined in nova.compute.manager. """ def setUp(self): super(ComputeAggrTestCase, self).setUp() self.context = context.get_admin_context() values = {'name': 'test_aggr'} az = {'availability_zone': 'test_zone'} self.aggr = db.aggregate_create(self.context, values, metadata=az) def test_add_aggregate_host(self): def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore): fake_driver_add_to_aggregate.called = True return {"foo": "bar"} self.stubs.Set(self.compute.driver, "add_to_aggregate", fake_driver_add_to_aggregate) self.compute.add_aggregate_host(self.context, host="host", aggregate=jsonutils.to_primitive(self.aggr), slave_info=None) self.assertTrue(fake_driver_add_to_aggregate.called) def test_remove_aggregate_host(self): def fake_driver_remove_from_aggregate(context, aggregate, host, **_ignore): fake_driver_remove_from_aggregate.called = True self.assertEqual("host", host, "host") return {"foo": "bar"} self.stubs.Set(self.compute.driver, "remove_from_aggregate", fake_driver_remove_from_aggregate) 
self.compute.remove_aggregate_host(self.context, aggregate=jsonutils.to_primitive(self.aggr), host="host", slave_info=None) self.assertTrue(fake_driver_remove_from_aggregate.called) def test_add_aggregate_host_passes_slave_info_to_driver(self): def driver_add_to_aggregate(context, aggregate, host, **kwargs): self.assertEqual(self.context, context) self.assertEqual(aggregate['id'], self.aggr['id']) self.assertEqual(host, "the_host") self.assertEqual("SLAVE_INFO", kwargs.get("slave_info")) self.stubs.Set(self.compute.driver, "add_to_aggregate", driver_add_to_aggregate) self.compute.add_aggregate_host(self.context, host="the_host", slave_info="SLAVE_INFO", aggregate=jsonutils.to_primitive(self.aggr)) def test_remove_from_aggregate_passes_slave_info_to_driver(self): def driver_remove_from_aggregate(context, aggregate, host, **kwargs): self.assertEqual(self.context, context) self.assertEqual(aggregate['id'], self.aggr['id']) self.assertEqual(host, "the_host") self.assertEqual("SLAVE_INFO", kwargs.get("slave_info")) self.stubs.Set(self.compute.driver, "remove_from_aggregate", driver_remove_from_aggregate) self.compute.remove_aggregate_host(self.context, aggregate=jsonutils.to_primitive(self.aggr), host="the_host", slave_info="SLAVE_INFO") class ComputePolicyTestCase(BaseTestCase): def setUp(self): super(ComputePolicyTestCase, self).setUp() self.compute_api = compute.API() def test_actions_are_prefixed(self): self.mox.StubOutWithMock(policy, 'enforce') nova.policy.enforce(self.context, 'compute:reboot', {}) self.mox.ReplayAll() compute_api.check_policy(self.context, 'reboot', {}) def test_wrapped_method(self): instance = self._create_fake_instance_obj(params={'host': None, 'cell_name': 'foo'}) # force delete to fail rules = {"compute:delete": [["false:false"]]} self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.delete, self.context, instance) # reset rules to allow deletion rules = {"compute:delete": []} 
self.policy.set_rules(rules) self.compute_api.delete(self.context, instance) def test_create_fail(self): rules = {"compute:create": [["false:false"]]} self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.create, self.context, '1', '1') def test_create_attach_volume_fail(self): rules = { "compute:create": [], "compute:create:attach_network": [["false:false"]], "compute:create:attach_volume": [], } self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.create, self.context, '1', '1', requested_networks='blah', block_device_mapping='blah') def test_create_attach_network_fail(self): rules = { "compute:create": [], "compute:create:attach_network": [], "compute:create:attach_volume": [["false:false"]], } self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.create, self.context, '1', '1', requested_networks='blah', block_device_mapping='blah') def test_get_fail(self): instance = self._create_fake_instance_obj() rules = { "compute:get": [["false:false"]], } self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.get, self.context, instance['uuid']) def test_get_all_fail(self): rules = { "compute:get_all": [["false:false"]], } self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.get_all, self.context) def test_force_host_fail(self): rules = {"compute:create": [], "compute:create:forced_host": [["role:fake"]], "network:validate_networks": []} self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.create, self.context, None, '1', availability_zone='1', forced_host='1') def test_force_host_pass(self): rules = {"compute:create": [], "compute:create:forced_host": [], "network:validate_networks": []} self.policy.set_rules(rules) self.compute_api.create(self.context, objects.Flavor(id=1, disabled=False, memory_mb=256, vcpus=1, root_gb=1, 
ephemeral_gb=1, swap=0), image_href=uuids.host_instance, availability_zone='1', forced_host='1') class DisabledInstanceTypesTestCase(BaseTestCase): """Some instance-types are marked 'disabled' which means that they will not show up in customer-facing listings. We do, however, want those instance-types to be available for emergency migrations and for rebuilding of existing instances. One legitimate use of the 'disabled' field would be when phasing out a particular instance-type. We still want customers to be able to use an instance that of the old type, and we want Ops to be able perform migrations against it, but we *don't* want customers building new instances with the phased-out instance-type. """ def setUp(self): super(DisabledInstanceTypesTestCase, self).setUp() self.compute_api = compute.API() self.inst_type = flavors.get_default_flavor() def test_can_build_instance_from_visible_instance_type(self): self.inst_type['disabled'] = False # Assert that exception.FlavorNotFound is not raised self.compute_api.create(self.context, self.inst_type, image_href=uuids.image_instance) def test_cannot_build_instance_from_disabled_instance_type(self): self.inst_type['disabled'] = True self.assertRaises(exception.FlavorNotFound, self.compute_api.create, self.context, self.inst_type, None) def test_can_resize_to_visible_instance_type(self): instance = self._create_fake_instance_obj() orig_get_flavor_by_flavor_id =\ flavors.get_flavor_by_flavor_id def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None, read_deleted="yes"): instance_type = orig_get_flavor_by_flavor_id(flavor_id, ctxt, read_deleted) instance_type['disabled'] = False return instance_type self.stubs.Set(flavors, 'get_flavor_by_flavor_id', fake_get_flavor_by_flavor_id) self._stub_migrate_server() self.compute_api.resize(self.context, instance, '4') def test_cannot_resize_to_disabled_instance_type(self): instance = self._create_fake_instance_obj() orig_get_flavor_by_flavor_id = \ flavors.get_flavor_by_flavor_id def 
fake_get_flavor_by_flavor_id(flavor_id, ctxt=None, read_deleted="yes"): instance_type = orig_get_flavor_by_flavor_id(flavor_id, ctxt, read_deleted) instance_type['disabled'] = True return instance_type self.stubs.Set(flavors, 'get_flavor_by_flavor_id', fake_get_flavor_by_flavor_id) self.assertRaises(exception.FlavorNotFound, self.compute_api.resize, self.context, instance, '4') class ComputeReschedulingTestCase(BaseTestCase): """Tests re-scheduling logic for new build requests.""" def setUp(self): super(ComputeReschedulingTestCase, self).setUp() self.expected_task_state = task_states.SCHEDULING def fake_update(*args, **kwargs): self.updated_task_state = kwargs.get('task_state') self.stubs.Set(self.compute, '_instance_update', fake_update) def _reschedule(self, request_spec=None, filter_properties=None, exc_info=None): if not filter_properties: filter_properties = {} instance = self._create_fake_instance_obj() scheduler_method = self.compute.compute_task_api.resize_instance method_args = (instance, None, dict(filter_properties=filter_properties), {}, None) return self.compute._reschedule(self.context, request_spec, filter_properties, instance, scheduler_method, method_args, self.expected_task_state, exc_info=exc_info) def test_reschedule_no_filter_properties(self): # no filter_properties will disable re-scheduling. self.assertFalse(self._reschedule()) def test_reschedule_no_retry_info(self): # no retry info will also disable re-scheduling. filter_properties = {} self.assertFalse(self._reschedule(filter_properties=filter_properties)) def test_reschedule_no_request_spec(self): # no request spec will also disable re-scheduling. 
retry = dict(num_attempts=1) filter_properties = dict(retry=retry) self.assertFalse(self._reschedule(filter_properties=filter_properties)) def test_reschedule_success(self): retry = dict(num_attempts=1) filter_properties = dict(retry=retry) request_spec = {'num_instances': 1} try: raise test.TestingException("just need an exception") except test.TestingException: exc_info = sys.exc_info() exc_str = traceback.format_exception_only(exc_info[0], exc_info[1]) self.assertTrue(self._reschedule(filter_properties=filter_properties, request_spec=request_spec, exc_info=exc_info)) self.assertEqual(self.updated_task_state, self.expected_task_state) self.assertEqual(exc_str, filter_properties['retry']['exc']) class InnerTestingException(Exception): pass class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase): """Test logic and exception handling around rescheduling prep resize requests """ def setUp(self): super(ComputeRescheduleResizeOrReraiseTestCase, self).setUp() self.instance = self._create_fake_instance_obj() self.instance_uuid = self.instance['uuid'] self.instance_type = flavors.get_flavor_by_name( "m1.tiny") def test_reschedule_resize_or_reraise_called(self): """Verify the rescheduling logic gets called when there is an error during prep_resize. 
""" inst_obj = self._create_fake_instance_obj() self.mox.StubOutWithMock(self.compute.db, 'migration_create') self.mox.StubOutWithMock(self.compute, '_reschedule_resize_or_reraise') self.compute.db.migration_create(mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(test.TestingException("Original")) self.compute._reschedule_resize_or_reraise(mox.IgnoreArg(), None, inst_obj, mox.IgnoreArg(), self.instance_type, mox.IgnoreArg(), {}, {}) self.mox.ReplayAll() self.compute.prep_resize(self.context, image=None, instance=inst_obj, instance_type=self.instance_type, reservations=[], request_spec={}, filter_properties={}, node=None, clean_shutdown=True) def test_reschedule_fails_with_exception(self): """Original exception should be raised if the _reschedule method raises another exception """ instance = self._create_fake_instance_obj() scheduler_hint = dict(filter_properties={}) method_args = (instance, None, scheduler_hint, self.instance_type, None) self.mox.StubOutWithMock(self.compute, "_reschedule") self.compute._reschedule( self.context, None, None, instance, self.compute.compute_task_api.resize_instance, method_args, task_states.RESIZE_PREP).AndRaise( InnerTestingException("Inner")) self.mox.ReplayAll() try: raise test.TestingException("Original") except Exception: exc_info = sys.exc_info() self.assertRaises(test.TestingException, self.compute._reschedule_resize_or_reraise, self.context, None, instance, exc_info, self.instance_type, self.none_quotas, {}, {}) def test_reschedule_false(self): """Original exception should be raised if the resize is not rescheduled. 
""" instance = self._create_fake_instance_obj() scheduler_hint = dict(filter_properties={}) method_args = (instance, None, scheduler_hint, self.instance_type, None) self.mox.StubOutWithMock(self.compute, "_reschedule") self.compute._reschedule( self.context, None, None, instance, self.compute.compute_task_api.resize_instance, method_args, task_states.RESIZE_PREP).AndReturn(False) self.mox.ReplayAll() try: raise test.TestingException("Original") except Exception: exc_info = sys.exc_info() self.assertRaises(test.TestingException, self.compute._reschedule_resize_or_reraise, self.context, None, instance, exc_info, self.instance_type, self.none_quotas, {}, {}) def test_reschedule_true(self): # If rescheduled, the original resize exception should be logged. instance = self._create_fake_instance_obj() scheduler_hint = dict(filter_properties={}) method_args = (instance, None, scheduler_hint, self.instance_type, None) try: raise test.TestingException("Original") except Exception: exc_info = sys.exc_info() self.mox.StubOutWithMock(self.compute, "_reschedule") self.mox.StubOutWithMock(self.compute, "_log_original_error") self.compute._reschedule(self.context, {}, {}, instance, self.compute.compute_task_api.resize_instance, method_args, task_states.RESIZE_PREP, exc_info).AndReturn(True) self.compute._log_original_error(exc_info, instance.uuid) self.mox.ReplayAll() self.compute._reschedule_resize_or_reraise( self.context, None, instance, exc_info, self.instance_type, self.none_quotas, {}, {}) class ComputeInactiveImageTestCase(BaseTestCase): def setUp(self): super(ComputeInactiveImageTestCase, self).setUp() def fake_show(meh, context, id, **kwargs): return {'id': id, 'name': 'fake_name', 'status': 'deleted', 'min_ram': 0, 'min_disk': 0, 'properties': {'kernel_id': 'fake_kernel_id', 'ramdisk_id': 'fake_ramdisk_id', 'something_else': 'meow'}} fake_image.stub_out_image_service(self) self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) self.compute_api = compute.API() 
def test_create_instance_with_deleted_image(self): # Make sure we can't start an instance with a deleted image. inst_type = flavors.get_flavor_by_name('m1.tiny') self.assertRaises(exception.ImageNotActive, self.compute_api.create, self.context, inst_type, uuids.image_instance) class EvacuateHostTestCase(BaseTestCase): def setUp(self): super(EvacuateHostTestCase, self).setUp() self.inst = self._create_fake_instance_obj( {'host': 'fake_host_2', 'node': 'fakenode2'}) self.inst.task_state = task_states.REBUILDING self.inst.save() def fake_get_compute_info(context, host): cn = objects.ComputeNode(hypervisor_hostname=self.rt.nodename) return cn self.stubs.Set(self.compute, '_get_compute_info', fake_get_compute_info) self.useFixture(fixtures.SpawnIsSynchronousFixture()) def tearDown(self): db.instance_destroy(self.context, self.inst.uuid) super(EvacuateHostTestCase, self).tearDown() def _rebuild(self, on_shared_storage=True, migration=None, send_node=False): network_api = self.compute.network_api ctxt = context.get_admin_context() node = limits = None if send_node: node = NODENAME limits = {} @mock.patch.object(network_api, 'setup_networks_on_host') @mock.patch.object(network_api, 'setup_instance_network_on_host') @mock.patch('nova.context.RequestContext.elevated', return_value=ctxt) def _test_rebuild(mock_context, mock_setup_instance_network_on_host, mock_setup_networks_on_host): orig_image_ref = None image_ref = None injected_files = None bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, self.inst.uuid) self.compute.rebuild_instance( ctxt, self.inst, orig_image_ref, image_ref, injected_files, 'newpass', {}, bdms, recreate=True, on_shared_storage=on_shared_storage, migration=migration, scheduled_node=node, limits=limits) mock_setup_networks_on_host.assert_called_once_with( ctxt, self.inst, self.inst.host) mock_setup_instance_network_on_host.assert_called_once_with( ctxt, self.inst, self.inst.host) _test_rebuild() def 
test_rebuild_on_host_updated_target(self): """Confirm evacuate scenario updates host and node.""" self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) def fake_get_compute_info(context, host): self.assertTrue(context.is_admin) self.assertEqual('fake-mini', host) cn = objects.ComputeNode(hypervisor_hostname=self.rt.nodename) return cn self.stubs.Set(self.compute, '_get_compute_info', fake_get_compute_info) self.mox.ReplayAll() self._rebuild() # Should be on destination host instance = db.instance_get(self.context, self.inst.id) self.assertEqual(instance['host'], self.compute.host) self.assertEqual(NODENAME, instance['node']) def test_rebuild_on_host_updated_target_node_not_found(self): """Confirm evacuate scenario where compute_node isn't found.""" self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) def fake_get_compute_info(context, host): raise exception.ComputeHostNotFound(host=host) self.stubs.Set(self.compute, '_get_compute_info', fake_get_compute_info) self.mox.ReplayAll() self._rebuild() # Should be on destination host instance = db.instance_get(self.context, self.inst.id) self.assertEqual(instance['host'], self.compute.host) self.assertIsNone(instance['node']) def test_rebuild_on_host_node_passed(self): patch_get_info = mock.patch.object(self.compute, '_get_compute_info') patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) with patch_get_info as get_compute_info, patch_on_disk: self._rebuild(send_node=True) self.assertEqual(0, get_compute_info.call_count) # Should be on destination host and node set to what was passed in instance = db.instance_get(self.context, self.inst.id) self.assertEqual(instance['host'], self.compute.host) self.assertEqual(instance['node'], NODENAME) def test_rebuild_with_instance_in_stopped_state(self): """Confirm evacuate scenario updates vm_state to stopped if instance is in stopped state """ # Initialize the VM to stopped state 
db.instance_update(self.context, self.inst.uuid, {"vm_state": vm_states.STOPPED}) self.inst.vm_state = vm_states.STOPPED self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.mox.ReplayAll() self._rebuild() # Check the vm state is reset to stopped instance = db.instance_get(self.context, self.inst.id) self.assertEqual(instance['vm_state'], vm_states.STOPPED) def test_rebuild_with_wrong_shared_storage(self): """Confirm evacuate scenario does not update host.""" self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.mox.ReplayAll() self.assertRaises(exception.InvalidSharedStorage, lambda: self._rebuild(on_shared_storage=False)) # Should remain on original host instance = db.instance_get(self.context, self.inst.id) self.assertEqual(instance['host'], 'fake_host_2') def test_rebuild_on_host_with_volumes(self): """Confirm evacuate scenario reconnects volumes.""" values = {'instance_uuid': self.inst.uuid, 'source_type': 'volume', 'device_name': '/dev/vdc', 'delete_on_termination': False, 'volume_id': uuids.volume_id, 'connection_info': '{}'} db.block_device_mapping_create(self.context, values) def fake_volume_get(self, context, volume): return {'id': 'fake_volume_id'} self.stubs.Set(cinder.API, "get", fake_volume_get) # Stub out and record whether it gets detached result = {"detached": False} def fake_detach(self, context, volume, instance_uuid, attachment_id): result["detached"] = volume["id"] == 'fake_volume_id' self.stubs.Set(cinder.API, "detach", fake_detach) self.mox.StubOutWithMock(self.compute, '_driver_detach_volume') self.compute._driver_detach_volume(mox.IsA(self.context), mox.IsA(instance_obj.Instance), mox.IsA(objects.BlockDeviceMapping)) def fake_terminate_connection(self, context, volume, connector): return {} self.stubs.Set(cinder.API, "terminate_connection", fake_terminate_connection) # make sure volumes attach, detach are called self.mox.StubOutWithMock(self.compute.volume_api, 'detach') 
self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg(), mox.IgnoreArg(), None) self.mox.StubOutWithMock(self.compute, '_prep_block_device') self.compute._prep_block_device(mox.IsA(self.context), mox.IsA(objects.Instance), mox.IgnoreArg()) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.mox.ReplayAll() self._rebuild() # cleanup bdms = db.block_device_mapping_get_all_by_instance(self.context, self.inst.uuid) if not bdms: self.fail('BDM entry for the attached volume is missing') for bdm in bdms: db.block_device_mapping_destroy(self.context, bdm['id']) def test_rebuild_on_host_with_shared_storage(self): """Confirm evacuate scenario on shared storage.""" self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.compute.driver.spawn(mox.IsA(self.context), mox.IsA(objects.Instance), mox.IsA(objects.ImageMeta), mox.IgnoreArg(), 'newpass', network_info=mox.IgnoreArg(), block_device_info=mox.IgnoreArg()) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.mox.ReplayAll() self._rebuild() def test_rebuild_on_host_without_shared_storage(self): """Confirm evacuate scenario without shared storage (rebuild from image) """ self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.compute.driver.spawn(mox.IsA(self.context), mox.IsA(objects.Instance), mox.IsA(objects.ImageMeta), mox.IgnoreArg(), mox.IsA('newpass'), network_info=mox.IgnoreArg(), block_device_info=mox.IgnoreArg()) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: False) self.mox.ReplayAll() self._rebuild(on_shared_storage=False) def test_rebuild_on_host_instance_exists(self): """Rebuild if instance exists raises an exception.""" db.instance_update(self.context, self.inst.uuid, {"task_state": task_states.SCHEDULING}) self.compute.build_and_run_instance(self.context, self.inst, {}, {}, {}, block_device_mapping=[]) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.assertRaises(exception.InstanceExists, 
lambda: self._rebuild(on_shared_storage=True)) def test_driver_does_not_support_recreate(self): with mock.patch.dict(self.compute.driver.capabilities, supports_recreate=False): self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.assertRaises(exception.InstanceRecreateNotSupported, lambda: self._rebuild(on_shared_storage=True)) def test_on_shared_storage_not_provided_host_without_shared_storage(self): self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.compute.driver.spawn(mox.IsA(self.context), mox.IsA(objects.Instance), mox.IsA(objects.ImageMeta), mox.IgnoreArg(), mox.IsA('newpass'), network_info=mox.IgnoreArg(), block_device_info=mox.IgnoreArg()) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: False) self.mox.ReplayAll() self._rebuild(on_shared_storage=None) def test_on_shared_storage_not_provided_host_with_shared_storage(self): self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.compute.driver.spawn(mox.IsA(self.context), mox.IsA(objects.Instance), mox.IsA(objects.ImageMeta), mox.IgnoreArg(), 'newpass', network_info=mox.IgnoreArg(), block_device_info=mox.IgnoreArg()) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.mox.ReplayAll() self._rebuild(on_shared_storage=None) def test_rebuild_migration_passed_in(self): migration = mock.Mock(spec=objects.Migration) patch_spawn = mock.patch.object(self.compute.driver, 'spawn') patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) with patch_spawn, patch_on_disk: self._rebuild(migration=migration) self.assertEqual('done', migration.status) migration.save.assert_called_once_with() def test_rebuild_migration_node_passed_in(self): patch_spawn = mock.patch.object(self.compute.driver, 'spawn') patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) with patch_spawn, patch_on_disk: self._rebuild(send_node=True) migrations = 
objects.MigrationList.get_in_progress_by_host_and_node( self.context, self.compute.host, NODENAME) self.assertEqual(1, len(migrations)) migration = migrations[0] self.assertEqual("evacuation", migration.migration_type) self.assertEqual("pre-migrating", migration.status) def test_rebuild_migration_claim_fails(self): migration = mock.Mock(spec=objects.Migration) patch_spawn = mock.patch.object(self.compute.driver, 'spawn') patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) patch_claim = mock.patch.object( self.compute._resource_tracker_dict[NODENAME], 'rebuild_claim', side_effect=exception.ComputeResourcesUnavailable(reason="boom")) with patch_spawn, patch_on_disk, patch_claim: self.assertRaises(exception.BuildAbortException, self._rebuild, migration=migration, send_node=True) self.assertEqual("failed", migration.status) migration.save.assert_called_once_with() def test_rebuild_fails_migration_failed(self): migration = mock.Mock(spec=objects.Migration) patch_spawn = mock.patch.object(self.compute.driver, 'spawn') patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) patch_claim = mock.patch.object( self.compute._resource_tracker_dict[NODENAME], 'rebuild_claim') patch_rebuild = mock.patch.object( self.compute, '_do_rebuild_instance_with_claim', side_effect=test.TestingException()) with patch_spawn, patch_on_disk, patch_claim, patch_rebuild: self.assertRaises(test.TestingException, self._rebuild, migration=migration, send_node=True) self.assertEqual("failed", migration.status) migration.save.assert_called_once_with() def test_rebuild_numa_migration_context_honoured(self): numa_topology = ( test_instance_numa_topology.get_fake_obj_numa_topology( self.context)) # NOTE(ndipanov): Make sure that we pass the topology from the context def fake_spawn(context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): 
self.assertIsNone(instance.numa_topology) self.inst.numa_topology = numa_topology patch_spawn = mock.patch.object(self.compute.driver, 'spawn', side_effect=fake_spawn) patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) with patch_spawn, patch_on_disk: self._rebuild(send_node=True) self.assertIsNone(self.inst.numa_topology) self.assertIsNone(self.inst.migration_context) class ComputeInjectedFilesTestCase(BaseTestCase): # Test that running instances with injected_files decodes files correctly def setUp(self): super(ComputeInjectedFilesTestCase, self).setUp() self.instance = self._create_fake_instance_obj() self.stubs.Set(self.compute.driver, 'spawn', self._spawn) self.useFixture(fixtures.SpawnIsSynchronousFixture()) def _spawn(self, context, instance, image_meta, injected_files, admin_password, nw_info, block_device_info, db_api=None): self.assertEqual(self.expected, injected_files) def _test(self, injected_files, decoded_files): self.expected = decoded_files self.compute.build_and_run_instance(self.context, self.instance, {}, {}, {}, block_device_mapping=[], injected_files=injected_files) def test_injected_none(self): # test an input of None for injected_files self._test(None, []) def test_injected_empty(self): # test an input of [] for injected_files self._test([], []) def test_injected_success(self): # test with valid b64 encoded content. 
injected_files = [ ('/a/b/c', base64.b64encode(b'foobarbaz')), ('/d/e/f', base64.b64encode(b'seespotrun')), ] decoded_files = [ ('/a/b/c', 'foobarbaz'), ('/d/e/f', 'seespotrun'), ] self._test(injected_files, decoded_files) def test_injected_invalid(self): # test with invalid b64 encoded content injected_files = [ ('/a/b/c', base64.b64encode(b'foobarbaz')), ('/d/e/f', 'seespotrun'), ] self.assertRaises(exception.Base64Exception, self.compute.build_and_run_instance, self.context, self.instance, {}, {}, {}, block_device_mapping=[], injected_files=injected_files) class CheckConfigDriveTestCase(test.NoDBTestCase): # NOTE(sirp): `TestCase` is far too heavyweight for this test, this should # probably derive from a `test.FastTestCase` that omits DB and env # handling def setUp(self): super(CheckConfigDriveTestCase, self).setUp() self.compute_api = compute.API() def _assertCheck(self, expected, config_drive): self.assertEqual(expected, self.compute_api._check_config_drive(config_drive)) def _assertInvalid(self, config_drive): self.assertRaises(exception.ConfigDriveInvalidValue, self.compute_api._check_config_drive, config_drive) def test_config_drive_false_values(self): self._assertCheck('', None) self._assertCheck('', '') self._assertCheck('', 'False') self._assertCheck('', 'f') self._assertCheck('', '0') def test_config_drive_true_values(self): self._assertCheck(True, 'True') self._assertCheck(True, 't') self._assertCheck(True, '1') def test_config_drive_bogus_values_raise(self): self._assertInvalid('asd') self._assertInvalid(uuidutils.generate_uuid()) class CheckRequestedImageTestCase(test.TestCase): def setUp(self): super(CheckRequestedImageTestCase, self).setUp() self.compute_api = compute.API() self.context = context.RequestContext( 'fake_user_id', 'fake_project_id') self.instance_type = flavors.get_default_flavor() self.instance_type['memory_mb'] = 64 self.instance_type['root_gb'] = 1 def test_no_image_specified(self): 
self.compute_api._check_requested_image(self.context, None, None, self.instance_type, None) def test_image_status_must_be_active(self): image = dict(id='123', status='foo') self.assertRaises(exception.ImageNotActive, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, None) image['status'] = 'active' self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) def test_image_min_ram_check(self): image = dict(id='123', status='active', min_ram='65') self.assertRaises(exception.FlavorMemoryTooSmall, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, None) image['min_ram'] = '64' self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) def test_image_min_disk_check(self): image = dict(id='123', status='active', min_disk='2') self.assertRaises(exception.FlavorDiskSmallerThanMinDisk, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, None) image['min_disk'] = '1' self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) def test_image_too_large(self): image = dict(id='123', status='active', size='1073741825') self.assertRaises(exception.FlavorDiskSmallerThanImage, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, None) image['size'] = '1073741824' self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) def test_root_gb_zero_disables_size_check(self): self.instance_type['root_gb'] = 0 image = dict(id='123', status='active', size='1073741825') self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) def test_root_gb_zero_disables_min_disk(self): self.instance_type['root_gb'] = 0 image = dict(id='123', status='active', min_disk='2') self.compute_api._check_requested_image(self.context, 
image['id'], image, self.instance_type, None) def test_config_drive_option(self): image = {'id': 1, 'status': 'active'} image['properties'] = {'img_config_drive': 'optional'} self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) image['properties'] = {'img_config_drive': 'mandatory'} self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) image['properties'] = {'img_config_drive': 'bar'} self.assertRaises(exception.InvalidImageConfigDrive, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, None) def test_volume_blockdevicemapping(self): # We should allow a root volume which is larger than the flavor root # disk. # We should allow a root volume created from an image whose min_disk is # larger than the flavor root disk. image_uuid = str(uuid.uuid4()) image = dict(id=image_uuid, status='active', size=self.instance_type.root_gb * units.Gi, min_disk=self.instance_type.root_gb + 1) volume_uuid = str(uuid.uuid4()) root_bdm = block_device_obj.BlockDeviceMapping( source_type='volume', destination_type='volume', volume_id=volume_uuid, volume_size=self.instance_type.root_gb + 1) self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, root_bdm) def test_volume_blockdevicemapping_min_disk(self): # A bdm object volume smaller than the image's min_disk should not be # allowed image_uuid = str(uuid.uuid4()) image = dict(id=image_uuid, status='active', size=self.instance_type.root_gb * units.Gi, min_disk=self.instance_type.root_gb + 1) volume_uuid = str(uuid.uuid4()) root_bdm = block_device_obj.BlockDeviceMapping( source_type='image', destination_type='volume', image_id=image_uuid, volume_id=volume_uuid, volume_size=self.instance_type.root_gb) self.assertRaises(exception.VolumeSmallerThanMinDisk, self.compute_api._check_requested_image, self.context, image_uuid, image, self.instance_type, root_bdm) def 
test_volume_blockdevicemapping_min_disk_no_size(self): # We should allow a root volume whose size is not given image_uuid = str(uuid.uuid4()) image = dict(id=image_uuid, status='active', size=self.instance_type.root_gb * units.Gi, min_disk=self.instance_type.root_gb) volume_uuid = str(uuid.uuid4()) root_bdm = block_device_obj.BlockDeviceMapping( source_type='volume', destination_type='volume', volume_id=volume_uuid, volume_size=None) self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, root_bdm) def test_image_blockdevicemapping(self): # Test that we can succeed when passing bdms, and the root bdm isn't a # volume image_uuid = str(uuid.uuid4()) image = dict(id=image_uuid, status='active', size=self.instance_type.root_gb * units.Gi, min_disk=0) root_bdm = block_device_obj.BlockDeviceMapping( source_type='image', destination_type='local', image_id=image_uuid) self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, root_bdm) def test_image_blockdevicemapping_too_big(self): # We should do a size check against flavor if we were passed bdms but # the root bdm isn't a volume image_uuid = str(uuid.uuid4()) image = dict(id=image_uuid, status='active', size=(self.instance_type.root_gb + 1) * units.Gi, min_disk=0) root_bdm = block_device_obj.BlockDeviceMapping( source_type='image', destination_type='local', image_id=image_uuid) self.assertRaises(exception.FlavorDiskSmallerThanImage, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, root_bdm) def test_image_blockdevicemapping_min_disk(self): # We should do a min_disk check against flavor if we were passed bdms # but the root bdm isn't a volume image_uuid = str(uuid.uuid4()) image = dict(id=image_uuid, status='active', size=0, min_disk=self.instance_type.root_gb + 1) root_bdm = block_device_obj.BlockDeviceMapping( source_type='image', destination_type='local', image_id=image_uuid) 
self.assertRaises(exception.FlavorDiskSmallerThanMinDisk, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, root_bdm) class ComputeHooksTestCase(test.BaseHookTestCase): def test_delete_instance_has_hook(self): delete_func = compute_manager.ComputeManager._delete_instance self.assert_has_hook('delete_instance', delete_func) def test_create_instance_has_hook(self): create_func = compute_api.API.create self.assert_has_hook('create_instance', create_func) def test_build_instance_has_hook(self): build_instance_func = (compute_manager.ComputeManager. _do_build_and_run_instance) self.assert_has_hook('build_instance', build_instance_func) nova-13.0.0/nova/tests/unit/compute/test_virtapi.py0000664000567000056710000001443012701407773023540 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from mox3 import mox from nova.compute import manager as compute_manager from nova import context from nova import db from nova import exception from nova import objects from nova import test from nova.virt import fake from nova.virt import virtapi class VirtAPIBaseTest(test.NoDBTestCase, test.APICoverage): cover_api = virtapi.VirtAPI def setUp(self): super(VirtAPIBaseTest, self).setUp() self.context = context.RequestContext('fake-user', 'fake-project') self.set_up_virtapi() def set_up_virtapi(self): self.virtapi = virtapi.VirtAPI() def assertExpected(self, method, *args, **kwargs): self.assertRaises(NotImplementedError, getattr(self.virtapi, method), self.context, *args, **kwargs) def test_wait_for_instance_event(self): self.assertExpected('wait_for_instance_event', 'instance', ['event']) class FakeVirtAPITest(VirtAPIBaseTest): cover_api = fake.FakeVirtAPI def set_up_virtapi(self): self.virtapi = fake.FakeVirtAPI() def assertExpected(self, method, *args, **kwargs): if method == 'wait_for_instance_event': run = False with self.virtapi.wait_for_instance_event(*args, **kwargs): run = True self.assertTrue(run) return self.mox.StubOutWithMock(db, method) if method in ('aggregate_metadata_add', 'aggregate_metadata_delete', 'security_group_rule_get_by_security_group'): # NOTE(danms): FakeVirtAPI will convert the first argument to # argument['id'], so expect that in the actual db call e_args = tuple([args[0]['id']] + list(args[1:])) elif method == 'security_group_get_by_instance': e_args = tuple([args[0]['uuid']] + list(args[1:])) else: e_args = args getattr(db, method)(self.context, *e_args, **kwargs).AndReturn( 'it worked') self.mox.ReplayAll() result = getattr(self.virtapi, method)(self.context, *args, **kwargs) self.assertEqual(result, 'it worked') class FakeCompute(object): def __init__(self): self.conductor_api = mox.MockAnything() self.db = mox.MockAnything() self._events = [] self.instance_events = mock.MagicMock() 
self.instance_events.prepare_for_instance_event.side_effect = \ self._prepare_for_instance_event def _event_waiter(self): event = mock.MagicMock() event.status = 'completed' return event def _prepare_for_instance_event(self, instance, event_name): m = mock.MagicMock() m.instance = instance m.event_name = event_name m.wait.side_effect = self._event_waiter self._events.append(m) return m class ComputeVirtAPITest(VirtAPIBaseTest): cover_api = compute_manager.ComputeVirtAPI def set_up_virtapi(self): self.compute = FakeCompute() self.virtapi = compute_manager.ComputeVirtAPI(self.compute) def assertExpected(self, method, *args, **kwargs): self.mox.StubOutWithMock(self.compute.conductor_api, method) getattr(self.compute.conductor_api, method)( self.context, *args, **kwargs).AndReturn('it worked') self.mox.ReplayAll() result = getattr(self.virtapi, method)(self.context, *args, **kwargs) self.assertEqual(result, 'it worked') def test_wait_for_instance_event(self): and_i_ran = '' event_1_tag = objects.InstanceExternalEvent.make_key( 'event1') event_2_tag = objects.InstanceExternalEvent.make_key( 'event2', 'tag') events = { 'event1': event_1_tag, ('event2', 'tag'): event_2_tag, } with self.virtapi.wait_for_instance_event('instance', events.keys()): and_i_ran = 'I ran so far a-waa-y' self.assertEqual('I ran so far a-waa-y', and_i_ran) self.assertEqual(2, len(self.compute._events)) for event in self.compute._events: self.assertEqual('instance', event.instance) self.assertIn(event.event_name, events.values()) event.wait.assert_called_once_with() def test_wait_for_instance_event_failed(self): def _failer(): event = mock.MagicMock() event.status = 'failed' return event @mock.patch.object(self.virtapi._compute, '_event_waiter', _failer) def do_test(): with self.virtapi.wait_for_instance_event('instance', ['foo']): pass self.assertRaises(exception.NovaException, do_test) def test_wait_for_instance_event_failed_callback(self): def _failer(): event = mock.MagicMock() event.status = 
'failed' return event @mock.patch.object(self.virtapi._compute, '_event_waiter', _failer) def do_test(): callback = mock.MagicMock() with self.virtapi.wait_for_instance_event('instance', ['foo'], error_callback=callback): pass callback.assert_called_with('foo', 'instance') do_test() def test_wait_for_instance_event_timeout(self): class TestException(Exception): pass def _failer(): raise TestException() @mock.patch.object(self.virtapi._compute, '_event_waiter', _failer) @mock.patch('eventlet.timeout.Timeout') def do_test(timeout): with self.virtapi.wait_for_instance_event('instance', ['foo']): pass self.assertRaises(TestException, do_test) nova-13.0.0/nova/tests/unit/compute/test_host_api.py0000664000567000056710000005105412701407773023673 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from oslo_serialization import jsonutils from nova.cells import utils as cells_utils from nova import compute from nova import context from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_notifier from nova.tests.unit.objects import test_objects from nova.tests.unit.objects import test_service class ComputeHostAPITestCase(test.TestCase): def setUp(self): super(ComputeHostAPITestCase, self).setUp() self.host_api = compute.HostAPI() self.ctxt = context.get_admin_context() fake_notifier.stub_notifier(self.stubs) self.addCleanup(fake_notifier.reset) def _compare_obj(self, obj, db_obj): test_objects.compare_obj(self, obj, db_obj, allow_missing=test_service.OPTIONAL) def _compare_objs(self, obj_list, db_obj_list): for index, obj in enumerate(obj_list): self._compare_obj(obj, db_obj_list[index]) def _mock_rpc_call(self, method, **kwargs): self.mox.StubOutWithMock(self.host_api.rpcapi, method) getattr(self.host_api.rpcapi, method)( self.ctxt, **kwargs).AndReturn('fake-result') def _mock_assert_host_exists(self): """Sets it so that the host API always thinks that 'fake_host' exists. 
""" def fake_assert_host_exists(context, host_name, must_be_up=False): return 'fake_host' self.stubs.Set(self.host_api, '_assert_host_exists', fake_assert_host_exists) def test_set_host_enabled(self): self._mock_assert_host_exists() self._mock_rpc_call('set_host_enabled', host='fake_host', enabled='fake_enabled') self.mox.ReplayAll() fake_notifier.NOTIFICATIONS = [] result = self.host_api.set_host_enabled(self.ctxt, 'fake_host', 'fake_enabled') self.assertEqual('fake-result', result) self.assertEqual(2, len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual('HostAPI.set_enabled.start', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_enabled', msg.payload['enabled']) self.assertEqual('fake_host', msg.payload['host_name']) msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual('HostAPI.set_enabled.end', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_enabled', msg.payload['enabled']) self.assertEqual('fake_host', msg.payload['host_name']) def test_host_name_from_assert_hosts_exists(self): self._mock_assert_host_exists() self._mock_rpc_call('set_host_enabled', host='fake_host', enabled='fake_enabled') self.mox.ReplayAll() result = self.host_api.set_host_enabled(self.ctxt, 'fake_hosT', 'fake_enabled') self.assertEqual('fake-result', result) def test_get_host_uptime(self): self._mock_assert_host_exists() self._mock_rpc_call('get_host_uptime', host='fake_host') self.mox.ReplayAll() result = self.host_api.get_host_uptime(self.ctxt, 'fake_host') self.assertEqual('fake-result', result) def test_get_host_uptime_service_down(self): def fake_service_get_by_compute_host(context, host_name): return dict(test_service.fake_service, id=1) self.stubs.Set(self.host_api.db, 'service_get_by_compute_host', fake_service_get_by_compute_host) def fake_service_is_up(service): return False 
self.stubs.Set(self.host_api.servicegroup_api, 'service_is_up', fake_service_is_up) self.assertRaises(exception.ComputeServiceUnavailable, self.host_api.get_host_uptime, self.ctxt, 'fake_host') def test_host_power_action(self): self._mock_assert_host_exists() self._mock_rpc_call('host_power_action', host='fake_host', action='fake_action') self.mox.ReplayAll() fake_notifier.NOTIFICATIONS = [] result = self.host_api.host_power_action(self.ctxt, 'fake_host', 'fake_action') self.assertEqual('fake-result', result) self.assertEqual(2, len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual('HostAPI.power_action.start', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_action', msg.payload['action']) self.assertEqual('fake_host', msg.payload['host_name']) msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual('HostAPI.power_action.end', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_action', msg.payload['action']) self.assertEqual('fake_host', msg.payload['host_name']) def test_set_host_maintenance(self): self._mock_assert_host_exists() self._mock_rpc_call('host_maintenance_mode', host='fake_host', host_param='fake_host', mode='fake_mode') self.mox.ReplayAll() fake_notifier.NOTIFICATIONS = [] result = self.host_api.set_host_maintenance(self.ctxt, 'fake_host', 'fake_mode') self.assertEqual('fake-result', result) self.assertEqual(2, len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual('HostAPI.set_maintenance.start', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_host', msg.payload['host_name']) self.assertEqual('fake_mode', msg.payload['mode']) msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual('HostAPI.set_maintenance.end', msg.event_type) 
self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_host', msg.payload['host_name']) self.assertEqual('fake_mode', msg.payload['mode']) def test_service_get_all_no_zones(self): services = [dict(test_service.fake_service, id=1, topic='compute', host='host1'), dict(test_service.fake_service, topic='compute', host='host2')] self.mox.StubOutWithMock(self.host_api.db, 'service_get_all') # Test no filters self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt) self.mox.VerifyAll() self._compare_objs(result, services) # Test no filters #2 self.mox.ResetAll() self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt, filters={}) self.mox.VerifyAll() self._compare_objs(result, services) # Test w/ filter self.mox.ResetAll() self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt, filters=dict(host='host2')) self.mox.VerifyAll() self._compare_objs(result, [services[1]]) def test_service_get_all(self): services = [dict(test_service.fake_service, topic='compute', host='host1'), dict(test_service.fake_service, topic='compute', host='host2')] exp_services = [] for service in services: exp_service = {} exp_service.update(availability_zone='nova', **service) exp_services.append(exp_service) self.mox.StubOutWithMock(self.host_api.db, 'service_get_all') # Test no filters self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt, set_zones=True) self.mox.VerifyAll() self._compare_objs(result, exp_services) # Test no filters #2 self.mox.ResetAll() self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() result = 
self.host_api.service_get_all(self.ctxt, filters={}, set_zones=True) self.mox.VerifyAll() self._compare_objs(result, exp_services) # Test w/ filter self.mox.ResetAll() self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt, filters=dict(host='host2'), set_zones=True) self.mox.VerifyAll() self._compare_objs(result, [exp_services[1]]) # Test w/ zone filter but no set_zones arg. self.mox.ResetAll() self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() filters = {'availability_zone': 'nova'} result = self.host_api.service_get_all(self.ctxt, filters=filters) self.mox.VerifyAll() self._compare_objs(result, exp_services) def test_service_get_by_compute_host(self): self.mox.StubOutWithMock(self.host_api.db, 'service_get_by_compute_host') self.host_api.db.service_get_by_compute_host(self.ctxt, 'fake-host').AndReturn(test_service.fake_service) self.mox.ReplayAll() result = self.host_api.service_get_by_compute_host(self.ctxt, 'fake-host') self.assertEqual(test_service.fake_service['id'], result.id) def test_service_update(self): host_name = 'fake-host' binary = 'nova-compute' params_to_update = dict(disabled=True) service_id = 42 expected_result = dict(test_service.fake_service, id=service_id) self.mox.StubOutWithMock(self.host_api.db, 'service_get_by_host_and_binary') self.host_api.db.service_get_by_host_and_binary(self.ctxt, host_name, binary).AndReturn(expected_result) self.mox.StubOutWithMock(self.host_api.db, 'service_update') self.host_api.db.service_update( self.ctxt, service_id, params_to_update).AndReturn(expected_result) self.mox.ReplayAll() result = self.host_api.service_update( self.ctxt, host_name, binary, params_to_update) self._compare_obj(result, expected_result) @mock.patch.object(objects.InstanceList, 'get_by_host', return_value = ['fake-responses']) def test_instance_get_all_by_host(self, mock_get): result = 
self.host_api.instance_get_all_by_host(self.ctxt, 'fake-host') self.assertEqual(['fake-responses'], result) def test_task_log_get_all(self): self.mox.StubOutWithMock(self.host_api.db, 'task_log_get_all') self.host_api.db.task_log_get_all(self.ctxt, 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state').AndReturn('fake-response') self.mox.ReplayAll() result = self.host_api.task_log_get_all(self.ctxt, 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state') self.assertEqual('fake-response', result) def test_service_delete(self): with test.nested( mock.patch.object(objects.Service, 'get_by_id', return_value=objects.Service()), mock.patch.object(objects.Service, 'destroy') ) as ( get_by_id, destroy ): self.host_api.service_delete(self.ctxt, 1) get_by_id.assert_called_once_with(self.ctxt, 1) destroy.assert_called_once_with() class ComputeHostAPICellsTestCase(ComputeHostAPITestCase): def setUp(self): self.flags(enable=True, group='cells') self.flags(cell_type='api', group='cells') super(ComputeHostAPICellsTestCase, self).setUp() def _mock_rpc_call(self, method, **kwargs): if 'host_param' in kwargs: kwargs.pop('host_param') else: kwargs.pop('host') rpc_message = { 'method': method, 'namespace': None, 'args': kwargs, 'version': self.host_api.rpcapi.client.target.version, } cells_rpcapi = self.host_api.rpcapi.client.cells_rpcapi self.mox.StubOutWithMock(cells_rpcapi, 'proxy_rpc_to_manager') cells_rpcapi.proxy_rpc_to_manager(self.ctxt, rpc_message, 'compute.fake_host', call=True).AndReturn('fake-result') def test_service_get_all_no_zones(self): services = [ cells_utils.ServiceProxy( objects.Service(id=1, topic='compute', host='host1'), 'cell1'), cells_utils.ServiceProxy( objects.Service(id=2, topic='compute', host='host2'), 'cell1')] fake_filters = {'host': 'host1'} self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'service_get_all') self.host_api.cells_rpcapi.service_get_all(self.ctxt, filters=fake_filters).AndReturn(services) 
self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt, filters=fake_filters) self.assertEqual(services, result) def _test_service_get_all(self, fake_filters, **kwargs): service_attrs = dict(test_service.fake_service) del service_attrs['version'] services = [ cells_utils.ServiceProxy( objects.Service(**dict(service_attrs, id=1, topic='compute', host='host1')), 'cell1'), cells_utils.ServiceProxy( objects.Service(**dict(service_attrs, id=2, topic='compute', host='host2')), 'cell1')] exp_services = [] for service in services: exp_service = copy.copy(service) exp_service.update({'availability_zone': 'nova'}) exp_services.append(exp_service) self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'service_get_all') self.host_api.cells_rpcapi.service_get_all(self.ctxt, filters=fake_filters).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt, filters=fake_filters, **kwargs) self.mox.VerifyAll() self.assertEqual(jsonutils.to_primitive(exp_services), jsonutils.to_primitive(result)) def test_service_get_all(self): fake_filters = {'availability_zone': 'nova'} self._test_service_get_all(fake_filters) def test_service_get_all_set_zones(self): fake_filters = {'key1': 'val1'} self._test_service_get_all(fake_filters, set_zones=True) def test_service_get_by_compute_host(self): self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'service_get_by_compute_host') obj = objects.Service(id=1, host='fake') fake_service = cells_utils.ServiceProxy(obj, 'cell1') self.host_api.cells_rpcapi.service_get_by_compute_host(self.ctxt, 'fake-host').AndReturn(fake_service) self.mox.ReplayAll() result = self.host_api.service_get_by_compute_host(self.ctxt, 'fake-host') self.assertEqual(fake_service, result) def test_service_update(self): host_name = 'fake-host' binary = 'nova-compute' params_to_update = dict(disabled=True) obj = objects.Service(id=42, host='fake') fake_service = cells_utils.ServiceProxy(obj, 'cell1') 
self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'service_update') self.host_api.cells_rpcapi.service_update( self.ctxt, host_name, binary, params_to_update).AndReturn(fake_service) self.mox.ReplayAll() result = self.host_api.service_update( self.ctxt, host_name, binary, params_to_update) self.assertEqual(fake_service, result) def test_service_delete(self): cell_service_id = cells_utils.cell_with_item('cell1', 1) with mock.patch.object(self.host_api.cells_rpcapi, 'service_delete') as service_delete: self.host_api.service_delete(self.ctxt, cell_service_id) service_delete.assert_called_once_with( self.ctxt, cell_service_id) @mock.patch.object(objects.InstanceList, 'get_by_host') def test_instance_get_all_by_host(self, mock_get): instances = [dict(id=1, cell_name='cell1', host='host1'), dict(id=2, cell_name='cell2', host='host1'), dict(id=3, cell_name='cell1', host='host2')] mock_get.return_value = instances expected_result = [instances[0], instances[2]] cell_and_host = cells_utils.cell_with_item('cell1', 'fake-host') result = self.host_api.instance_get_all_by_host(self.ctxt, cell_and_host) self.assertEqual(expected_result, result) def test_task_log_get_all(self): self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'task_log_get_all') self.host_api.cells_rpcapi.task_log_get_all(self.ctxt, 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state').AndReturn('fake-response') self.mox.ReplayAll() result = self.host_api.task_log_get_all(self.ctxt, 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state') self.assertEqual('fake-response', result) def test_get_host_uptime_service_down(self): # The corresponding Compute test case depends on the # _assert_host_exists which is a no-op in the cells api pass def test_get_host_uptime(self): self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'get_host_uptime') self.host_api.cells_rpcapi.get_host_uptime(self.ctxt, 'fake-host'). 
\ AndReturn('fake-response') self.mox.ReplayAll() result = self.host_api.get_host_uptime(self.ctxt, 'fake-host') self.assertEqual('fake-response', result) nova-13.0.0/nova/tests/unit/compute/monitors/0000775000567000056710000000000012701410205022301 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/compute/monitors/cpu/0000775000567000056710000000000012701410205023070 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/compute/monitors/cpu/__init__.py0000664000567000056710000000000012701407773025207 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py0000664000567000056710000000660312701407773027065 0ustar jenkinsjenkins00000000000000# Copyright 2013 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for Compute Driver CPU resource monitor.""" import mock from nova.compute.monitors.cpu import virt_driver from nova import objects from nova import test class FakeDriver(object): def get_host_cpu_stats(self): return {'kernel': 5664160000000, 'idle': 1592705190000000, 'frequency': 800, 'user': 26728850000000, 'iowait': 6121490000000} class FakeResourceTracker(object): driver = FakeDriver() class VirtDriverCPUMonitorTestCase(test.NoDBTestCase): def test_get_metric_names(self): monitor = virt_driver.Monitor(FakeResourceTracker()) names = monitor.get_metric_names() self.assertEqual(10, len(names)) self.assertIn("cpu.frequency", names) self.assertIn("cpu.user.time", names) self.assertIn("cpu.kernel.time", names) self.assertIn("cpu.idle.time", names) self.assertIn("cpu.iowait.time", names) self.assertIn("cpu.user.percent", names) self.assertIn("cpu.kernel.percent", names) self.assertIn("cpu.idle.percent", names) self.assertIn("cpu.iowait.percent", names) self.assertIn("cpu.percent", names) def test_get_metrics(self): metrics = objects.MonitorMetricList() monitor = virt_driver.Monitor(FakeResourceTracker()) monitor.add_metrics_to_list(metrics) names = monitor.get_metric_names() for metric in metrics.objects: self.assertIn(metric.name, names) # Some conversion to a dict to ease testing... 
metrics = {m.name: m.value for m in metrics.objects} self.assertEqual(metrics["cpu.frequency"], 800) self.assertEqual(metrics["cpu.user.time"], 26728850000000) self.assertEqual(metrics["cpu.kernel.time"], 5664160000000) self.assertEqual(metrics["cpu.idle.time"], 1592705190000000) self.assertEqual(metrics["cpu.iowait.time"], 6121490000000) self.assertEqual(metrics["cpu.user.percent"], 1) self.assertEqual(metrics["cpu.kernel.percent"], 0) self.assertEqual(metrics["cpu.idle.percent"], 97) self.assertEqual(metrics["cpu.iowait.percent"], 0) self.assertEqual(metrics["cpu.percent"], 2) def test_ensure_single_sampling(self): # We want to ensure that the virt driver's get_host_cpu_stats() # is only ever called once, otherwise values for monitor metrics # might be illogical -- e.g. pct cpu times for user/system/idle # may add up to more than 100. metrics = objects.MonitorMetricList() monitor = virt_driver.Monitor(FakeResourceTracker()) with mock.patch.object(FakeDriver, 'get_host_cpu_stats') as mocked: monitor.add_metrics_to_list(metrics) mocked.assert_called_once_with() nova-13.0.0/nova/tests/unit/compute/monitors/__init__.py0000664000567000056710000000000012701407773024420 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/compute/monitors/test_monitors.py0000664000567000056710000000426312701407773025611 0ustar jenkinsjenkins00000000000000# Copyright 2013 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for resource monitors.""" import mock from nova.compute import monitors from nova import test class MonitorsTestCase(test.NoDBTestCase): """Test case for monitors.""" @mock.patch('stevedore.enabled.EnabledExtensionManager') def test_check_enabled_monitor(self, _mock_ext_manager): class FakeExt(object): def __init__(self, ept, name): self.entry_point_target = ept self.name = name # We check to ensure only one CPU monitor is loaded... self.flags(compute_monitors=['mon1', 'mon2']) handler = monitors.MonitorHandler(None) ext_cpu_mon1 = FakeExt('nova.compute.monitors.cpu.virt_driver:Monitor', 'mon1') ext_cpu_mon2 = FakeExt('nova.compute.monitors.cpu.virt_driver:Monitor', 'mon2') self.assertTrue(handler.check_enabled_monitor(ext_cpu_mon1)) self.assertFalse(handler.check_enabled_monitor(ext_cpu_mon2)) # We check to ensure that the auto-prefixing of the CPU # namespace is handled properly... self.flags(compute_monitors=['cpu.mon1', 'mon2']) handler = monitors.MonitorHandler(None) ext_cpu_mon1 = FakeExt('nova.compute.monitors.cpu.virt_driver:Monitor', 'mon1') ext_cpu_mon2 = FakeExt('nova.compute.monitors.cpu.virt_driver:Monitor', 'mon2') self.assertTrue(handler.check_enabled_monitor(ext_cpu_mon1)) self.assertFalse(handler.check_enabled_monitor(ext_cpu_mon2)) nova-13.0.0/nova/tests/unit/compute/test_compute_xen.py0000664000567000056710000000531312701410011024363 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for expectations of behaviour from the Xen driver.""" from oslo_utils import importutils from nova.compute import power_state import nova.conf from nova import context from nova import objects from nova.objects import instance as instance_obj from nova.tests.unit.compute import eventlet_utils from nova.tests.unit import fake_instance from nova.tests.unit.virt.xenapi import stubs from nova.virt.xenapi import vm_utils CONF = nova.conf.CONF CONF.import_opt('compute_manager', 'nova.service') class ComputeXenTestCase(stubs.XenAPITestBaseNoDB): def setUp(self): super(ComputeXenTestCase, self).setUp() self.flags(compute_driver='xenapi.XenAPIDriver') self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.compute = importutils.import_object(CONF.compute_manager) # execute power syncing synchronously for testing: self.compute._sync_power_pool = eventlet_utils.SyncPool() def test_sync_power_states_instance_not_found(self): db_instance = fake_instance.fake_db_instance() ctxt = context.get_admin_context() instance_list = instance_obj._make_instance_list(ctxt, objects.InstanceList(), [db_instance], None) instance = instance_list[0] self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host') self.mox.StubOutWithMock(self.compute.driver, 'get_num_instances') self.mox.StubOutWithMock(vm_utils, 'lookup') self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state') objects.InstanceList.get_by_host(ctxt, self.compute.host, expected_attrs=[], use_slave=True).AndReturn(instance_list) self.compute.driver.get_num_instances().AndReturn(1) vm_utils.lookup(self.compute.driver._session, instance['name'], False).AndReturn(None) self.compute._sync_instance_power_state(ctxt, instance, power_state.NOSTATE) self.mox.ReplayAll() self.compute._sync_power_states(ctxt) nova-13.0.0/nova/tests/unit/compute/__init__.py0000664000567000056710000000000012701407773022546 0ustar 
jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/compute/test_compute_mgr.py0000664000567000056710000071166312701410011024372 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for ComputeManager().""" import datetime import time import uuid from cinderclient import exceptions as cinder_exception from eventlet import event as eventlet_event import mock from mox3 import mox import netaddr from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import uuidutils import six import nova from nova.compute import build_results from nova.compute import manager from nova.compute import power_state from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import api as conductor_api from nova import context from nova import db from nova import exception from nova.network import api as network_api from nova.network import model as network_model from nova import objects from nova.objects import block_device as block_device_obj from nova.objects import migrate_data as migrate_data_obj from nova import test from nova.tests import fixtures from nova.tests.unit.compute import fake_resource_tracker from nova.tests.unit import fake_block_device from nova.tests.unit import fake_flavor from nova.tests.unit import fake_instance from 
nova.tests.unit import fake_network from nova.tests.unit import fake_network_cache_model from nova.tests.unit import fake_server_actions from nova.tests.unit.objects import test_instance_fault from nova.tests.unit.objects import test_instance_info_cache from nova.tests import uuidsentinel as uuids from nova import utils from nova.virt import driver as virt_driver from nova.virt import event as virtevent from nova.virt import fake as fake_driver from nova.virt import hardware CONF = cfg.CONF CONF.import_opt('compute_manager', 'nova.service') class ComputeManagerUnitTestCase(test.NoDBTestCase): def setUp(self): super(ComputeManagerUnitTestCase, self).setUp() self.flags(use_local=True, group='conductor') self.compute = importutils.import_object(CONF.compute_manager) self.context = context.RequestContext('fake', 'fake') fake_server_actions.stub_out_action_events(self.stubs) self.useFixture(fixtures.SpawnIsSynchronousFixture()) @mock.patch.object(manager.ComputeManager, '_get_power_state') @mock.patch.object(manager.ComputeManager, '_sync_instance_power_state') @mock.patch.object(objects.Instance, 'get_by_uuid') def _test_handle_lifecycle_event(self, mock_get, mock_sync, mock_get_power_state, transition, event_pwr_state, current_pwr_state): event = mock.Mock() event.get_instance_uuid.return_value = mock.sentinel.uuid event.get_transition.return_value = transition mock_get_power_state.return_value = current_pwr_state self.compute.handle_lifecycle_event(event) mock_get.assert_called_with(mock.ANY, mock.sentinel.uuid, expected_attrs=[]) if event_pwr_state == current_pwr_state: mock_sync.assert_called_with(mock.ANY, mock_get.return_value, event_pwr_state) else: self.assertFalse(mock_sync.called) def test_handle_lifecycle_event(self): event_map = {virtevent.EVENT_LIFECYCLE_STOPPED: power_state.SHUTDOWN, virtevent.EVENT_LIFECYCLE_STARTED: power_state.RUNNING, virtevent.EVENT_LIFECYCLE_PAUSED: power_state.PAUSED, virtevent.EVENT_LIFECYCLE_RESUMED: power_state.RUNNING, 
virtevent.EVENT_LIFECYCLE_SUSPENDED: power_state.SUSPENDED, } for transition, pwr_state in six.iteritems(event_map): self._test_handle_lifecycle_event(transition=transition, event_pwr_state=pwr_state, current_pwr_state=pwr_state) def test_handle_lifecycle_event_state_mismatch(self): self._test_handle_lifecycle_event( transition=virtevent.EVENT_LIFECYCLE_STOPPED, event_pwr_state=power_state.SHUTDOWN, current_pwr_state=power_state.RUNNING) def test_delete_instance_info_cache_delete_ordering(self): call_tracker = mock.Mock() call_tracker.clear_events_for_instance.return_value = None mgr_class = self.compute.__class__ orig_delete = mgr_class._delete_instance specd_compute = mock.create_autospec(mgr_class) # spec out everything except for the method we really want # to test, then use call_tracker to verify call sequence specd_compute._delete_instance = orig_delete mock_inst = mock.Mock() mock_inst.uuid = uuids.instance mock_inst.save = mock.Mock() mock_inst.destroy = mock.Mock() mock_inst.system_metadata = mock.Mock() def _mark_notify(*args, **kwargs): call_tracker._notify_about_instance_usage(*args, **kwargs) def _mark_shutdown(*args, **kwargs): call_tracker._shutdown_instance(*args, **kwargs) specd_compute.instance_events = call_tracker specd_compute._notify_about_instance_usage = _mark_notify specd_compute._shutdown_instance = _mark_shutdown mock_inst.info_cache = call_tracker specd_compute._delete_instance(specd_compute, self.context, mock_inst, mock.Mock(), mock.Mock()) methods_called = [n for n, a, k in call_tracker.mock_calls] self.assertEqual(['clear_events_for_instance', '_notify_about_instance_usage', '_shutdown_instance', 'delete'], methods_called) @mock.patch.object(manager.ComputeManager, '_get_resource_tracker') @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes') @mock.patch.object(manager.ComputeManager, '_get_compute_nodes_in_db') def test_update_available_resource(self, get_db_nodes, get_avail_nodes, get_rt): info = {'cn_id': 1} def 
_make_compute_node(hyp_hostname): cn = mock.Mock(spec_set=['hypervisor_hostname', 'id', 'destroy']) cn.id = info['cn_id'] info['cn_id'] += 1 cn.hypervisor_hostname = hyp_hostname return cn def _make_rt(node): n = mock.Mock(spec_set=['update_available_resource', 'nodename']) n.nodename = node return n ctxt = mock.Mock() db_nodes = [_make_compute_node('node1'), _make_compute_node('node2'), _make_compute_node('node3'), _make_compute_node('node4')] avail_nodes = set(['node2', 'node3', 'node4', 'node5']) avail_nodes_l = list(avail_nodes) rts = [_make_rt(node) for node in avail_nodes_l] # Make the 2nd and 3rd ones raise exc = exception.ComputeHostNotFound(host='fake') rts[1].update_available_resource.side_effect = exc exc = test.TestingException() rts[2].update_available_resource.side_effect = exc rts_iter = iter(rts) def _get_rt_side_effect(*args, **kwargs): return next(rts_iter) expected_rt_dict = {avail_nodes_l[0]: rts[0], avail_nodes_l[2]: rts[2], avail_nodes_l[3]: rts[3]} get_db_nodes.return_value = db_nodes get_avail_nodes.return_value = avail_nodes get_rt.side_effect = _get_rt_side_effect self.compute.update_available_resource(ctxt) get_db_nodes.assert_called_once_with(ctxt, use_slave=True) self.assertEqual(sorted([mock.call(node) for node in avail_nodes]), sorted(get_rt.call_args_list)) for rt in rts: rt.update_available_resource.assert_called_once_with(ctxt) self.assertEqual(expected_rt_dict, self.compute._resource_tracker_dict) # First node in set should have been removed from DB for db_node in db_nodes: if db_node.hypervisor_hostname == 'node1': db_node.destroy.assert_called_once_with() else: self.assertFalse(db_node.destroy.called) def test_delete_instance_without_info_cache(self): instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=vm_states.ERROR, host=self.compute.host, expected_attrs=['system_metadata']) quotas = mock.create_autospec(objects.Quotas, spec_set=True) with test.nested( mock.patch.object(self.compute, 
'_notify_about_instance_usage'), mock.patch.object(self.compute, '_shutdown_instance'), mock.patch.object(instance, 'obj_load_attr'), mock.patch.object(instance, 'save'), mock.patch.object(instance, 'destroy') ) as ( compute_notify_about_instance_usage, comupte_shutdown_instance, instance_obj_load_attr, instance_save, instance_destroy ): instance.info_cache = None self.compute._delete_instance(self.context, instance, [], quotas) @mock.patch.object(network_api.API, 'allocate_for_instance') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(time, 'sleep') def test_allocate_network_succeeds_after_retries( self, mock_sleep, mock_save, mock_allocate_for_instance): self.flags(network_allocate_retries=8) instance = fake_instance.fake_instance_obj( self.context, expected_attrs=['system_metadata']) is_vpn = 'fake-is-vpn' req_networks = 'fake-req-networks' macs = 'fake-macs' sec_groups = 'fake-sec-groups' final_result = 'meow' dhcp_options = None mock_allocate_for_instance.side_effect = [ test.TestingException()] * 7 + [final_result] expected_sleep_times = [1, 2, 4, 8, 16, 30, 30, 30] res = self.compute._allocate_network_async(self.context, instance, req_networks, macs, sec_groups, is_vpn, dhcp_options) mock_sleep.has_calls(expected_sleep_times) self.assertEqual(final_result, res) # Ensure save is not called in while allocating networks, the instance # is saved after the allocation. 
self.assertFalse(mock_save.called) self.assertEqual('True', instance.system_metadata['network_allocated']) def test_allocate_network_fails(self): self.flags(network_allocate_retries=0) nwapi = self.compute.network_api self.mox.StubOutWithMock(nwapi, 'allocate_for_instance') instance = {} is_vpn = 'fake-is-vpn' req_networks = 'fake-req-networks' macs = 'fake-macs' sec_groups = 'fake-sec-groups' dhcp_options = None nwapi.allocate_for_instance( self.context, instance, vpn=is_vpn, requested_networks=req_networks, macs=macs, security_groups=sec_groups, dhcp_options=dhcp_options, bind_host_id=instance.get('host')).AndRaise( test.TestingException()) self.mox.ReplayAll() self.assertRaises(test.TestingException, self.compute._allocate_network_async, self.context, instance, req_networks, macs, sec_groups, is_vpn, dhcp_options) def test_allocate_network_neg_conf_value_treated_as_zero(self): self.flags(network_allocate_retries=-1) nwapi = self.compute.network_api self.mox.StubOutWithMock(nwapi, 'allocate_for_instance') instance = {} is_vpn = 'fake-is-vpn' req_networks = 'fake-req-networks' macs = 'fake-macs' sec_groups = 'fake-sec-groups' dhcp_options = None # Only attempted once. 
nwapi.allocate_for_instance( self.context, instance, vpn=is_vpn, requested_networks=req_networks, macs=macs, security_groups=sec_groups, dhcp_options=dhcp_options, bind_host_id=instance.get('host')).AndRaise( test.TestingException()) self.mox.ReplayAll() self.assertRaises(test.TestingException, self.compute._allocate_network_async, self.context, instance, req_networks, macs, sec_groups, is_vpn, dhcp_options) @mock.patch.object(network_api.API, 'allocate_for_instance') @mock.patch.object(manager.ComputeManager, '_instance_update') @mock.patch.object(time, 'sleep') def test_allocate_network_with_conf_value_is_one( self, sleep, _instance_update, allocate_for_instance): self.flags(network_allocate_retries=1) instance = fake_instance.fake_instance_obj( self.context, expected_attrs=['system_metadata']) is_vpn = 'fake-is-vpn' req_networks = 'fake-req-networks' macs = 'fake-macs' sec_groups = 'fake-sec-groups' dhcp_options = None final_result = 'zhangtralon' allocate_for_instance.side_effect = [test.TestingException(), final_result] res = self.compute._allocate_network_async(self.context, instance, req_networks, macs, sec_groups, is_vpn, dhcp_options) self.assertEqual(final_result, res) self.assertEqual(1, sleep.call_count) @mock.patch('nova.compute.manager.ComputeManager.' 
'_do_build_and_run_instance')
    def _test_max_concurrent_builds(self, mock_dbari):
        # Shared helper: fire build_and_run_instance three times and check
        # that the build semaphore is entered once per request.
        with mock.patch.object(self.compute,
                               '_build_semaphore') as mock_sem:
            instance = objects.Instance(uuid=str(uuid.uuid4()))
            for i in (1, 2, 3):
                self.compute.build_and_run_instance(self.context,
                                                    instance,
                                                    mock.sentinel.image,
                                                    mock.sentinel.request_spec,
                                                    {})
            self.assertEqual(3, mock_sem.__enter__.call_count)

    def test_max_concurrent_builds_limited(self):
        self.flags(max_concurrent_builds=2)
        self._test_max_concurrent_builds()

    def test_max_concurrent_builds_unlimited(self):
        self.flags(max_concurrent_builds=0)
        self._test_max_concurrent_builds()

    def test_max_concurrent_builds_semaphore_limited(self):
        self.flags(max_concurrent_builds=123)
        self.assertEqual(123,
                         manager.ComputeManager()._build_semaphore.balance)

    def test_max_concurrent_builds_semaphore_unlimited(self):
        # max_concurrent_builds=0 means "unlimited": the manager swaps in
        # an UnlimitedSemaphore rather than a bounded one.
        self.flags(max_concurrent_builds=0)
        compute = manager.ComputeManager()
        self.assertEqual(0, compute._build_semaphore.balance)
        self.assertIsInstance(compute._build_semaphore,
                              compute_utils.UnlimitedSemaphore)

    def test_nil_out_inst_obj_host_and_node_sets_nil(self):
        instance = fake_instance.fake_instance_obj(self.context,
                                                   uuid=uuids.instance,
                                                   host='foo-host',
                                                   node='foo-node')
        self.assertIsNotNone(instance.host)
        self.assertIsNotNone(instance.node)
        self.compute._nil_out_instance_obj_host_and_node(instance)
        self.assertIsNone(instance.host)
        self.assertIsNone(instance.node)

    def test_init_host(self):
        # init_host is exercised twice (mox record/replay each time): once
        # with defer_iptables_apply enabled, once disabled.  Expectation
        # order matters -- do not reorder the recorded calls.
        our_host = self.compute.host
        inst = fake_instance.fake_db_instance(
                vm_state=vm_states.ACTIVE,
                info_cache=dict(test_instance_info_cache.fake_info_cache,
                                network_info=None),
                security_groups=None)
        startup_instances = [inst, inst, inst]

        def _do_mock_calls(defer_iptables_apply):
            # Records the expected call sequence for one init_host() run.
            self.compute.driver.init_host(host=our_host)
            context.get_admin_context().AndReturn(self.context)
            db.instance_get_all_by_host(
                    self.context, our_host,
                    columns_to_join=['info_cache', 'metadata']
                    ).AndReturn(startup_instances)
            if defer_iptables_apply:
                self.compute.driver.filter_defer_apply_on()
            self.compute._destroy_evacuated_instances(self.context)
            # One _init_instance expectation per instance returned above.
            self.compute._init_instance(self.context,
                                        mox.IsA(objects.Instance))
            self.compute._init_instance(self.context,
                                        mox.IsA(objects.Instance))
            self.compute._init_instance(self.context,
                                        mox.IsA(objects.Instance))
            if defer_iptables_apply:
                self.compute.driver.filter_defer_apply_off()

        self.mox.StubOutWithMock(self.compute.driver, 'init_host')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'filter_defer_apply_on')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'filter_defer_apply_off')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(context, 'get_admin_context')
        self.mox.StubOutWithMock(self.compute,
                                 '_destroy_evacuated_instances')
        self.mox.StubOutWithMock(self.compute, '_init_instance')

        # Test with defer_iptables_apply
        self.flags(defer_iptables_apply=True)
        _do_mock_calls(True)
        self.mox.ReplayAll()
        self.compute.init_host()
        self.mox.VerifyAll()

        # Test without defer_iptables_apply
        self.mox.ResetAll()
        self.flags(defer_iptables_apply=False)
        _do_mock_calls(False)
        self.mox.ReplayAll()
        self.compute.init_host()
        # tearDown() uses context.get_admin_context(), so we have
        # to do the verification here and unstub it.
        self.mox.VerifyAll()
        self.mox.UnsetStubs()

    @mock.patch('nova.objects.InstanceList')
    @mock.patch('nova.objects.MigrationList.get_by_filters')
    def test_cleanup_host(self, mock_miglist_get, mock_instance_list):
        # just testing whether the cleanup_host method
        # when fired will invoke the underlying driver's
        # equivalent method.
        mock_miglist_get.return_value = []
        mock_instance_list.get_by_host.return_value = []

        with mock.patch.object(self.compute, 'driver') as mock_driver:
            self.compute.init_host()
            mock_driver.init_host.assert_called_once_with(host='fake-mini')

            self.compute.cleanup_host()
            # register_event_listener is called on startup (init_host) and
            # in cleanup_host
            mock_driver.register_event_listener.assert_has_calls([
                mock.call(self.compute.handle_events), mock.call(None)])
            mock_driver.cleanup_host.assert_called_once_with(host='fake-mini')

    def test_init_virt_events_disabled(self):
        # With the workaround flag off, no event listener is registered.
        self.flags(handle_virt_lifecycle_events=False, group='workarounds')
        with mock.patch.object(self.compute.driver,
                               'register_event_listener') as mock_register:
            self.compute.init_virt_events()
        self.assertFalse(mock_register.called)

    @mock.patch('nova.objects.MigrationList.get_by_filters') @mock.patch('nova.objects.Migration.save') def test_init_host_with_evacuated_instance(self, mock_save, mock_mig_get): our_host = self.compute.host not_our_host = 'not-' + our_host deleted_instance = fake_instance.fake_instance_obj( self.context, host=not_our_host, uuid=uuids.deleted_instance) migration = objects.Migration(instance_uuid=deleted_instance.uuid) mock_mig_get.return_value = [migration] self.mox.StubOutWithMock(self.compute.driver, 'init_host') self.mox.StubOutWithMock(self.compute.driver, 'destroy') self.mox.StubOutWithMock(db, 'instance_get_all_by_host') self.mox.StubOutWithMock(context, 'get_admin_context') self.mox.StubOutWithMock(self.compute, 'init_virt_events') self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver') self.mox.StubOutWithMock(self.compute, '_init_instance') self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.compute.driver.init_host(host=our_host) context.get_admin_context().AndReturn(self.context) db.instance_get_all_by_host(self.context, our_host, columns_to_join=['info_cache', 'metadata'] ).AndReturn([]) self.compute.init_virt_events() # simulate failed
instance self.compute._get_instances_on_driver( self.context, {'deleted': False}).AndReturn([deleted_instance]) self.compute.network_api.get_instance_nw_info( self.context, deleted_instance).AndRaise( exception.InstanceNotFound(instance_id=deleted_instance['uuid'])) # ensure driver.destroy is called so that driver may # clean up any dangling files self.compute.driver.destroy(self.context, deleted_instance, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.compute.init_host() # tearDown() uses context.get_admin_context(), so we have # to do the verification here and unstub it. self.mox.VerifyAll() self.mox.UnsetStubs() def test_init_instance_with_binding_failed_vif_type(self): # this instance will plug a 'binding_failed' vif instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, info_cache=None, power_state=power_state.RUNNING, vm_state=vm_states.ACTIVE, task_state=None, host=self.compute.host, expected_attrs=['info_cache']) with test.nested( mock.patch.object(context, 'get_admin_context', return_value=self.context), mock.patch.object(compute_utils, 'get_nw_info_for_instance', return_value=network_model.NetworkInfo()), mock.patch.object(self.compute.driver, 'plug_vifs', side_effect=exception.VirtualInterfacePlugException( "Unexpected vif_type=binding_failed")), mock.patch.object(self.compute, '_set_instance_obj_error_state') ) as (get_admin_context, get_nw_info, plug_vifs, set_error_state): self.compute._init_instance(self.context, instance) set_error_state.assert_called_once_with(self.context, instance) def test__get_power_state_InstanceNotFound(self): instance = fake_instance.fake_instance_obj( self.context, power_state=power_state.RUNNING) with mock.patch.object(self.compute.driver, 'get_info', side_effect=exception.InstanceNotFound(instance_id=1)): self.assertEqual(self.compute._get_power_state(self.context, instance), power_state.NOSTATE) def test__get_power_state_NotFound(self): instance = 
fake_instance.fake_instance_obj( self.context, power_state=power_state.RUNNING) with mock.patch.object(self.compute.driver, 'get_info', side_effect=exception.NotFound()): self.assertRaises(exception.NotFound, self.compute._get_power_state, self.context, instance) def test_init_instance_failed_resume_sets_error(self): instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, info_cache=None, power_state=power_state.RUNNING, vm_state=vm_states.ACTIVE, task_state=None, host=self.compute.host, expected_attrs=['info_cache']) self.flags(resume_guests_state_on_host_boot=True) self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs') self.mox.StubOutWithMock(self.compute.driver, 'resume_state_on_host_boot') self.mox.StubOutWithMock(self.compute, '_get_instance_block_device_info') self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state') self.compute._get_power_state(mox.IgnoreArg(), instance).AndReturn(power_state.SHUTDOWN) self.compute._get_power_state(mox.IgnoreArg(), instance).AndReturn(power_state.SHUTDOWN) self.compute._get_power_state(mox.IgnoreArg(), instance).AndReturn(power_state.SHUTDOWN) self.compute.driver.plug_vifs(instance, mox.IgnoreArg()) self.compute._get_instance_block_device_info(mox.IgnoreArg(), instance).AndReturn('fake-bdm') self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(), instance, mox.IgnoreArg(), 'fake-bdm').AndRaise(test.TestingException) self.compute._set_instance_obj_error_state(mox.IgnoreArg(), instance) self.mox.ReplayAll() self.compute._init_instance('fake-context', instance) @mock.patch.object(objects.BlockDeviceMapping, 'destroy') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(objects.Instance, 'destroy') @mock.patch.object(objects.Instance, 'obj_load_attr') @mock.patch.object(objects.quotas.Quotas, 'commit') @mock.patch.object(objects.quotas.Quotas, 'reserve') 
@mock.patch.object(objects.quotas, 'ids_from_instance') def test_init_instance_complete_partial_deletion( self, mock_ids_from_instance, mock_reserve, mock_commit, mock_inst_destroy, mock_obj_load_attr, mock_get_by_instance_uuid, mock_bdm_destroy): """Test to complete deletion for instances in DELETED status but not marked as deleted in the DB """ instance = fake_instance.fake_instance_obj( self.context, project_id='fake', uuid=uuids.instance, vcpus=1, memory_mb=64, power_state=power_state.SHUTDOWN, vm_state=vm_states.DELETED, host=self.compute.host, task_state=None, deleted=False, deleted_at=None, metadata={}, system_metadata={}, expected_attrs=['metadata', 'system_metadata']) # Make sure instance vm_state is marked as 'DELETED' but instance is # not destroyed from db. self.assertEqual(vm_states.DELETED, instance.vm_state) self.assertFalse(instance.deleted) deltas = {'instances': -1, 'cores': -instance.vcpus, 'ram': -instance.memory_mb} def fake_inst_destroy(): instance.deleted = True instance.deleted_at = timeutils.utcnow() mock_ids_from_instance.return_value = (instance.project_id, instance.user_id) mock_inst_destroy.side_effect = fake_inst_destroy() self.compute._init_instance(self.context, instance) # Make sure that instance.destroy method was called and # instance was deleted from db. 
self.assertTrue(mock_reserve.called) self.assertTrue(mock_commit.called) self.assertNotEqual(0, instance.deleted) mock_reserve.assert_called_once_with(project_id=instance.project_id, user_id=instance.user_id, **deltas) @mock.patch('nova.compute.manager.LOG') def test_init_instance_complete_partial_deletion_raises_exception( self, mock_log): instance = fake_instance.fake_instance_obj( self.context, project_id='fake', uuid=uuids.instance, vcpus=1, memory_mb=64, power_state=power_state.SHUTDOWN, vm_state=vm_states.DELETED, host=self.compute.host, task_state=None, deleted=False, deleted_at=None, metadata={}, system_metadata={}, expected_attrs=['metadata', 'system_metadata']) with mock.patch.object(self.compute, '_complete_partial_deletion') as mock_deletion: mock_deletion.side_effect = test.TestingException() self.compute._init_instance(self, instance) msg = u'Failed to complete a deletion' mock_log.exception.assert_called_once_with(msg, instance=instance) def test_init_instance_stuck_in_deleting(self): instance = fake_instance.fake_instance_obj( self.context, project_id='fake', uuid=uuids.instance, vcpus=1, memory_mb=64, power_state=power_state.RUNNING, vm_state=vm_states.ACTIVE, host=self.compute.host, task_state=task_states.DELETING) self.mox.StubOutWithMock(objects.BlockDeviceMappingList, 'get_by_instance_uuid') self.mox.StubOutWithMock(self.compute, '_delete_instance') self.mox.StubOutWithMock(instance, 'obj_load_attr') self.mox.StubOutWithMock(self.compute, '_create_reservations') bdms = [] quotas = objects.quotas.Quotas(self.context) instance.obj_load_attr('metadata') instance.obj_load_attr('system_metadata') objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, instance.uuid).AndReturn(bdms) self.compute._create_reservations(self.context, instance, instance.project_id, instance.user_id).AndReturn(quotas) self.compute._delete_instance(self.context, instance, bdms, mox.IgnoreArg()) self.mox.ReplayAll() self.compute._init_instance(self.context, 
instance) @mock.patch.object(objects.Instance, 'get_by_uuid') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') def test_init_instance_stuck_in_deleting_raises_exception( self, mock_get_by_instance_uuid, mock_get_by_uuid): instance = fake_instance.fake_instance_obj( self.context, project_id='fake', uuid=uuids.instance, vcpus=1, memory_mb=64, metadata={}, system_metadata={}, host=self.compute.host, vm_state=vm_states.ACTIVE, task_state=task_states.DELETING, expected_attrs=['metadata', 'system_metadata']) bdms = [] reservations = ['fake-resv'] def _create_patch(name, attr): patcher = mock.patch.object(name, attr) mocked_obj = patcher.start() self.addCleanup(patcher.stop) return mocked_obj mock_delete_instance = _create_patch(self.compute, '_delete_instance') mock_set_instance_error_state = _create_patch( self.compute, '_set_instance_obj_error_state') mock_create_reservations = _create_patch(self.compute, '_create_reservations') mock_create_reservations.return_value = reservations mock_get_by_instance_uuid.return_value = bdms mock_get_by_uuid.return_value = instance mock_delete_instance.side_effect = test.TestingException('test') self.compute._init_instance(self.context, instance) mock_set_instance_error_state.assert_called_once_with( self.context, instance) def _test_init_instance_reverts_crashed_migrations(self, old_vm_state=None): power_on = True if (not old_vm_state or old_vm_state == vm_states.ACTIVE) else False sys_meta = { 'old_vm_state': old_vm_state } instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=vm_states.ERROR, task_state=task_states.RESIZE_MIGRATING, power_state=power_state.SHUTDOWN, system_metadata=sys_meta, host=self.compute.host, expected_attrs=['system_metadata']) self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance') self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs') self.mox.StubOutWithMock(self.compute.driver, 'finish_revert_migration') 
self.mox.StubOutWithMock(self.compute, '_get_instance_block_device_info') self.mox.StubOutWithMock(self.compute.driver, 'get_info') self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute, '_retry_reboot') self.compute._retry_reboot(self.context, instance).AndReturn( (False, None)) compute_utils.get_nw_info_for_instance(instance).AndReturn( network_model.NetworkInfo()) self.compute.driver.plug_vifs(instance, []) self.compute._get_instance_block_device_info( self.context, instance).AndReturn([]) self.compute.driver.finish_revert_migration(self.context, instance, [], [], power_on) instance.save() self.compute.driver.get_info(instance).AndReturn( hardware.InstanceInfo(state=power_state.SHUTDOWN)) self.compute.driver.get_info(instance).AndReturn( hardware.InstanceInfo(state=power_state.SHUTDOWN)) self.mox.ReplayAll() self.compute._init_instance(self.context, instance) self.assertIsNone(instance.task_state) def test_init_instance_reverts_crashed_migration_from_active(self): self._test_init_instance_reverts_crashed_migrations( old_vm_state=vm_states.ACTIVE) def test_init_instance_reverts_crashed_migration_from_stopped(self): self._test_init_instance_reverts_crashed_migrations( old_vm_state=vm_states.STOPPED) def test_init_instance_reverts_crashed_migration_no_old_state(self): self._test_init_instance_reverts_crashed_migrations(old_vm_state=None) def test_init_instance_resets_crashed_live_migration(self): instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=vm_states.ACTIVE, host=self.compute.host, task_state=task_states.MIGRATING) with test.nested( mock.patch.object(instance, 'save'), mock.patch('nova.compute.utils.get_nw_info_for_instance', return_value=network_model.NetworkInfo()) ) as (save, get_nw_info): self.compute._init_instance(self.context, instance) save.assert_called_once_with(expected_task_state=['migrating']) get_nw_info.assert_called_once_with(instance) self.assertIsNone(instance.task_state) 
self.assertEqual(vm_states.ACTIVE, instance.vm_state) def _test_init_instance_sets_building_error(self, vm_state, task_state=None): instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=vm_state, host=self.compute.host, task_state=task_state) with mock.patch.object(instance, 'save') as save: self.compute._init_instance(self.context, instance) save.assert_called_once_with() self.assertIsNone(instance.task_state) self.assertEqual(vm_states.ERROR, instance.vm_state) def test_init_instance_sets_building_error(self): self._test_init_instance_sets_building_error(vm_states.BUILDING) def test_init_instance_sets_rebuilding_errors(self): tasks = [task_states.REBUILDING, task_states.REBUILD_BLOCK_DEVICE_MAPPING, task_states.REBUILD_SPAWNING] vms = [vm_states.ACTIVE, vm_states.STOPPED] for vm_state in vms: for task_state in tasks: self._test_init_instance_sets_building_error( vm_state, task_state) def _test_init_instance_sets_building_tasks_error(self, instance): instance.host = self.compute.host with mock.patch.object(instance, 'save') as save: self.compute._init_instance(self.context, instance) save.assert_called_once_with() self.assertIsNone(instance.task_state) self.assertEqual(vm_states.ERROR, instance.vm_state) def test_init_instance_sets_building_tasks_error_scheduling(self): instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=None, task_state=task_states.SCHEDULING) self._test_init_instance_sets_building_tasks_error(instance) def test_init_instance_sets_building_tasks_error_block_device(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = None instance.task_state = task_states.BLOCK_DEVICE_MAPPING self._test_init_instance_sets_building_tasks_error(instance) def test_init_instance_sets_building_tasks_error_networking(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = None instance.task_state = 
task_states.NETWORKING self._test_init_instance_sets_building_tasks_error(instance)

    def test_init_instance_sets_building_tasks_error_spawning(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.vm_state = None
        instance.task_state = task_states.SPAWNING
        self._test_init_instance_sets_building_tasks_error(instance)

    def _test_init_instance_cleans_image_states(self, instance):
        # Shared helper: an instance interrupted mid-snapshot gets its task
        # state cleared and the driver's interrupted-snapshot cleanup run.
        with mock.patch.object(instance, 'save') as save:
            self.compute._get_power_state = mock.Mock()
            self.compute.driver.post_interrupted_snapshot_cleanup = mock.Mock()
            instance.info_cache = None
            instance.power_state = power_state.RUNNING
            instance.host = self.compute.host
            self.compute._init_instance(self.context, instance)
            save.assert_called_once_with()
        self.compute.driver.post_interrupted_snapshot_cleanup.\
                assert_called_once_with(self.context, instance)
        self.assertIsNone(instance.task_state)

    @mock.patch('nova.compute.manager.ComputeManager._get_power_state',
                return_value=power_state.RUNNING)
    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
    def _test_init_instance_cleans_task_states(self, powerstate, state,
            mock_get_uuid, mock_get_power_state):
        # Shared helper: run _init_instance on an ACTIVE instance in the
        # given task state with the given reported power state, and return
        # the instance for the caller's assertions.
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.info_cache = None
        instance.power_state = power_state.RUNNING
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = state
        instance.host = self.compute.host
        mock_get_power_state.return_value = powerstate

        self.compute._init_instance(self.context, instance)
        return instance

    def test_init_instance_cleans_image_state_pending_upload(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_PENDING_UPLOAD
        self._test_init_instance_cleans_image_states(instance)

    def test_init_instance_cleans_image_state_uploading(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_UPLOADING
        self._test_init_instance_cleans_image_states(instance)

    def test_init_instance_cleans_image_state_snapshot(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_SNAPSHOT
        self._test_init_instance_cleans_image_states(instance)

    def test_init_instance_cleans_image_state_snapshot_pending(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
        self._test_init_instance_cleans_image_states(instance)

    @mock.patch.object(objects.Instance, 'save')
    def test_init_instance_cleans_running_pausing(self, mock_save):
        instance = self._test_init_instance_cleans_task_states(
            power_state.RUNNING, task_states.PAUSING)
        mock_save.assert_called_once_with()
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
        self.assertIsNone(instance.task_state)

    @mock.patch.object(objects.Instance, 'save')
    def test_init_instance_cleans_running_unpausing(self, mock_save):
        instance = self._test_init_instance_cleans_task_states(
            power_state.RUNNING, task_states.UNPAUSING)
        mock_save.assert_called_once_with()
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
        self.assertIsNone(instance.task_state)

    @mock.patch('nova.compute.manager.ComputeManager.unpause_instance')
    def test_init_instance_cleans_paused_unpausing(self, mock_unpause):
        # A PAUSED instance stuck UNPAUSING is unpaused on init.

        def fake_unpause(context, instance):
            instance.task_state = None

        mock_unpause.side_effect = fake_unpause
        instance = self._test_init_instance_cleans_task_states(
            power_state.PAUSED, task_states.UNPAUSING)
        mock_unpause.assert_called_once_with(self.context, instance)
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
        self.assertIsNone(instance.task_state)

    def test_init_instance_errors_when_not_migrating(self):
        # An ERROR instance not in a migration-related task state is left
        # alone: no network info lookup is recorded, and VerifyAll confirms
        # nothing was called.
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.vm_state = vm_states.ERROR
        instance.task_state = task_states.IMAGE_UPLOADING
        instance.host = self.compute.host
        self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
        self.mox.ReplayAll()
        self.compute._init_instance(self.context, instance)
        self.mox.VerifyAll()

    def test_init_instance_deletes_error_deleting_instance(self):
        # An ERROR instance stuck DELETING has its deletion resumed on init.
        instance = fake_instance.fake_instance_obj(
            self.context,
            project_id='fake',
            uuid=uuids.instance,
            vcpus=1,
            memory_mb=64,
            vm_state=vm_states.ERROR,
            host=self.compute.host,
            task_state=task_states.DELETING)

        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                                 'get_by_instance_uuid')
        self.mox.StubOutWithMock(self.compute, '_delete_instance')
        self.mox.StubOutWithMock(instance, 'obj_load_attr')
        self.mox.StubOutWithMock(objects.quotas, 'ids_from_instance')
        self.mox.StubOutWithMock(self.compute, '_create_reservations')

        bdms = []
        quotas = objects.quotas.Quotas(self.context)
        instance.obj_load_attr('metadata')
        instance.obj_load_attr('system_metadata')
        objects.BlockDeviceMappingList.get_by_instance_uuid(
            self.context, instance.uuid).AndReturn(bdms)
        objects.quotas.ids_from_instance(self.context, instance).AndReturn(
            (instance.project_id, instance.user_id))
        self.compute._create_reservations(self.context, instance,
                                          instance.project_id,
                                          instance.user_id).AndReturn(quotas)
        self.compute._delete_instance(self.context, instance, bdms,
                                      mox.IgnoreArg())
        self.mox.ReplayAll()
        self.compute._init_instance(self.context, instance)
        self.mox.VerifyAll()

    def test_init_instance_resize_prep(self): instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=vm_states.ACTIVE, host=self.compute.host, task_state=task_states.RESIZE_PREP, power_state=power_state.RUNNING) with test.nested( mock.patch.object(self.compute, '_get_power_state', return_value=power_state.RUNNING), mock.patch.object(compute_utils, 'get_nw_info_for_instance'), mock.patch.object(instance, 'save', autospec=True) ) as (mock_get_power_state, mock_nw_info, mock_instance_save):
self.compute._init_instance(self.context, instance) mock_instance_save.assert_called_once_with() self.assertIsNone(instance.task_state)

    @mock.patch('nova.context.RequestContext.elevated')
    @mock.patch('nova.compute.utils.get_nw_info_for_instance')
    @mock.patch(
        'nova.compute.manager.ComputeManager._get_instance_block_device_info')
    @mock.patch('nova.virt.driver.ComputeDriver.destroy')
    @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
    def _test_shutdown_instance_exception(self, exc, mock_connector,
            mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated):
        # Shared helper: _shutdown_instance must not blow up when the
        # volume connector lookup raises the given exception.
        mock_connector.side_effect = exc
        mock_elevated.return_value = self.context
        instance = fake_instance.fake_instance_obj(
                self.context,
                uuid=uuids.instance,
                vm_state=vm_states.ERROR,
                task_state=task_states.DELETING)
        bdms = [mock.Mock(id=1, is_volume=True)]

        self.compute._shutdown_instance(self.context, instance, bdms,
                notify=False, try_deallocate_networks=False)

    def test_shutdown_instance_endpoint_not_found(self):
        exc = cinder_exception.EndpointNotFound
        self._test_shutdown_instance_exception(exc)

    def test_shutdown_instance_client_exception(self):
        exc = cinder_exception.ClientException(code=9001)
        self._test_shutdown_instance_exception(exc)

    def test_shutdown_instance_volume_not_found(self):
        exc = exception.VolumeNotFound(volume_id=42)
        self._test_shutdown_instance_exception(exc)

    def test_shutdown_instance_disk_not_found(self):
        exc = exception.DiskNotFound(location="not\\here")
        self._test_shutdown_instance_exception(exc)

    def _test_init_instance_retries_reboot(self, instance, reboot_type,
                                           return_power_state):
        # Shared helper: _init_instance must re-issue the interrupted reboot
        # with the expected reboot_type.
        instance.host = self.compute.host
        with test.nested(
            mock.patch.object(self.compute, '_get_power_state',
                               return_value=return_power_state),
            mock.patch.object(self.compute, 'reboot_instance'),
            mock.patch.object(compute_utils, 'get_nw_info_for_instance')
          ) as (
            _get_power_state,
            reboot_instance,
            get_nw_info_for_instance
          ):
            self.compute._init_instance(self.context, instance)
            call = mock.call(self.context, instance, block_device_info=None,
                             reboot_type=reboot_type)
            reboot_instance.assert_has_calls([call])

    def test_init_instance_retries_reboot_pending(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.task_state = task_states.REBOOT_PENDING
        for state in vm_states.ALLOW_SOFT_REBOOT:
            instance.vm_state = state
            self._test_init_instance_retries_reboot(instance, 'SOFT',
                                                    power_state.RUNNING)

    def test_init_instance_retries_reboot_pending_hard(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.task_state = task_states.REBOOT_PENDING_HARD
        for state in vm_states.ALLOW_HARD_REBOOT:
            # NOTE(dave-mcnally) while a reboot of a vm in error state is
            # possible we don't attempt to recover an error during init
            if state == vm_states.ERROR:
                continue
            instance.vm_state = state
            self._test_init_instance_retries_reboot(instance, 'HARD',
                                                    power_state.RUNNING)

    def test_init_instance_retries_reboot_pending_soft_became_hard(self):
        # A pending SOFT reboot on a powered-off guest is escalated to HARD.
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.task_state = task_states.REBOOT_PENDING
        for state in vm_states.ALLOW_HARD_REBOOT:
            # NOTE(dave-mcnally) while a reboot of a vm in error state is
            # possible we don't attempt to recover an error during init
            if state == vm_states.ERROR:
                continue
            instance.vm_state = state
            with mock.patch.object(instance, 'save'):
                self._test_init_instance_retries_reboot(
                    instance, 'HARD', power_state.SHUTDOWN)
                self.assertEqual(task_states.REBOOT_PENDING_HARD,
                                 instance.task_state)

    def test_init_instance_retries_reboot_started(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.REBOOT_STARTED
        with mock.patch.object(instance, 'save'):
            self._test_init_instance_retries_reboot(instance, 'HARD',
                                                    power_state.NOSTATE)

    def test_init_instance_retries_reboot_started_hard(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.REBOOT_STARTED_HARD
        self._test_init_instance_retries_reboot(instance, 'HARD',
                                                power_state.NOSTATE)

    def _test_init_instance_cleans_reboot_state(self, instance):
        # Shared helper: a reboot task state on an already-running guest is
        # simply cleared, leaving the instance ACTIVE.
        instance.host = self.compute.host
        with test.nested(
            mock.patch.object(self.compute, '_get_power_state',
                               return_value=power_state.RUNNING),
            mock.patch.object(instance, 'save', autospec=True),
            mock.patch.object(compute_utils, 'get_nw_info_for_instance')
          ) as (
            _get_power_state,
            instance_save,
            get_nw_info_for_instance
          ):
            self.compute._init_instance(self.context, instance)
            instance_save.assert_called_once_with()
            self.assertIsNone(instance.task_state)
            self.assertEqual(vm_states.ACTIVE, instance.vm_state)

    def test_init_instance_cleans_image_state_reboot_started(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.REBOOT_STARTED
        instance.power_state = power_state.RUNNING
        self._test_init_instance_cleans_reboot_state(instance)

    def test_init_instance_cleans_image_state_reboot_started_hard(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.REBOOT_STARTED_HARD
        instance.power_state = power_state.RUNNING
        self._test_init_instance_cleans_reboot_state(instance)

    def test_init_instance_retries_power_off(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.id = 1
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.POWERING_OFF
        instance.host = self.compute.host
        with mock.patch.object(self.compute, 'stop_instance'):
            self.compute._init_instance(self.context, instance)
            call = mock.call(self.context, instance, True)
            self.compute.stop_instance.assert_has_calls([call])

    def test_init_instance_retries_power_on(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.id = 1
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.POWERING_ON
        instance.host = self.compute.host
        with mock.patch.object(self.compute, 'start_instance'):
            self.compute._init_instance(self.context, instance)
            call = mock.call(self.context, instance)
            self.compute.start_instance.assert_has_calls([call])

    def test_init_instance_retries_power_on_silent_exception(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.id = 1
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.POWERING_ON
        instance.host = self.compute.host
        # NOTE(review): return_value=Exception hands the exception *class*
        # back as a return value instead of raising it (that would be
        # side_effect), so this probably does not exercise the
        # exception-swallowing path its name implies -- confirm before
        # relying on it.
        with mock.patch.object(self.compute, 'start_instance',
                              return_value=Exception):
            init_return = self.compute._init_instance(self.context, instance)
            call = mock.call(self.context, instance)
            self.compute.start_instance.assert_has_calls([call])
            self.assertIsNone(init_return)

    def test_init_instance_retries_power_off_silent_exception(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.id = 1
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.POWERING_OFF
        instance.host = self.compute.host
        # NOTE(review): same caveat as the power_on variant above --
        # return_value=Exception is probably meant to be side_effect.
        with mock.patch.object(self.compute, 'stop_instance',
                              return_value=Exception):
            init_return = self.compute._init_instance(self.context, instance)
            call = mock.call(self.context, instance, True)
            self.compute.stop_instance.assert_has_calls([call])
            self.assertIsNone(init_return)

    def test_get_instances_on_driver(self): driver_instances = [] for x in range(10): driver_instances.append(fake_instance.fake_db_instance()) self.mox.StubOutWithMock(self.compute.driver, 'list_instance_uuids') self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') self.compute.driver.list_instance_uuids().AndReturn( [inst['uuid'] for inst in driver_instances]) db.instance_get_all_by_filters( self.context, {'uuid': [inst['uuid'] for inst in driver_instances]}, 'created_at', 'desc', columns_to_join=None, limit=None, marker=None).AndReturn(driver_instances) self.mox.ReplayAll() result
= self.compute._get_instances_on_driver(self.context) self.assertEqual([x['uuid'] for x in driver_instances], [x['uuid'] for x in result]) @mock.patch('nova.virt.driver.ComputeDriver.list_instance_uuids') @mock.patch('nova.db.api.instance_get_all_by_filters') def test_get_instances_on_driver_empty(self, mock_list, mock_db): mock_list.return_value = [] result = self.compute._get_instances_on_driver(self.context) # instance_get_all_by_filters should not be called self.assertEqual(0, mock_db.call_count) self.assertEqual([], [x['uuid'] for x in result]) def test_get_instances_on_driver_fallback(self): # Test getting instances when driver doesn't support # 'list_instance_uuids' self.compute.host = 'host' filters = {'host': self.compute.host} self.flags(instance_name_template='inst-%i') all_instances = [] driver_instances = [] for x in range(10): instance = fake_instance.fake_db_instance(name='inst-%i' % x, id=x) if x % 2: driver_instances.append(instance) all_instances.append(instance) self.mox.StubOutWithMock(self.compute.driver, 'list_instance_uuids') self.mox.StubOutWithMock(self.compute.driver, 'list_instances') self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') self.compute.driver.list_instance_uuids().AndRaise( NotImplementedError()) self.compute.driver.list_instances().AndReturn( [inst['name'] for inst in driver_instances]) db.instance_get_all_by_filters( self.context, filters, 'created_at', 'desc', columns_to_join=None, limit=None, marker=None).AndReturn(all_instances) self.mox.ReplayAll() result = self.compute._get_instances_on_driver(self.context, filters) self.assertEqual([x['uuid'] for x in driver_instances], [x['uuid'] for x in result]) def test_instance_usage_audit(self): instances = [objects.Instance(uuid=uuids.instance)] @classmethod def fake_task_log(*a, **k): pass @classmethod def fake_get(*a, **k): return instances self.flags(instance_usage_audit=True) self.stubs.Set(objects.TaskLog, 'get', fake_task_log) self.stubs.Set(objects.InstanceList, 
'get_active_by_window_joined', fake_get) self.stubs.Set(objects.TaskLog, 'begin_task', fake_task_log) self.stubs.Set(objects.TaskLog, 'end_task', fake_task_log) self.mox.StubOutWithMock(compute_utils, 'notify_usage_exists') compute_utils.notify_usage_exists(self.compute.notifier, self.context, instances[0], ignore_missing_network_data=False) self.mox.ReplayAll() self.compute._instance_usage_audit(self.context) @mock.patch.object(objects.InstanceList, 'get_by_host') def test_sync_power_states(self, mock_get): instance = mock.Mock() mock_get.return_value = [instance] with mock.patch.object(self.compute._sync_power_pool, 'spawn_n') as mock_spawn: self.compute._sync_power_states(mock.sentinel.context) mock_get.assert_called_with(mock.sentinel.context, self.compute.host, expected_attrs=[], use_slave=True) mock_spawn.assert_called_once_with(mock.ANY, instance) def _get_sync_instance(self, power_state, vm_state, task_state=None, shutdown_terminate=False): instance = objects.Instance() instance.uuid = uuids.instance instance.power_state = power_state instance.vm_state = vm_state instance.host = self.compute.host instance.task_state = task_state instance.shutdown_terminate = shutdown_terminate self.mox.StubOutWithMock(instance, 'refresh') self.mox.StubOutWithMock(instance, 'save') return instance def test_sync_instance_power_state_match(self): instance = self._get_sync_instance(power_state.RUNNING, vm_states.ACTIVE) instance.refresh(use_slave=False) self.mox.ReplayAll() self.compute._sync_instance_power_state(self.context, instance, power_state.RUNNING) def test_sync_instance_power_state_running_stopped(self): instance = self._get_sync_instance(power_state.RUNNING, vm_states.ACTIVE) instance.refresh(use_slave=False) instance.save() self.mox.ReplayAll() self.compute._sync_instance_power_state(self.context, instance, power_state.SHUTDOWN) self.assertEqual(instance.power_state, power_state.SHUTDOWN) def _test_sync_to_stop(self, power_state, vm_state, driver_power_state, 
                           # (continuation: tail of _test_sync_to_stop's
                           # signature; its def line lies above this chunk)
                           stop=True, force=False, shutdown_terminate=False):
        # Helper: record the single compute_api call expected for a given
        # (power_state, vm_state) pair, drive _sync_instance_power_state,
        # then verify and reset the mox expectations so this helper can be
        # invoked repeatedly inside a loop.
        instance = self._get_sync_instance(
            power_state, vm_state, shutdown_terminate=shutdown_terminate)
        # refresh/save were stubbed by _get_sync_instance; these record
        # them as expected calls in mox record mode.
        instance.refresh(use_slave=False)
        instance.save()
        self.mox.StubOutWithMock(self.compute.compute_api, 'stop')
        self.mox.StubOutWithMock(self.compute.compute_api, 'delete')
        self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop')
        # Exactly one of delete/force_stop/stop is expected depending on
        # the flags; with stop=False none of them may be called.
        if shutdown_terminate:
            self.compute.compute_api.delete(self.context, instance)
        elif stop:
            if force:
                self.compute.compute_api.force_stop(self.context, instance)
            else:
                self.compute.compute_api.stop(self.context, instance)
        self.mox.ReplayAll()
        self.compute._sync_instance_power_state(self.context, instance,
                                                driver_power_state)
        self.mox.VerifyAll()
        self.mox.UnsetStubs()

    def test_sync_instance_power_state_to_stop(self):
        # Guest found SHUTDOWN/CRASHED/SUSPENDED while nova believes it is
        # ACTIVE -> plain stop; PAUSED guests and a stale STOPPED record
        # with a RUNNING guest -> force stop.
        for ps in (power_state.SHUTDOWN, power_state.CRASHED,
                   power_state.SUSPENDED):
            self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE,
                                    ps)

        for ps in (power_state.SHUTDOWN, power_state.CRASHED):
            self._test_sync_to_stop(power_state.PAUSED, vm_states.PAUSED, ps,
                                    force=True)

        self._test_sync_to_stop(power_state.SHUTDOWN, vm_states.STOPPED,
                                power_state.RUNNING, force=True)

    def test_sync_instance_power_state_to_terminate(self):
        # shutdown_terminate instances get deleted rather than stopped.
        self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE,
                                power_state.SHUTDOWN, force=False,
                                shutdown_terminate=True)

    def test_sync_instance_power_state_to_no_stop(self):
        # PAUSED/NOSTATE guests, and (soft-)deleted instances, must not
        # trigger any stop call.
        for ps in (power_state.PAUSED, power_state.NOSTATE):
            self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE,
                                    ps, stop=False)
        for vs in (vm_states.SOFT_DELETED, vm_states.DELETED):
            for ps in (power_state.NOSTATE, power_state.SHUTDOWN):
                self._test_sync_to_stop(power_state.RUNNING, vs, ps,
                                        stop=False)

    # (continues past this chunk: the decorator string and the decorated
    # test are on the following line)
    @mock.patch('nova.compute.manager.ComputeManager.'
'_sync_instance_power_state') def test_query_driver_power_state_and_sync_pending_task( self, mock_sync_power_state): with mock.patch.object(self.compute.driver, 'get_info') as mock_get_info: db_instance = objects.Instance(uuid=uuids.db_instance, task_state=task_states.POWERING_OFF) self.compute._query_driver_power_state_and_sync(self.context, db_instance) self.assertFalse(mock_get_info.called) self.assertFalse(mock_sync_power_state.called) @mock.patch('nova.compute.manager.ComputeManager.' '_sync_instance_power_state') def test_query_driver_power_state_and_sync_not_found_driver( self, mock_sync_power_state): error = exception.InstanceNotFound(instance_id=1) with mock.patch.object(self.compute.driver, 'get_info', side_effect=error) as mock_get_info: db_instance = objects.Instance(uuid=uuids.db_instance, task_state=None) self.compute._query_driver_power_state_and_sync(self.context, db_instance) mock_get_info.assert_called_once_with(db_instance) mock_sync_power_state.assert_called_once_with(self.context, db_instance, power_state.NOSTATE, use_slave=True) def test_run_pending_deletes(self): self.flags(instance_delete_interval=10) class FakeInstance(object): def __init__(self, uuid, name, smd): self.uuid = uuid self.name = name self.system_metadata = smd self.cleaned = False def __getitem__(self, name): return getattr(self, name) def save(self): pass a = FakeInstance('123', 'apple', {'clean_attempts': '100'}) b = FakeInstance('456', 'orange', {'clean_attempts': '3'}) c = FakeInstance('789', 'banana', {}) self.mox.StubOutWithMock(objects.InstanceList, 'get_by_filters') objects.InstanceList.get_by_filters( {'read_deleted': 'yes'}, {'deleted': True, 'soft_deleted': False, 'host': 'fake-mini', 'cleaned': False}, expected_attrs=['info_cache', 'security_groups', 'system_metadata'], use_slave=True).AndReturn([a, b, c]) self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files') self.compute.driver.delete_instance_files( mox.IgnoreArg()).AndReturn(True) 
self.compute.driver.delete_instance_files( mox.IgnoreArg()).AndReturn(False) self.mox.ReplayAll() self.compute._run_pending_deletes({}) self.assertFalse(a.cleaned) self.assertEqual('100', a.system_metadata['clean_attempts']) self.assertTrue(b.cleaned) self.assertEqual('4', b.system_metadata['clean_attempts']) self.assertFalse(c.cleaned) self.assertEqual('1', c.system_metadata['clean_attempts']) @mock.patch.object(objects.Migration, 'obj_as_admin') @mock.patch.object(objects.Migration, 'save') @mock.patch.object(objects.MigrationList, 'get_by_filters') @mock.patch.object(objects.InstanceList, 'get_by_filters') def _test_cleanup_incomplete_migrations(self, inst_host, mock_inst_get_by_filters, mock_migration_get_by_filters, mock_save, mock_obj_as_admin): def fake_inst(context, uuid, host): inst = objects.Instance(context) inst.uuid = uuid inst.host = host return inst def fake_migration(uuid, status, inst_uuid, src_host, dest_host): migration = objects.Migration() migration.uuid = uuid migration.status = status migration.instance_uuid = inst_uuid migration.source_compute = src_host migration.dest_compute = dest_host return migration fake_instances = [fake_inst(self.context, uuids.instance_1, inst_host), fake_inst(self.context, uuids.instance_2, inst_host)] fake_migrations = [fake_migration('123', 'error', uuids.instance_1, 'fake-host', 'fake-mini'), fake_migration('456', 'error', uuids.instance_2, 'fake-host', 'fake-mini')] mock_migration_get_by_filters.return_value = fake_migrations mock_inst_get_by_filters.return_value = fake_instances with mock.patch.object(self.compute.driver, 'delete_instance_files'): self.compute._cleanup_incomplete_migrations(self.context) # Ensure that migration status is set to 'failed' after instance # files deletion for those instances whose instance.host is not # same as compute host where periodic task is running. 
for inst in fake_instances: for mig in fake_migrations: if inst.uuid == mig.instance_uuid: self.assertEqual('failed', mig.status) # Make sure we filtered the instances by host in the DB query. self.assertEqual(CONF.host, mock_inst_get_by_filters.call_args[0][1]['host']) def test_cleanup_incomplete_migrations_dest_node(self): """Test to ensure instance files are deleted from destination node. If instance gets deleted during resizing/revert-resizing operation, in that case instance files gets deleted from instance.host (source host here), but there is possibility that instance files could be present on destination node. This test ensures that `_cleanup_incomplete_migration` periodic task deletes orphaned instance files from destination compute node. """ self.flags(host='fake-mini') self._test_cleanup_incomplete_migrations('fake-host') def test_cleanup_incomplete_migrations_source_node(self): """Test to ensure instance files are deleted from source node. If instance gets deleted during resizing/revert-resizing operation, in that case instance files gets deleted from instance.host (dest host here), but there is possibility that instance files could be present on source node. This test ensures that `_cleanup_incomplete_migration` periodic task deletes orphaned instance files from source compute node. 
""" self.flags(host='fake-host') self._test_cleanup_incomplete_migrations('fake-mini') def test_attach_interface_failure(self): # Test that the fault methods are invoked when an attach fails db_instance = fake_instance.fake_db_instance() f_instance = objects.Instance._from_db_object(self.context, objects.Instance(), db_instance) e = exception.InterfaceAttachFailed(instance_uuid=f_instance.uuid) @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(self.compute.network_api, 'allocate_port_for_instance', side_effect=e) @mock.patch.object(self.compute, '_instance_update', side_effect=lambda *a, **k: {}) def do_test(update, meth, add_fault): self.assertRaises(exception.InterfaceAttachFailed, self.compute.attach_interface, self.context, f_instance, 'net_id', 'port_id', None) add_fault.assert_has_calls([ mock.call(self.context, f_instance, e, mock.ANY)]) do_test() def test_detach_interface_failure(self): # Test that the fault methods are invoked when a detach fails # Build test data that will cause a PortNotFound exception f_instance = mock.MagicMock() f_instance.info_cache = mock.MagicMock() f_instance.info_cache.network_info = [] @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(self.compute, '_set_instance_obj_error_state') def do_test(meth, add_fault): self.assertRaises(exception.PortNotFound, self.compute.detach_interface, self.context, f_instance, 'port_id') add_fault.assert_has_calls( [mock.call(self.context, f_instance, mock.ANY, mock.ANY)]) do_test() def test_swap_volume_volume_api_usage(self): # This test ensures that volume_id arguments are passed to volume_api # and that volume states are OK volumes = {} old_volume_id = uuidutils.generate_uuid() volumes[old_volume_id] = {'id': old_volume_id, 'display_name': 'old_volume', 'status': 'detaching', 'size': 1} new_volume_id = uuidutils.generate_uuid() volumes[new_volume_id] = {'id': new_volume_id, 'display_name': 'new_volume', 'status': 'available', 
'size': 2} def fake_vol_api_roll_detaching(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) if volumes[volume_id]['status'] == 'detaching': volumes[volume_id]['status'] = 'in-use' fake_bdm = fake_block_device.FakeDbBlockDeviceDict( {'device_name': '/dev/vdb', 'source_type': 'volume', 'destination_type': 'volume', 'instance_uuid': uuids.instance, 'connection_info': '{"foo": "bar"}'}) def fake_vol_api_func(context, volume, *args): self.assertTrue(uuidutils.is_uuid_like(volume)) return {} def fake_vol_get(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) return volumes[volume_id] def fake_vol_unreserve(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) if volumes[volume_id]['status'] == 'attaching': volumes[volume_id]['status'] = 'available' def fake_vol_migrate_volume_completion(context, old_volume_id, new_volume_id, error=False): self.assertTrue(uuidutils.is_uuid_like(old_volume_id)) self.assertTrue(uuidutils.is_uuid_like(new_volume_id)) volumes[old_volume_id]['status'] = 'in-use' return {'save_volume_id': new_volume_id} def fake_func_exc(*args, **kwargs): raise AttributeError # Random exception def fake_swap_volume(old_connection_info, new_connection_info, instance, mountpoint, resize_to): self.assertEqual(resize_to, 2) def fake_block_device_mapping_update(ctxt, id, updates, legacy): self.assertEqual(2, updates['volume_size']) return fake_bdm self.stubs.Set(self.compute.volume_api, 'roll_detaching', fake_vol_api_roll_detaching) self.stubs.Set(self.compute.volume_api, 'get', fake_vol_get) self.stubs.Set(self.compute.volume_api, 'initialize_connection', fake_vol_api_func) self.stubs.Set(self.compute.volume_api, 'unreserve_volume', fake_vol_unreserve) self.stubs.Set(self.compute.volume_api, 'terminate_connection', fake_vol_api_func) self.stub_out('nova.db.' 
'block_device_mapping_get_by_instance_and_volume_id', lambda x, y, z, v: fake_bdm) self.stubs.Set(self.compute.driver, 'get_volume_connector', lambda x: {}) self.stubs.Set(self.compute.driver, 'swap_volume', fake_swap_volume) self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion', fake_vol_migrate_volume_completion) self.stub_out('nova.db.block_device_mapping_update', fake_block_device_mapping_update) self.stub_out('nova.db.instance_fault_create', lambda x, y: test_instance_fault.fake_faults['fake-uuid'][0]) self.stubs.Set(self.compute, '_instance_update', lambda c, u, **k: {}) # Good path self.compute.swap_volume(self.context, old_volume_id, new_volume_id, fake_instance.fake_instance_obj( self.context, **{'uuid': uuids.instance})) self.assertEqual(volumes[old_volume_id]['status'], 'in-use') # Error paths volumes[old_volume_id]['status'] = 'detaching' volumes[new_volume_id]['status'] = 'attaching' self.stubs.Set(self.compute.driver, 'swap_volume', fake_func_exc) self.assertRaises(AttributeError, self.compute.swap_volume, self.context, old_volume_id, new_volume_id, fake_instance.fake_instance_obj( self.context, **{'uuid': uuids.instance})) self.assertEqual(volumes[old_volume_id]['status'], 'in-use') self.assertEqual(volumes[new_volume_id]['status'], 'available') volumes[old_volume_id]['status'] = 'detaching' volumes[new_volume_id]['status'] = 'attaching' self.stubs.Set(self.compute.volume_api, 'initialize_connection', fake_func_exc) self.assertRaises(AttributeError, self.compute.swap_volume, self.context, old_volume_id, new_volume_id, fake_instance.fake_instance_obj( self.context, **{'uuid': uuids.instance})) self.assertEqual(volumes[old_volume_id]['status'], 'in-use') self.assertEqual(volumes[new_volume_id]['status'], 'available') @mock.patch.object(compute_utils, 'EventReporter') def test_check_can_live_migrate_source(self, event_mock): is_volume_backed = 'volume_backed' dest_check_data = migrate_data_obj.LiveMigrateData() db_instance = 
fake_instance.fake_db_instance() instance = objects.Instance._from_db_object( self.context, objects.Instance(), db_instance) self.mox.StubOutWithMock(self.compute.compute_api, 'is_volume_backed_instance') self.mox.StubOutWithMock(self.compute, '_get_instance_block_device_info') self.mox.StubOutWithMock(self.compute.driver, 'check_can_live_migrate_source') self.compute.compute_api.is_volume_backed_instance( self.context, instance).AndReturn(is_volume_backed) self.compute._get_instance_block_device_info( self.context, instance, refresh_conn_info=True ).AndReturn({'block_device_mapping': 'fake'}) self.compute.driver.check_can_live_migrate_source( self.context, instance, dest_check_data, {'block_device_mapping': 'fake'}) self.mox.ReplayAll() self.compute.check_can_live_migrate_source( self.context, instance=instance, dest_check_data=dest_check_data) event_mock.assert_called_once_with( self.context, 'compute_check_can_live_migrate_source', instance.uuid) self.assertTrue(dest_check_data.is_volume_backed) @mock.patch.object(compute_utils, 'EventReporter') def _test_check_can_live_migrate_destination(self, event_mock, do_raise=False): db_instance = fake_instance.fake_db_instance(host='fake-host') instance = objects.Instance._from_db_object( self.context, objects.Instance(), db_instance) instance.host = 'fake-host' block_migration = 'block_migration' disk_over_commit = 'disk_over_commit' src_info = 'src_info' dest_info = 'dest_info' dest_check_data = dict(foo='bar') mig_data = dict(cow='moo') self.mox.StubOutWithMock(self.compute, '_get_compute_info') self.mox.StubOutWithMock(self.compute.driver, 'check_can_live_migrate_destination') self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'check_can_live_migrate_source') self.mox.StubOutWithMock(self.compute.driver, 'check_can_live_migrate_destination_cleanup') self.compute._get_compute_info(self.context, 'fake-host').AndReturn(src_info) self.compute._get_compute_info(self.context, CONF.host).AndReturn(dest_info) 
self.compute.driver.check_can_live_migrate_destination( self.context, instance, src_info, dest_info, block_migration, disk_over_commit).AndReturn(dest_check_data) mock_meth = self.compute.compute_rpcapi.check_can_live_migrate_source( self.context, instance, dest_check_data) if do_raise: mock_meth.AndRaise(test.TestingException()) self.mox.StubOutWithMock(db, 'instance_fault_create') db.instance_fault_create( self.context, mox.IgnoreArg()).AndReturn( test_instance_fault.fake_faults['fake-uuid'][0]) else: mock_meth.AndReturn(mig_data) self.compute.driver.check_can_live_migrate_destination_cleanup( self.context, dest_check_data) self.mox.ReplayAll() result = self.compute.check_can_live_migrate_destination( self.context, instance=instance, block_migration=block_migration, disk_over_commit=disk_over_commit) self.assertEqual(mig_data, result) event_mock.assert_called_once_with( self.context, 'compute_check_can_live_migrate_destination', instance.uuid) def test_check_can_live_migrate_destination_success(self): self._test_check_can_live_migrate_destination() def test_check_can_live_migrate_destination_fail(self): self.assertRaises( test.TestingException, self._test_check_can_live_migrate_destination, do_raise=True) @mock.patch('nova.compute.manager.InstanceEvents._lock_name') def test_prepare_for_instance_event(self, lock_name_mock): inst_obj = objects.Instance(uuid=uuids.instance) result = self.compute.instance_events.prepare_for_instance_event( inst_obj, 'test-event') self.assertIn(uuids.instance, self.compute.instance_events._events) self.assertIn('test-event', self.compute.instance_events._events[uuids.instance]) self.assertEqual( result, self.compute.instance_events._events[uuids.instance]['test-event']) self.assertTrue(hasattr(result, 'send')) lock_name_mock.assert_called_once_with(inst_obj) @mock.patch('nova.compute.manager.InstanceEvents._lock_name') def test_pop_instance_event(self, lock_name_mock): event = eventlet_event.Event() 
self.compute.instance_events._events = { uuids.instance: { 'network-vif-plugged': event, } } inst_obj = objects.Instance(uuid=uuids.instance) event_obj = objects.InstanceExternalEvent(name='network-vif-plugged', tag=None) result = self.compute.instance_events.pop_instance_event(inst_obj, event_obj) self.assertEqual(result, event) lock_name_mock.assert_called_once_with(inst_obj) @mock.patch('nova.compute.manager.InstanceEvents._lock_name') def test_clear_events_for_instance(self, lock_name_mock): event = eventlet_event.Event() self.compute.instance_events._events = { uuids.instance: { 'test-event': event, } } inst_obj = objects.Instance(uuid=uuids.instance) result = self.compute.instance_events.clear_events_for_instance( inst_obj) self.assertEqual(result, {'test-event': event}) lock_name_mock.assert_called_once_with(inst_obj) def test_instance_events_lock_name(self): inst_obj = objects.Instance(uuid=uuids.instance) result = self.compute.instance_events._lock_name(inst_obj) self.assertEqual(result, "%s-events" % uuids.instance) def test_prepare_for_instance_event_again(self): inst_obj = objects.Instance(uuid=uuids.instance) self.compute.instance_events.prepare_for_instance_event( inst_obj, 'test-event') # A second attempt will avoid creating a new list; make sure we # get the current list result = self.compute.instance_events.prepare_for_instance_event( inst_obj, 'test-event') self.assertIn(uuids.instance, self.compute.instance_events._events) self.assertIn('test-event', self.compute.instance_events._events[uuids.instance]) self.assertEqual( result, self.compute.instance_events._events[uuids.instance]['test-event']) self.assertTrue(hasattr(result, 'send')) def test_process_instance_event(self): event = eventlet_event.Event() self.compute.instance_events._events = { uuids.instance: { 'network-vif-plugged': event, } } inst_obj = objects.Instance(uuid=uuids.instance) event_obj = objects.InstanceExternalEvent(name='network-vif-plugged', tag=None) 
self.compute._process_instance_event(inst_obj, event_obj) self.assertTrue(event.ready()) self.assertEqual(event_obj, event.wait()) self.assertEqual({}, self.compute.instance_events._events) def test_process_instance_vif_deleted_event(self): vif1 = fake_network_cache_model.new_vif() vif1['id'] = '1' vif2 = fake_network_cache_model.new_vif() vif2['id'] = '2' nw_info = network_model.NetworkInfo([vif1, vif2]) info_cache = objects.InstanceInfoCache(network_info=nw_info, instance_uuid=uuids.instance) inst_obj = objects.Instance(id=3, uuid=uuids.instance, info_cache=info_cache) @mock.patch.object(manager.base_net_api, 'update_instance_cache_with_nw_info') @mock.patch.object(self.compute.driver, 'detach_interface') def do_test(detach_interface, update_instance_cache_with_nw_info): self.compute._process_instance_vif_deleted_event(self.context, inst_obj, vif2['id']) update_instance_cache_with_nw_info.assert_called_once_with( self.compute.network_api, self.context, inst_obj, nw_info=[vif1]) detach_interface.assert_called_once_with(inst_obj, vif2) do_test() def test_external_instance_event(self): instances = [ objects.Instance(id=1, uuid=uuids.instance_1), objects.Instance(id=2, uuid=uuids.instance_2), objects.Instance(id=3, uuid=uuids.instance_3)] events = [ objects.InstanceExternalEvent(name='network-changed', tag='tag1', instance_uuid=uuids.instance_1), objects.InstanceExternalEvent(name='network-vif-plugged', instance_uuid=uuids.instance_2, tag='tag2'), objects.InstanceExternalEvent(name='network-vif-deleted', instance_uuid=uuids.instance_3, tag='tag3')] @mock.patch.object(self.compute, '_process_instance_vif_deleted_event') @mock.patch.object(self.compute.network_api, 'get_instance_nw_info') @mock.patch.object(self.compute, '_process_instance_event') def do_test(_process_instance_event, get_instance_nw_info, _process_instance_vif_deleted_event): self.compute.external_instance_event(self.context, instances, events) get_instance_nw_info.assert_called_once_with(self.context, 
instances[0]) _process_instance_event.assert_called_once_with(instances[1], events[1]) _process_instance_vif_deleted_event.assert_called_once_with( self.context, instances[2], events[2].tag) do_test() def test_external_instance_event_with_exception(self): vif1 = fake_network_cache_model.new_vif() vif1['id'] = '1' vif2 = fake_network_cache_model.new_vif() vif2['id'] = '2' nw_info = network_model.NetworkInfo([vif1, vif2]) info_cache = objects.InstanceInfoCache(network_info=nw_info, instance_uuid=uuids.instance_2) instances = [ objects.Instance(id=1, uuid=uuids.instance_1), objects.Instance(id=2, uuid=uuids.instance_2, info_cache=info_cache), objects.Instance(id=3, uuid=uuids.instance_3)] events = [ objects.InstanceExternalEvent(name='network-changed', tag='tag1', instance_uuid=uuids.instance_1), objects.InstanceExternalEvent(name='network-vif-deleted', instance_uuid=uuids.instance_2, tag='2'), objects.InstanceExternalEvent(name='network-vif-plugged', instance_uuid=uuids.instance_3, tag='tag3')] # Make sure all the three events are handled despite the exceptions in # processing events 1 and 2 @mock.patch.object(manager.base_net_api, 'update_instance_cache_with_nw_info') @mock.patch.object(self.compute.driver, 'detach_interface', side_effect=exception.NovaException) @mock.patch.object(self.compute.network_api, 'get_instance_nw_info', side_effect=exception.InstanceInfoCacheNotFound( instance_uuid=uuids.instance_1)) @mock.patch.object(self.compute, '_process_instance_event') def do_test(_process_instance_event, get_instance_nw_info, detach_interface, update_instance_cache_with_nw_info): self.compute.external_instance_event(self.context, instances, events) get_instance_nw_info.assert_called_once_with(self.context, instances[0]) update_instance_cache_with_nw_info.assert_called_once_with( self.compute.network_api, self.context, instances[1], nw_info=[vif1]) detach_interface.assert_called_once_with(instances[1], vif2) 
            # (continuation: final assertion of the do_test closure defined
            # above this chunk, followed by its invocation)
            _process_instance_event.assert_called_once_with(instances[2],
                                                            events[2])

        do_test()

    def test_cancel_all_events(self):
        # cancel_all_events must send a 'failed' event for every pending
        # entry, splitting 'network-vif-plugged-bar' back into event name
        # and tag, and must be safe to call a second time.
        inst = objects.Instance(uuid=uuids.instance)
        fake_eventlet_event = mock.MagicMock()
        self.compute.instance_events._events = {
            inst.uuid: {
                'network-vif-plugged-bar': fake_eventlet_event,
            }
        }
        self.compute.instance_events.cancel_all_events()
        # call it again to make sure we handle that gracefully
        self.compute.instance_events.cancel_all_events()
        self.assertTrue(fake_eventlet_event.send.called)
        event = fake_eventlet_event.send.call_args_list[0][0][0]
        self.assertEqual('network-vif-plugged', event.name)
        self.assertEqual('bar', event.tag)
        self.assertEqual('failed', event.status)

    def test_cleanup_cancels_all_events(self):
        # cleanup_host must cancel any still-pending instance events.
        with mock.patch.object(self.compute, 'instance_events') as mock_ev:
            self.compute.cleanup_host()
            mock_ev.cancel_all_events.assert_called_once_with()

    def test_cleanup_blocks_new_events(self):
        # After cancel_all_events, waiting on a new instance event still
        # runs the body but reports the event through the error callback.
        instance = objects.Instance(uuid=uuids.instance)
        self.compute.instance_events.cancel_all_events()
        callback = mock.MagicMock()
        body = mock.MagicMock()
        with self.compute.virtapi.wait_for_instance_event(
                instance, ['network-vif-plugged-bar'],
                error_callback=callback):
            body()
        self.assertTrue(body.called)
        callback.assert_called_once_with('network-vif-plugged-bar', instance)

    def test_pop_events_fails_gracefully(self):
        # With the event dict already torn down (None), popping an event
        # returns None instead of raising.
        inst = objects.Instance(uuid=uuids.instance)
        event = mock.MagicMock()
        self.compute.instance_events._events = None
        self.assertIsNone(
            self.compute.instance_events.pop_instance_event(inst, event))

    def test_clear_events_fails_gracefully(self):
        # Likewise clearing events after teardown yields an empty dict.
        inst = objects.Instance(uuid=uuids.instance)
        self.compute.instance_events._events = None
        self.assertEqual(
            self.compute.instance_events.clear_events_for_instance(inst), {})

    def test_retry_reboot_pending_soft(self):
        # A pending soft reboot on a RUNNING guest is allowed and stays
        # SOFT.
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.task_state = task_states.REBOOT_PENDING
        instance.vm_state = vm_states.ACTIVE
        with mock.patch.object(self.compute,
                               '_get_power_state',
                               return_value=power_state.RUNNING):
            # NOTE(review): these _retry_reboot tests pass the nova
            # `context` *module* as the context argument rather than
            # self.context; _retry_reboot apparently does not use it —
            # confirm and consider switching to self.context.
            allow_reboot, reboot_type = self.compute._retry_reboot(
                context, instance)
            self.assertTrue(allow_reboot)
            self.assertEqual(reboot_type, 'SOFT')

    def test_retry_reboot_pending_hard(self):
        # A pending hard reboot on a RUNNING guest is allowed as HARD.
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.task_state = task_states.REBOOT_PENDING_HARD
        instance.vm_state = vm_states.ACTIVE
        with mock.patch.object(self.compute,
                               '_get_power_state',
                               return_value=power_state.RUNNING):
            allow_reboot, reboot_type = self.compute._retry_reboot(
                context, instance)
            self.assertTrue(allow_reboot)
            self.assertEqual(reboot_type, 'HARD')

    def test_retry_reboot_starting_soft_off(self):
        # A soft reboot already started with the guest power unknown
        # (NOSTATE) escalates to HARD.
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.task_state = task_states.REBOOT_STARTED
        with mock.patch.object(self.compute,
                               '_get_power_state',
                               return_value=power_state.NOSTATE):
            allow_reboot, reboot_type = self.compute._retry_reboot(
                context, instance)
            self.assertTrue(allow_reboot)
            self.assertEqual(reboot_type, 'HARD')

    def test_retry_reboot_starting_hard_off(self):
        # A started hard reboot with guest power NOSTATE is retried HARD.
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.task_state = task_states.REBOOT_STARTED_HARD
        with mock.patch.object(self.compute,
                               '_get_power_state',
                               return_value=power_state.NOSTATE):
            allow_reboot, reboot_type = self.compute._retry_reboot(
                context, instance)
            self.assertTrue(allow_reboot)
            self.assertEqual(reboot_type, 'HARD')

    def test_retry_reboot_starting_hard_on(self):
        # A started hard reboot with the guest already RUNNING is not
        # retried.
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        instance.task_state = task_states.REBOOT_STARTED_HARD
        with mock.patch.object(self.compute,
                               '_get_power_state',
                               return_value=power_state.RUNNING):
            allow_reboot, reboot_type = self.compute._retry_reboot(
                context, instance)
            self.assertFalse(allow_reboot)
            self.assertEqual(reboot_type, 'HARD')

    def test_retry_reboot_no_reboot(self):
        instance = objects.Instance(self.context)
        instance.uuid = uuids.instance
        # (continues past this chunk: the assigned task_state value and
        # the remainder of this method are on the following line)
        instance.task_state =
'bar' with mock.patch.object(self.compute, '_get_power_state', return_value=power_state.RUNNING): allow_reboot, reboot_type = self.compute._retry_reboot( context, instance) self.assertFalse(allow_reboot) self.assertEqual(reboot_type, 'HARD') @mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_and_instance') @mock.patch('nova.compute.manager.ComputeManager._driver_detach_volume') @mock.patch('nova.objects.Instance._from_db_object') def test_remove_volume_connection(self, inst_from_db, detach, bdm_get): bdm = mock.sentinel.bdm inst_obj = mock.Mock() inst_obj.uuid = 'uuid' bdm_get.return_value = bdm inst_from_db.return_value = inst_obj with mock.patch.object(self.compute, 'volume_api'): self.compute.remove_volume_connection(self.context, 'vol', inst_obj) detach.assert_called_once_with(self.context, inst_obj, bdm) bdm_get.assert_called_once_with(self.context, 'vol', 'uuid') def test_detach_volume(self): self._test_detach_volume() def test_detach_volume_not_destroy_bdm(self): self._test_detach_volume(destroy_bdm=False) @mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_and_instance') @mock.patch('nova.compute.manager.ComputeManager._driver_detach_volume') @mock.patch('nova.compute.manager.ComputeManager.' 
'_notify_about_instance_usage') def _test_detach_volume(self, notify_inst_usage, detach, bdm_get, destroy_bdm=True): volume_id = uuids.volume inst_obj = mock.Mock() inst_obj.uuid = uuids.instance attachment_id = uuids.attachment bdm = mock.MagicMock(spec=objects.BlockDeviceMapping) bdm.device_name = 'vdb' bdm_get.return_value = bdm detach.return_value = {} with mock.patch.object(self.compute, 'volume_api') as volume_api: with mock.patch.object(self.compute, 'driver') as driver: connector_sentinel = mock.sentinel.connector driver.get_volume_connector.return_value = connector_sentinel self.compute._detach_volume(self.context, volume_id, inst_obj, destroy_bdm=destroy_bdm, attachment_id=attachment_id) detach.assert_called_once_with(self.context, inst_obj, bdm) driver.get_volume_connector.assert_called_once_with(inst_obj) volume_api.terminate_connection.assert_called_once_with( self.context, volume_id, connector_sentinel) volume_api.detach.assert_called_once_with(mock.ANY, volume_id, inst_obj.uuid, attachment_id) notify_inst_usage.assert_called_once_with( self.context, inst_obj, "volume.detach", extra_usage_info={'volume_id': volume_id} ) if destroy_bdm: bdm.destroy.assert_called_once_with() else: self.assertFalse(bdm.destroy.called) def test_detach_volume_evacuate(self): """For evacuate, terminate_connection is called with original host.""" expected_connector = {'host': 'evacuated-host'} conn_info_str = '{"connector": {"host": "evacuated-host"}}' self._test_detach_volume_evacuate(conn_info_str, expected=expected_connector) def test_detach_volume_evacuate_legacy(self): """Test coverage for evacuate with legacy attachments. In this case, legacy means the volume was attached to the instance before nova stashed the connector in connection_info. The connector sent to terminate_connection will still be for the local host in this case because nova does not have the info to get the connector for the original (evacuated) host. 
""" conn_info_str = '{"foo": "bar"}' # Has no 'connector'. self._test_detach_volume_evacuate(conn_info_str) def test_detach_volume_evacuate_mismatch(self): """Test coverage for evacuate with connector mismatch. For evacuate, if the stashed connector also has the wrong host, then log it and stay with the local connector. """ conn_info_str = '{"connector": {"host": "other-host"}}' self._test_detach_volume_evacuate(conn_info_str) @mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_and_instance') @mock.patch('nova.compute.manager.ComputeManager.' '_notify_about_instance_usage') def _test_detach_volume_evacuate(self, conn_info_str, notify_inst_usage, bdm_get, expected=None): """Re-usable code for detach volume evacuate test cases. :param conn_info_str: String form of the stashed connector. :param expected: Dict of the connector that is expected in the terminate call (optional). Default is to expect the local connector to be used. """ volume_id = 'vol_id' instance = fake_instance.fake_instance_obj(self.context, host='evacuated-host') bdm = mock.Mock() bdm.connection_info = conn_info_str bdm_get.return_value = bdm local_connector = {'host': 'local-connector-host'} expected_connector = local_connector if not expected else expected with mock.patch.object(self.compute, 'volume_api') as volume_api: with mock.patch.object(self.compute, 'driver') as driver: driver.get_volume_connector.return_value = local_connector self.compute._detach_volume(self.context, volume_id, instance, destroy_bdm=False) driver.get_volume_connector.assert_called_once_with(instance) volume_api.terminate_connection.assert_called_once_with( self.context, volume_id, expected_connector) volume_api.detach.assert_called_once_with(mock.ANY, volume_id, instance.uuid, None) notify_inst_usage.assert_called_once_with( self.context, instance, "volume.detach", extra_usage_info={'volume_id': volume_id} ) def test__driver_detach_volume_return(self): """_driver_detach_volume returns the connection_info from 
loads().""" with mock.patch.object(jsonutils, 'loads') as loads: conn_info_str = 'test-expected-loads-param' bdm = mock.Mock() bdm.connection_info = conn_info_str loads.return_value = {'test-loads-key': 'test loads return value'} instance = fake_instance.fake_instance_obj(self.context) ret = self.compute._driver_detach_volume(self.context, instance, bdm) self.assertEqual(loads.return_value, ret) loads.assert_called_once_with(conn_info_str) def _test_rescue(self, clean_shutdown=True): instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ACTIVE) fake_nw_info = network_model.NetworkInfo() rescue_image_meta = objects.ImageMeta.from_dict( {'id': 'fake', 'name': 'fake'}) with test.nested( mock.patch.object(self.context, 'elevated', return_value=self.context), mock.patch.object(self.compute.network_api, 'get_instance_nw_info', return_value=fake_nw_info), mock.patch.object(self.compute, '_get_rescue_image', return_value=rescue_image_meta), mock.patch.object(self.compute, '_notify_about_instance_usage'), mock.patch.object(self.compute, '_power_off_instance'), mock.patch.object(self.compute.driver, 'rescue'), mock.patch.object(compute_utils, 'notify_usage_exists'), mock.patch.object(self.compute, '_get_power_state', return_value=power_state.RUNNING), mock.patch.object(instance, 'save') ) as ( elevated_context, get_nw_info, get_rescue_image, notify_instance_usage, power_off_instance, driver_rescue, notify_usage_exists, get_power_state, instance_save ): self.compute.rescue_instance( self.context, instance, rescue_password='verybadpass', rescue_image_ref=None, clean_shutdown=clean_shutdown) # assert the field values on the instance object self.assertEqual(vm_states.RESCUED, instance.vm_state) self.assertIsNone(instance.task_state) self.assertEqual(power_state.RUNNING, instance.power_state) self.assertIsNotNone(instance.launched_at) # assert our mock calls get_nw_info.assert_called_once_with(self.context, instance) 
get_rescue_image.assert_called_once_with( self.context, instance, None) extra_usage_info = {'rescue_image_name': 'fake'} notify_calls = [ mock.call(self.context, instance, "rescue.start", extra_usage_info=extra_usage_info, network_info=fake_nw_info), mock.call(self.context, instance, "rescue.end", extra_usage_info=extra_usage_info, network_info=fake_nw_info) ] notify_instance_usage.assert_has_calls(notify_calls) power_off_instance.assert_called_once_with(self.context, instance, clean_shutdown) driver_rescue.assert_called_once_with( self.context, instance, fake_nw_info, rescue_image_meta, 'verybadpass') notify_usage_exists.assert_called_once_with(self.compute.notifier, self.context, instance, current_period=True) instance_save.assert_called_once_with( expected_task_state=task_states.RESCUING) def test_rescue(self): self._test_rescue() def test_rescue_forced_shutdown(self): self._test_rescue(clean_shutdown=False) def test_unrescue(self): instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.RESCUED) fake_nw_info = network_model.NetworkInfo() with test.nested( mock.patch.object(self.context, 'elevated', return_value=self.context), mock.patch.object(self.compute.network_api, 'get_instance_nw_info', return_value=fake_nw_info), mock.patch.object(self.compute, '_notify_about_instance_usage'), mock.patch.object(self.compute.driver, 'unrescue'), mock.patch.object(self.compute, '_get_power_state', return_value=power_state.RUNNING), mock.patch.object(instance, 'save') ) as ( elevated_context, get_nw_info, notify_instance_usage, driver_unrescue, get_power_state, instance_save ): self.compute.unrescue_instance(self.context, instance) # assert the field values on the instance object self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) self.assertEqual(power_state.RUNNING, instance.power_state) # assert our mock calls get_nw_info.assert_called_once_with(self.context, instance) notify_calls = [ mock.call(self.context, 
instance, "unrescue.start", network_info=fake_nw_info), mock.call(self.context, instance, "unrescue.end", network_info=fake_nw_info) ] notify_instance_usage.assert_has_calls(notify_calls) driver_unrescue.assert_called_once_with(instance, fake_nw_info) instance_save.assert_called_once_with( expected_task_state=task_states.UNRESCUING) @mock.patch('nova.compute.manager.ComputeManager._get_power_state', return_value=power_state.RUNNING) @mock.patch.object(objects.Instance, 'save') @mock.patch('nova.utils.generate_password', return_value='fake-pass') def test_set_admin_password(self, gen_password_mock, instance_save_mock, power_state_mock): # Ensure instance can have its admin password set. instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ACTIVE, task_state=task_states.UPDATING_PASSWORD) @mock.patch.object(self.context, 'elevated', return_value=self.context) @mock.patch.object(self.compute.driver, 'set_admin_password') def do_test(driver_mock, elevated_mock): # call the manager method self.compute.set_admin_password(self.context, instance, None) # make our assertions self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) power_state_mock.assert_called_once_with(self.context, instance) driver_mock.assert_called_once_with(instance, 'fake-pass') instance_save_mock.assert_called_once_with( expected_task_state=task_states.UPDATING_PASSWORD) do_test() @mock.patch('nova.compute.manager.ComputeManager._get_power_state', return_value=power_state.NOSTATE) @mock.patch('nova.compute.manager.ComputeManager._instance_update') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') def test_set_admin_password_bad_state(self, add_fault_mock, instance_save_mock, update_mock, power_state_mock): # Test setting password while instance is rebuilding. 
instance = fake_instance.fake_instance_obj(self.context) with mock.patch.object(self.context, 'elevated', return_value=self.context): # call the manager method self.assertRaises(exception.InstancePasswordSetFailed, self.compute.set_admin_password, self.context, instance, None) # make our assertions power_state_mock.assert_called_once_with(self.context, instance) instance_save_mock.assert_called_once_with( expected_task_state=task_states.UPDATING_PASSWORD) add_fault_mock.assert_called_once_with( self.context, instance, mock.ANY, mock.ANY) @mock.patch('nova.utils.generate_password', return_value='fake-pass') @mock.patch('nova.compute.manager.ComputeManager._get_power_state', return_value=power_state.RUNNING) @mock.patch('nova.compute.manager.ComputeManager._instance_update') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') def _do_test_set_admin_password_driver_error(self, exc, expected_vm_state, expected_task_state, expected_exception, add_fault_mock, instance_save_mock, update_mock, power_state_mock, gen_password_mock): # Ensure expected exception is raised if set_admin_password fails. 
instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ACTIVE, task_state=task_states.UPDATING_PASSWORD) @mock.patch.object(self.context, 'elevated', return_value=self.context) @mock.patch.object(self.compute.driver, 'set_admin_password', side_effect=exc) def do_test(driver_mock, elevated_mock): # error raised from the driver should not reveal internal # information so a new error is raised self.assertRaises(expected_exception, self.compute.set_admin_password, self.context, instance=instance, new_pass=None) if expected_exception == NotImplementedError: instance_save_mock.assert_called_once_with( expected_task_state=task_states.UPDATING_PASSWORD) else: # setting the instance to error state instance_save_mock.assert_called_once_with() self.assertEqual(expected_vm_state, instance.vm_state) # check revert_task_state decorator update_mock.assert_called_once_with( self.context, instance, task_state=expected_task_state) # check wrap_instance_fault decorator add_fault_mock.assert_called_once_with( self.context, instance, mock.ANY, mock.ANY) do_test() def test_set_admin_password_driver_not_authorized(self): # Ensure expected exception is raised if set_admin_password not # authorized. exc = exception.Forbidden('Internal error') expected_exception = exception.InstancePasswordSetFailed self._do_test_set_admin_password_driver_error( exc, vm_states.ERROR, None, expected_exception) def test_set_admin_password_driver_not_implemented(self): # Ensure expected exception is raised if set_admin_password not # implemented by driver. 
exc = NotImplementedError() expected_exception = NotImplementedError self._do_test_set_admin_password_driver_error( exc, vm_states.ACTIVE, None, expected_exception) def test_destroy_evacuated_instances(self): our_host = self.compute.host instance_1 = objects.Instance(self.context) instance_1.uuid = uuids.instance_1 instance_1.task_state = None instance_1.vm_state = vm_states.ACTIVE instance_1.host = 'not-' + our_host instance_2 = objects.Instance(self.context) instance_2.uuid = uuids.instance_2 instance_2.task_state = None instance_2.vm_state = vm_states.ACTIVE instance_2.host = 'not-' + our_host # Only instance 2 has a migration record migration = objects.Migration(instance_uuid=instance_2.uuid) # Consider the migration successful migration.status = 'done' with test.nested( mock.patch.object(self.compute, '_get_instances_on_driver', return_value=[instance_1, instance_2]), mock.patch.object(self.compute.network_api, 'get_instance_nw_info', return_value=None), mock.patch.object(self.compute, '_get_instance_block_device_info', return_value={}), mock.patch.object(self.compute, '_is_instance_storage_shared', return_value=False), mock.patch.object(self.compute.driver, 'destroy'), mock.patch('nova.objects.MigrationList.get_by_filters'), mock.patch('nova.objects.Migration.save') ) as (_get_instances_on_driver, get_instance_nw_info, _get_instance_block_device_info, _is_instance_storage_shared, destroy, migration_list, migration_save): migration_list.return_value = [migration] self.compute._destroy_evacuated_instances(self.context) # Only instance 2 should be deleted. Instance 1 is still running # here, but no migration from our host exists, so ignore it destroy.assert_called_once_with(self.context, instance_2, None, {}, True) @mock.patch('nova.compute.manager.ComputeManager.' 
                '_destroy_evacuated_instances')
    @mock.patch('nova.compute.manager.LOG')
    def test_init_host_foreign_instance(self, mock_log, mock_destroy):
        # An instance whose host attribute does not match ours must be left
        # untouched on init (no save) and a warning logged.
        inst = mock.MagicMock()
        inst.host = self.compute.host + '-alt'
        self.compute._init_instance(mock.sentinel.context, inst)
        self.assertFalse(inst.save.called)
        self.assertTrue(mock_log.warning.called)
        msg = mock_log.warning.call_args_list[0]
        self.assertIn('appears to not be owned by this host', msg[0][0])

    @mock.patch('nova.compute.manager.ComputeManager._instance_update')
    def test_error_out_instance_on_exception_not_implemented_err(self,
                                                        inst_update_mock):
        instance = fake_instance.fake_instance_obj(self.context)

        def do_test():
            with self.compute._error_out_instance_on_exception(
                    self.context, instance, instance_state=vm_states.STOPPED):
                raise NotImplementedError('test')

        self.assertRaises(NotImplementedError, do_test)
        # NotImplementedError reverts the instance to the caller-supplied
        # instance_state rather than ERROR.
        inst_update_mock.assert_called_once_with(
            self.context, instance,
            vm_state=vm_states.STOPPED,
            task_state=None)

    @mock.patch('nova.compute.manager.ComputeManager._instance_update')
    def test_error_out_instance_on_exception_inst_fault_rollback(self,
                                                        inst_update_mock):
        instance = fake_instance.fake_instance_obj(self.context)

        def do_test():
            with self.compute._error_out_instance_on_exception(self.context,
                                                               instance):
                raise exception.InstanceFaultRollback(
                    inner_exception=test.TestingException('test'))

        # InstanceFaultRollback re-raises the wrapped inner exception and
        # rolls the instance back to ACTIVE.
        self.assertRaises(test.TestingException, do_test)
        inst_update_mock.assert_called_once_with(
            self.context, instance,
            vm_state=vm_states.ACTIVE,
            task_state=None)

    @mock.patch('nova.compute.manager.ComputeManager.'
                '_set_instance_obj_error_state')
    def test_error_out_instance_on_exception_unknown_with_quotas(self,
                                                                 set_error):
        instance = fake_instance.fake_instance_obj(self.context)
        quotas = mock.create_autospec(objects.Quotas, spec_set=True)

        def do_test():
            with self.compute._error_out_instance_on_exception(
                    self.context, instance, quotas):
                raise test.TestingException('test')

        self.assertRaises(test.TestingException, do_test)
        # An unknown exception rolls back the quota reservation exactly
        # once...
        self.assertEqual(1, len(quotas.method_calls))
        self.assertEqual(mock.call.rollback(), quotas.method_calls[0])
        # ...and puts the instance into ERROR state.
        set_error.assert_called_once_with(self.context, instance)

    def test_cleanup_volumes(self):
        instance = fake_instance.fake_instance_obj(self.context)
        bdm_do_not_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
            {'volume_id': 'fake-id1', 'source_type': 'image',
                'delete_on_termination': False})
        bdm_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
            {'volume_id': 'fake-id2', 'source_type': 'image',
                'delete_on_termination': True})
        bdms = block_device_obj.block_device_make_list(self.context,
            [bdm_do_not_delete_dict, bdm_delete_dict])

        with mock.patch.object(self.compute.volume_api,
                'delete') as volume_delete:
            self.compute._cleanup_volumes(self.context, instance.uuid, bdms)
            # Only the delete_on_termination=True volume may be deleted.
            volume_delete.assert_called_once_with(self.context,
                    bdms[1].volume_id)

    def test_cleanup_volumes_exception_do_not_raise(self):
        instance = fake_instance.fake_instance_obj(self.context)
        bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
            {'volume_id': 'fake-id1', 'source_type': 'image',
                'delete_on_termination': True})
        bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict(
            {'volume_id': 'fake-id2', 'source_type': 'image',
                'delete_on_termination': True})
        bdms = block_device_obj.block_device_make_list(self.context,
            [bdm_dict1, bdm_dict2])

        with mock.patch.object(self.compute.volume_api,
                'delete',
                side_effect=[test.TestingException(), None]) as volume_delete:
            # With raise_exc=False a failed delete must not abort cleanup of
            # the remaining volumes.
            self.compute._cleanup_volumes(self.context, instance.uuid, bdms,
                    raise_exc=False)
            calls = [mock.call(self.context, bdm.volume_id) for
                    bdm in bdms]
            self.assertEqual(calls, volume_delete.call_args_list)

    def test_cleanup_volumes_exception_raise(self):
        instance = fake_instance.fake_instance_obj(self.context)
        bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
            {'volume_id': 'fake-id1', 'source_type': 'image',
                'delete_on_termination': True})
        bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict(
            {'volume_id': 'fake-id2', 'source_type': 'image',
                'delete_on_termination': True})
        bdms = block_device_obj.block_device_make_list(self.context,
            [bdm_dict1, bdm_dict2])

        with mock.patch.object(self.compute.volume_api,
                'delete',
                side_effect=[test.TestingException(), None]) as volume_delete:
            # By default the first failure propagates, but every volume is
            # still attempted before the exception surfaces.
            self.assertRaises(test.TestingException,
                    self.compute._cleanup_volumes, self.context,
                    instance.uuid, bdms)
            calls = [mock.call(self.context, bdm.volume_id) for
                    bdm in bdms]
            self.assertEqual(calls, volume_delete.call_args_list)

    def test_stop_instance_task_state_none_power_state_shutdown(self):
        # Tests that stop_instance doesn't puke when the instance power_state
        # is shutdown and the task_state is None.
instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ACTIVE, task_state=None, power_state=power_state.SHUTDOWN) @mock.patch.object(self.compute, '_get_power_state', return_value=power_state.SHUTDOWN) @mock.patch.object(self.compute, '_notify_about_instance_usage') @mock.patch.object(self.compute, '_power_off_instance') @mock.patch.object(instance, 'save') def do_test(save_mock, power_off_mock, notify_mock, get_state_mock): # run the code self.compute.stop_instance(self.context, instance, True) # assert the calls self.assertEqual(2, get_state_mock.call_count) notify_mock.assert_has_calls([ mock.call(self.context, instance, 'power_off.start'), mock.call(self.context, instance, 'power_off.end') ]) power_off_mock.assert_called_once_with( self.context, instance, True) save_mock.assert_called_once_with( expected_task_state=[task_states.POWERING_OFF, None]) self.assertEqual(power_state.SHUTDOWN, instance.power_state) self.assertIsNone(instance.task_state) self.assertEqual(vm_states.STOPPED, instance.vm_state) do_test() def test_reset_network_driver_not_implemented(self): instance = fake_instance.fake_instance_obj(self.context) @mock.patch.object(self.compute.driver, 'reset_network', side_effect=NotImplementedError()) @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') def do_test(mock_add_fault, mock_reset): self.assertRaises(messaging.ExpectedException, self.compute.reset_network, self.context, instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(NotImplementedError, self.compute.reset_network, self.context, instance) do_test() def _test_rebuild_ex(self, instance, ex): # Test that we do not raise on certain exceptions with test.nested( mock.patch.object(self.compute, '_get_compute_info'), mock.patch.object(self.compute, '_do_rebuild_instance_with_claim', side_effect=ex), mock.patch.object(self.compute, '_set_migration_status'), mock.patch.object(self.compute, '_notify_about_instance_usage') ) as (mock_get, 
mock_rebuild, mock_set, mock_notify): self.compute.rebuild_instance(self.context, instance, None, None, None, None, None, None, None) mock_set.assert_called_once_with(None, 'failed') mock_notify.assert_called_once_with(mock.ANY, instance, 'rebuild.error', fault=ex) def test_rebuild_deleting(self): instance = objects.Instance(uuid='fake-uuid') ex = exception.UnexpectedDeletingTaskStateError( instance_uuid=instance.uuid, expected='expected', actual='actual') self._test_rebuild_ex(instance, ex) def test_rebuild_notfound(self): instance = objects.Instance(uuid='fake-uuid') ex = exception.InstanceNotFound(instance_id=instance.uuid) self._test_rebuild_ex(instance, ex) def test_rebuild_default_impl(self): def _detach(context, bdms): # NOTE(rpodolyaka): check that instance has been powered off by # the time we detach block devices, exact calls arguments will be # checked below self.assertTrue(mock_power_off.called) self.assertFalse(mock_destroy.called) def _attach(context, instance, bdms, do_check_attach=True): return {'block_device_mapping': 'shared_block_storage'} def _spawn(context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): self.assertEqual(block_device_info['block_device_mapping'], 'shared_block_storage') with test.nested( mock.patch.object(self.compute.driver, 'destroy', return_value=None), mock.patch.object(self.compute.driver, 'spawn', side_effect=_spawn), mock.patch.object(objects.Instance, 'save', return_value=None), mock.patch.object(self.compute, '_power_off_instance', return_value=None) ) as( mock_destroy, mock_spawn, mock_save, mock_power_off ): instance = fake_instance.fake_instance_obj(self.context) instance.migration_context = None instance.numa_topology = None instance.task_state = task_states.REBUILDING instance.save(expected_task_state=[task_states.REBUILDING]) self.compute._rebuild_default_impl(self.context, instance, None, [], admin_password='new_pass', bdms=[], detach_block_devices=_detach, 
attach_block_devices=_attach, network_info=None, recreate=False, block_device_info=None, preserve_ephemeral=False) self.assertTrue(mock_save.called) self.assertTrue(mock_spawn.called) mock_destroy.assert_called_once_with( self.context, instance, network_info=None, block_device_info=None) mock_power_off.assert_called_once_with( self.context, instance, clean_shutdown=True) @mock.patch.object(utils, 'last_completed_audit_period', return_value=(0, 0)) @mock.patch.object(time, 'time', side_effect=[10, 20, 21]) @mock.patch.object(objects.InstanceList, 'get_by_host', return_value=[]) @mock.patch.object(objects.BandwidthUsage, 'get_by_instance_uuid_and_mac') @mock.patch.object(db, 'bw_usage_update') def test_poll_bandwidth_usage(self, bw_usage_update, get_by_uuid_mac, get_by_host, time, last_completed_audit): bw_counters = [{'uuid': uuids.instance, 'mac_address': 'fake-mac', 'bw_in': 1, 'bw_out': 2}] usage = objects.BandwidthUsage() usage.bw_in = 3 usage.bw_out = 4 usage.last_ctr_in = 0 usage.last_ctr_out = 0 self.flags(bandwidth_poll_interval=1) get_by_uuid_mac.return_value = usage _time = timeutils.utcnow() bw_usage_update.return_value = {'uuid': uuids.instance, 'mac': '', 'start_period': _time, 'last_refreshed': _time, 'bw_in': 0, 'bw_out': 0, 'last_ctr_in': 0, 'last_ctr_out': 0, 'deleted': 0, 'created_at': _time, 'updated_at': _time, 'deleted_at': _time} with mock.patch.object(self.compute.driver, 'get_all_bw_counters', return_value=bw_counters): self.compute._poll_bandwidth_usage(self.context) get_by_uuid_mac.assert_called_once_with(self.context, uuids.instance, 'fake-mac', start_period=0, use_slave=True) # NOTE(sdague): bw_usage_update happens at some time in # the future, so what last_refreshed is irrelevant. 
bw_usage_update.assert_called_once_with(self.context, uuids.instance, 'fake-mac', 0, 4, 6, 1, 2, last_refreshed=mock.ANY, update_cells=False) def test_reverts_task_state_instance_not_found(self): # Tests that the reverts_task_state decorator in the compute manager # will not trace when an InstanceNotFound is raised. instance = objects.Instance(uuid=uuids.instance, task_state="FAKE") instance_update_mock = mock.Mock( side_effect=exception.InstanceNotFound(instance_id=instance.uuid)) self.compute._instance_update = instance_update_mock log_mock = mock.Mock() manager.LOG = log_mock @manager.reverts_task_state def fake_function(self, context, instance): raise test.TestingException() self.assertRaises(test.TestingException, fake_function, self, self.context, instance) self.assertFalse(log_mock.called) @mock.patch.object(nova.scheduler.client.SchedulerClient, 'update_instance_info') def test_update_scheduler_instance_info(self, mock_update): instance = objects.Instance(uuid=uuids.instance) self.compute._update_scheduler_instance_info(self.context, instance) self.assertEqual(mock_update.call_count, 1) args = mock_update.call_args[0] self.assertNotEqual(args[0], self.context) self.assertIsInstance(args[0], self.context.__class__) self.assertEqual(args[1], self.compute.host) # Send a single instance; check that the method converts to an # InstanceList self.assertIsInstance(args[2], objects.InstanceList) self.assertEqual(args[2].objects[0], instance) @mock.patch.object(nova.scheduler.client.SchedulerClient, 'delete_instance_info') def test_delete_scheduler_instance_info(self, mock_delete): self.compute._delete_scheduler_instance_info(self.context, mock.sentinel.inst_uuid) self.assertEqual(mock_delete.call_count, 1) args = mock_delete.call_args[0] self.assertNotEqual(args[0], self.context) self.assertIsInstance(args[0], self.context.__class__) self.assertEqual(args[1], self.compute.host) self.assertEqual(args[2], mock.sentinel.inst_uuid) 
    @mock.patch.object(nova.context.RequestContext, 'elevated')
    @mock.patch.object(nova.objects.InstanceList, 'get_by_host')
    @mock.patch.object(nova.scheduler.client.SchedulerClient,
                       'sync_instance_info')
    def test_sync_scheduler_instance_info(self, mock_sync, mock_get_by_host,
                                          mock_elevated):
        inst1 = objects.Instance(uuid=uuids.instance_1)
        inst2 = objects.Instance(uuid=uuids.instance_2)
        inst3 = objects.Instance(uuid=uuids.instance_3)
        exp_uuids = [inst.uuid for inst in [inst1, inst2, inst3]]
        mock_get_by_host.return_value = objects.InstanceList(
            objects=[inst1, inst2, inst3])
        fake_elevated = context.get_admin_context()
        mock_elevated.return_value = fake_elevated
        self.compute._sync_scheduler_instance_info(self.context)
        # The sync must run under an elevated (admin) context and report the
        # uuids of every instance on this host.
        mock_get_by_host.assert_called_once_with(
                fake_elevated, self.compute.host, expected_attrs=[],
                use_slave=True)
        mock_sync.assert_called_once_with(fake_elevated, self.compute.host,
                                          exp_uuids)

    @mock.patch.object(nova.scheduler.client.SchedulerClient,
                       'sync_instance_info')
    @mock.patch.object(nova.scheduler.client.SchedulerClient,
                       'delete_instance_info')
    @mock.patch.object(nova.scheduler.client.SchedulerClient,
                       'update_instance_info')
    def test_scheduler_info_updates_off(self, mock_update, mock_delete,
                                        mock_sync):
        # With send_instance_updates disabled, none of the scheduler client
        # notification paths may fire.
        mgr = self.compute
        mgr.send_instance_updates = False
        mgr._update_scheduler_instance_info(self.context,
                                            mock.sentinel.instance)
        mgr._delete_scheduler_instance_info(self.context,
                                            mock.sentinel.instance_uuid)
        mgr._sync_scheduler_instance_info(self.context)
        # None of the calls should have been made
        self.assertFalse(mock_update.called)
        self.assertFalse(mock_delete.called)
        self.assertFalse(mock_sync.called)

    def test_refresh_instance_security_rules_takes_non_object(self):
        # A raw DB-style instance dict arriving over RPC must be converted to
        # an Instance object before reaching the driver.
        inst = fake_instance.fake_db_instance()
        with mock.patch.object(self.compute.driver,
                               'refresh_instance_security_rules') as mock_r:
            self.compute.refresh_instance_security_rules(self.context, inst)
            self.assertIsInstance(mock_r.call_args_list[0][0][0],
                                  objects.Instance)

    def test_set_instance_obj_error_state_with_clean_task_state(self):
        instance = fake_instance.fake_instance_obj(self.context,
            vm_state=vm_states.BUILDING, task_state=task_states.SPAWNING)

        with mock.patch.object(instance, 'save'):
            self.compute._set_instance_obj_error_state(self.context, instance,
                                                       clean_task_state=True)
            # clean_task_state=True clears task_state in addition to setting
            # the vm_state to ERROR.
            self.assertEqual(vm_states.ERROR, instance.vm_state)
            self.assertIsNone(instance.task_state)

    def test_set_instance_obj_error_state_by_default(self):
        instance = fake_instance.fake_instance_obj(self.context,
            vm_state=vm_states.BUILDING, task_state=task_states.SPAWNING)

        with mock.patch.object(instance, 'save'):
            self.compute._set_instance_obj_error_state(self.context, instance)
            # By default only the vm_state changes; task_state is preserved.
            self.assertEqual(vm_states.ERROR, instance.vm_state)
            self.assertEqual(task_states.SPAWNING, instance.task_state)

    @mock.patch.object(objects.Instance, 'save')
    def test_instance_update(self, mock_save):
        instance = objects.Instance(task_state=task_states.SCHEDULING,
                                    vm_state=vm_states.BUILDING)
        updates = {'task_state': None, 'vm_state': vm_states.ERROR}

        with mock.patch.object(self.compute,
                               '_update_resource_tracker') as mock_rt:
            self.compute._instance_update(self.context, instance, **updates)

        self.assertIsNone(instance.task_state)
        self.assertEqual(vm_states.ERROR, instance.vm_state)
        mock_save.assert_called_once_with()
        # The update must also be reflected in the resource tracker.
        mock_rt.assert_called_once_with(self.context, instance)

    def test_reset_reloads_rpcapi(self):
        # reset() (e.g. SIGHUP handling) must build a fresh compute RPC
        # client object.
        orig_rpc = self.compute.compute_rpcapi
        with mock.patch('nova.compute.rpcapi.ComputeAPI') as mock_rpc:
            self.compute.reset()
            mock_rpc.assert_called_once_with()
            self.assertIsNot(orig_rpc, self.compute.compute_rpcapi)

    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
    @mock.patch('nova.compute.manager.ComputeManager._delete_instance')
    def test_terminate_instance_no_bdm_volume_id(self, mock_delete_instance,
                                                 mock_bdm_get_by_inst):
        # Tests that we refresh the bdm list if a volume bdm does not have the
        # volume_id set.
instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ERROR, task_state=task_states.DELETING) bdm = fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'snapshot', 'destination_type': 'volume', 'instance_uuid': instance.uuid, 'device_name': '/dev/vda'}) bdms = block_device_obj.block_device_make_list(self.context, [bdm]) # since the bdms passed in don't have a volume_id, we'll go back to the # database looking for updated versions mock_bdm_get_by_inst.return_value = bdms self.compute.terminate_instance(self.context, instance, bdms, []) mock_bdm_get_by_inst.assert_called_once_with( self.context, instance.uuid) mock_delete_instance.assert_called_once_with( self.context, instance, bdms, mock.ANY) @mock.patch.object(nova.compute.manager.ComputeManager, '_notify_about_instance_usage') def test_trigger_crash_dump(self, notify_mock): instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ACTIVE) self.compute.trigger_crash_dump(self.context, instance) notify_mock.assert_has_calls([ mock.call(self.context, instance, 'trigger_crash_dump.start'), mock.call(self.context, instance, 'trigger_crash_dump.end') ]) self.assertIsNone(instance.task_state) self.assertEqual(vm_states.ACTIVE, instance.vm_state) class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): def setUp(self): super(ComputeManagerBuildInstanceTestCase, self).setUp() self.compute = importutils.import_object(CONF.compute_manager) self.context = context.RequestContext('fake', 'fake') self.instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, expected_attrs=['metadata', 'system_metadata', 'info_cache']) self.admin_pass = 'pass' self.injected_files = [] self.image = {} self.node = 'fake-node' self.limits = {} self.requested_networks = [] self.security_groups = [] self.block_device_mapping = [] self.filter_properties = {'retry': {'num_attempts': 1, 'hosts': [[self.compute.host, 'fake-node']]}} 
self.useFixture(fixtures.SpawnIsSynchronousFixture()) def fake_network_info(): return network_model.NetworkInfo([{'address': '1.2.3.4'}]) self.network_info = network_model.NetworkInfoAsyncWrapper( fake_network_info) self.block_device_info = self.compute._prep_block_device(context, self.instance, self.block_device_mapping) # override tracker with a version that doesn't need the database: fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host, self.compute.driver, self.node) self.compute._resource_tracker_dict[self.node] = fake_rt def _do_build_instance_update(self, reschedule_update=False): self.mox.StubOutWithMock(self.instance, 'save') self.instance.save( expected_task_state=(task_states.SCHEDULING, None)).AndReturn( self.instance) if reschedule_update: self.instance.save().AndReturn(self.instance) def _build_and_run_instance_update(self): self.mox.StubOutWithMock(self.instance, 'save') self._build_resources_instance_update(stub=False) self.instance.save(expected_task_state= task_states.BLOCK_DEVICE_MAPPING).AndReturn(self.instance) def _build_resources_instance_update(self, stub=True): if stub: self.mox.StubOutWithMock(self.instance, 'save') self.instance.save().AndReturn(self.instance) def _notify_about_instance_usage(self, event, stub=True, **kwargs): if stub: self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.compute._notify_about_instance_usage(self.context, self.instance, event, **kwargs) def _instance_action_events(self): self.mox.StubOutWithMock(objects.InstanceActionEvent, 'event_start') self.mox.StubOutWithMock(objects.InstanceActionEvent, 'event_finish_with_failure') objects.InstanceActionEvent.event_start( self.context, self.instance.uuid, mox.IgnoreArg(), want_result=False) objects.InstanceActionEvent.event_finish_with_failure( self.context, self.instance.uuid, mox.IgnoreArg(), exc_val=mox.IgnoreArg(), exc_tb=mox.IgnoreArg(), want_result=False) @staticmethod def _assert_build_instance_hook_called(mock_hooks, 
result): # NOTE(coreywright): we want to test the return value of # _do_build_and_run_instance, but it doesn't bubble all the way up, so # mock the hooking, which allows us to test that too, though a little # too intimately mock_hooks.setdefault().run_post.assert_called_once_with( 'build_instance', result, mock.ANY, mock.ANY, f=None) @mock.patch('nova.hooks._HOOKS') def test_build_and_run_instance_called_with_proper_args(self, mock_hooks): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self._do_build_instance_update() self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) self._assert_build_instance_hook_called(mock_hooks, build_results.ACTIVE) # This test when sending an icehouse compatible rpc call to juno compute # node, NetworkRequest object can load from three items tuple. 
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.compute.manager.ComputeManager._build_and_run_instance')
    def test_build_and_run_instance_with_icehouse_requested_network(
            self, mock_build_and_run, mock_save):
        """A requested network given as a three-item tuple (the icehouse RPC
        wire format) must be deserialized into a NetworkRequest object before
        it reaches _build_and_run_instance.
        """
        fake_server_actions.stub_out_action_events(self.stubs)
        mock_save.return_value = self.instance
        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={},
                filter_properties=self.filter_properties,
                injected_files=self.injected_files,
                admin_password=self.admin_pass,
                requested_networks=[objects.NetworkRequest(
                    network_id='fake_network_id',
                    address='10.0.0.1',
                    port_id=uuids.port_instance)],
                security_groups=self.security_groups,
                block_device_mapping=self.block_device_mapping, node=self.node,
                limits=self.limits)
        # _build_and_run_instance positional arg 5 is requested_networks;
        # verify the first entry round-tripped with all three fields intact.
        requested_network = mock_build_and_run.call_args[0][5][0]
        self.assertEqual('fake_network_id', requested_network.network_id)
        self.assertEqual('10.0.0.1', str(requested_network.address))
        self.assertEqual(uuids.port_instance, requested_network.port_id)

    @mock.patch('nova.hooks._HOOKS')
    def test_build_abort_exception(self, mock_hooks):
        """BuildAbortException from _build_and_run_instance must clean up
        networks and volumes, record a fault, nil out host/node and put the
        instance into ERROR (mox expectations continue past this chunk).
        """
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
        self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
        self.mox.StubOutWithMock(self.compute,
                                 '_nil_out_instance_obj_host_and_node')
        self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state')
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                                 'build_instances')
        self._do_build_instance_update()
        # Record: the build raises BuildAbortException.
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass,
                self.requested_networks, self.security_groups,
                self.block_device_mapping, self.node, self.limits,
                self.filter_properties).AndRaise(
                        exception.BuildAbortException(reason='',
                            instance_uuid=self.instance.uuid))
self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) self.compute._cleanup_volumes(self.context, self.instance.uuid, self.block_device_mapping, raise_exc=False) compute_utils.add_instance_fault_from_exc(self.context, self.instance, mox.IgnoreArg(), mox.IgnoreArg()) self.compute._nil_out_instance_obj_host_and_node(self.instance) self.compute._set_instance_obj_error_state(self.context, self.instance, clean_task_state=True) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) self._assert_build_instance_hook_called(mock_hooks, build_results.FAILED) @mock.patch('nova.hooks._HOOKS') def test_rescheduled_exception(self, mock_hooks): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self.mox.StubOutWithMock(self.compute.network_api, 'cleanup_instance_network_on_host') self._do_build_instance_update(reschedule_update=True) self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise( exception.RescheduledException(reason='', instance_uuid=self.instance.uuid)) self.compute.network_api.cleanup_instance_network_on_host(self.context, self.instance, self.compute.host) self.compute._nil_out_instance_obj_host_and_node(self.instance) 
self.compute.compute_task_api.build_instances(self.context, [self.instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, self.security_groups, self.block_device_mapping) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) self._assert_build_instance_hook_called(mock_hooks, build_results.RESCHEDULED) def test_rescheduled_exception_with_non_ascii_exception(self): exc = exception.NovaException(u's\xe9quence') self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndReturn( self.network_info) self.compute._shutdown_instance(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) self._notify_about_instance_usage('create.start', extra_usage_info={'image_name': self.image.get('name')}) self.compute.driver.spawn(self.context, self.instance, mox.IsA(objects.ImageMeta), self.injected_files, self.admin_pass, network_info=self.network_info, block_device_info=self.block_device_info).AndRaise(exc) self._notify_about_instance_usage('create.error', fault=exc, stub=False) self.mox.ReplayAll() with mock.patch.object(self.instance, 'save') as mock_save: self.assertRaises(exception.RescheduledException, self.compute._build_and_run_instance, self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, 
self.block_device_mapping, self.node, self.limits, self.filter_properties) mock_save.assert_has_calls([ mock.call(), mock.call(), mock.call(expected_task_state='block_device_mapping'), ]) @mock.patch.object(manager.ComputeManager, '_build_and_run_instance') @mock.patch.object(conductor_api.ComputeTaskAPI, 'build_instances') @mock.patch.object(network_api.API, 'cleanup_instance_network_on_host') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.InstanceActionEvent, 'event_start') @mock.patch.object(objects.InstanceActionEvent, 'event_finish_with_failure') @mock.patch.object(virt_driver.ComputeDriver, 'macs_for_instance') def test_rescheduled_exception_with_network_allocated(self, mock_macs_for_instance, mock_event_finish, mock_event_start, mock_ins_save, mock_cleanup_network, mock_build_ins, mock_build_and_run): instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, system_metadata={'network_allocated': 'True'}, expected_attrs=['metadata', 'system_metadata', 'info_cache']) mock_ins_save.return_value = instance mock_macs_for_instance.return_value = [] mock_build_and_run.side_effect = exception.RescheduledException( reason='', instance_uuid=self.instance.uuid) self.compute._do_build_and_run_instance(self.context, instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) mock_build_and_run.assert_called_once_with(self.context, instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) mock_cleanup_network.assert_called_once_with( self.context, instance, self.compute.host) mock_build_ins.assert_called_once_with(self.context, [instance], 
self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, self.security_groups, self.block_device_mapping) @mock.patch.object(manager.ComputeManager, '_build_and_run_instance') @mock.patch.object(conductor_api.ComputeTaskAPI, 'build_instances') @mock.patch.object(manager.ComputeManager, '_cleanup_allocated_networks') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.InstanceActionEvent, 'event_start') @mock.patch.object(objects.InstanceActionEvent, 'event_finish_with_failure') @mock.patch.object(virt_driver.ComputeDriver, 'macs_for_instance') def test_rescheduled_exception_with_sriov_network_allocated(self, mock_macs_for_instance, mock_event_finish, mock_event_start, mock_ins_save, mock_cleanup_network, mock_build_ins, mock_build_and_run): vif1 = fake_network_cache_model.new_vif() vif1['id'] = '1' vif1['vnic_type'] = network_model.VNIC_TYPE_NORMAL vif2 = fake_network_cache_model.new_vif() vif2['id'] = '2' vif1['vnic_type'] = network_model.VNIC_TYPE_DIRECT nw_info = network_model.NetworkInfo([vif1, vif2]) instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, system_metadata={'network_allocated': 'True'}, expected_attrs=['metadata', 'system_metadata', 'info_cache']) info_cache = objects.InstanceInfoCache(network_info=nw_info, instance_uuid=instance.uuid) instance.info_cache = info_cache mock_ins_save.return_value = instance mock_macs_for_instance.return_value = [] mock_build_and_run.side_effect = exception.RescheduledException( reason='', instance_uuid=self.instance.uuid) self.compute._do_build_and_run_instance(self.context, instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) 
mock_build_and_run.assert_called_once_with(self.context, instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) mock_cleanup_network.assert_called_once_with( self.context, instance, self.requested_networks) mock_build_ins.assert_called_once_with(self.context, [instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, self.security_groups, self.block_device_mapping) @mock.patch('nova.hooks._HOOKS') def test_rescheduled_exception_without_retry(self, mock_hooks): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self._do_build_instance_update() self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, {}).AndRaise( exception.RescheduledException(reason='', instance_uuid=self.instance.uuid)) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) compute_utils.add_instance_fault_from_exc(self.context, self.instance, mox.IgnoreArg(), mox.IgnoreArg(), fault_message=mox.IgnoreArg()) self.compute._nil_out_instance_obj_host_and_node(self.instance) self.compute._set_instance_obj_error_state(self.context, self.instance, clean_task_state=True) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties={}, injected_files=self.injected_files, 
admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) self._assert_build_instance_hook_called(mock_hooks, build_results.FAILED) @mock.patch('nova.hooks._HOOKS') def test_rescheduled_exception_do_not_deallocate_network(self, mock_hooks): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute.driver, 'deallocate_networks_on_reschedule') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') self.mox.StubOutWithMock(self.compute.network_api, 'cleanup_instance_network_on_host') self._do_build_instance_update(reschedule_update=True) self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise( exception.RescheduledException(reason='', instance_uuid=self.instance.uuid)) self.compute.driver.deallocate_networks_on_reschedule( self.instance).AndReturn(False) self.compute.network_api.cleanup_instance_network_on_host( self.context, self.instance, self.compute.host) self.compute._nil_out_instance_obj_host_and_node(self.instance) self.compute.compute_task_api.build_instances(self.context, [self.instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, self.security_groups, self.block_device_mapping) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, 
requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) self._assert_build_instance_hook_called(mock_hooks, build_results.RESCHEDULED) @mock.patch('nova.hooks._HOOKS') def test_rescheduled_exception_deallocate_network(self, mock_hooks): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute.driver, 'deallocate_networks_on_reschedule') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') self._do_build_instance_update(reschedule_update=True) self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise( exception.RescheduledException(reason='', instance_uuid=self.instance.uuid)) self.compute.driver.deallocate_networks_on_reschedule( self.instance).AndReturn(True) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) self.compute._nil_out_instance_obj_host_and_node(self.instance) self.compute.compute_task_api.build_instances(self.context, [self.instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, self.security_groups, self.block_device_mapping) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) 
self._assert_build_instance_hook_called(mock_hooks, build_results.RESCHEDULED) @mock.patch('nova.hooks._HOOKS') def _test_build_and_run_exceptions(self, exc, mock_hooks, set_error=False, cleanup_volumes=False, nil_out_host_and_node=False): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') self._do_build_instance_update() self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise(exc) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) if cleanup_volumes: self.compute._cleanup_volumes(self.context, self.instance.uuid, self.block_device_mapping, raise_exc=False) if nil_out_host_and_node: self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self.compute._nil_out_instance_obj_host_and_node(self.instance) if set_error: self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state') self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') compute_utils.add_instance_fault_from_exc(self.context, self.instance, mox.IgnoreArg(), mox.IgnoreArg()) self.compute._set_instance_obj_error_state(self.context, self.instance, clean_task_state=True) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) 
        self._assert_build_instance_hook_called(mock_hooks,
                                                build_results.FAILED)

    def test_build_and_run_notfound_exception(self):
        """Instance deleted mid-build: fail without error-state/volume work."""
        self._test_build_and_run_exceptions(exception.InstanceNotFound(
            instance_id=''))

    def test_build_and_run_unexpecteddeleting_exception(self):
        """Unexpected DELETING task state: fail without error-state work."""
        self._test_build_and_run_exceptions(
            exception.UnexpectedDeletingTaskStateError(
                instance_uuid=uuids.instance, expected={}, actual={}))

    def test_build_and_run_buildabort_exception(self):
        """Aborted build: set ERROR, clean volumes and nil out host/node."""
        self._test_build_and_run_exceptions(
            exception.BuildAbortException(instance_uuid='', reason=''),
            set_error=True, cleanup_volumes=True, nil_out_host_and_node=True)

    def test_build_and_run_unhandled_exception(self):
        """Any unrecognized exception gets the full failure cleanup."""
        self._test_build_and_run_exceptions(test.TestingException(),
                set_error=True, cleanup_volumes=True,
                nil_out_host_and_node=True)

    def test_instance_not_found(self):
        """InstanceNotFound raised by driver.spawn propagates unchanged and
        emits create.end (not create.error), since the deletion won.
        """
        exc = exception.InstanceNotFound(instance_id=1)
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(self.compute,
                '_build_networks_for_instance')
        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
        self.compute._build_networks_for_instance(self.context, self.instance,
                self.requested_networks, self.security_groups).AndReturn(
                        self.network_info)
        # Partial build gets torn down, but networks are kept for reschedule.
        self.compute._shutdown_instance(self.context, self.instance,
                self.block_device_mapping, self.requested_networks,
                try_deallocate_networks=False)
        self._notify_about_instance_usage('create.start',
            extra_usage_info={'image_name': self.image.get('name')})
        self.compute.driver.spawn(self.context, self.instance,
                mox.IsA(objects.ImageMeta),
                self.injected_files, self.admin_pass,
                network_info=self.network_info,
                block_device_info=self.block_device_info).AndRaise(exc)
        self._notify_about_instance_usage('create.end',
                fault=exc, stub=False)
        self.mox.ReplayAll()

        with mock.patch.object(self.instance, 'save') as mock_save:
            self.assertRaises(exception.InstanceNotFound,
                              self.compute._build_and_run_instance,
                              self.context, self.instance, self.image,
                              self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) mock_save.assert_has_calls([ mock.call(), mock.call(), mock.call(expected_task_state='block_device_mapping'), ]) def test_reschedule_on_exception(self): self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndReturn( self.network_info) self.compute._shutdown_instance(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) self._notify_about_instance_usage('create.start', extra_usage_info={'image_name': self.image.get('name')}) exc = test.TestingException() self.compute.driver.spawn(self.context, self.instance, mox.IsA(objects.ImageMeta), self.injected_files, self.admin_pass, network_info=self.network_info, block_device_info=self.block_device_info).AndRaise(exc) self._notify_about_instance_usage('create.error', fault=exc, stub=False) self.mox.ReplayAll() with mock.patch.object(self.instance, 'save') as mock_save: self.assertRaises(exception.RescheduledException, self.compute._build_and_run_instance, self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) mock_save.assert_has_calls([ mock.call(), mock.call(), mock.call(expected_task_state='block_device_mapping'), ]) def test_spawn_network_alloc_failure(self): # Because network allocation is asynchronous, failures may not present # themselves until the virt spawn method is called. 
self._test_build_and_run_spawn_exceptions(exception.NoMoreNetworks()) def test_build_and_run_no_more_fixedips_exception(self): self._test_build_and_run_spawn_exceptions( exception.NoMoreFixedIps("error messge")) def test_build_and_run_flavor_disk_smaller_image_exception(self): self._test_build_and_run_spawn_exceptions( exception.FlavorDiskSmallerThanImage( flavor_size=0, image_size=1)) def test_build_and_run_flavor_disk_smaller_min_disk(self): self._test_build_and_run_spawn_exceptions( exception.FlavorDiskSmallerThanMinDisk( flavor_size=0, image_min_disk=1)) def test_build_and_run_flavor_memory_too_small_exception(self): self._test_build_and_run_spawn_exceptions( exception.FlavorMemoryTooSmall()) def test_build_and_run_image_not_active_exception(self): self._test_build_and_run_spawn_exceptions( exception.ImageNotActive(image_id=self.image.get('id'))) def test_build_and_run_image_unacceptable_exception(self): self._test_build_and_run_spawn_exceptions( exception.ImageUnacceptable(image_id=self.image.get('id'), reason="")) def test_build_and_run_invalid_disk_info_exception(self): self._test_build_and_run_spawn_exceptions( exception.InvalidDiskInfo(reason="")) def _test_build_and_run_spawn_exceptions(self, exc): with test.nested( mock.patch.object(self.compute.driver, 'spawn', side_effect=exc), mock.patch.object(self.instance, 'save', side_effect=[self.instance, self.instance, self.instance]), mock.patch.object(self.compute, '_build_networks_for_instance', return_value=self.network_info), mock.patch.object(self.compute, '_notify_about_instance_usage'), mock.patch.object(self.compute, '_shutdown_instance'), mock.patch.object(self.compute, '_validate_instance_group_policy') ) as (spawn, save, _build_networks_for_instance, _notify_about_instance_usage, _shutdown_instance, _validate_instance_group_policy): self.assertRaises(exception.BuildAbortException, self.compute._build_and_run_instance, self.context, self.instance, self.image, self.injected_files, self.admin_pass, 
self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) _validate_instance_group_policy.assert_called_once_with( self.context, self.instance, self.filter_properties) _build_networks_for_instance.assert_has_calls( [mock.call(self.context, self.instance, self.requested_networks, self.security_groups)]) _notify_about_instance_usage.assert_has_calls([ mock.call(self.context, self.instance, 'create.start', extra_usage_info={'image_name': self.image.get('name')}), mock.call(self.context, self.instance, 'create.error', fault=exc)]) save.assert_has_calls([ mock.call(), mock.call(), mock.call( expected_task_state=task_states.BLOCK_DEVICE_MAPPING)]) spawn.assert_has_calls([mock.call(self.context, self.instance, test.MatchType(objects.ImageMeta), self.injected_files, self.admin_pass, network_info=self.network_info, block_device_info=self.block_device_info)]) _shutdown_instance.assert_called_once_with(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) def test_reschedule_on_resources_unavailable(self): reason = 'resource unavailable' exc = exception.ComputeResourcesUnavailable(reason=reason) class FakeResourceTracker(object): def instance_claim(self, context, instance, limits): raise exc self.mox.StubOutWithMock(self.compute, '_get_resource_tracker') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') self.mox.StubOutWithMock(self.compute.network_api, 'cleanup_instance_network_on_host') self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self.compute._get_resource_tracker(self.node).AndReturn( FakeResourceTracker()) self._do_build_instance_update(reschedule_update=True) self._notify_about_instance_usage('create.start', extra_usage_info={'image_name': self.image.get('name')}) self._notify_about_instance_usage('create.error', fault=exc, stub=False) self.compute.network_api.cleanup_instance_network_on_host( 
self.context, self.instance, self.compute.host) self.compute._nil_out_instance_obj_host_and_node(self.instance) self.compute.compute_task_api.build_instances(self.context, [self.instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, self.security_groups, self.block_device_mapping) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) def test_build_resources_buildabort_reraise(self): exc = exception.BuildAbortException( instance_uuid=self.instance.uuid, reason='') self.mox.StubOutWithMock(self.compute, '_build_resources') self._notify_about_instance_usage('create.start', extra_usage_info={'image_name': self.image.get('name')}) self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, mox.IsA(objects.ImageMeta), self.block_device_mapping).AndRaise(exc) self._notify_about_instance_usage('create.error', fault=exc, stub=False) self.mox.ReplayAll() with mock.patch.object(self.instance, 'save') as mock_save: self.assertRaises(exception.BuildAbortException, self.compute._build_and_run_instance, self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) mock_save.assert_called_once_with() def test_build_resources_reraises_on_failed_bdm_prep(self): self.mox.StubOutWithMock(self.compute, '_prep_block_device') self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, 
self.security_groups).AndReturn( self.network_info) self._build_resources_instance_update() self.compute._prep_block_device(self.context, self.instance, self.block_device_mapping).AndRaise(test.TestingException()) self.mox.ReplayAll() try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): pass except Exception as e: self.assertIsInstance(e, exception.BuildAbortException) def test_failed_bdm_prep_from_delete_raises_unexpected(self): with test.nested( mock.patch.object(self.compute, '_build_networks_for_instance', return_value=self.network_info), mock.patch.object(self.instance, 'save', side_effect=exception.UnexpectedDeletingTaskStateError( instance_uuid=uuids.instance, actual={'task_state': task_states.DELETING}, expected={'task_state': None})), ) as (_build_networks_for_instance, save): try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): pass except Exception as e: self.assertIsInstance(e, exception.UnexpectedDeletingTaskStateError) _build_networks_for_instance.assert_has_calls( [mock.call(self.context, self.instance, self.requested_networks, self.security_groups)]) save.assert_has_calls([mock.call()]) def test_build_resources_aborts_on_failed_network_alloc(self): self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndRaise( test.TestingException()) self.mox.ReplayAll() try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): pass except Exception as e: self.assertIsInstance(e, exception.BuildAbortException) def test_failed_network_alloc_from_delete_raises_unexpected(self): with mock.patch.object(self.compute, 
'_build_networks_for_instance') as _build_networks: exc = exception.UnexpectedDeletingTaskStateError _build_networks.side_effect = exc( instance_uuid=uuids.instance, actual={'task_state': task_states.DELETING}, expected={'task_state': None}) try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): pass except Exception as e: self.assertIsInstance(e, exc) _build_networks.assert_has_calls( [mock.call(self.context, self.instance, self.requested_networks, self.security_groups)]) def test_build_resources_with_network_info_obj_on_spawn_failure(self): self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndReturn( self.network_info) self.compute._shutdown_instance(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) self._build_resources_instance_update() self.mox.ReplayAll() test_exception = test.TestingException() def fake_spawn(): raise test_exception try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): fake_spawn() except Exception as e: self.assertEqual(test_exception, e) def test_build_resources_cleans_up_and_reraises_on_spawn_failure(self): self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndReturn( self.network_info) self.compute._shutdown_instance(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) self._build_resources_instance_update() self.mox.ReplayAll() 
        # Tail of test_build_resources_cleans_up_and_reraises_on_spawn_failure:
        # a failure raised inside the _build_resources context (simulated
        # spawn) must be re-raised to the caller as the very same exception
        # object after cleanup has run.
        test_exception = test.TestingException()

        def fake_spawn():
            raise test_exception

        try:
            with self.compute._build_resources(self.context, self.instance,
                    self.requested_networks, self.security_groups,
                    self.image, self.block_device_mapping):
                fake_spawn()
        except Exception as e:
            # Identity/equality with the original exception, not a wrapper.
            self.assertEqual(test_exception, e)

    @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
    @mock.patch(
        'nova.compute.manager.ComputeManager._build_networks_for_instance')
    @mock.patch('nova.objects.Instance.save')
    def test_build_resources_instance_not_found_before_yield(
            self, mock_save, mock_build_network, mock_info_wait):
        """InstanceNotFound raised before the yield propagates unchanged.

        instance.save() failing with InstanceNotFound (instance deleted
        while building) must escape _build_resources as-is, and the async
        network allocation must still be waited on with do_raise=False so
        the allocation greenthread is not leaked.
        """
        mock_build_network.return_value = self.network_info
        expected_exc = exception.InstanceNotFound(
            instance_id=self.instance.uuid)
        mock_save.side_effect = expected_exc
        try:
            with self.compute._build_resources(self.context, self.instance,
                    self.requested_networks, self.security_groups,
                    self.image, self.block_device_mapping):
                # Never reached: mocked save() raises before the yield.
                raise
        except Exception as e:
            self.assertEqual(expected_exc, e)
        mock_build_network.assert_called_once_with(self.context,
                self.instance, self.requested_networks, self.security_groups)
        mock_info_wait.assert_called_once_with(do_raise=False)

    @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
    @mock.patch(
        'nova.compute.manager.ComputeManager._build_networks_for_instance')
    @mock.patch('nova.objects.Instance.save')
    def test_build_resources_unexpected_task_error_before_yield(
            self, mock_save, mock_build_network, mock_info_wait):
        """UnexpectedTaskStateError before the yield aborts the build.

        Unlike InstanceNotFound, an unexpected task state from save() is
        translated into BuildAbortException by _build_resources.
        """
        mock_build_network.return_value = self.network_info
        mock_save.side_effect = exception.UnexpectedTaskStateError(
            instance_uuid=uuids.instance, expected={}, actual={})
        try:
            with self.compute._build_resources(self.context, self.instance,
                    self.requested_networks, self.security_groups,
                    self.image, self.block_device_mapping):
                # Never reached: mocked save() raises before the yield.
                raise
        except exception.BuildAbortException:
            pass
        mock_build_network.assert_called_once_with(self.context,
                self.instance, self.requested_networks, self.security_groups)
mock_info_wait.assert_called_once_with(do_raise=False) @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait') @mock.patch( 'nova.compute.manager.ComputeManager._build_networks_for_instance') @mock.patch('nova.objects.Instance.save') def test_build_resources_exception_before_yield( self, mock_save, mock_build_network, mock_info_wait): mock_build_network.return_value = self.network_info mock_save.side_effect = Exception() try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): raise except exception.BuildAbortException: pass mock_build_network.assert_called_once_with(self.context, self.instance, self.requested_networks, self.security_groups) mock_info_wait.assert_called_once_with(do_raise=False) @mock.patch('nova.compute.manager.LOG') def test_build_resources_aborts_on_cleanup_failure(self, mock_log): self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndReturn( self.network_info) self.compute._shutdown_instance(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False).AndRaise( test.TestingException('Failed to shutdown')) self._build_resources_instance_update() self.mox.ReplayAll() def fake_spawn(): raise test.TestingException('Failed to spawn') with self.assertRaisesRegex(exception.BuildAbortException, 'Failed to spawn'): with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): fake_spawn() self.assertTrue(mock_log.warning.called) msg = mock_log.warning.call_args_list[0] self.assertIn('Failed to shutdown', msg[0][1]) def test_build_networks_if_not_allocated(self): instance = 
fake_instance.fake_instance_obj(self.context, system_metadata={}, expected_attrs=['system_metadata']) self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.mox.StubOutWithMock(self.compute, '_allocate_network') self.compute._allocate_network(self.context, instance, self.requested_networks, None, self.security_groups, None) self.mox.ReplayAll() self.compute._build_networks_for_instance(self.context, instance, self.requested_networks, self.security_groups) def test_build_networks_if_allocated_false(self): instance = fake_instance.fake_instance_obj(self.context, system_metadata=dict(network_allocated='False'), expected_attrs=['system_metadata']) self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.mox.StubOutWithMock(self.compute, '_allocate_network') self.compute._allocate_network(self.context, instance, self.requested_networks, None, self.security_groups, None) self.mox.ReplayAll() self.compute._build_networks_for_instance(self.context, instance, self.requested_networks, self.security_groups) def test_return_networks_if_found(self): instance = fake_instance.fake_instance_obj(self.context, system_metadata=dict(network_allocated='True'), expected_attrs=['system_metadata']) def fake_network_info(): return network_model.NetworkInfo([{'address': '123.123.123.123'}]) self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.mox.StubOutWithMock(self.compute, '_allocate_network') self.mox.StubOutWithMock(self.compute.network_api, 'setup_instance_network_on_host') self.compute.network_api.setup_instance_network_on_host( self.context, instance, instance.host) self.compute.network_api.get_instance_nw_info( self.context, instance).AndReturn( network_model.NetworkInfoAsyncWrapper(fake_network_info)) self.mox.ReplayAll() self.compute._build_networks_for_instance(self.context, instance, self.requested_networks, self.security_groups) def test_cleanup_allocated_networks_instance_not_found(self): with 
test.nested( mock.patch.object(self.compute, '_deallocate_network'), mock.patch.object(self.instance, 'save', side_effect=exception.InstanceNotFound(instance_id='')) ) as (_deallocate_network, save): # Testing that this doesn't raise an exeption self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) save.assert_called_once_with() self.assertEqual('False', self.instance.system_metadata['network_allocated']) @mock.patch.object(manager.ComputeManager, '_instance_update') def test_launched_at_in_create_end_notification(self, mock_instance_update): def fake_notify(*args, **kwargs): if args[2] == 'create.end': # Check that launched_at is set on the instance self.assertIsNotNone(args[1].launched_at) with test.nested( mock.patch.object(self.compute, '_update_scheduler_instance_info'), mock.patch.object(self.compute.driver, 'spawn'), mock.patch.object(self.compute, '_build_networks_for_instance', return_value=[]), mock.patch.object(self.instance, 'save'), mock.patch.object(self.compute, '_notify_about_instance_usage', side_effect=fake_notify) ) as (mock_upd, mock_spawn, mock_networks, mock_save, mock_notify): self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) expected_call = mock.call(self.context, self.instance, 'create.end', extra_usage_info={'message': u'Success'}, network_info=[]) create_end_call = mock_notify.call_args_list[ mock_notify.call_count - 1] self.assertEqual(expected_call, create_end_call) def test_access_ip_set_when_instance_set_to_active(self): self.flags(default_access_ip_network_name='test1') instance = fake_instance.fake_db_instance() @mock.patch.object(db, 'instance_update_and_get_original', return_value=({}, instance)) @mock.patch.object(self.compute.driver, 'spawn') @mock.patch.object(self.compute, 
'_build_networks_for_instance', return_value=fake_network.fake_get_instance_nw_info(self)) @mock.patch.object(db, 'instance_extra_update_by_uuid') @mock.patch.object(self.compute, '_notify_about_instance_usage') def _check_access_ip(mock_notify, mock_extra, mock_networks, mock_spawn, mock_db_update): self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) updates = {'vm_state': u'active', 'access_ip_v6': netaddr.IPAddress('2001:db8:0:1:dcad:beff:feef:1'), 'access_ip_v4': netaddr.IPAddress('192.168.1.100'), 'power_state': 0, 'task_state': None, 'launched_at': mock.ANY, 'expected_task_state': 'spawning'} expected_call = mock.call(self.context, self.instance.uuid, updates, columns_to_join=['metadata', 'system_metadata', 'info_cache']) last_update_call = mock_db_update.call_args_list[ mock_db_update.call_count - 1] self.assertEqual(expected_call, last_update_call) _check_access_ip() @mock.patch.object(manager.ComputeManager, '_instance_update') def test_create_end_on_instance_delete(self, mock_instance_update): def fake_notify(*args, **kwargs): if args[2] == 'create.end': # Check that launched_at is set on the instance self.assertIsNotNone(args[1].launched_at) exc = exception.InstanceNotFound(instance_id='') with test.nested( mock.patch.object(self.compute.driver, 'spawn'), mock.patch.object(self.compute, '_build_networks_for_instance', return_value=[]), mock.patch.object(self.instance, 'save', side_effect=[None, None, None, exc]), mock.patch.object(self.compute, '_notify_about_instance_usage', side_effect=fake_notify) ) as (mock_spawn, mock_networks, mock_save, mock_notify): self.assertRaises(exception.InstanceNotFound, self.compute._build_and_run_instance, self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, 
self.block_device_mapping, self.node, self.limits, self.filter_properties) expected_call = mock.call(self.context, self.instance, 'create.end', fault=exc) create_end_call = mock_notify.call_args_list[ mock_notify.call_count - 1] self.assertEqual(expected_call, create_end_call) class ComputeManagerMigrationTestCase(test.NoDBTestCase): def setUp(self): super(ComputeManagerMigrationTestCase, self).setUp() self.compute = importutils.import_object(CONF.compute_manager) self.context = context.RequestContext('fake', 'fake') self.image = {} self.instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, expected_attrs=['metadata', 'system_metadata', 'info_cache']) self.migration = objects.Migration(context=self.context.elevated(), new_instance_type_id=7) self.migration.status = 'migrating' fake_server_actions.stub_out_action_events(self.stubs) self.useFixture(fixtures.SpawnIsSynchronousFixture()) @mock.patch.object(objects.Migration, 'save') @mock.patch.object(objects.Migration, 'obj_as_admin') def test_errors_out_migration_decorator(self, mock_save, mock_obj_as_admin): # Tests that errors_out_migration decorator in compute manager # sets migration status to 'error' when an exception is raised # from decorated method instance = fake_instance.fake_instance_obj(self.context) migration = objects.Migration() migration.instance_uuid = instance.uuid migration.status = 'migrating' migration.id = 0 @manager.errors_out_migration def fake_function(self, context, instance, migration): raise test.TestingException() mock_obj_as_admin.return_value = mock.MagicMock() self.assertRaises(test.TestingException, fake_function, self, self.context, instance, migration) self.assertEqual('error', migration.status) mock_save.assert_called_once_with() mock_obj_as_admin.assert_called_once_with() def test_finish_resize_failure(self): with test.nested( mock.patch.object(self.compute, '_finish_resize', side_effect=exception.ResizeError(reason='')), mock.patch.object(db, 
'instance_fault_create'), mock.patch.object(self.compute, '_instance_update'), mock.patch.object(self.instance, 'save'), mock.patch.object(self.migration, 'save'), mock.patch.object(self.migration, 'obj_as_admin', return_value=mock.MagicMock()) ) as (meth, fault_create, instance_update, instance_save, migration_save, migration_obj_as_admin): fault_create.return_value = ( test_instance_fault.fake_faults['fake-uuid'][0]) self.assertRaises( exception.ResizeError, self.compute.finish_resize, context=self.context, disk_info=[], image=self.image, instance=self.instance, reservations=[], migration=self.migration ) self.assertEqual("error", self.migration.status) migration_save.assert_called_once_with() migration_obj_as_admin.assert_called_once_with() def test_resize_instance_failure(self): self.migration.dest_host = None with test.nested( mock.patch.object(self.compute.driver, 'migrate_disk_and_power_off', side_effect=exception.ResizeError(reason='')), mock.patch.object(db, 'instance_fault_create'), mock.patch.object(self.compute, '_instance_update'), mock.patch.object(self.migration, 'save'), mock.patch.object(self.migration, 'obj_as_admin', return_value=mock.MagicMock()), mock.patch.object(self.compute.network_api, 'get_instance_nw_info', return_value=None), mock.patch.object(self.instance, 'save'), mock.patch.object(self.compute, '_notify_about_instance_usage'), mock.patch.object(self.compute, '_get_instance_block_device_info', return_value=None), mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid', return_value=None), mock.patch.object(objects.Flavor, 'get_by_id', return_value=None) ) as (meth, fault_create, instance_update, migration_save, migration_obj_as_admin, nw_info, save_inst, notify, vol_block_info, bdm, flavor): fault_create.return_value = ( test_instance_fault.fake_faults['fake-uuid'][0]) self.assertRaises( exception.ResizeError, self.compute.resize_instance, context=self.context, instance=self.instance, image=self.image, 
reservations=[], migration=self.migration, instance_type='type', clean_shutdown=True) self.assertEqual("error", self.migration.status) self.assertEqual([mock.call(), mock.call()], migration_save.mock_calls) self.assertEqual([mock.call(), mock.call()], migration_obj_as_admin.mock_calls) def _test_revert_resize_instance_destroy_disks(self, is_shared=False): # This test asserts that _is_instance_storage_shared() is called from # revert_resize() and the return value is passed to driver.destroy(). # Otherwise we could regress this. @mock.patch.object(self.instance, 'revert_migration_context') @mock.patch.object(self.compute.network_api, 'get_instance_nw_info') @mock.patch.object(self.compute, '_is_instance_storage_shared') @mock.patch.object(self.compute, 'finish_revert_resize') @mock.patch.object(self.compute, '_instance_update') @mock.patch.object(self.compute, '_get_resource_tracker') @mock.patch.object(self.compute.driver, 'destroy') @mock.patch.object(self.compute.network_api, 'setup_networks_on_host') @mock.patch.object(self.compute.network_api, 'migrate_instance_start') @mock.patch.object(compute_utils, 'notify_usage_exists') @mock.patch.object(self.migration, 'save') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') def do_test(get_by_instance_uuid, migration_save, notify_usage_exists, migrate_instance_start, setup_networks_on_host, destroy, _get_resource_tracker, _instance_update, finish_revert_resize, _is_instance_storage_shared, get_instance_nw_info, revert_migration_context): self.migration.source_compute = self.instance['host'] # Inform compute that instance uses non-shared or shared storage _is_instance_storage_shared.return_value = is_shared self.compute.revert_resize(context=self.context, migration=self.migration, instance=self.instance, reservations=None) _is_instance_storage_shared.assert_called_once_with( self.context, self.instance, host=self.migration.source_compute) # If instance storage is shared, driver destroy method # 
should not destroy disks otherwise it should destroy disks. destroy.assert_called_once_with(self.context, self.instance, mock.ANY, mock.ANY, not is_shared) do_test() def test_revert_resize_instance_destroy_disks_shared_storage(self): self._test_revert_resize_instance_destroy_disks(is_shared=True) def test_revert_resize_instance_destroy_disks_non_shared_storage(self): self._test_revert_resize_instance_destroy_disks(is_shared=False) def test_consoles_enabled(self): self.flags(enabled=False, group='vnc') self.flags(enabled=False, group='spice') self.flags(enabled=False, group='rdp') self.flags(enabled=False, group='serial_console') self.assertFalse(self.compute._consoles_enabled()) self.flags(enabled=True, group='vnc') self.assertTrue(self.compute._consoles_enabled()) self.flags(enabled=False, group='vnc') for console in ['spice', 'rdp', 'serial_console']: self.flags(enabled=True, group=console) self.assertTrue(self.compute._consoles_enabled()) self.flags(enabled=False, group=console) @mock.patch('nova.compute.manager.ComputeManager.' 
'_do_live_migration') def _test_max_concurrent_live(self, mock_lm): @mock.patch('nova.objects.Migration.save') def _do_it(mock_mig_save): instance = objects.Instance(uuid=str(uuid.uuid4())) migration = objects.Migration() self.compute.live_migration(self.context, mock.sentinel.dest, instance, mock.sentinel.block_migration, migration, mock.sentinel.migrate_data) self.assertEqual('queued', migration.status) migration.save.assert_called_once_with() with mock.patch.object(self.compute, '_live_migration_semaphore') as mock_sem: for i in (1, 2, 3): _do_it() self.assertEqual(3, mock_sem.__enter__.call_count) def test_max_concurrent_live_limited(self): self.flags(max_concurrent_live_migrations=2) self._test_max_concurrent_live() def test_max_concurrent_live_unlimited(self): self.flags(max_concurrent_live_migrations=0) self._test_max_concurrent_live() def test_max_concurrent_live_semaphore_limited(self): self.flags(max_concurrent_live_migrations=123) self.assertEqual( 123, manager.ComputeManager()._live_migration_semaphore.balance) def test_max_concurrent_live_semaphore_unlimited(self): self.flags(max_concurrent_live_migrations=0) compute = manager.ComputeManager() self.assertEqual(0, compute._live_migration_semaphore.balance) self.assertIsInstance(compute._live_migration_semaphore, compute_utils.UnlimitedSemaphore) def test_max_concurrent_live_semaphore_negative(self): self.flags(max_concurrent_live_migrations=-2) compute = manager.ComputeManager() self.assertEqual(0, compute._live_migration_semaphore.balance) self.assertIsInstance(compute._live_migration_semaphore, compute_utils.UnlimitedSemaphore) def test_check_migrate_source_converts_object(self): # NOTE(danms): Make sure that we legacy-ify any data objects # the drivers give us back, if we were passed a non-object data = migrate_data_obj.LiveMigrateData(is_volume_backed=False) compute = manager.ComputeManager() @mock.patch.object(compute.driver, 'check_can_live_migrate_source') @mock.patch.object(compute, 
'_get_instance_block_device_info') @mock.patch.object(compute.compute_api, 'is_volume_backed_instance') def _test(mock_ivbi, mock_gibdi, mock_cclms): mock_cclms.return_value = data self.assertIsInstance( compute.check_can_live_migrate_source( self.context, {'uuid': uuids.instance}, {}), dict) self.assertIsInstance(mock_cclms.call_args_list[0][0][2], migrate_data_obj.LiveMigrateData) _test() def test_pre_live_migration_handles_dict(self): compute = manager.ComputeManager() @mock.patch.object(compute, '_notify_about_instance_usage') @mock.patch.object(compute, 'network_api') @mock.patch.object(compute.driver, 'pre_live_migration') @mock.patch.object(compute, '_get_instance_block_device_info') @mock.patch.object(compute.compute_api, 'is_volume_backed_instance') def _test(mock_ivbi, mock_gibdi, mock_plm, mock_nwapi, mock_notify): migrate_data = migrate_data_obj.LiveMigrateData() mock_plm.return_value = migrate_data r = compute.pre_live_migration(self.context, {'uuid': 'foo'}, False, {}, {}) self.assertIsInstance(r, dict) self.assertIsInstance(mock_plm.call_args_list[0][0][5], migrate_data_obj.LiveMigrateData) _test() def test_live_migration_handles_dict(self): compute = manager.ComputeManager() @mock.patch.object(compute, 'compute_rpcapi') @mock.patch.object(compute, 'driver') def _test(mock_driver, mock_rpc): migrate_data = migrate_data_obj.LiveMigrateData() migration = objects.Migration() migration.save = mock.MagicMock() mock_rpc.pre_live_migration.return_value = migrate_data compute._do_live_migration(self.context, 'foo', {'uuid': 'foo'}, False, migration, {}) self.assertIsInstance( mock_rpc.pre_live_migration.call_args_list[0][0][5], migrate_data_obj.LiveMigrateData) _test() def test_rollback_live_migration_handles_dict(self): compute = manager.ComputeManager() @mock.patch.object(compute.network_api, 'setup_networks_on_host') @mock.patch.object(compute, '_notify_about_instance_usage') @mock.patch.object(compute, '_live_migration_cleanup_flags') 
        # Fourth (innermost) patch plus the nested driver for
        # test_rollback_live_migration_handles_dict; the first three patch
        # decorators immediately precede this in the file.
        @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
        def _test(mock_bdm, mock_lmcf, mock_notify, mock_nwapi):
            mock_bdm.return_value = []
            mock_lmcf.return_value = False, False
            # Pass a legacy dict ({}) as migrate_data; the manager must
            # convert it to a LiveMigrateData object before using it.
            compute._rollback_live_migration(self.context,
                                             mock.MagicMock(),
                                             'foo', False, {})
            self.assertIsInstance(mock_lmcf.call_args_list[0][0][0],
                                  migrate_data_obj.LiveMigrateData)
        _test()

    def test_live_migration_force_complete_succeeded(self):
        """Force-complete of a 'running' migration calls the driver and
        emits start/end usage notifications inside an event reporter.
        """
        instance = objects.Instance(uuid=str(uuid.uuid4()))
        migration = objects.Migration()
        migration.status = 'running'
        migration.id = 0

        @mock.patch.object(compute_utils.EventReporter, '__enter__')
        @mock.patch.object(self.compute, '_notify_about_instance_usage')
        @mock.patch.object(objects.Migration, 'get_by_id',
                           return_value=migration)
        @mock.patch.object(self.compute.driver,
                           'live_migration_force_complete')
        def _do_test(force_complete, get_by_id,
                     _notify_about_instance_usage, enter_event_reporter):
            self.compute.live_migration_force_complete(
                self.context, instance, migration.id)

            force_complete.assert_called_once_with(instance)

            _notify_usage_calls = [
                mock.call(self.context, instance,
                          'live.migration.force.complete.start'),
                mock.call(self.context, instance,
                          'live.migration.force.complete.end')
            ]
            _notify_about_instance_usage.assert_has_calls(
                _notify_usage_calls)
            enter_event_reporter.assert_called_once_with()

        _do_test()

    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
    def test_live_migration_pause_vm_invalid_migration_state(
            self, add_instance_fault_from_exc):
        # Force-complete is only valid while the migration is 'running';
        # an 'aborted' migration must raise InvalidMigrationState.
        instance = objects.Instance(id=1234, uuid=str(uuid.uuid4()))
        migration = objects.Migration()
        migration.status = 'aborted'
        migration.id = 0

        @mock.patch.object(objects.Migration, 'get_by_id',
                           return_value=migration)
        def _do_test(get_by_id):
            self.assertRaises(exception.InvalidMigrationState,
                              self.compute.live_migration_force_complete,
                              self.context, instance, migration.id)

        _do_test()

    def test_post_live_migration_at_destination_success(self):

        @mock.patch.object(self.instance, 'save')
@mock.patch.object(self.compute.network_api, 'get_instance_nw_info', return_value='test_network') @mock.patch.object(self.compute.network_api, 'setup_networks_on_host') @mock.patch.object(self.compute.network_api, 'migrate_instance_finish') @mock.patch.object(self.compute, '_notify_about_instance_usage') @mock.patch.object(self.compute, '_get_instance_block_device_info') @mock.patch.object(self.compute, '_get_power_state', return_value=1) @mock.patch.object(self.compute, '_get_compute_info') @mock.patch.object(self.compute.driver, 'post_live_migration_at_destination') def _do_test(post_live_migration_at_destination, _get_compute_info, _get_power_state, _get_instance_block_device_info, _notify_about_instance_usage, migrate_instance_finish, setup_networks_on_host, get_instance_nw_info, save): cn = mock.Mock(spec_set=['hypervisor_hostname']) cn.hypervisor_hostname = 'test_host' _get_compute_info.return_value = cn cn_old = self.instance.host instance_old = self.instance self.compute.post_live_migration_at_destination( self.context, self.instance, False) setup_networks_calls = [ mock.call(self.context, self.instance, self.compute.host), mock.call(self.context, self.instance, cn_old, teardown=True), mock.call(self.context, self.instance, self.compute.host) ] setup_networks_on_host.assert_has_calls(setup_networks_calls) notify_usage_calls = [ mock.call(self.context, instance_old, "live_migration.post.dest.start", network_info='test_network'), mock.call(self.context, self.instance, "live_migration.post.dest.end", network_info='test_network') ] _notify_about_instance_usage.assert_has_calls(notify_usage_calls) migrate_instance_finish.assert_called_once_with( self.context, self.instance, {'source_compute': cn_old, 'dest_compute': self.compute.host}) _get_instance_block_device_info.assert_called_once_with( self.context, self.instance ) get_instance_nw_info.assert_called_once_with(self.context, self.instance) _get_power_state.assert_called_once_with(self.context, self.instance) 
_get_compute_info.assert_called_once_with(self.context, self.compute.host) self.assertEqual(self.compute.host, self.instance.host) self.assertEqual('test_host', self.instance.node) self.assertEqual(1, self.instance.power_state) self.assertEqual(0, self.instance.progress) self.assertIsNone(self.instance.task_state) save.assert_called_once_with( expected_task_state=task_states.MIGRATING) _do_test() def test_post_live_migration_at_destination_compute_not_found(self): @mock.patch.object(self.instance, 'save') @mock.patch.object(self.compute, 'network_api') @mock.patch.object(self.compute, '_notify_about_instance_usage') @mock.patch.object(self.compute, '_get_instance_block_device_info') @mock.patch.object(self.compute, '_get_power_state', return_value=1) @mock.patch.object(self.compute, '_get_compute_info', side_effect=exception.ComputeHostNotFound( host='fake')) @mock.patch.object(self.compute.driver, 'post_live_migration_at_destination') def _do_test(post_live_migration_at_destination, _get_compute_info, _get_power_state, _get_instance_block_device_info, _notify_about_instance_usage, network_api, save): cn = mock.Mock(spec_set=['hypervisor_hostname']) cn.hypervisor_hostname = 'test_host' _get_compute_info.return_value = cn self.compute.post_live_migration_at_destination( self.context, self.instance, False) self.assertIsNone(self.instance.node) _do_test() def test_post_live_migration_at_destination_unexpected_exception(self): @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(self.instance, 'save') @mock.patch.object(self.compute, 'network_api') @mock.patch.object(self.compute, '_notify_about_instance_usage') @mock.patch.object(self.compute, '_get_instance_block_device_info') @mock.patch.object(self.compute, '_get_power_state', return_value=1) @mock.patch.object(self.compute, '_get_compute_info') @mock.patch.object(self.compute.driver, 'post_live_migration_at_destination', side_effect=exception.NovaException) def 
_do_test(post_live_migration_at_destination, _get_compute_info, _get_power_state, _get_instance_block_device_info, _notify_about_instance_usage, network_api, save, add_instance_fault_from_exc): cn = mock.Mock(spec_set=['hypervisor_hostname']) cn.hypervisor_hostname = 'test_host' _get_compute_info.return_value = cn self.assertRaises(exception.NovaException, self.compute.post_live_migration_at_destination, self.context, self.instance, False) self.assertEqual(vm_states.ERROR, self.instance.vm_state) _do_test() def _get_migration(self, migration_id, status, migration_type): migration = objects.Migration() migration.id = migration_id migration.status = status migration.migration_type = migration_type return migration @mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage') @mock.patch.object(objects.Migration, 'get_by_id') @mock.patch.object(nova.virt.fake.SmallFakeDriver, 'live_migration_abort') def test_live_migration_abort(self, mock_driver, mock_get_migration, mock_notify): instance = objects.Instance(id=123, uuid=uuids.instance) migration = self._get_migration(10, 'running', 'live-migration') mock_get_migration.return_value = migration self.compute.live_migration_abort(self.context, instance, migration.id) mock_driver.assert_called_with(instance) _notify_usage_calls = [mock.call(self.context, instance, 'live.migration.abort.start'), mock.call(self.context, instance, 'live.migration.abort.end')] mock_notify.assert_has_calls(_notify_usage_calls) @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage') @mock.patch.object(objects.Migration, 'get_by_id') @mock.patch.object(nova.virt.fake.SmallFakeDriver, 'live_migration_abort') def test_live_migration_abort_not_supported(self, mock_driver, mock_get_migration, mock_notify, mock_instance_fault): instance = objects.Instance(id=123, uuid=uuids.instance) migration = self._get_migration(10, 'running', 'live-migration') 
mock_get_migration.return_value = migration mock_driver.side_effect = NotImplementedError() self.assertRaises(NotImplementedError, self.compute.live_migration_abort, self.context, instance, migration.id) @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(objects.Migration, 'get_by_id') def test_live_migration_abort_wrong_migration_state(self, mock_get_migration, mock_instance_fault): instance = objects.Instance(id=123, uuid=uuids.instance) migration = self._get_migration(10, 'completed', 'live-migration') mock_get_migration.return_value = migration self.assertRaises(exception.InvalidMigrationState, self.compute.live_migration_abort, self.context, instance, migration.id) def test_live_migration_cleanup_flags_block_migrate_libvirt(self): migrate_data = objects.LibvirtLiveMigrateData( is_shared_block_storage=False, is_shared_instance_path=False) do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( migrate_data) self.assertTrue(do_cleanup) self.assertTrue(destroy_disks) def test_live_migration_cleanup_flags_shared_block_libvirt(self): migrate_data = objects.LibvirtLiveMigrateData( is_shared_block_storage=True, is_shared_instance_path=False) do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( migrate_data) self.assertTrue(do_cleanup) self.assertFalse(destroy_disks) def test_live_migration_cleanup_flags_shared_path_libvirt(self): migrate_data = objects.LibvirtLiveMigrateData( is_shared_block_storage=False, is_shared_instance_path=True) do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( migrate_data) self.assertFalse(do_cleanup) self.assertTrue(destroy_disks) def test_live_migration_cleanup_flags_shared_libvirt(self): migrate_data = objects.LibvirtLiveMigrateData( is_shared_block_storage=True, is_shared_instance_path=True) do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( migrate_data) self.assertFalse(do_cleanup) self.assertFalse(destroy_disks) def 
test_live_migration_cleanup_flags_block_migrate_xenapi(self): migrate_data = objects.XenapiLiveMigrateData(block_migration=True) do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( migrate_data) self.assertTrue(do_cleanup) self.assertTrue(destroy_disks) def test_live_migration_cleanup_flags_live_migrate_xenapi(self): migrate_data = objects.XenapiLiveMigrateData(block_migration=False) do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( migrate_data) self.assertFalse(do_cleanup) self.assertFalse(destroy_disks) def test_live_migration_cleanup_flags_live_migrate(self): do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( {}) self.assertFalse(do_cleanup) self.assertFalse(destroy_disks) class ComputeManagerInstanceUsageAuditTestCase(test.TestCase): def setUp(self): super(ComputeManagerInstanceUsageAuditTestCase, self).setUp() self.flags(use_local=True, group='conductor') self.flags(instance_usage_audit=True) @mock.patch('nova.objects.TaskLog') def test_deleted_instance(self, mock_task_log): mock_task_log.get.return_value = None compute = importutils.import_object(CONF.compute_manager) admin_context = context.get_admin_context() fake_db_flavor = fake_flavor.fake_db_flavor() flavor = objects.Flavor(admin_context, **fake_db_flavor) updates = {'host': compute.host, 'flavor': flavor, 'root_gb': 0, 'ephemeral_gb': 0} # fudge beginning and ending time by a second (backwards and forwards, # respectively) so they differ from the instance's launch and # termination times when sub-seconds are truncated and fall within the # audit period one_second = datetime.timedelta(seconds=1) begin = timeutils.utcnow() - one_second instance = objects.Instance(admin_context, **updates) instance.create() instance.launched_at = timeutils.utcnow() instance.save() instance.destroy() end = timeutils.utcnow() + one_second def fake_last_completed_audit_period(): return (begin, end) self.stub_out('nova.utils.last_completed_audit_period', 
fake_last_completed_audit_period) compute._instance_usage_audit(admin_context) self.assertEqual(1, mock_task_log().task_items, 'the deleted test instance was not found in the audit' ' period') self.assertEqual(0, mock_task_log().errors, 'an error was encountered processing the deleted test' ' instance') nova-13.0.0/nova/tests/unit/compute/test_resource_tracker.py0000664000567000056710000016114212701410011025402 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for compute resource tracking.""" import copy import datetime import uuid import mock from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import timeutils import six from nova.compute.monitors import base as monitor_base from nova.compute import resource_tracker from nova.compute import resources from nova.compute import task_states from nova.compute import vm_states from nova import context from nova import exception from nova import objects from nova.objects import base as obj_base from nova.objects import fields from nova.objects import pci_device_pool from nova import rpc from nova import test from nova.tests.unit.pci import fakes as pci_fakes from nova.tests import uuidsentinel from nova.virt import driver FAKE_VIRT_MEMORY_MB = 5 FAKE_VIRT_MEMORY_OVERHEAD = 1 FAKE_VIRT_MEMORY_WITH_OVERHEAD = ( FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD) FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([]))]) FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits( cpu_allocation_ratio=2, ram_allocation_ratio=2) ROOT_GB = 5 EPHEMERAL_GB = 1 FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB FAKE_VIRT_VCPUS = 1 FAKE_VIRT_STATS = {'virt_stat': 10} FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'} FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS) RESOURCE_NAMES = ['vcpu'] CONF = cfg.CONF class UnsupportedVirtDriver(driver.ComputeDriver): """Pretend version of a lame virt driver.""" def __init__(self): super(UnsupportedVirtDriver, self).__init__(None) def get_host_ip_addr(self): return '127.0.0.1' def get_available_resource(self, nodename): # no support for getting resource usage info return {} class FakeVirtDriver(driver.ComputeDriver): def __init__(self, 
                 pci_support=False, stats=None,
                 numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
        super(FakeVirtDriver, self).__init__(None)
        self.memory_mb = FAKE_VIRT_MEMORY_MB
        self.local_gb = FAKE_VIRT_LOCAL_GB
        self.vcpus = FAKE_VIRT_VCPUS
        self.numa_topology = numa_topology

        self.memory_mb_used = 0
        self.local_gb_used = 0

        self.pci_support = pci_support
        # Raw device dicts as a real driver would report them; only
        # populated when pci_support is requested by the test.
        self.pci_devices = [
            {
                'label': 'label_8086_0443',
                'dev_type': fields.PciDeviceType.SRIOV_VF,
                'compute_node_id': 1,
                'address': '0000:00:01.1',
                'product_id': '0443',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': 1,
                'parent_addr': '0000:00:01.0',
            },
            {
                'label': 'label_8086_0443',
                'dev_type': fields.PciDeviceType.SRIOV_VF,
                'compute_node_id': 1,
                'address': '0000:00:01.2',
                'product_id': '0443',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': 1,
                'parent_addr': '0000:00:01.0',
            },
            {
                'label': 'label_8086_0443',
                'dev_type': fields.PciDeviceType.SRIOV_PF,
                'compute_node_id': 1,
                'address': '0000:00:01.0',
                'product_id': '0443',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': 1,
            },
            {
                'label': 'label_8086_0123',
                'dev_type': 'type-PCI',
                'compute_node_id': 1,
                'address': '0000:00:01.0',
                'product_id': '0123',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': 1,
            },
            {
                'label': 'label_8086_7891',
                'dev_type': fields.PciDeviceType.SRIOV_VF,
                'compute_node_id': 1,
                'address': '0000:00:01.0',
                'product_id': '7891',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': None,
                'parent_addr': '0000:08:01.0',
            },
        ] if self.pci_support else []
        # Aggregated pool view matching the devices above.
        self.pci_stats = [
            {
                'count': 2,
                'vendor_id': '8086',
                'product_id': '0443',
                'numa_node': 1,
                'dev_type': fields.PciDeviceType.SRIOV_VF
            },
            {
                'count': 1,
                'vendor_id': '8086',
                'product_id': '0443',
                'numa_node': 1,
                'dev_type': fields.PciDeviceType.SRIOV_PF
            },
            {
                'count': 1,
                'vendor_id': '8086',
                'product_id': '7891',
                'numa_node': None,
                'dev_type': fields.PciDeviceType.SRIOV_VF
            },
        ] if self.pci_support else []
        # 'stats' is deliberately left unset unless provided:
        # get_available_resource() checks hasattr(self, 'stats').
        if stats is not None:
            self.stats = stats

    def get_host_ip_addr(self):
        return '127.0.0.1'

    def get_available_resource(self, nodename):
        d = {
            'vcpus': self.vcpus,
            'memory_mb': self.memory_mb,
            'local_gb': self.local_gb,
            'vcpus_used': 0,
            'memory_mb_used': self.memory_mb_used,
            'local_gb_used': self.local_gb_used,
            'hypervisor_type': 'fake',
            'hypervisor_version': 0,
            'hypervisor_hostname': 'fakehost',
            'cpu_info': '',
            'numa_topology': (
                self.numa_topology._to_json() if self.numa_topology
                else None),
        }
        if self.pci_support:
            d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
        if hasattr(self, 'stats'):
            d['stats'] = self.stats
        return d

    def estimate_instance_overhead(self, instance_info):
        instance_info['memory_mb']  # make sure memory value is present
        overhead = {
            'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
        }
        return overhead  # just return a constant value for testing


class BaseTestCase(test.TestCase):

    @mock.patch('stevedore.enabled.EnabledExtensionManager')
    def setUp(self, _mock_ext_mgr):
        super(BaseTestCase, self).setUp()

        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)

        self.context = context.get_admin_context()

        self._set_pci_passthrough_whitelist()

        self.flags(use_local=True, group='conductor')
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)

        # In-memory stores backing the stubbed DB lookups below.
        self._instances = {}
        self._instance_types = {}

        self.stubs.Set(objects.InstanceList, 'get_by_host_and_node',
                       self._fake_instance_get_by_host_and_node)
        self.stubs.Set(self.conductor.db, 'flavor_get',
                       self._fake_flavor_get)

        self.host = 'fakehost'
        self.compute = self._create_compute_node()
        # Flags flipped by the fake compute_node_update/delete stubs so
        # tests can assert whether the tracker touched the DB.
        self.updated = False
        self.deleted = False
        self.update_call_count = 0

    def _set_pci_passthrough_whitelist(self):
        self.flags(pci_passthrough_whitelist=[
            '{"vendor_id": "8086", "product_id": "0443"}',
            '{"vendor_id": "8086", "product_id": "7891"}'])

    def _create_compute_node(self, values=None):
        # This creates a db representation of a compute_node.
compute = { "id": 1, "uuid": uuidsentinel.fake_compute_node, "service_id": 1, "host": "fakehost", "vcpus": 1, "memory_mb": 1, "local_gb": 1, "vcpus_used": 1, "memory_mb_used": 1, "local_gb_used": 1, "free_ram_mb": 1, "free_disk_gb": 1, "current_workload": 1, "running_vms": 0, "cpu_info": None, "numa_topology": None, "stats": '{"num_instances": "1"}', "hypervisor_hostname": "fakenode", 'hypervisor_version': 1, 'hypervisor_type': 'fake-hyp', 'disk_available_least': None, 'host_ip': None, 'metrics': None, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'cpu_allocation_ratio': None, 'ram_allocation_ratio': None, 'disk_allocation_ratio': None, } if values: compute.update(values) return compute def _create_compute_node_obj(self, context): # Use the db representation of a compute node returned # by _create_compute_node() to create an equivalent compute # node object. compute = self._create_compute_node() compute_obj = objects.ComputeNode() compute_obj = objects.ComputeNode._from_db_object( context, compute_obj, compute) return compute_obj def _create_service(self, host="fakehost", compute=None): if compute: compute = [compute] service = { "id": 1, "host": host, "binary": "nova-compute", "topic": "compute", "compute_node": compute, "report_count": 0, 'disabled': False, 'disabled_reason': None, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'last_seen_up': None, 'forced_down': False, 'version': 0, } return service def _fake_instance_obj(self, stash=True, flavor=None, **kwargs): # Default to an instance ready to resize to or from the same # instance_type flavor = flavor or self._fake_flavor_create() if not isinstance(flavor, objects.Flavor): flavor = objects.Flavor(**flavor) instance_uuid = str(uuid.uuid1()) instance = objects.Instance(context=self.context, uuid=instance_uuid, flavor=flavor) instance.update({ 'vm_state': vm_states.RESIZED, 'task_state': None, 'ephemeral_key_uuid': None, 'os_type': 'Linux', 
'project_id': '123456', 'host': None, 'node': None, 'instance_type_id': flavor['id'], 'memory_mb': flavor['memory_mb'], 'vcpus': flavor['vcpus'], 'root_gb': flavor['root_gb'], 'ephemeral_gb': flavor['ephemeral_gb'], 'launched_on': None, 'system_metadata': {}, 'availability_zone': None, 'vm_mode': None, 'reservation_id': None, 'display_name': None, 'default_swap_device': None, 'power_state': None, 'access_ip_v6': None, 'access_ip_v4': None, 'key_name': None, 'updated_at': None, 'cell_name': None, 'locked': None, 'locked_by': None, 'launch_index': None, 'architecture': None, 'auto_disk_config': None, 'terminated_at': None, 'ramdisk_id': None, 'user_data': None, 'cleaned': None, 'deleted_at': None, 'id': 333, 'disable_terminate': None, 'hostname': None, 'display_description': None, 'key_data': None, 'deleted': None, 'default_ephemeral_device': None, 'progress': None, 'launched_at': None, 'config_drive': None, 'kernel_id': None, 'user_id': None, 'shutdown_terminate': None, 'created_at': None, 'image_ref': None, 'root_device_name': None, }) if stash: instance.old_flavor = flavor instance.new_flavor = flavor instance.numa_topology = kwargs.pop('numa_topology', None) instance.update(kwargs) self._instances[instance_uuid] = instance return instance def _fake_flavor_create(self, **kwargs): instance_type = { 'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'disabled': False, 'is_public': True, 'name': 'fakeitype', 'memory_mb': FAKE_VIRT_MEMORY_MB, 'vcpus': FAKE_VIRT_VCPUS, 'root_gb': ROOT_GB, 'ephemeral_gb': EPHEMERAL_GB, 'swap': 0, 'rxtx_factor': 1.0, 'vcpu_weight': 1, 'flavorid': 'fakeflavor', 'extra_specs': {}, } instance_type.update(**kwargs) instance_type = objects.Flavor(**instance_type) id_ = instance_type['id'] self._instance_types[id_] = instance_type return instance_type def _fake_instance_get_by_host_and_node(self, context, host, nodename, expected_attrs=None): return objects.InstanceList( objects=[i for i in 
self._instances.values() if i['host'] == host]) def _fake_flavor_get(self, ctxt, id_): return self._instance_types[id_] def _fake_compute_node_update(self, ctx, compute_node_id, values, prune_stats=False): self.update_call_count += 1 self.updated = True self.compute.update(values) return self.compute def _driver(self): return FakeVirtDriver() def _tracker(self, host=None): if host is None: host = self.host node = "fakenode" driver = self._driver() tracker = resource_tracker.ResourceTracker(host, driver, node) tracker.compute_node = self._create_compute_node_obj(self.context) tracker.ext_resources_handler = \ resources.ResourceHandler(RESOURCE_NAMES, True) return tracker class UnsupportedDriverTestCase(BaseTestCase): """Resource tracking should be disabled when the virt driver doesn't support it. """ def setUp(self): super(UnsupportedDriverTestCase, self).setUp() self.tracker = self._tracker() # seed tracker with data: self.tracker.update_available_resource(self.context) def _driver(self): return UnsupportedVirtDriver() def test_disabled(self): # disabled = no compute node stats self.assertTrue(self.tracker.disabled) self.assertIsNone(self.tracker.compute_node) def test_disabled_claim(self): # basic claim: instance = self._fake_instance_obj() with mock.patch.object(instance, 'save'): claim = self.tracker.instance_claim(self.context, instance) self.assertEqual(0, claim.memory_mb) def test_disabled_instance_claim(self): # instance variation: instance = self._fake_instance_obj() with mock.patch.object(instance, 'save'): claim = self.tracker.instance_claim(self.context, instance) self.assertEqual(0, claim.memory_mb) @mock.patch('nova.objects.Instance.save') def test_disabled_instance_context_claim(self, mock_save): # instance context manager variation: instance = self._fake_instance_obj() self.tracker.instance_claim(self.context, instance) with self.tracker.instance_claim(self.context, instance) as claim: self.assertEqual(0, claim.memory_mb) def 
test_disabled_updated_usage(self):
        # With a disabled tracker, usage updates are accepted as a no-op.
        instance = self._fake_instance_obj(host='fakehost', memory_mb=5,
                                           root_gb=10)
        self.tracker.update_usage(self.context, instance)

    def test_disabled_resize_claim(self):
        # A disabled tracker grants a zero-sized claim but still records
        # the migration bookkeeping on the claim object.
        instance = self._fake_instance_obj()
        instance_type = self._fake_flavor_create()
        claim = self.tracker.resize_claim(self.context, instance,
                                          instance_type)
        self.assertEqual(0, claim.memory_mb)
        self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
        self.assertEqual(instance_type['id'],
                         claim.migration['new_instance_type_id'])

    def test_disabled_resize_context_claim(self):
        instance = self._fake_instance_obj()
        instance_type = self._fake_flavor_create()
        with self.tracker.resize_claim(self.context, instance,
                                       instance_type) \
                as claim:
            self.assertEqual(0, claim.memory_mb)


class MissingComputeNodeTestCase(BaseTestCase):
    def setUp(self):
        super(MissingComputeNodeTestCase, self).setUp()
        self.tracker = self._tracker()

        self.stub_out('nova.db.service_get_by_compute_host',
                      self._fake_service_get_by_compute_host)
        self.stub_out('nova.db.compute_node_get_by_host_and_nodename',
                      self._fake_compute_node_get_by_host_and_nodename)
        self.stub_out('nova.db.compute_node_create',
                      self._fake_create_compute_node)
        self.tracker.scheduler_client.update_resource_stats = mock.Mock()

    def _fake_create_compute_node(self, context, values):
        # Record that the tracker asked for a new compute node row.
        self.created = True
        return self._create_compute_node(values)

    def _fake_service_get_by_compute_host(self, ctx, host):
        # return a service with no joined compute
        service = self._create_service()
        return service

    def _fake_compute_node_get_by_host_and_nodename(self, ctx, host,
                                                    nodename):
        # return no compute node
        raise exception.ComputeHostNotFound(host=host)

    def test_create_compute_node(self):
        # When no compute node exists, the tracker must create one.
        self.tracker.compute_node = None
        self.tracker.update_available_resource(self.context)
        self.assertTrue(self.created)

    def test_enabled(self):
        self.tracker.update_available_resource(self.context)
        self.assertFalse(self.tracker.disabled)


class BaseTrackerTestCase(BaseTestCase):

    def
setUp(self): # setup plumbing for a working resource tracker with required # database models and a compatible compute driver: super(BaseTrackerTestCase, self).setUp() self.tracker = self._tracker() self._migrations = {} self.stub_out('nova.db.service_get_by_compute_host', self._fake_service_get_by_compute_host) self.stub_out('nova.db.compute_node_get_by_host_and_nodename', self._fake_compute_node_get_by_host_and_nodename) self.stub_out('nova.db.compute_node_update', self._fake_compute_node_update) self.stub_out('nova.db.compute_node_delete', self._fake_compute_node_delete) self.stub_out('nova.db.migration_update', self._fake_migration_update) self.stub_out('nova.db.migration_get_in_progress_by_host_and_node', self._fake_migration_get_in_progress_by_host_and_node) # Note that this must be called before the call to _init_tracker() patcher = pci_fakes.fake_pci_whitelist() self.addCleanup(patcher.stop) self._init_tracker() self.limits = self._limits() def _fake_service_get_by_compute_host(self, ctx, host): self.service = self._create_service(host, compute=self.compute) return self.service def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename): self.compute = self._create_compute_node() return self.compute def _fake_compute_node_update(self, ctx, compute_node_id, values, prune_stats=False): self.update_call_count += 1 self.updated = True self.compute.update(values) return self.compute def _fake_compute_node_delete(self, ctx, compute_node_id): self.deleted = True self.compute.update({'deleted': 1}) return self.compute def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host, node): status = ['confirmed', 'reverted', 'error'] migrations = [] for migration in self._migrations.values(): migration = obj_base.obj_to_primitive(migration) if migration['status'] in status: continue uuid = migration['instance_uuid'] migration['instance'] = self._instances[uuid] migrations.append(migration) return migrations def _fake_migration_update(self, ctxt, 
migration_id, values): # cheat and assume there's only 1 migration present migration = list(self._migrations.values())[0] migration.update(values) return migration def _init_tracker(self): self.tracker.update_available_resource(self.context) def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD, disk_gb=FAKE_VIRT_LOCAL_GB, vcpus=FAKE_VIRT_VCPUS, numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD): """Create limits dictionary used for oversubscribing resources.""" return { 'memory_mb': memory_mb, 'disk_gb': disk_gb, 'vcpu': vcpus, 'numa_topology': numa_topology, } def assertEqualNUMAHostTopology(self, expected, got): attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage') if None in (expected, got): if expected != got: raise AssertionError("Topologies don't match. Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) else: return if len(expected) != len(got): raise AssertionError("Topologies don't match due to different " "number of cells. Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) for exp_cell, got_cell in zip(expected.cells, got.cells): for attr in attrs: if getattr(exp_cell, attr) != getattr(got_cell, attr): raise AssertionError("Topologies don't match. 
Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) def assertEqualPciDevicePool(self, expected, observed): self.assertEqual(expected.product_id, observed.product_id) self.assertEqual(expected.vendor_id, observed.vendor_id) self.assertEqual(expected.tags, observed.tags) self.assertEqual(expected.count, observed.count) def assertEqualPciDevicePoolList(self, expected, observed): ex_objs = expected.objects ob_objs = observed.objects self.assertEqual(len(ex_objs), len(ob_objs)) for i in range(len(ex_objs)): self.assertEqualPciDevicePool(ex_objs[i], ob_objs[i]) def _assert(self, value, field, tracker=None): if tracker is None: tracker = self.tracker if field not in tracker.compute_node: raise test.TestingException( "'%(field)s' not in compute node." % {'field': field}) x = tracker.compute_node[field] if field == 'numa_topology': self.assertEqualNUMAHostTopology( value, objects.NUMATopology.obj_from_db_obj(x)) else: self.assertEqual(value, x) class TrackerTestCase(BaseTrackerTestCase): def test_free_ram_resource_value(self): driver = FakeVirtDriver() mem_free = driver.memory_mb - driver.memory_mb_used self.assertEqual(mem_free, self.tracker.compute_node.free_ram_mb) def test_free_disk_resource_value(self): driver = FakeVirtDriver() mem_free = driver.local_gb - driver.local_gb_used self.assertEqual(mem_free, self.tracker.compute_node.free_disk_gb) def test_update_compute_node(self): self.assertFalse(self.tracker.disabled) self.assertTrue(self.updated) def test_init(self): driver = self._driver() self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb') self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb') self._assert(FAKE_VIRT_VCPUS, 'vcpus') self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology') self._assert(0, 'memory_mb_used') self._assert(0, 'local_gb_used') self._assert(0, 'vcpus_used') self._assert(0, 'running_vms') self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb') self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb') self.assertFalse(self.tracker.disabled) 
        self.assertEqual(0, self.tracker.compute_node.current_workload)
        # PCI pools derived from the driver stats must match one-to-one.
        expected = pci_device_pool.from_pci_stats(driver.pci_stats)
        self.assertEqual(len(expected),
                         len(self.tracker.compute_node.pci_device_pools))
        for expected_pool, actual_pool in zip(
                expected, self.tracker.compute_node.pci_device_pools):
            self.assertEqual(expected_pool, actual_pool)

    def test_set_instance_host_and_node(self):
        inst = objects.Instance()
        with mock.patch.object(inst, 'save') as mock_save:
            self.tracker._set_instance_host_and_node(inst)
            mock_save.assert_called_once_with()
        self.assertEqual(self.tracker.host, inst.host)
        self.assertEqual(self.tracker.nodename, inst.node)
        self.assertEqual(self.tracker.host, inst.launched_on)

    def test_unset_instance_host_and_node(self):
        inst = objects.Instance()
        with mock.patch.object(inst, 'save') as mock_save:
            self.tracker._set_instance_host_and_node(inst)
            self.tracker._unset_instance_host_and_node(inst)
            self.assertEqual(2, mock_save.call_count)
        self.assertIsNone(inst.host)
        self.assertIsNone(inst.node)
        # launched_on is intentionally preserved across unset.
        self.assertEqual(self.tracker.host, inst.launched_on)


class SchedulerClientTrackerTestCase(BaseTrackerTestCase):

    def setUp(self):
        super(SchedulerClientTrackerTestCase, self).setUp()
        self.tracker.scheduler_client.update_resource_stats = mock.Mock()

    def test_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # trackers update path, so safest to do two updates and look for
        # differences then to rely on the initial state being the same
        # as an update
        urs_mock = self.tracker.scheduler_client.update_resource_stats
        self.tracker._update(self.context)
        urs_mock.reset_mock()
        # change a compute node value to simulate a change
        self.tracker.compute_node.local_gb_used += 1
        self.tracker._update(self.context)
        urs_mock.assert_called_once_with(self.tracker.compute_node)

    def test_no_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # trackers update path, so safest to do two updates and look for
        # differences then to rely on the initial state being the same
        # as an update
        self.tracker._update(self.context)
        update = self.tracker.scheduler_client.update_resource_stats
        update.reset_mock()
        self.tracker._update(self.context)
        self.assertFalse(update.called, "update_resource_stats should not be "
                         "called when there is no change")


class TrackerPciStatsTestCase(BaseTrackerTestCase):

    def test_update_compute_node(self):
        self.assertFalse(self.tracker.disabled)
        self.assertTrue(self.updated)

    def test_init(self):
        driver = self._driver()
        self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
        self._assert(FAKE_VIRT_VCPUS, 'vcpus')
        self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
        self._assert(0, 'running_vms')
        self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
        self.assertFalse(self.tracker.disabled)
        self.assertEqual(0, self.tracker.compute_node.current_workload)
        expected_pools = pci_device_pool.from_pci_stats(driver.pci_stats)
        observed_pools = self.tracker.compute_node.pci_device_pools
        self.assertEqualPciDevicePoolList(expected_pools, observed_pools)

    def _driver(self):
        # Unlike the base class, this driver reports PCI devices.
        return FakeVirtDriver(pci_support=True)


class TrackerExtraResourcesTestCase(BaseTrackerTestCase):

    def test_set_empty_ext_resources(self):
        resources = self._create_compute_node_obj(self.context)
        del resources.stats
        self.tracker._write_ext_resources(resources)
        self.assertEqual({}, resources.stats)

    def test_set_extra_resources(self):
        def fake_write_resources(resources):
            resources['stats']['resA'] = '123'
            resources['stats']['resB'] = 12

        self.stubs.Set(self.tracker.ext_resources_handler,
                       'write_resources',
                       fake_write_resources)
        resources = self._create_compute_node_obj(self.context)
        del resources.stats
        self.tracker._write_ext_resources(resources)
        expected = {"resA": "123", "resB": "12"}
        self.assertEqual(sorted(expected),
sorted(resources.stats)) class InstanceClaimTestCase(BaseTrackerTestCase): def _instance_topology(self, mem): mem = mem * 1024 return objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=0, cpuset=set([1]), memory=mem), objects.InstanceNUMACell( id=1, cpuset=set([3]), memory=mem)]) def _claim_topology(self, mem, cpus=1): if self.tracker.driver.numa_topology is None: return None mem = mem * 1024 return objects.NUMATopology( cells=[objects.NUMACell( id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus, memory_usage=mem, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell( id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus, memory_usage=mem, mempages=[], siblings=[], pinned_cpus=set([]))]) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_instance_claim_with_oversubscription(self, mock_get): memory_mb = FAKE_VIRT_MEMORY_MB * 2 root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB vcpus = FAKE_VIRT_VCPUS * 2 claim_topology = self._claim_topology(3) instance_topology = self._instance_topology(3) limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD, 'disk_gb': root_gb * 2, 'vcpu': vcpus, 'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD} instance = self._fake_instance_obj(memory_mb=memory_mb, root_gb=root_gb, ephemeral_gb=ephemeral_gb, numa_topology=instance_topology) with mock.patch.object(instance, 'save'): self.tracker.instance_claim(self.context, instance, limits) self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD, self.tracker.compute_node.memory_mb_used) self.assertEqualNUMAHostTopology( claim_topology, objects.NUMATopology.obj_from_db_obj( self.compute['numa_topology'])) self.assertEqual(root_gb * 2, self.tracker.compute_node.local_gb_used) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) @mock.patch('nova.objects.Instance.save') def test_additive_claims(self, mock_save, 
mock_get): self.limits['vcpu'] = 2 claim_topology = self._claim_topology(2, cpus=2) flavor = self._fake_flavor_create( memory_mb=1, root_gb=1, ephemeral_gb=0) instance_topology = self._instance_topology(1) instance = self._fake_instance_obj( flavor=flavor, numa_topology=instance_topology) with self.tracker.instance_claim(self.context, instance, self.limits): pass instance = self._fake_instance_obj( flavor=flavor, numa_topology=instance_topology) with self.tracker.instance_claim(self.context, instance, self.limits): pass self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD), self.tracker.compute_node.memory_mb_used) self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']), self.tracker.compute_node.local_gb_used) self.assertEqual(2 * flavor['vcpus'], self.tracker.compute_node.vcpus_used) self.assertEqualNUMAHostTopology( claim_topology, objects.NUMATopology.obj_from_db_obj( self.compute['numa_topology'])) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) @mock.patch('nova.objects.Instance.save') def test_context_claim_with_exception(self, mock_save, mock_get): instance = self._fake_instance_obj(memory_mb=1, root_gb=1, ephemeral_gb=1) try: with self.tracker.instance_claim(self.context, instance): # raise test.TestingException() except test.TestingException: pass self.assertEqual(0, self.tracker.compute_node.memory_mb_used) self.assertEqual(0, self.tracker.compute_node.local_gb_used) self.assertEqual(0, self.compute['memory_mb_used']) self.assertEqual(0, self.compute['local_gb_used']) self.assertEqualNUMAHostTopology( FAKE_VIRT_NUMA_TOPOLOGY, objects.NUMATopology.obj_from_db_obj( self.compute['numa_topology'])) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_update_load_stats_for_instance(self, mock_get): instance = self._fake_instance_obj(task_state=task_states.SCHEDULING) with 
mock.patch.object(instance, 'save'): with self.tracker.instance_claim(self.context, instance): pass self.assertEqual(1, self.tracker.compute_node.current_workload) instance['vm_state'] = vm_states.ACTIVE instance['task_state'] = None instance['host'] = 'fakehost' self.tracker.update_usage(self.context, instance) self.assertEqual(0, self.tracker.compute_node.current_workload) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) @mock.patch('nova.objects.Instance.save') def test_cpu_stats(self, mock_save, mock_get): limits = {'disk_gb': 100, 'memory_mb': 100} self.assertEqual(0, self.tracker.compute_node.vcpus_used) vcpus = 1 instance = self._fake_instance_obj(vcpus=vcpus) # should not do anything until a claim is made: self.tracker.update_usage(self.context, instance) self.assertEqual(0, self.tracker.compute_node.vcpus_used) with self.tracker.instance_claim(self.context, instance, limits): pass self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used) # instance state can change without modifying vcpus in use: instance['task_state'] = task_states.SCHEDULING self.tracker.update_usage(self.context, instance) self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used) add_vcpus = 10 vcpus += add_vcpus instance = self._fake_instance_obj(vcpus=add_vcpus) with self.tracker.instance_claim(self.context, instance, limits): pass self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used) instance['vm_state'] = vm_states.DELETED self.tracker.update_usage(self.context, instance) vcpus -= add_vcpus self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used) def test_skip_deleted_instances(self): # ensure that the audit process skips instances that have vm_state # DELETED, but the DB record is not yet deleted. 
self._fake_instance_obj(vm_state=vm_states.DELETED, host=self.host) self.tracker.update_available_resource(self.context) self.assertEqual(0, self.tracker.compute_node.memory_mb_used) self.assertEqual(0, self.tracker.compute_node.local_gb_used) @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_deleted_instances_with_migrations(self, mock_migration_list): migration = objects.Migration(context=self.context, migration_type='resize', instance_uuid='invalid') mock_migration_list.return_value = [migration] self.tracker.update_available_resource(self.context) self.assertEqual(0, self.tracker.compute_node.memory_mb_used) self.assertEqual(0, self.tracker.compute_node.local_gb_used) mock_migration_list.assert_called_once_with(self.context, "fakehost", "fakenode") @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.InstanceList.get_by_host_and_node') def test_instances_with_live_migrations(self, mock_instance_list, mock_migration_list): instance = self._fake_instance_obj() migration = objects.Migration(context=self.context, migration_type='live-migration', instance_uuid=instance.uuid) mock_migration_list.return_value = [migration] mock_instance_list.return_value = [instance] with mock.patch.object(self.tracker, '_pair_instances_to_migrations' ) as mock_pair: self.tracker.update_available_resource(self.context) self.assertTrue(mock_pair.called) self.assertEqual( instance.uuid, mock_pair.call_args_list[0][0][0][0].instance_uuid) self.assertEqual(instance.uuid, mock_pair.call_args_list[0][0][1][0].uuid) self.assertEqual( ['system_metadata', 'numa_topology', 'flavor', 'migration_context'], mock_instance_list.call_args_list[0][1]['expected_attrs']) self.assertEqual(FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD, self.tracker.compute_node['memory_mb_used']) self.assertEqual(ROOT_GB + EPHEMERAL_GB, self.tracker.compute_node['local_gb_used']) mock_migration_list.assert_called_once_with(self.context, 
"fakehost", "fakenode") def test_pair_instances_to_migrations(self): migrations = [objects.Migration(instance_uuid=uuidsentinel.instance1), objects.Migration(instance_uuid=uuidsentinel.instance2)] instances = [objects.Instance(uuid=uuidsentinel.instance2), objects.Instance(uuid=uuidsentinel.instance1)] self.tracker._pair_instances_to_migrations(migrations, instances) order = [uuidsentinel.instance1, uuidsentinel.instance2] for i, migration in enumerate(migrations): self.assertEqual(order[i], migration.instance.uuid) @mock.patch('nova.compute.claims.Claim') @mock.patch('nova.objects.Instance.save') def test_claim_saves_numa_topology(self, mock_save, mock_claim): def fake_save(): self.assertEqual(set(['numa_topology', 'host', 'node', 'launched_on']), inst.obj_what_changed()) mock_save.side_effect = fake_save inst = objects.Instance(host=None, node=None, memory_mb=1024) inst.obj_reset_changes() numa = objects.InstanceNUMATopology() claim = mock.MagicMock() claim.claimed_numa_topology = numa mock_claim.return_value = claim with mock.patch.object(self.tracker, '_update_usage_from_instance'): self.tracker.instance_claim(self.context, inst) mock_save.assert_called_once_with() @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_claim_sets_instance_host_and_node(self, mock_get): instance = self._fake_instance_obj() self.assertIsNone(instance['host']) self.assertIsNone(instance['launched_on']) self.assertIsNone(instance['node']) with mock.patch.object(instance, 'save'): claim = self.tracker.instance_claim(self.context, instance) self.assertNotEqual(0, claim.memory_mb) self.assertEqual('fakehost', instance['host']) self.assertEqual('fakehost', instance['launched_on']) self.assertEqual('fakenode', instance['node']) class _MoveClaimTestCase(BaseTrackerTestCase): def setUp(self): super(_MoveClaimTestCase, self).setUp() self.instance = self._fake_instance_obj() self.instance_type = 
self._fake_flavor_create()
        self.claim_method = self.tracker._move_claim

    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_additive_claims(self, mock_get, mock_save):
        """Two move claims on the same node stack their resource usage."""
        # Double the limits so that both claims fit on the node.
        limits = self._limits(
            2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
            2 * FAKE_VIRT_LOCAL_GB,
            2 * FAKE_VIRT_VCPUS)
        self.claim_method(
            self.context, self.instance, self.instance_type, limits=limits)
        # Each claim persists the instance exactly once.
        mock_save.assert_called_once_with()
        mock_save.reset_mock()
        instance2 = self._fake_instance_obj()
        self.claim_method(
            self.context, instance2, self.instance_type, limits=limits)
        mock_save.assert_called_once_with()

        # Usage recorded on the compute node is the sum of both claims.
        self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
        self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
        self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')

    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_move_type_not_tracked(self, mock_get, mock_save):
        """A 'live-migration' move claim must not consume node resources."""
        self.claim_method(self.context, self.instance,
                          self.instance_type, limits=self.limits,
                          move_type="live-migration")
        mock_save.assert_called_once_with()
        # No usage is recorded and no migration is tracked for this type.
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
        self.assertEqual(0, len(self.tracker.tracked_migrations))

    @mock.patch('nova.objects.Instance.save')
    @mock.patch.object(objects.Migration, 'save')
    def test_existing_migration(self, save_mock, save_inst_mock):
        """Claiming with a pre-created migration reuses and updates it."""
        migration = objects.Migration(self.context, id=42,
                                      instance_uuid=self.instance.uuid,
                                      source_compute='fake-other-compute',
                                      source_node='fake-other-node',
                                      status='accepted',
                                      migration_type='evacuation')
        self.claim_method(self.context, self.instance, self.instance_type,
                          migration=migration)
        # The claim points the migration at this tracker's host/node and
        # advances it to the pre-migrating state.
        self.assertEqual(self.tracker.host, migration.dest_compute)
        self.assertEqual(self.tracker.nodename, migration.dest_node)
        self.assertEqual("pre-migrating", 
migration.status) self.assertEqual(1, len(self.tracker.tracked_migrations)) save_mock.assert_called_once_with() save_inst_mock.assert_called_once_with() class ResizeClaimTestCase(_MoveClaimTestCase): def setUp(self): super(ResizeClaimTestCase, self).setUp() self.claim_method = self.tracker.resize_claim def test_move_type_not_tracked(self): self.skipTest("Resize_claim does already sets the move_type.") def test_existing_migration(self): self.skipTest("Resize_claim does not support having existing " "migration record.") class OrphanTestCase(BaseTrackerTestCase): def _driver(self): class OrphanVirtDriver(FakeVirtDriver): def get_per_instance_usage(self): return { '1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB, 'uuid': '1-2-3-4-5'}, '2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB, 'uuid': '2-3-4-5-6'}, } return OrphanVirtDriver() def test_usage(self): self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, self.tracker.compute_node.memory_mb_used) def test_find(self): # create one legit instance and verify the 2 orphans remain self._fake_instance_obj() orphans = self.tracker._find_orphaned_instances() self.assertEqual(2, len(orphans)) class ComputeMonitorTestCase(BaseTestCase): def setUp(self): super(ComputeMonitorTestCase, self).setUp() self.tracker = self._tracker() self.node_name = 'nodename' self.user_id = 'fake' self.project_id = 'fake' self.info = {} self.context = context.RequestContext(self.user_id, self.project_id) def test_get_host_metrics_none(self): self.tracker.monitors = [] metrics = self.tracker._get_host_metrics(self.context, self.node_name) self.assertEqual(len(metrics), 0) @mock.patch.object(resource_tracker.LOG, 'warning') def test_get_host_metrics_exception(self, mock_LOG_warning): monitor = mock.MagicMock() monitor.add_metrics_to_list.side_effect = Exception self.tracker.monitors = [monitor] metrics = self.tracker._get_host_metrics(self.context, self.node_name) mock_LOG_warning.assert_called_once_with( u'Cannot get the metrics from %(mon)s; error: 
%(exc)s', mock.ANY) self.assertEqual(0, len(metrics)) def test_get_host_metrics(self): class FakeCPUMonitor(monitor_base.MonitorBase): NOW_TS = timeutils.utcnow() def __init__(self, *args): super(FakeCPUMonitor, self).__init__(*args) self.source = 'FakeCPUMonitor' def get_metric_names(self): return set(["cpu.frequency"]) def get_metrics(self): return [("cpu.frequency", 100, self.NOW_TS)] self.tracker.monitors = [FakeCPUMonitor(None)] mock_notifier = mock.Mock() with mock.patch.object(rpc, 'get_notifier', return_value=mock_notifier) as mock_get: metrics = self.tracker._get_host_metrics(self.context, self.node_name) mock_get.assert_called_once_with(service='compute', host=self.node_name) expected_metrics = [ { 'timestamp': FakeCPUMonitor.NOW_TS.isoformat(), 'name': 'cpu.frequency', 'value': 100, 'source': 'FakeCPUMonitor' }, ] payload = { 'metrics': expected_metrics, 'host': self.tracker.host, 'host_ip': CONF.my_ip, 'nodename': self.node_name } mock_notifier.info.assert_called_once_with( self.context, 'compute.metrics.update', payload) self.assertEqual(metrics, expected_metrics) class TrackerPeriodicTestCase(BaseTrackerTestCase): def test_periodic_status_update(self): # verify update called on instantiation self.assertEqual(1, self.update_call_count) # verify update not called if no change to resources self.tracker.update_available_resource(self.context) self.assertEqual(1, self.update_call_count) # verify update is called when resources change driver = self.tracker.driver driver.memory_mb += 1 self.tracker.update_available_resource(self.context) self.assertEqual(2, self.update_call_count) def test_update_available_resource_calls_locked_inner(self): @mock.patch.object(self.tracker, 'driver') @mock.patch.object(self.tracker, '_update_available_resource') @mock.patch.object(self.tracker, '_verify_resources') @mock.patch.object(self.tracker, '_report_hypervisor_resource_view') def _test(mock_rhrv, mock_vr, mock_uar, mock_driver): resources = {'there is someone in my 
head': 'but it\'s not me'} mock_driver.get_available_resource.return_value = resources self.tracker.update_available_resource(self.context) mock_uar.assert_called_once_with(self.context, resources) _test() class StatsDictTestCase(BaseTrackerTestCase): """Test stats handling for a virt driver that provides stats as a dictionary. """ def _driver(self): return FakeVirtDriver(stats=FAKE_VIRT_STATS) def test_virt_stats(self): # start with virt driver stats stats = self.tracker.compute_node.stats self.assertEqual(FAKE_VIRT_STATS_COERCED, stats) # adding an instance should keep virt driver stats self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host) self.tracker.update_available_resource(self.context) stats = self.tracker.compute_node.stats # compute node stats are coerced to strings expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED) for k, v in self.tracker.stats.items(): expected_stats[k] = six.text_type(v) self.assertEqual(expected_stats, stats) # removing the instances should keep only virt driver stats self._instances = {} self.tracker.update_available_resource(self.context) stats = self.tracker.compute_node.stats self.assertEqual(FAKE_VIRT_STATS_COERCED, stats) class StatsInvalidTypeTestCase(BaseTrackerTestCase): """Test stats handling for a virt driver that provides an invalid type for stats. 
""" def _driver(self): return FakeVirtDriver(stats=10) def _init_tracker(self): # do not do initial update in setup pass def test_virt_stats(self): # should throw exception for incorrect stats value type self.assertRaises(ValueError, self.tracker.update_available_resource, context=self.context) class UpdateUsageFromMigrationsTestCase(BaseTrackerTestCase): @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage_from_migration') def test_no_migrations(self, mock_update_usage): migrations = [] self.tracker._update_usage_from_migrations(self.context, migrations) self.assertFalse(mock_update_usage.called) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage_from_migration') @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_instance_not_found(self, mock_get_instance, mock_update_usage): mock_get_instance.side_effect = exception.InstanceNotFound( instance_id='some_id', ) migration = objects.Migration( context=self.context, instance_uuid='some_uuid', ) self.tracker._update_usage_from_migrations(self.context, [migration]) mock_get_instance.assert_called_once_with(self.context, 'some_uuid') self.assertFalse(mock_update_usage.called) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage_from_migration') @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_update_usage_called(self, mock_get_instance, mock_update_usage): instance = self._fake_instance_obj() mock_get_instance.return_value = instance migration = objects.Migration( context=self.context, instance_uuid=instance.uuid, ) self.tracker._update_usage_from_migrations(self.context, [migration]) mock_get_instance.assert_called_once_with(self.context, instance.uuid) mock_update_usage.assert_called_once_with( self.context, instance, None, migration) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage_from_migration') @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_flavor_not_found(self, mock_get_instance, 
mock_update_usage): mock_update_usage.side_effect = exception.FlavorNotFound(flavor_id='') instance = self._fake_instance_obj() mock_get_instance.return_value = instance migration = objects.Migration( context=self.context, instance_uuid=instance.uuid, ) self.tracker._update_usage_from_migrations(self.context, [migration]) mock_get_instance.assert_called_once_with(self.context, instance.uuid) mock_update_usage.assert_called_once_with( self.context, instance, None, migration) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage_from_migration') @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_not_resizing_state(self, mock_get_instance, mock_update_usage): instance = self._fake_instance_obj() instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.SUSPENDING mock_get_instance.return_value = instance migration = objects.Migration( context=self.context, instance_uuid=instance.uuid, ) self.tracker._update_usage_from_migrations(self.context, [migration]) mock_get_instance.assert_called_once_with(self.context, instance.uuid) self.assertFalse(mock_update_usage.called) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage_from_migration') @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_use_most_recent(self, mock_get_instance, mock_update_usage): instance = self._fake_instance_obj() mock_get_instance.return_value = instance migration_2002 = objects.Migration( id=2002, context=self.context, instance_uuid=instance.uuid, updated_at=datetime.datetime(2002, 1, 1, 0, 0, 0), ) migration_2003 = objects.Migration( id=2003, context=self.context, instance_uuid=instance.uuid, updated_at=datetime.datetime(2003, 1, 1, 0, 0, 0), ) migration_2001 = objects.Migration( id=2001, context=self.context, instance_uuid=instance.uuid, updated_at=datetime.datetime(2001, 1, 1, 0, 0, 0), ) self.tracker._update_usage_from_migrations( self.context, [migration_2002, migration_2003, migration_2001]) 
mock_get_instance.assert_called_once_with(self.context, instance.uuid) mock_update_usage.assert_called_once_with( self.context, instance, None, migration_2003) nova-13.0.0/nova/tests/unit/compute/test_claims.py0000664000567000056710000004031012701410011023301 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for resource tracker claims.""" import uuid import mock from nova.compute import claims from nova import context from nova import exception from nova import objects from nova.pci import manager as pci_manager from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.pci import fakes as pci_fakes class FakeResourceHandler(object): test_called = False usage_is_instance = False def test_resources(self, usage, limits): self.test_called = True self.usage_is_itype = usage.get('name') == 'fakeitype' return [] class DummyTracker(object): icalled = False rcalled = False ext_resources_handler = FakeResourceHandler() def __init__(self): self.new_pci_tracker() def abort_instance_claim(self, *args, **kwargs): self.icalled = True def drop_move_claim(self, *args, **kwargs): self.rcalled = True def new_pci_tracker(self): ctxt = context.RequestContext('testuser', 'testproject') self.pci_tracker = pci_manager.PciDevTracker(ctxt) class ClaimTestCase(test.NoDBTestCase): def setUp(self): super(ClaimTestCase, self).setUp() self.context = 
context.RequestContext('fake-user', 'fake-project') self.resources = self._fake_resources() self.tracker = DummyTracker() self.empty_requests = objects.InstancePCIRequests( requests=[] ) def _claim(self, limits=None, overhead=None, requests=None, **kwargs): numa_topology = kwargs.pop('numa_topology', None) instance = self._fake_instance(**kwargs) if numa_topology: db_numa_topology = { 'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'instance_uuid': instance.uuid, 'numa_topology': numa_topology._to_json(), 'pci_requests': (requests or self.empty_requests).to_json() } else: db_numa_topology = None if overhead is None: overhead = {'memory_mb': 0} @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=requests or self.empty_requests) @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value=db_numa_topology) def get_claim(mock_extra_get, mock_pci_get): return claims.Claim(self.context, instance, self.tracker, self.resources, overhead=overhead, limits=limits) return get_claim() def _fake_instance(self, **kwargs): instance = { 'uuid': str(uuid.uuid1()), 'memory_mb': 1024, 'root_gb': 10, 'ephemeral_gb': 5, 'vcpus': 1, 'system_metadata': {}, 'numa_topology': None } instance.update(**kwargs) return fake_instance.fake_instance_obj(self.context, **instance) def _fake_instance_type(self, **kwargs): instance_type = { 'id': 1, 'name': 'fakeitype', 'memory_mb': 1, 'vcpus': 1, 'root_gb': 1, 'ephemeral_gb': 2 } instance_type.update(**kwargs) return objects.Flavor(**instance_type) def _fake_resources(self, values=None): resources = { 'memory_mb': 2048, 'memory_mb_used': 0, 'free_ram_mb': 2048, 'local_gb': 20, 'local_gb_used': 0, 'free_disk_gb': 20, 'vcpus': 2, 'vcpus_used': 0, 'numa_topology': objects.NUMATopology( cells=[objects.NUMACell(id=1, cpuset=set([1, 2]), memory=512, memory_usage=0, cpu_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=2, cpuset=set([3, 4]), memory=512, 
memory_usage=0, cpu_usage=0,
                                     mempages=[], siblings=[],
                                     pinned_cpus=set([]))]
                )._to_json()
        }
        if values:
            resources.update(values)
        return resources

    def test_memory_unlimited(self):
        # No memory limit supplied: any request, however large, succeeds.
        self._claim(memory_mb=99999999)

    def test_disk_unlimited_root(self):
        # No disk limit supplied: huge root disk is accepted.
        self._claim(root_gb=999999)

    def test_disk_unlimited_ephemeral(self):
        # No disk limit supplied: huge ephemeral disk is accepted.
        self._claim(ephemeral_gb=999999)

    def test_memory_with_overhead(self):
        # 2040 MB instance + 8 MB overhead fits exactly in the 2048 limit.
        overhead = {'memory_mb': 8}
        limits = {'memory_mb': 2048}
        self._claim(memory_mb=2040, limits=limits,
                    overhead=overhead)

    def test_memory_with_overhead_insufficient(self):
        # 2040 + 9 exceeds the 2048 MB limit by one megabyte.
        overhead = {'memory_mb': 9}
        limits = {'memory_mb': 2048}

        self.assertRaises(exception.ComputeResourcesUnavailable,
                          self._claim, limits=limits, overhead=overhead,
                          memory_mb=2040)

    def test_memory_oversubscription(self):
        # Without an explicit limit, memory oversubscription is allowed.
        self._claim(memory_mb=4096)

    def test_memory_insufficient(self):
        # Request exceeding the explicit memory limit must be rejected.
        limits = {'memory_mb': 8192}
        self.assertRaises(exception.ComputeResourcesUnavailable,
                          self._claim, limits=limits, memory_mb=16384)

    def test_disk_oversubscription(self):
        # root + ephemeral (50 GB) fits within the 60 GB disk limit.
        limits = {'disk_gb': 60}
        self._claim(root_gb=10, ephemeral_gb=40,
                    limits=limits)

    def test_disk_insufficient(self):
        # Rejection message must mention the disk shortage.
        limits = {'disk_gb': 45}
        self.assertRaisesRegex(
            exception.ComputeResourcesUnavailable,
            "disk",
            self._claim, limits=limits, root_gb=10, ephemeral_gb=40)

    def test_disk_and_memory_insufficient(self):
        # Both shortages should be reported in the same error message.
        limits = {'disk_gb': 45, 'memory_mb': 8192}
        self.assertRaisesRegex(
            exception.ComputeResourcesUnavailable,
            "memory.*disk",
            self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
            memory_mb=16384)

    @mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
                return_value=True)
    def test_pci_pass(self, mock_supports):
        # A supported PCI request is passed to the PCI stats check.
        request = objects.InstancePCIRequest(count=1,
            spec=[{'vendor_id': 'v', 'product_id': 'p'}])
        requests = objects.InstancePCIRequests(requests=[request])
        # Claim.__init__() would raise ComputeResourcesUnavailable
        # if Claim._test_pci() did not return None. 
self._claim(requests=requests) mock_supports.assert_called_once_with(requests.requests) @mock.patch('nova.pci.stats.PciDeviceStats.support_requests', return_value=False) def test_pci_fail(self, mock_supports): request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': 'v', 'product_id': 'p'}]) requests = objects.InstancePCIRequests(requests=[request]) self.assertRaises(exception.ComputeResourcesUnavailable, self._claim, requests=requests) mock_supports.assert_called_once_with(requests.requests) @mock.patch('nova.pci.stats.PciDeviceStats.support_requests', return_value=True) def test_pci_pass_no_requests(self, mock_supports): # Claim.__init__() would raise ComputeResourcesUnavailable # if Claim._test_pci() did not return None. self._claim() self.assertFalse(mock_supports.called) def test_ext_resources(self): self._claim() self.assertTrue(self.tracker.ext_resources_handler.test_called) self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype) def test_numa_topology_no_limit(self): huge_instance = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2]), memory=512)]) self._claim(numa_topology=huge_instance) def test_numa_topology_fails(self): huge_instance = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2, 3, 4, 5]), memory=2048)]) limit_topo = objects.NUMATopologyLimits( cpu_allocation_ratio=1, ram_allocation_ratio=1) self.assertRaises(exception.ComputeResourcesUnavailable, self._claim, limits={'numa_topology': limit_topo}, numa_topology=huge_instance) def test_numa_topology_passes(self): huge_instance = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2]), memory=512)]) limit_topo = objects.NUMATopologyLimits( cpu_allocation_ratio=1, ram_allocation_ratio=1) self._claim(limits={'numa_topology': limit_topo}, numa_topology=huge_instance) @pci_fakes.patch_pci_whitelist @mock.patch('nova.objects.InstancePCIRequests.get_by_instance') def 
test_numa_topology_with_pci(self, mock_get_by_instance): dev_dict = { 'compute_node_id': 1, 'address': 'a', 'product_id': 'p', 'vendor_id': 'v', 'numa_node': 1, 'dev_type': 'type-PCI', 'parent_addr': 'a1', 'status': 'available'} self.tracker.new_pci_tracker() self.tracker.pci_tracker._set_hvdevs([dev_dict]) request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': 'v', 'product_id': 'p'}]) requests = objects.InstancePCIRequests(requests=[request]) mock_get_by_instance.return_value = requests huge_instance = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2]), memory=512)]) self._claim(requests=requests, numa_topology=huge_instance) @pci_fakes.patch_pci_whitelist @mock.patch('nova.objects.InstancePCIRequests.get_by_instance') def test_numa_topology_with_pci_fail(self, mock_get_by_instance): dev_dict = { 'compute_node_id': 1, 'address': 'a', 'product_id': 'p', 'vendor_id': 'v', 'numa_node': 1, 'dev_type': 'type-PCI', 'parent_addr': 'a1', 'status': 'available'} dev_dict2 = { 'compute_node_id': 1, 'address': 'a', 'product_id': 'p', 'vendor_id': 'v', 'numa_node': 2, 'dev_type': 'type-PCI', 'parent_addr': 'a1', 'status': 'available'} self.tracker.new_pci_tracker() self.tracker.pci_tracker._set_hvdevs([dev_dict, dev_dict2]) request = objects.InstancePCIRequest(count=2, spec=[{'vendor_id': 'v', 'product_id': 'p'}]) requests = objects.InstancePCIRequests(requests=[request]) mock_get_by_instance.return_value = requests huge_instance = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2]), memory=512)]) self.assertRaises(exception.ComputeResourcesUnavailable, self._claim, requests=requests, numa_topology=huge_instance) @pci_fakes.patch_pci_whitelist @mock.patch('nova.objects.InstancePCIRequests.get_by_instance') def test_numa_topology_with_pci_no_numa_info(self, mock_get_by_instance): dev_dict = { 'compute_node_id': 1, 'address': 'a', 'product_id': 'p', 'vendor_id': 'v', 'numa_node': None, 
'dev_type': 'type-PCI', 'parent_addr': 'a1', 'status': 'available'} self.tracker.new_pci_tracker() self.tracker.pci_tracker._set_hvdevs([dev_dict]) request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': 'v', 'product_id': 'p'}]) requests = objects.InstancePCIRequests(requests=[request]) mock_get_by_instance.return_value = requests huge_instance = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2]), memory=512)]) self._claim(requests=requests, numa_topology=huge_instance) def test_abort(self): claim = self._abort() self.assertTrue(claim.tracker.icalled) def _abort(self): claim = None try: with self._claim(memory_mb=4096) as claim: raise test.TestingException("abort") except test.TestingException: pass return claim class MoveClaimTestCase(ClaimTestCase): def _claim(self, limits=None, overhead=None, requests=None, **kwargs): instance_type = self._fake_instance_type(**kwargs) numa_topology = kwargs.pop('numa_topology', None) self.instance = self._fake_instance(**kwargs) self.instance.numa_topology = None if numa_topology: self.db_numa_topology = { 'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'instance_uuid': self.instance.uuid, 'numa_topology': numa_topology._to_json(), 'pci_requests': (requests or self.empty_requests).to_json() } else: self.db_numa_topology = None if overhead is None: overhead = {'memory_mb': 0} @mock.patch('nova.objects.InstancePCIRequests.' 
'get_by_instance_uuid_and_newness', return_value=requests or self.empty_requests) @mock.patch('nova.virt.hardware.numa_get_constraints', return_value=numa_topology) @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value=self.db_numa_topology) def get_claim(mock_extra_get, mock_numa_get, mock_pci_get): return claims.MoveClaim(self.context, self.instance, instance_type, {}, self.tracker, self.resources, overhead=overhead, limits=limits) return get_claim() def test_ext_resources(self): self._claim() self.assertTrue(self.tracker.ext_resources_handler.test_called) self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype) def test_abort(self): claim = self._abort() self.assertTrue(claim.tracker.rcalled) def test_create_migration_context(self): numa_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2]), memory=512)]) claim = self._claim(numa_topology=numa_topology) migration = objects.Migration(context=self.context, id=42) claim.migration = migration fake_mig_context = mock.Mock(spec=objects.MigrationContext) @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value=None) @mock.patch('nova.objects.MigrationContext', return_value=fake_mig_context) def _test(ctxt_mock, mock_get_extra): claim.create_migration_context() ctxt_mock.assert_called_once_with( context=self.context, instance_uuid=self.instance.uuid, migration_id=42, old_numa_topology=None, new_numa_topology=mock.ANY) self.assertIsInstance(ctxt_mock.call_args[1]['new_numa_topology'], objects.InstanceNUMATopology) self.assertEqual(migration, claim.migration) _test() nova-13.0.0/nova/tests/unit/compute/test_compute_utils.py0000664000567000056710000007642712701410011024747 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests For miscellaneous util methods used with compute.""" import copy import string import uuid import mock from oslo_serialization import jsonutils from oslo_utils import importutils import six from nova.compute import flavors from nova.compute import power_state from nova.compute import task_states from nova.compute import utils as compute_utils import nova.conf from nova import context from nova import exception from nova.image import glance from nova.network import api as network_api from nova.network import model from nova import objects from nova.objects import block_device as block_device_obj from nova import rpc from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import fake_network from nova.tests.unit import fake_notifier from nova.tests.unit import fake_server_actions import nova.tests.unit.image.fake from nova.tests.unit.objects import test_flavor from nova.tests.unit.objects import test_migration from nova.tests import uuidsentinel as uuids CONF = nova.conf.CONF CONF.import_opt('compute_manager', 'nova.service') def create_instance(context, user_id='fake', project_id='fake', params=None): """Create a test instance.""" flavor = flavors.get_flavor_by_name('m1.tiny') net_info = model.NetworkInfo([]) info_cache = objects.InstanceInfoCache(network_info=net_info) inst = objects.Instance(context=context, image_ref=1, reservation_id='r-fakeres', user_id=user_id, project_id=project_id, instance_type_id=flavor.id, flavor=flavor, old_flavor=None, new_flavor=None, system_metadata={}, 
ami_launch_index=0, root_gb=0, ephemeral_gb=0, info_cache=info_cache) if params: inst.update(params) inst.create() return inst class ComputeValidateDeviceTestCase(test.NoDBTestCase): def setUp(self): super(ComputeValidateDeviceTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') # check if test name includes "xen" if 'xen' in self.id(): self.flags(compute_driver='xenapi.XenAPIDriver') self.instance = objects.Instance(uuid=uuid.uuid4().hex, root_device_name=None, default_ephemeral_device=None) else: self.instance = objects.Instance(uuid=uuid.uuid4().hex, root_device_name='/dev/vda', default_ephemeral_device='/dev/vdb') flavor = objects.Flavor(**test_flavor.fake_flavor) self.instance.system_metadata = {} self.instance.flavor = flavor self.instance.default_swap_device = None self.data = [] self.stub_out('nova.db.block_device_mapping_get_all_by_instance', lambda context, instance: self.data) def _validate_device(self, device=None): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, self.instance['uuid']) return compute_utils.get_device_name_for_instance( self.instance, bdms, device) @staticmethod def _fake_bdm(device): return fake_block_device.FakeDbBlockDeviceDict({ 'source_type': 'volume', 'destination_type': 'volume', 'device_name': device, 'no_device': None, 'volume_id': 'fake', 'snapshot_id': None, 'guest_format': None }) def test_wrap(self): self.data = [] for letter in string.ascii_lowercase[2:]: self.data.append(self._fake_bdm('/dev/vd' + letter)) device = self._validate_device() self.assertEqual(device, '/dev/vdaa') def test_wrap_plus_one(self): self.data = [] for letter in string.ascii_lowercase[2:]: self.data.append(self._fake_bdm('/dev/vd' + letter)) self.data.append(self._fake_bdm('/dev/vdaa')) device = self._validate_device() self.assertEqual(device, '/dev/vdab') def test_later(self): self.data = [ self._fake_bdm('/dev/vdc'), self._fake_bdm('/dev/vdd'), self._fake_bdm('/dev/vde'), ] device = 
self._validate_device() self.assertEqual(device, '/dev/vdf') def test_gap(self): self.data = [ self._fake_bdm('/dev/vdc'), self._fake_bdm('/dev/vde'), ] device = self._validate_device() self.assertEqual(device, '/dev/vdd') def test_no_bdms(self): self.data = [] device = self._validate_device() self.assertEqual(device, '/dev/vdc') def test_lxc_names_work(self): self.instance['root_device_name'] = '/dev/a' self.instance['ephemeral_device_name'] = '/dev/b' self.data = [] device = self._validate_device() self.assertEqual(device, '/dev/c') def test_name_conversion(self): self.data = [] device = self._validate_device('/dev/c') self.assertEqual(device, '/dev/vdc') device = self._validate_device('/dev/sdc') self.assertEqual(device, '/dev/vdc') device = self._validate_device('/dev/xvdc') self.assertEqual(device, '/dev/vdc') def test_invalid_device_prefix(self): self.assertRaises(exception.InvalidDevicePath, self._validate_device, '/baddata/vdc') def test_device_in_use(self): exc = self.assertRaises(exception.DevicePathInUse, self._validate_device, '/dev/vda') self.assertIn('/dev/vda', six.text_type(exc)) def test_swap(self): self.instance['default_swap_device'] = "/dev/vdc" device = self._validate_device() self.assertEqual(device, '/dev/vdd') def test_swap_no_ephemeral(self): self.instance.default_ephemeral_device = None self.instance.default_swap_device = "/dev/vdb" device = self._validate_device() self.assertEqual(device, '/dev/vdc') def test_ephemeral_xenapi(self): self.instance.flavor.ephemeral_gb = 10 self.instance.flavor.swap = 0 device = self._validate_device() self.assertEqual(device, '/dev/xvdc') def test_swap_xenapi(self): self.instance.flavor.ephemeral_gb = 0 self.instance.flavor.swap = 10 device = self._validate_device() self.assertEqual(device, '/dev/xvdb') def test_swap_and_ephemeral_xenapi(self): self.instance.flavor.ephemeral_gb = 10 self.instance.flavor.swap = 10 device = self._validate_device() self.assertEqual(device, '/dev/xvdd') def 
test_swap_and_one_attachment_xenapi(self): self.instance.flavor.ephemeral_gb = 0 self.instance.flavor.swap = 10 device = self._validate_device() self.assertEqual(device, '/dev/xvdb') self.data.append(self._fake_bdm(device)) device = self._validate_device() self.assertEqual(device, '/dev/xvdd') def test_no_dev_root_device_name_get_next_name(self): self.instance['root_device_name'] = 'vda' device = self._validate_device() self.assertEqual('/dev/vdc', device) class DefaultDeviceNamesForInstanceTestCase(test.NoDBTestCase): def setUp(self): super(DefaultDeviceNamesForInstanceTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.ephemerals = block_device_obj.block_device_make_list( self.context, [fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vdb', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': None, 'boot_index': -1})]) self.swap = block_device_obj.block_device_make_list( self.context, [fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vdc', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': 'swap', 'boot_index': -1})]) self.block_device_mapping = block_device_obj.block_device_make_list( self.context, [fake_block_device.FakeDbBlockDeviceDict( {'id': 3, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vda', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'boot_index': 0}), fake_block_device.FakeDbBlockDeviceDict( {'id': 4, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vdd', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': 'fake-snapshot-id-1', 'boot_index': -1}), fake_block_device.FakeDbBlockDeviceDict( {'id': 5, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vde', 'source_type': 
'blank', 'destination_type': 'volume', 'boot_index': -1})]) self.instance = {'uuid': uuids.instance, 'ephemeral_gb': 2} self.is_libvirt = False self.root_device_name = '/dev/vda' self.update_called = False self.patchers = [] self.patchers.append( mock.patch.object(objects.BlockDeviceMapping, 'save')) for patcher in self.patchers: patcher.start() def tearDown(self): super(DefaultDeviceNamesForInstanceTestCase, self).tearDown() for patcher in self.patchers: patcher.stop() def _test_default_device_names(self, *block_device_lists): compute_utils.default_device_names_for_instance(self.instance, self.root_device_name, *block_device_lists) def test_only_block_device_mapping(self): # Test no-op original_bdm = copy.deepcopy(self.block_device_mapping) self._test_default_device_names([], [], self.block_device_mapping) for original, new in zip(original_bdm, self.block_device_mapping): self.assertEqual(original.device_name, new.device_name) # Assert it defaults the missing one as expected self.block_device_mapping[1]['device_name'] = None self.block_device_mapping[2]['device_name'] = None self._test_default_device_names([], [], self.block_device_mapping) self.assertEqual('/dev/vdb', self.block_device_mapping[1]['device_name']) self.assertEqual('/dev/vdc', self.block_device_mapping[2]['device_name']) def test_with_ephemerals(self): # Test ephemeral gets assigned self.ephemerals[0]['device_name'] = None self._test_default_device_names(self.ephemerals, [], self.block_device_mapping) self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb') self.block_device_mapping[1]['device_name'] = None self.block_device_mapping[2]['device_name'] = None self._test_default_device_names(self.ephemerals, [], self.block_device_mapping) self.assertEqual('/dev/vdc', self.block_device_mapping[1]['device_name']) self.assertEqual('/dev/vdd', self.block_device_mapping[2]['device_name']) def test_with_swap(self): # Test swap only self.swap[0]['device_name'] = None self._test_default_device_names([], 
self.swap, []) self.assertEqual(self.swap[0]['device_name'], '/dev/vdb') # Test swap and block_device_mapping self.swap[0]['device_name'] = None self.block_device_mapping[1]['device_name'] = None self.block_device_mapping[2]['device_name'] = None self._test_default_device_names([], self.swap, self.block_device_mapping) self.assertEqual(self.swap[0]['device_name'], '/dev/vdb') self.assertEqual('/dev/vdc', self.block_device_mapping[1]['device_name']) self.assertEqual('/dev/vdd', self.block_device_mapping[2]['device_name']) def test_all_together(self): # Test swap missing self.swap[0]['device_name'] = None self._test_default_device_names(self.ephemerals, self.swap, self.block_device_mapping) self.assertEqual(self.swap[0]['device_name'], '/dev/vdc') # Test swap and eph missing self.swap[0]['device_name'] = None self.ephemerals[0]['device_name'] = None self._test_default_device_names(self.ephemerals, self.swap, self.block_device_mapping) self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb') self.assertEqual(self.swap[0]['device_name'], '/dev/vdc') # Test all missing self.swap[0]['device_name'] = None self.ephemerals[0]['device_name'] = None self.block_device_mapping[1]['device_name'] = None self.block_device_mapping[2]['device_name'] = None self._test_default_device_names(self.ephemerals, self.swap, self.block_device_mapping) self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb') self.assertEqual(self.swap[0]['device_name'], '/dev/vdc') self.assertEqual('/dev/vdd', self.block_device_mapping[1]['device_name']) self.assertEqual('/dev/vde', self.block_device_mapping[2]['device_name']) class UsageInfoTestCase(test.TestCase): def setUp(self): def fake_get_nw_info(cls, ctxt, instance): self.assertTrue(ctxt.is_admin) return fake_network.fake_get_instance_nw_info(self, 1, 1) super(UsageInfoTestCase, self).setUp() self.stubs.Set(network_api.API, 'get_instance_nw_info', fake_get_nw_info) fake_notifier.stub_notifier(self.stubs) 
self.addCleanup(fake_notifier.reset) self.flags(use_local=True, group='conductor') self.flags(compute_driver='nova.virt.fake.FakeDriver', network_manager='nova.network.manager.FlatManager') self.compute = importutils.import_object(CONF.compute_manager) self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) def fake_show(meh, context, id, **kwargs): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} self.stubs.Set(nova.tests.unit.image.fake._FakeImageService, 'show', fake_show) fake_network.set_stub_network_methods(self) fake_server_actions.stub_out_action_events(self.stubs) def test_notify_usage_exists(self): # Ensure 'exists' notification generates appropriate usage data. instance = create_instance(self.context) # Set some system metadata sys_metadata = {'image_md_key1': 'val1', 'image_md_key2': 'val2', 'other_data': 'meow'} instance.system_metadata.update(sys_metadata) instance.save() compute_utils.notify_usage_exists( rpc.get_notifier('compute'), self.context, instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.exists') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) for attr in ('display_name', 'created_at', 'launched_at', 'state', 'state_description', 'bandwidth', 'audit_period_beginning', 'audit_period_ending', 'image_meta'): self.assertIn(attr, payload, "Key %s not in payload" % attr) 
self.assertEqual(payload['image_meta'], {'md_key1': 'val1', 'md_key2': 'val2'}) image_ref_url = "%s/images/1" % glance.generate_glance_url() self.assertEqual(payload['image_ref_url'], image_ref_url) self.compute.terminate_instance(self.context, instance, [], []) def test_notify_usage_exists_deleted_instance(self): # Ensure 'exists' notification generates appropriate usage data. instance = create_instance(self.context) # Set some system metadata sys_metadata = {'image_md_key1': 'val1', 'image_md_key2': 'val2', 'other_data': 'meow'} instance.system_metadata.update(sys_metadata) instance.save() self.compute.terminate_instance(self.context, instance, [], []) compute_utils.notify_usage_exists( rpc.get_notifier('compute'), self.context, instance) msg = fake_notifier.NOTIFICATIONS[-1] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.exists') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) for attr in ('display_name', 'created_at', 'launched_at', 'state', 'state_description', 'bandwidth', 'audit_period_beginning', 'audit_period_ending', 'image_meta'): self.assertIn(attr, payload, "Key %s not in payload" % attr) self.assertEqual(payload['image_meta'], {'md_key1': 'val1', 'md_key2': 'val2'}) image_ref_url = "%s/images/1" % glance.generate_glance_url() self.assertEqual(payload['image_ref_url'], image_ref_url) def test_notify_usage_exists_instance_not_found(self): # Ensure 'exists' notification generates appropriate usage data. 
instance = create_instance(self.context) self.compute.terminate_instance(self.context, instance, [], []) compute_utils.notify_usage_exists( rpc.get_notifier('compute'), self.context, instance) msg = fake_notifier.NOTIFICATIONS[-1] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.exists') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) for attr in ('display_name', 'created_at', 'launched_at', 'state', 'state_description', 'bandwidth', 'audit_period_beginning', 'audit_period_ending', 'image_meta'): self.assertIn(attr, payload, "Key %s not in payload" % attr) self.assertEqual(payload['image_meta'], {}) image_ref_url = "%s/images/1" % glance.generate_glance_url() self.assertEqual(payload['image_ref_url'], image_ref_url) def test_notify_about_instance_usage(self): instance = create_instance(self.context) # Set some system metadata sys_metadata = {'image_md_key1': 'val1', 'image_md_key2': 'val2', 'other_data': 'meow'} instance.system_metadata.update(sys_metadata) instance.save() extra_usage_info = {'image_name': 'fake_name'} compute_utils.notify_about_instance_usage( rpc.get_notifier('compute'), self.context, instance, 'create.start', extra_usage_info=extra_usage_info) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.create.start') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) 
self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) for attr in ('display_name', 'created_at', 'launched_at', 'state', 'state_description', 'image_meta'): self.assertIn(attr, payload, "Key %s not in payload" % attr) self.assertEqual(payload['image_meta'], {'md_key1': 'val1', 'md_key2': 'val2'}) self.assertEqual(payload['image_name'], 'fake_name') image_ref_url = "%s/images/1" % glance.generate_glance_url() self.assertEqual(payload['image_ref_url'], image_ref_url) self.compute.terminate_instance(self.context, instance, [], []) def test_notify_about_aggregate_update_with_id(self): # Set aggregate payload aggregate_payload = {'aggregate_id': 1} compute_utils.notify_about_aggregate_update(self.context, "create.end", aggregate_payload) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'aggregate.create.end') payload = msg.payload self.assertEqual(payload['aggregate_id'], 1) def test_notify_about_aggregate_update_with_name(self): # Set aggregate payload aggregate_payload = {'name': 'fakegroup'} compute_utils.notify_about_aggregate_update(self.context, "create.start", aggregate_payload) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'aggregate.create.start') payload = msg.payload self.assertEqual(payload['name'], 'fakegroup') def test_notify_about_aggregate_update_without_name_id(self): # Set empty aggregate payload aggregate_payload = {} compute_utils.notify_about_aggregate_update(self.context, "create.start", aggregate_payload) 
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0) class ComputeUtilsGetValFromSysMetadata(test.NoDBTestCase): def test_get_value_from_system_metadata(self): instance = fake_instance.fake_instance_obj('fake-context') system_meta = {'int_val': 1, 'int_string': '2', 'not_int': 'Nope'} instance.system_metadata = system_meta result = compute_utils.get_value_from_system_metadata( instance, 'int_val', int, 0) self.assertEqual(1, result) result = compute_utils.get_value_from_system_metadata( instance, 'int_string', int, 0) self.assertEqual(2, result) result = compute_utils.get_value_from_system_metadata( instance, 'not_int', int, 0) self.assertEqual(0, result) class ComputeUtilsGetNWInfo(test.NoDBTestCase): def test_instance_object_none_info_cache(self): inst = fake_instance.fake_instance_obj('fake-context', expected_attrs=['info_cache']) self.assertIsNone(inst.info_cache) result = compute_utils.get_nw_info_for_instance(inst) self.assertEqual(jsonutils.dumps([]), result.json()) class ComputeUtilsGetRebootTypes(test.NoDBTestCase): def setUp(self): super(ComputeUtilsGetRebootTypes, self).setUp() self.context = context.RequestContext('fake', 'fake') def test_get_reboot_type_started_soft(self): reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_STARTED, power_state.RUNNING) self.assertEqual(reboot_type, 'SOFT') def test_get_reboot_type_pending_soft(self): reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_PENDING, power_state.RUNNING) self.assertEqual(reboot_type, 'SOFT') def test_get_reboot_type_hard(self): reboot_type = compute_utils.get_reboot_type('foo', power_state.RUNNING) self.assertEqual(reboot_type, 'HARD') def test_get_reboot_not_running_hard(self): reboot_type = compute_utils.get_reboot_type('foo', 'bar') self.assertEqual(reboot_type, 'HARD') class ComputeUtilsTestCase(test.NoDBTestCase): @mock.patch('netifaces.interfaces') def test_get_machine_ips_value_error(self, mock_interfaces): # Tests that the utility method does not explode if 
netifaces raises # a ValueError. iface = mock.sentinel mock_interfaces.return_value = [iface] with mock.patch('netifaces.ifaddresses', side_effect=ValueError) as mock_ifaddresses: addresses = compute_utils.get_machine_ips() self.assertEqual([], addresses) mock_ifaddresses.assert_called_once_with(iface) class ComputeUtilsQuotaDeltaTestCase(test.TestCase): def setUp(self): super(ComputeUtilsQuotaDeltaTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') def test_upsize_quota_delta(self): old_flavor = flavors.get_flavor_by_name('m1.tiny') new_flavor = flavors.get_flavor_by_name('m1.medium') expected_deltas = { 'cores': new_flavor['vcpus'] - old_flavor['vcpus'], 'ram': new_flavor['memory_mb'] - old_flavor['memory_mb'] } deltas = compute_utils.upsize_quota_delta(self.context, new_flavor, old_flavor) self.assertEqual(expected_deltas, deltas) def test_downsize_quota_delta(self): inst = create_instance(self.context, params=None) inst.old_flavor = flavors.get_flavor_by_name('m1.medium') inst.new_flavor = flavors.get_flavor_by_name('m1.tiny') expected_deltas = { 'cores': (inst.new_flavor['vcpus'] - inst.old_flavor['vcpus']), 'ram': (inst.new_flavor['memory_mb'] - inst.old_flavor['memory_mb']) } deltas = compute_utils.downsize_quota_delta(self.context, inst) self.assertEqual(expected_deltas, deltas) @mock.patch.object(objects.Flavor, 'get_by_id') def test_reverse_quota_delta(self, mock_get_flavor): inst = create_instance(self.context, params=None) inst.old_flavor = flavors.get_flavor_by_name('m1.tiny') inst.new_flavor = flavors.get_flavor_by_name('m1.medium') expected_deltas = { 'cores': -1 * (inst.new_flavor['vcpus'] - inst.old_flavor['vcpus']), 'ram': -1 * (inst.new_flavor['memory_mb'] - inst.old_flavor['memory_mb']) } updates = {'old_instance_type_id': inst.old_flavor['id'], 'new_instance_type_id': inst.new_flavor['id']} fake_migration = test_migration.fake_db_migration(**updates) def _flavor_get_by_id(context, type_id): if type_id == 
updates['old_instance_type_id']: return inst.old_flavor else: return inst.new_flavor mock_get_flavor.side_effect = _flavor_get_by_id deltas = compute_utils.reverse_upsize_quota_delta(self.context, fake_migration) self.assertEqual(expected_deltas, deltas) @mock.patch.object(objects.Quotas, 'reserve') @mock.patch.object(objects.quotas, 'ids_from_instance') def test_reserve_quota_delta(self, mock_ids_from_instance, mock_reserve): quotas = objects.Quotas(context=context) inst = create_instance(self.context, params=None) inst.old_flavor = flavors.get_flavor_by_name('m1.tiny') inst.new_flavor = flavors.get_flavor_by_name('m1.medium') mock_ids_from_instance.return_value = (inst.project_id, inst.user_id) mock_reserve.return_value = quotas deltas = compute_utils.upsize_quota_delta(self.context, inst.new_flavor, inst.old_flavor) compute_utils.reserve_quota_delta(self.context, deltas, inst) mock_reserve.assert_called_once_with(project_id=inst.project_id, user_id=inst.user_id, **deltas) nova-13.0.0/nova/tests/unit/compute/test_shelve.py0000664000567000056710000005352012701410011023326 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from mox3 import mox from oslo_config import cfg from oslo_utils import fixture as utils_fixture from oslo_utils import timeutils from nova.compute import claims from nova.compute import task_states from nova.compute import vm_states from nova import db from nova import objects from nova.tests.unit.compute import test_compute from nova.tests.unit.image import fake as fake_image CONF = cfg.CONF CONF.import_opt('shelved_offload_time', 'nova.compute.manager') def _fake_resources(): resources = { 'memory_mb': 2048, 'memory_mb_used': 0, 'free_ram_mb': 2048, 'local_gb': 20, 'local_gb_used': 0, 'free_disk_gb': 20, 'vcpus': 2, 'vcpus_used': 0 } return resources class ShelveComputeManagerTestCase(test_compute.BaseTestCase): def _shelve_instance(self, shelved_offload_time, clean_shutdown=True): CONF.set_override('shelved_offload_time', shelved_offload_time) host = 'fake-mini' instance = self._create_fake_instance_obj(params={'host': host}) image_id = 'fake_image_id' host = 'fake-mini' self.useFixture(utils_fixture.TimeFixture()) instance.task_state = task_states.SHELVING instance.save() self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'snapshot') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.compute.network_api, 'cleanup_instance_network_on_host') self.compute._notify_about_instance_usage(self.context, instance, 'shelve.start') if clean_shutdown: self.compute.driver.power_off(instance, CONF.shutdown_timeout, self.compute.SHUTDOWN_RETRY_INTERVAL) else: self.compute.driver.power_off(instance, 0, 0) self.compute._get_power_state(self.context, instance).AndReturn(123) if CONF.shelved_offload_time == 0: self.compute.network_api.cleanup_instance_network_on_host( self.context, instance, instance.host) self.compute.driver.snapshot(self.context, instance, 'fake_image_id', mox.IgnoreArg()) tracking = 
{'last_state': instance.vm_state} def check_save(expected_task_state=None): self.assertEqual(123, instance.power_state) if tracking['last_state'] == vm_states.ACTIVE: if CONF.shelved_offload_time == 0: self.assertEqual(task_states.SHELVING_OFFLOADING, instance.task_state) else: self.assertIsNone(instance.task_state) self.assertEqual(vm_states.SHELVED, instance.vm_state) self.assertEqual([task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING], expected_task_state) self.assertIn('shelved_at', instance.system_metadata) self.assertEqual(image_id, instance.system_metadata['shelved_image_id']) self.assertEqual(host, instance.system_metadata['shelved_host']) tracking['last_state'] = instance.vm_state elif (tracking['last_state'] == vm_states.SHELVED and CONF.shelved_offload_time == 0): self.assertIsNone(instance.host) self.assertIsNone(instance.node) self.assertIsNone(instance.task_state) self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state) self.assertEqual([task_states.SHELVING, task_states.SHELVING_OFFLOADING], expected_task_state) tracking['last_state'] = instance.vm_state else: self.fail('Unexpected save!') self.compute._notify_about_instance_usage(self.context, instance, 'shelve.end') if CONF.shelved_offload_time == 0: self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.start') self.compute.driver.power_off(instance, 0, 0) self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.end') self.mox.ReplayAll() with mock.patch.object(instance, 'save') as mock_save: mock_save.side_effect = check_save self.compute.shelve_instance(self.context, instance, image_id=image_id, clean_shutdown=clean_shutdown) def test_shelve(self): self._shelve_instance(-1) def test_shelve_forced_shutdown(self): self._shelve_instance(-1, clean_shutdown=False) def test_shelve_and_offload(self): self._shelve_instance(0) def _shelve_offload(self, 
clean_shutdown=True): host = 'fake-mini' instance = self._create_fake_instance_obj(params={'host': host}) instance.task_state = task_states.SHELVING instance.save() self.useFixture(utils_fixture.TimeFixture()) self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.compute.network_api, 'cleanup_instance_network_on_host') self.mox.StubOutWithMock(self.compute, '_update_resource_tracker') self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.start') if clean_shutdown: self.compute.driver.power_off(instance, CONF.shutdown_timeout, self.compute.SHUTDOWN_RETRY_INTERVAL) else: self.compute.driver.power_off(instance, 0, 0) self.compute.network_api.cleanup_instance_network_on_host( self.context, instance, instance.host) self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute._update_resource_tracker(self.context, instance) self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.end') self.mox.ReplayAll() with mock.patch.object(instance, 'save'): self.compute.shelve_offload_instance(self.context, instance, clean_shutdown=clean_shutdown) self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state) self.assertIsNone(instance.task_state) def test_shelve_offload(self): self._shelve_offload() def test_shelve_offload_forced_shutdown(self): self._shelve_offload(clean_shutdown=False) def test_unshelve(self): instance = self._create_fake_instance_obj() instance.task_state = task_states.UNSHELVING instance.save() image = {'id': 'fake_id'} node = test_compute.NODENAME limits = {} filter_properties = {'limits': limits} host = 'fake-mini' cur_time = timeutils.utcnow() # Adding shelved_* keys in system metadata to verify # whether those are deleted after unshelve call. 
sys_meta = dict(instance.system_metadata) sys_meta['shelved_at'] = cur_time.isoformat() sys_meta['shelved_image_id'] = image['id'] sys_meta['shelved_host'] = host instance.system_metadata = sys_meta self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute, '_prep_block_device') self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.rt, 'instance_claim') self.mox.StubOutWithMock(self.compute.network_api, 'setup_instance_network_on_host') self.deleted_image_id = None def fake_delete(self2, ctxt, image_id): self.deleted_image_id = image_id def fake_claim(context, instance, limits): instance.host = self.compute.host return claims.Claim(context, instance, self.rt, _fake_resources()) tracking = { 'last_state': instance.task_state, 'spawned': False, } def check_save(expected_task_state=None): if tracking['last_state'] == task_states.UNSHELVING: if tracking['spawned']: self.assertIsNone(instance.task_state) else: self.assertEqual(task_states.SPAWNING, instance.task_state) tracking['spawned'] = True tracking['last_state'] == instance.task_state elif tracking['last_state'] == task_states.SPAWNING: self.assertEqual(vm_states.ACTIVE, instance.vm_state) tracking['last_state'] == instance.task_state else: self.fail('Unexpected save!') fake_image.stub_out_image_service(self) self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete) self.compute._notify_about_instance_usage(self.context, instance, 'unshelve.start') self.compute._prep_block_device(self.context, instance, mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm') self.compute.network_api.setup_instance_network_on_host( self.context, instance, self.compute.host) self.compute.driver.spawn(self.context, instance, mox.IsA(objects.ImageMeta), injected_files=[], admin_password=None, network_info=[], block_device_info='fake_bdm') 
self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute._notify_about_instance_usage(self.context, instance, 'unshelve.end') self.mox.ReplayAll() with mock.patch.object(self.rt, 'instance_claim', side_effect=fake_claim), \ mock.patch.object(instance, 'save') as mock_save: mock_save.side_effect = check_save self.compute.unshelve_instance( self.context, instance, image=image, filter_properties=filter_properties, node=node) self.assertNotIn('shelved_at', instance.system_metadata) self.assertNotIn('shelved_image_id', instance.system_metadata) self.assertNotIn('shelved_host', instance.system_metadata) self.assertEqual(image['id'], self.deleted_image_id) self.assertEqual(instance.host, self.compute.host) self.assertEqual(123, instance.power_state) self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) self.assertIsNone(instance.key_data) self.assertEqual(self.compute.host, instance.host) self.assertFalse(instance.auto_disk_config) @mock.patch('nova.utils.get_image_from_system_metadata') def test_unshelve_volume_backed(self, mock_image_meta): instance = self._create_fake_instance_obj() node = test_compute.NODENAME limits = {} filter_properties = {'limits': limits} instance.task_state = task_states.UNSHELVING instance.save() image_meta = {'properties': {'base_image_ref': 'fake_id'}} mock_image_meta.return_value = image_meta self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute, '_prep_block_device') self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.rt, 'instance_claim') self.mox.StubOutWithMock(self.compute.network_api, 'setup_instance_network_on_host') tracking = {'last_state': instance.task_state} def check_save(expected_task_state=None): if tracking['last_state'] == task_states.UNSHELVING: self.assertEqual(task_states.SPAWNING, instance.task_state) 
tracking['last_state'] = instance.task_state elif tracking['last_state'] == task_states.SPAWNING: self.assertEqual(123, instance.power_state) self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) self.assertIsNone(instance.key_data) self.assertFalse(instance.auto_disk_config) self.assertIsNone(instance.task_state) tracking['last_state'] = instance.task_state else: self.fail('Unexpected save!') self.compute._notify_about_instance_usage(self.context, instance, 'unshelve.start') self.compute._prep_block_device(self.context, instance, mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm') self.compute.network_api.setup_instance_network_on_host( self.context, instance, self.compute.host) self.rt.instance_claim(self.context, instance, limits).AndReturn( claims.Claim(self.context, instance, self.rt, _fake_resources())) self.compute.driver.spawn(self.context, instance, mox.IsA(objects.ImageMeta), injected_files=[], admin_password=None, network_info=[], block_device_info='fake_bdm') self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute._notify_about_instance_usage(self.context, instance, 'unshelve.end') self.mox.ReplayAll() with mock.patch.object(instance, 'save') as mock_save: mock_save.side_effect = check_save self.compute.unshelve_instance(self.context, instance, image=None, filter_properties=filter_properties, node=node) @mock.patch.object(objects.InstanceList, 'get_by_filters') def test_shelved_poll_none_offloaded(self, mock_get_by_filters): # Test instances are not offloaded when shelved_offload_time is -1 self.flags(shelved_offload_time=-1) self.compute._poll_shelved_instances(self.context) self.assertEqual(0, mock_get_by_filters.call_count) @mock.patch('oslo_utils.timeutils.is_older_than') def test_shelved_poll_none_exist(self, mock_older): self.flags(shelved_offload_time=1) mock_older.return_value = False with mock.patch.object(self.compute, 'shelve_offload_instance') as soi: 
self.compute._poll_shelved_instances(self.context) self.assertFalse(soi.called) @mock.patch('oslo_utils.timeutils.is_older_than') def test_shelved_poll_not_timedout(self, mock_older): mock_older.return_value = False self.flags(shelved_offload_time=1) shelved_time = timeutils.utcnow() time_fixture = self.useFixture(utils_fixture.TimeFixture(shelved_time)) time_fixture.advance_time_seconds(CONF.shelved_offload_time - 1) instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED instance.task_state = None instance.host = self.compute.host sys_meta = instance.system_metadata sys_meta['shelved_at'] = shelved_time.isoformat() instance.save() with mock.patch.object(self.compute, 'shelve_offload_instance') as soi: self.compute._poll_shelved_instances(self.context) self.assertFalse(soi.called) self.assertTrue(mock_older.called) def test_shelved_poll_timedout(self): self.flags(shelved_offload_time=1) shelved_time = timeutils.utcnow() time_fixture = self.useFixture(utils_fixture.TimeFixture(shelved_time)) time_fixture.advance_time_seconds(CONF.shelved_offload_time + 1) instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED instance.task_state = None instance.host = self.compute.host sys_meta = instance.system_metadata sys_meta['shelved_at'] = shelved_time.isoformat() instance.save() data = [] def fake_soi(context, instance, **kwargs): data.append(instance.uuid) with mock.patch.object(self.compute, 'shelve_offload_instance') as soi: soi.side_effect = fake_soi self.compute._poll_shelved_instances(self.context) self.assertTrue(soi.called) self.assertEqual(instance.uuid, data[0]) @mock.patch('oslo_utils.timeutils.is_older_than') @mock.patch('oslo_utils.timeutils.parse_strtime') def test_shelved_poll_filters_task_state(self, mock_parse, mock_older): self.flags(shelved_offload_time=1) mock_older.return_value = True instance1 = self._create_fake_instance_obj() instance1.task_state = task_states.SPAWNING instance1.vm_state = 
vm_states.SHELVED instance1.host = self.compute.host instance1.system_metadata = {'shelved_at': ''} instance1.save() instance2 = self._create_fake_instance_obj() instance2.task_state = None instance2.vm_state = vm_states.SHELVED instance2.host = self.compute.host instance2.system_metadata = {'shelved_at': ''} instance2.save() data = [] def fake_soi(context, instance, **kwargs): data.append(instance.uuid) with mock.patch.object(self.compute, 'shelve_offload_instance') as soi: soi.side_effect = fake_soi self.compute._poll_shelved_instances(self.context) self.assertTrue(soi.called) self.assertEqual([instance2.uuid], data) @mock.patch('oslo_utils.timeutils.is_older_than') @mock.patch('oslo_utils.timeutils.parse_strtime') def test_shelved_poll_checks_task_state_on_save(self, mock_parse, mock_older): self.flags(shelved_offload_time=1) mock_older.return_value = True instance = self._create_fake_instance_obj() instance.task_state = None instance.vm_state = vm_states.SHELVED instance.host = self.compute.host instance.system_metadata = {'shelved_at': ''} instance.save() def fake_parse_hook(timestring): instance.task_state = task_states.SPAWNING instance.save() mock_parse.side_effect = fake_parse_hook with mock.patch.object(self.compute, 'shelve_offload_instance') as soi: self.compute._poll_shelved_instances(self.context) self.assertFalse(soi.called) class ShelveComputeAPITestCase(test_compute.BaseTestCase): def test_shelve(self): # Ensure instance can be shelved. fake_instance = self._create_fake_instance_obj( {'display_name': 'vm01'}) instance = fake_instance self.assertIsNone(instance['task_state']) def fake_init(self2): # In original _FakeImageService.__init__(), some fake images are # created. To verify the snapshot name of this test only, here # sets a fake method. 
self2.images = {} def fake_create(self2, ctxt, metadata, data=None): self.assertEqual(metadata['name'], 'vm01-shelved') metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42' return metadata fake_image.stub_out_image_service(self) self.stubs.Set(fake_image._FakeImageService, '__init__', fake_init) self.stubs.Set(fake_image._FakeImageService, 'create', fake_create) self.compute_api.shelve(self.context, instance) self.assertEqual(instance.task_state, task_states.SHELVING) db.instance_destroy(self.context, instance['uuid']) @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid') def test_unshelve(self, get_by_instance_uuid): # Ensure instance can be unshelved. instance = self._create_fake_instance_obj() self.assertIsNone(instance['task_state']) self.compute_api.shelve(self.context, instance) instance.task_state = None instance.vm_state = vm_states.SHELVED instance.save() fake_spec = objects.RequestSpec() get_by_instance_uuid.return_value = fake_spec with mock.patch.object(self.compute_api.compute_task_api, 'unshelve_instance') as unshelve: self.compute_api.unshelve(self.context, instance) get_by_instance_uuid.assert_called_once_with(self.context, instance.uuid) unshelve.assert_called_once_with(self.context, instance, fake_spec) self.assertEqual(instance.task_state, task_states.UNSHELVING) db.instance_destroy(self.context, instance['uuid']) nova-13.0.0/nova/tests/unit/compute/test_hvtype.py0000664000567000056710000000334012701407773023377 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from nova.compute import hv_type from nova import exception from nova import test class HvTypeTest(test.NoDBTestCase): def test_valid_string(self): self.assertTrue(hv_type.is_valid("vmware")) def test_valid_constant(self): self.assertTrue(hv_type.is_valid(hv_type.QEMU)) def test_valid_docker(self): self.assertTrue(hv_type.is_valid("docker")) def test_valid_lxd(self): self.assertTrue(hv_type.is_valid("lxd")) def test_valid_vz(self): self.assertTrue(hv_type.is_valid(hv_type.VIRTUOZZO)) def test_valid_bogus(self): self.assertFalse(hv_type.is_valid("acmehypervisor")) def test_canonicalize_none(self): self.assertIsNone(hv_type.canonicalize(None)) def test_canonicalize_case(self): self.assertEqual(hv_type.QEMU, hv_type.canonicalize("QeMu")) def test_canonicalize_xapi(self): self.assertEqual(hv_type.XEN, hv_type.canonicalize("xapi")) def test_canonicalize_invalid(self): self.assertRaises(exception.InvalidHypervisorVirtType, hv_type.canonicalize, "wibble") nova-13.0.0/nova/tests/unit/compute/test_vmmode.py0000664000567000056710000000473512701407773023360 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.compute import vm_mode from nova import exception from nova import test from nova.tests.unit import fake_instance class ComputeVMModeTest(test.NoDBTestCase): def _fake_object(self, updates): return fake_instance.fake_instance_obj(None, **updates) def test_case(self): inst = self._fake_object(dict(vm_mode="HVM")) mode = vm_mode.get_from_instance(inst) self.assertEqual(mode, "hvm") def test_legacy_pv(self): inst = self._fake_object(dict(vm_mode="pv")) mode = vm_mode.get_from_instance(inst) self.assertEqual(mode, "xen") def test_legacy_hv(self): inst = self._fake_object(dict(vm_mode="hv")) mode = vm_mode.get_from_instance(inst) self.assertEqual(mode, "hvm") def test_bogus(self): inst = self._fake_object(dict(vm_mode="wibble")) self.assertRaises(exception.Invalid, vm_mode.get_from_instance, inst) def test_good(self): inst = self._fake_object(dict(vm_mode="hvm")) mode = vm_mode.get_from_instance(inst) self.assertEqual(mode, "hvm") def test_name_pv_compat(self): mode = vm_mode.canonicalize('pv') self.assertEqual(vm_mode.XEN, mode) def test_name_hv_compat(self): mode = vm_mode.canonicalize('hv') self.assertEqual(vm_mode.HVM, mode) def test_name_baremetal_compat(self): mode = vm_mode.canonicalize('baremetal') self.assertEqual(vm_mode.HVM, mode) def test_name_hvm(self): mode = vm_mode.canonicalize('hvm') self.assertEqual(vm_mode.HVM, mode) def test_name_none(self): mode = vm_mode.canonicalize(None) self.assertIsNone(mode) def test_name_invalid(self): self.assertRaises(exception.InvalidVirtualMachineMode, vm_mode.canonicalize, 'invalid') nova-13.0.0/nova/tests/unit/compute/test_rpcapi.py0000664000567000056710000007547212701407773023355 0ustar jenkinsjenkins00000000000000# Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for nova.compute.rpcapi """ import mock from oslo_config import cfg from oslo_serialization import jsonutils from nova.compute import rpcapi as compute_rpcapi from nova import context from nova import exception from nova.objects import block_device as objects_block_dev from nova.objects import migrate_data as migrate_data_obj from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_flavor from nova.tests.unit import fake_instance CONF = cfg.CONF class ComputeRpcAPITestCase(test.NoDBTestCase): def setUp(self): super(ComputeRpcAPITestCase, self).setUp() self.context = context.get_admin_context() self.fake_flavor_obj = fake_flavor.fake_flavor_obj(self.context) self.fake_flavor = jsonutils.to_primitive(self.fake_flavor_obj) instance_attr = {'host': 'fake_host', 'instance_type_id': self.fake_flavor_obj['id'], 'instance_type': self.fake_flavor_obj} self.fake_instance_obj = fake_instance.fake_instance_obj(self.context, **instance_attr) self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj) self.fake_volume_bdm = objects_block_dev.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'instance_uuid': self.fake_instance_obj.uuid, 'volume_id': 'fake-volume-id'})) @mock.patch('nova.objects.Service.get_minimum_version') def test_auto_pin(self, mock_get_min): mock_get_min.return_value = 1 self.flags(compute='auto', group='upgrade_levels') compute_rpcapi.LAST_VERSION = None rpcapi = compute_rpcapi.ComputeAPI() self.assertEqual('4.4', 
rpcapi.client.version_cap) mock_get_min.assert_called_once_with(mock.ANY, 'nova-compute') @mock.patch('nova.objects.Service.get_minimum_version') def test_auto_pin_fails_if_too_old(self, mock_get_min): mock_get_min.return_value = 1955 self.flags(compute='auto', group='upgrade_levels') compute_rpcapi.LAST_VERSION = None self.assertRaises(exception.ServiceTooOld, compute_rpcapi.ComputeAPI) @mock.patch('nova.objects.Service.get_minimum_version') def test_auto_pin_kilo(self, mock_get_min): mock_get_min.return_value = 0 self.flags(compute='auto', group='upgrade_levels') compute_rpcapi.LAST_VERSION = None rpcapi = compute_rpcapi.ComputeAPI() self.assertEqual('4.0', rpcapi.client.version_cap) mock_get_min.assert_called_once_with(mock.ANY, 'nova-compute') @mock.patch('nova.objects.Service.get_minimum_version') def test_auto_pin_caches(self, mock_get_min): mock_get_min.return_value = 1 self.flags(compute='auto', group='upgrade_levels') compute_rpcapi.LAST_VERSION = None compute_rpcapi.ComputeAPI() compute_rpcapi.ComputeAPI() mock_get_min.assert_called_once_with(mock.ANY, 'nova-compute') self.assertEqual('4.4', compute_rpcapi.LAST_VERSION) def _test_compute_api(self, method, rpc_method, expected_args=None, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = kwargs.pop('rpcapi_class', compute_rpcapi.ComputeAPI)() self.assertIsNotNone(rpcapi.client) self.assertEqual(rpcapi.client.target.topic, CONF.compute_topic) orig_prepare = rpcapi.client.prepare base_version = rpcapi.client.target.version expected_version = kwargs.pop('version', base_version) expected_kwargs = kwargs.copy() if expected_args: expected_kwargs.update(expected_args) if 'host_param' in expected_kwargs: expected_kwargs['host'] = expected_kwargs.pop('host_param') else: expected_kwargs.pop('host', None) cast_and_call = ['confirm_resize', 'stop_instance'] if rpc_method == 'call' and method in cast_and_call: if method == 'confirm_resize': kwargs['cast'] = False else: kwargs['do_cast'] = 
False if 'host' in kwargs: host = kwargs['host'] elif 'instances' in kwargs: host = kwargs['instances'][0]['host'] else: host = kwargs['instance']['host'] if method == 'rebuild_instance' and 'node' in expected_kwargs: expected_kwargs['scheduled_node'] = expected_kwargs.pop('node') with test.nested( mock.patch.object(rpcapi.client, rpc_method), mock.patch.object(rpcapi.client, 'prepare'), mock.patch.object(rpcapi.client, 'can_send_version'), ) as ( rpc_mock, prepare_mock, csv_mock ): prepare_mock.return_value = rpcapi.client if '_return_value' in kwargs: rpc_mock.return_value = kwargs.pop('_return_value') del expected_kwargs['_return_value'] elif rpc_method == 'call': rpc_mock.return_value = 'foo' else: rpc_mock.return_value = None csv_mock.side_effect = ( lambda v: orig_prepare(version=v).can_send_version()) retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(retval, rpc_mock.return_value) prepare_mock.assert_called_once_with(version=expected_version, server=host) rpc_mock.assert_called_once_with(ctxt, method, **expected_kwargs) def test_add_aggregate_host(self): self._test_compute_api('add_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', slave_info={}) def test_add_fixed_ip_to_instance(self): self._test_compute_api('add_fixed_ip_to_instance', 'cast', instance=self.fake_instance_obj, network_id='id', version='4.0') def test_attach_interface(self): self._test_compute_api('attach_interface', 'call', instance=self.fake_instance_obj, network_id='id', port_id='id2', version='4.0', requested_ip='192.168.1.50') def test_attach_volume(self): self._test_compute_api('attach_volume', 'cast', instance=self.fake_instance_obj, bdm=self.fake_volume_bdm, version='4.0') def test_change_instance_metadata(self): self._test_compute_api('change_instance_metadata', 'cast', instance=self.fake_instance_obj, diff={}, version='4.0') def test_check_instance_shared_storage(self): self._test_compute_api('check_instance_shared_storage', 'call', 
instance=self.fake_instance_obj, data='foo', version='4.0') def test_confirm_resize_cast(self): self._test_compute_api('confirm_resize', 'cast', instance=self.fake_instance_obj, migration={'id': 'foo'}, host='host', reservations=list('fake_res')) def test_confirm_resize_call(self): self._test_compute_api('confirm_resize', 'call', instance=self.fake_instance_obj, migration={'id': 'foo'}, host='host', reservations=list('fake_res')) def test_detach_interface(self): self._test_compute_api('detach_interface', 'cast', version='4.0', instance=self.fake_instance_obj, port_id='fake_id') def test_detach_volume(self): self._test_compute_api('detach_volume', 'cast', instance=self.fake_instance_obj, volume_id='id', attachment_id='fake_id', version='4.7') def test_detach_volume_no_attachment_id(self): ctxt = context.RequestContext('fake_user', 'fake_project') instance = self.fake_instance_obj rpcapi = compute_rpcapi.ComputeAPI() cast_mock = mock.Mock() cctxt_mock = mock.Mock(cast=cast_mock) with test.nested( mock.patch.object(rpcapi.client, 'can_send_version', return_value=False), mock.patch.object(rpcapi.client, 'prepare', return_value=cctxt_mock) ) as ( can_send_mock, prepare_mock ): rpcapi.detach_volume(ctxt, instance=instance, volume_id='id', attachment_id='fake_id') # assert our mocks were called as expected can_send_mock.assert_called_once_with('4.7') prepare_mock.assert_called_once_with(server=instance['host'], version='4.0') cast_mock.assert_called_once_with(ctxt, 'detach_volume', instance=instance, volume_id='id') def test_finish_resize(self): self._test_compute_api('finish_resize', 'cast', instance=self.fake_instance_obj, migration={'id': 'foo'}, image='image', disk_info='disk_info', host='host', reservations=list('fake_res')) def test_finish_revert_resize(self): self._test_compute_api('finish_revert_resize', 'cast', instance=self.fake_instance_obj, migration={'id': 'fake_id'}, host='host', reservations=list('fake_res')) def test_get_console_output(self): 
self._test_compute_api('get_console_output', 'call', instance=self.fake_instance_obj, tail_length='tl', version='4.0') def test_get_console_pool_info(self): self._test_compute_api('get_console_pool_info', 'call', console_type='type', host='host') def test_get_console_topic(self): self._test_compute_api('get_console_topic', 'call', host='host') def test_get_diagnostics(self): self._test_compute_api('get_diagnostics', 'call', instance=self.fake_instance_obj, version='4.0') def test_get_instance_diagnostics(self): expected_args = {'instance': self.fake_instance} self._test_compute_api('get_instance_diagnostics', 'call', expected_args, instance=self.fake_instance_obj, version='4.0') def test_get_vnc_console(self): self._test_compute_api('get_vnc_console', 'call', instance=self.fake_instance_obj, console_type='type', version='4.0') def test_get_spice_console(self): self._test_compute_api('get_spice_console', 'call', instance=self.fake_instance_obj, console_type='type', version='4.0') def test_get_rdp_console(self): self._test_compute_api('get_rdp_console', 'call', instance=self.fake_instance_obj, console_type='type', version='4.0') def test_get_serial_console(self): self._test_compute_api('get_serial_console', 'call', instance=self.fake_instance_obj, console_type='serial', version='4.0') def test_get_mks_console(self): self._test_compute_api('get_mks_console', 'call', instance=self.fake_instance_obj, console_type='webmks', version='4.3') def test_validate_console_port(self): self._test_compute_api('validate_console_port', 'call', instance=self.fake_instance_obj, port="5900", console_type="novnc", version='4.0') def test_host_maintenance_mode(self): self._test_compute_api('host_maintenance_mode', 'call', host_param='param', mode='mode', host='host') def test_host_power_action(self): self._test_compute_api('host_power_action', 'call', action='action', host='host') def test_inject_network_info(self): self._test_compute_api('inject_network_info', 'cast', 
instance=self.fake_instance_obj) def test_live_migration(self): self._test_compute_api('live_migration', 'cast', instance=self.fake_instance_obj, dest='dest', block_migration='blockity_block', host='tsoh', migration='migration', migrate_data={}, version='4.8') def test_live_migration_force_complete(self): self._test_compute_api('live_migration_force_complete', 'cast', instance=self.fake_instance_obj, migration_id='1', version='4.9') def test_live_migration_abort(self): self._test_compute_api('live_migration_abort', 'cast', instance=self.fake_instance_obj, migration_id='1', version='4.10') def test_post_live_migration_at_destination(self): self._test_compute_api('post_live_migration_at_destination', 'cast', instance=self.fake_instance_obj, block_migration='block_migration', host='host', version='4.0') def test_pause_instance(self): self._test_compute_api('pause_instance', 'cast', instance=self.fake_instance_obj) def test_soft_delete_instance(self): self._test_compute_api('soft_delete_instance', 'cast', instance=self.fake_instance_obj, reservations=['uuid1', 'uuid2']) def test_swap_volume(self): self._test_compute_api('swap_volume', 'cast', instance=self.fake_instance_obj, old_volume_id='oldid', new_volume_id='newid') def test_restore_instance(self): self._test_compute_api('restore_instance', 'cast', instance=self.fake_instance_obj, version='4.0') def test_pre_live_migration(self): self._test_compute_api('pre_live_migration', 'call', instance=self.fake_instance_obj, block_migration='block_migration', disk='disk', host='host', migrate_data=None, version='4.8') def test_prep_resize(self): self._test_compute_api('prep_resize', 'cast', instance=self.fake_instance_obj, instance_type=self.fake_flavor_obj, image='fake_image', host='host', reservations=list('fake_res'), request_spec='fake_spec', filter_properties={'fakeprop': 'fakeval'}, node='node', clean_shutdown=True, version='4.1') self.flags(compute='4.0', group='upgrade_levels') expected_args = {'instance_type': 
self.fake_flavor} self._test_compute_api('prep_resize', 'cast', expected_args, instance=self.fake_instance_obj, instance_type=self.fake_flavor_obj, image='fake_image', host='host', reservations=list('fake_res'), request_spec='fake_spec', filter_properties={'fakeprop': 'fakeval'}, node='node', clean_shutdown=True, version='4.0') def test_reboot_instance(self): self.maxDiff = None self._test_compute_api('reboot_instance', 'cast', instance=self.fake_instance_obj, block_device_info={}, reboot_type='type') def test_rebuild_instance(self): self._test_compute_api('rebuild_instance', 'cast', new_pass='None', injected_files='None', image_ref='None', orig_image_ref='None', bdms=[], instance=self.fake_instance_obj, host='new_host', orig_sys_metadata=None, recreate=True, on_shared_storage=True, preserve_ephemeral=True, migration=None, node=None, limits=None, version='4.5') def test_rebuild_instance_downgrade(self): self.flags(group='upgrade_levels', compute='4.0') self._test_compute_api('rebuild_instance', 'cast', new_pass='None', injected_files='None', image_ref='None', orig_image_ref='None', bdms=[], instance=self.fake_instance_obj, host='new_host', orig_sys_metadata=None, recreate=True, on_shared_storage=True, preserve_ephemeral=True, version='4.0') def test_reserve_block_device_name(self): self._test_compute_api('reserve_block_device_name', 'call', instance=self.fake_instance_obj, device='device', volume_id='id', disk_bus='ide', device_type='cdrom', version='4.0', _return_value=objects_block_dev.BlockDeviceMapping()) def test_refresh_instance_security_rules(self): expected_args = {'instance': self.fake_instance_obj} self._test_compute_api('refresh_instance_security_rules', 'cast', expected_args, host='fake_host', instance=self.fake_instance_obj, version='4.4') def test_remove_aggregate_host(self): self._test_compute_api('remove_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', slave_info={}) def test_remove_fixed_ip_from_instance(self): 
self._test_compute_api('remove_fixed_ip_from_instance', 'cast', instance=self.fake_instance_obj, address='addr', version='4.0') def test_remove_volume_connection(self): self._test_compute_api('remove_volume_connection', 'call', instance=self.fake_instance_obj, volume_id='id', host='host', version='4.0') def test_rescue_instance(self): self._test_compute_api('rescue_instance', 'cast', instance=self.fake_instance_obj, rescue_password='pw', rescue_image_ref='fake_image_ref', clean_shutdown=True, version='4.0') def test_reset_network(self): self._test_compute_api('reset_network', 'cast', instance=self.fake_instance_obj) def test_resize_instance(self): self._test_compute_api('resize_instance', 'cast', instance=self.fake_instance_obj, migration={'id': 'fake_id'}, image='image', instance_type=self.fake_flavor_obj, reservations=list('fake_res'), clean_shutdown=True, version='4.1') self.flags(compute='4.0', group='upgrade_levels') expected_args = {'instance_type': self.fake_flavor} self._test_compute_api('resize_instance', 'cast', expected_args, instance=self.fake_instance_obj, migration={'id': 'fake_id'}, image='image', instance_type=self.fake_flavor_obj, reservations=list('fake_res'), clean_shutdown=True, version='4.0') def test_resume_instance(self): self._test_compute_api('resume_instance', 'cast', instance=self.fake_instance_obj) def test_revert_resize(self): self._test_compute_api('revert_resize', 'cast', instance=self.fake_instance_obj, migration={'id': 'fake_id'}, host='host', reservations=list('fake_res')) def test_set_admin_password(self): self._test_compute_api('set_admin_password', 'call', instance=self.fake_instance_obj, new_pass='pw', version='4.0') def test_set_host_enabled(self): self._test_compute_api('set_host_enabled', 'call', enabled='enabled', host='host') def test_get_host_uptime(self): self._test_compute_api('get_host_uptime', 'call', host='host') def test_backup_instance(self): self._test_compute_api('backup_instance', 'cast', 
instance=self.fake_instance_obj, image_id='id', backup_type='type', rotation='rotation') def test_snapshot_instance(self): self._test_compute_api('snapshot_instance', 'cast', instance=self.fake_instance_obj, image_id='id') def test_start_instance(self): self._test_compute_api('start_instance', 'cast', instance=self.fake_instance_obj) def test_stop_instance_cast(self): self._test_compute_api('stop_instance', 'cast', instance=self.fake_instance_obj, clean_shutdown=True, version='4.0') def test_stop_instance_call(self): self._test_compute_api('stop_instance', 'call', instance=self.fake_instance_obj, clean_shutdown=True, version='4.0') def test_suspend_instance(self): self._test_compute_api('suspend_instance', 'cast', instance=self.fake_instance_obj) def test_terminate_instance(self): self._test_compute_api('terminate_instance', 'cast', instance=self.fake_instance_obj, bdms=[], reservations=['uuid1', 'uuid2'], version='4.0') def test_unpause_instance(self): self._test_compute_api('unpause_instance', 'cast', instance=self.fake_instance_obj) def test_unrescue_instance(self): self._test_compute_api('unrescue_instance', 'cast', instance=self.fake_instance_obj, version='4.0') def test_shelve_instance(self): self._test_compute_api('shelve_instance', 'cast', instance=self.fake_instance_obj, image_id='image_id', clean_shutdown=True, version='4.0') def test_shelve_offload_instance(self): self._test_compute_api('shelve_offload_instance', 'cast', instance=self.fake_instance_obj, clean_shutdown=True, version='4.0') def test_unshelve_instance(self): self._test_compute_api('unshelve_instance', 'cast', instance=self.fake_instance_obj, host='host', image='image', filter_properties={'fakeprop': 'fakeval'}, node='node', version='4.0') def test_volume_snapshot_create(self): self._test_compute_api('volume_snapshot_create', 'cast', instance=self.fake_instance_obj, volume_id='fake_id', create_info={}, version='4.0') def test_volume_snapshot_delete(self): 
self._test_compute_api('volume_snapshot_delete', 'cast', instance=self.fake_instance_obj, volume_id='fake_id', snapshot_id='fake_id2', delete_info={}, version='4.0') def test_external_instance_event(self): self._test_compute_api('external_instance_event', 'cast', instances=[self.fake_instance_obj], events=['event'], version='4.0') def test_build_and_run_instance(self): self._test_compute_api('build_and_run_instance', 'cast', instance=self.fake_instance_obj, host='host', image='image', request_spec={'request': 'spec'}, filter_properties=[], admin_password='passwd', injected_files=None, requested_networks=['network1'], security_groups=None, block_device_mapping=None, node='node', limits=[], version='4.0') def test_quiesce_instance(self): self._test_compute_api('quiesce_instance', 'call', instance=self.fake_instance_obj, version='4.0') def test_unquiesce_instance(self): self._test_compute_api('unquiesce_instance', 'cast', instance=self.fake_instance_obj, mapping=None, version='4.0') def test_trigger_crash_dump(self): self._test_compute_api('trigger_crash_dump', 'cast', instance=self.fake_instance_obj, version='4.6') def test_trigger_crash_dump_incompatible(self): self.flags(compute='4.0', group='upgrade_levels') self.assertRaises(exception.TriggerCrashDumpNotSupported, self._test_compute_api, 'trigger_crash_dump', 'cast', instance=self.fake_instance_obj, version='4.6') def _test_simple_call(self, method, inargs, callargs, callret, calltype='call', can_send=False): rpc = compute_rpcapi.ComputeAPI() @mock.patch.object(rpc, 'client') @mock.patch.object(compute_rpcapi, '_compute_host') def _test(mock_ch, mock_client): mock_client.can_send_version.return_value = can_send call = getattr(mock_client.prepare.return_value, calltype) call.return_value = callret ctxt = mock.MagicMock() result = getattr(rpc, method)(ctxt, **inargs) call.assert_called_once_with(ctxt, method, **callargs) return result return _test() def test_check_can_live_migrate_source_converts_objects(self): obj 
= migrate_data_obj.LiveMigrateData() result = self._test_simple_call('check_can_live_migrate_source', inargs={'instance': 'foo', 'dest_check_data': obj}, callargs={'instance': 'foo', 'dest_check_data': {}}, callret=obj) self.assertEqual(obj, result) result = self._test_simple_call('check_can_live_migrate_source', inargs={'instance': 'foo', 'dest_check_data': obj}, callargs={'instance': 'foo', 'dest_check_data': {}}, callret={'foo': 'bar'}) self.assertIsInstance(result, migrate_data_obj.LiveMigrateData) @mock.patch('nova.objects.migrate_data.LiveMigrateData.' 'detect_implementation') def test_check_can_live_migrate_destination_converts_dict(self, mock_det): result = self._test_simple_call('check_can_live_migrate_destination', inargs={'instance': 'foo', 'destination': 'bar', 'block_migration': False, 'disk_over_commit': False}, callargs={'instance': 'foo', 'block_migration': False, 'disk_over_commit': False}, callret={'foo': 'bar'}) self.assertEqual(mock_det.return_value, result) def test_live_migration_converts_objects(self): obj = migrate_data_obj.LiveMigrateData() self._test_simple_call('live_migration', inargs={'instance': 'foo', 'dest': 'foo', 'block_migration': False, 'host': 'foo', 'migration': None, 'migrate_data': obj}, callargs={'instance': 'foo', 'dest': 'foo', 'block_migration': False, 'migration': None, 'migrate_data': { 'pre_live_migration_result': {}}}, callret=None, calltype='cast') @mock.patch('nova.objects.migrate_data.LiveMigrateData.from_legacy_dict') def test_pre_live_migration_converts_objects(self, mock_fld): obj = migrate_data_obj.LiveMigrateData() result = self._test_simple_call('pre_live_migration', inargs={'instance': 'foo', 'block_migration': False, 'disk': None, 'host': 'foo', 'migrate_data': obj}, callargs={'instance': 'foo', 'block_migration': False, 'disk': None, 'migrate_data': {}}, callret=obj) self.assertFalse(mock_fld.called) self.assertEqual(obj, result) result = self._test_simple_call('pre_live_migration', inargs={'instance': 
'foo', 'block_migration': False, 'disk': None, 'host': 'foo', 'migrate_data': obj}, callargs={'instance': 'foo', 'block_migration': False, 'disk': None, 'migrate_data': {}}, callret={'foo': 'bar'}) mock_fld.assert_called_once_with( {'pre_live_migration_result': {'foo': 'bar'}}) self.assertIsInstance(result, migrate_data_obj.LiveMigrateData) def test_rollback_live_migration_at_destination_converts_objects(self): obj = migrate_data_obj.LiveMigrateData() method = 'rollback_live_migration_at_destination' self._test_simple_call(method, inargs={'instance': 'foo', 'host': 'foo', 'destroy_disks': False, 'migrate_data': obj}, callargs={'instance': 'foo', 'destroy_disks': False, 'migrate_data': {}}, callret=None, calltype='cast') def test_check_can_live_migrate_destination_old_compute(self): self.flags(compute='4.10', group='upgrade_levels') self.assertRaises(exception.LiveMigrationWithOldNovaNotSupported, self._test_compute_api, 'check_can_live_migrate_destination', 'call', instance=self.fake_instance_obj, block_migration=None, destination='dest', disk_over_commit=None, version='4.11') nova-13.0.0/nova/tests/unit/compute/test_keypairs.py0000664000567000056710000002456512701407773023723 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for keypair API.""" from oslo_concurrency import processutils from oslo_config import cfg import six from nova.compute import api as compute_api from nova import context from nova import exception from nova.objects import keypair as keypair_obj from nova import quota from nova.tests.unit.compute import test_compute from nova.tests.unit import fake_crypto from nova.tests.unit import fake_notifier from nova.tests.unit.objects import test_keypair CONF = cfg.CONF QUOTAS = quota.QUOTAS class KeypairAPITestCase(test_compute.BaseTestCase): def setUp(self): super(KeypairAPITestCase, self).setUp() self.keypair_api = compute_api.KeypairAPI() self.ctxt = context.RequestContext('fake', 'fake') self._keypair_db_call_stubs() self.existing_key_name = 'fake existing key name' self.pub_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLnVkqJu9WVf' '/5StU3JCrBR2r1s1j8K1tux+5XeSvdqaM8lMFNorzbY5iyoBbR' 'S56gy1jmm43QsMPJsrpfUZKcJpRENSe3OxIIwWXRoiapZe78u/' 'a9xKwj0avFYMcws9Rk9iAB7W4K1nEJbyCPl5lRBoyqeHBqrnnu' 'XWEgGxJCK0Ah6wcOzwlEiVjdf4kxzXrwPHyi7Ea1qvnNXTziF8' 'yYmUlH4C8UXfpTQckwSwpDyxZUc63P8q+vPbs3Q2kw+/7vvkCK' 'HJAXVI+oCiyMMfffoTq16M1xfV58JstgtTqAXG+ZFpicGajREU' 'E/E3hO5MGgcHmyzIrWHKpe1n3oEGuz') self.fingerprint = '4e:48:c6:a0:4a:f9:dd:b5:4c:85:54:5a:af:43:47:5a' self.keypair_type = keypair_obj.KEYPAIR_TYPE_SSH self.key_destroyed = False def _keypair_db_call_stubs(self): def db_key_pair_get_all_by_user(context, user_id): return [dict(test_keypair.fake_keypair, name=self.existing_key_name, public_key=self.pub_key, fingerprint=self.fingerprint)] def db_key_pair_create(context, keypair): return dict(test_keypair.fake_keypair, **keypair) def db_key_pair_destroy(context, user_id, name): if name == self.existing_key_name: self.key_destroyed = True def db_key_pair_get(context, user_id, name): if name == self.existing_key_name and not self.key_destroyed: return dict(test_keypair.fake_keypair, name=self.existing_key_name, public_key=self.pub_key, fingerprint=self.fingerprint) else: raise 
exception.KeypairNotFound(user_id=user_id, name=name) self.stub_out("nova.db.key_pair_get_all_by_user", db_key_pair_get_all_by_user) self.stub_out("nova.db.key_pair_create", db_key_pair_create) self.stub_out("nova.db.key_pair_destroy", db_key_pair_destroy) self.stub_out("nova.db.key_pair_get", db_key_pair_get) def _check_notifications(self, action='create', key_name='foo'): self.assertEqual(2, len(fake_notifier.NOTIFICATIONS)) n1 = fake_notifier.NOTIFICATIONS[0] self.assertEqual('INFO', n1.priority) self.assertEqual('keypair.%s.start' % action, n1.event_type) self.assertEqual('api.%s' % CONF.host, n1.publisher_id) self.assertEqual('fake', n1.payload['user_id']) self.assertEqual('fake', n1.payload['tenant_id']) self.assertEqual(key_name, n1.payload['key_name']) n2 = fake_notifier.NOTIFICATIONS[1] self.assertEqual('INFO', n2.priority) self.assertEqual('keypair.%s.end' % action, n2.event_type) self.assertEqual('api.%s' % CONF.host, n2.publisher_id) self.assertEqual('fake', n2.payload['user_id']) self.assertEqual('fake', n2.payload['tenant_id']) self.assertEqual(key_name, n2.payload['key_name']) class CreateImportSharedTestMixIn(object): """Tests shared between create and import_key. Mix-in pattern is used here so that these `test_*` methods aren't picked up by the test runner unless they are part of a 'concrete' test case. 
""" def assertKeypairRaises(self, exc_class, expected_message, name): func = getattr(self.keypair_api, self.func_name) args = [] if self.func_name == 'import_key_pair': args.append(self.pub_key) args.append(self.keypair_type) exc = self.assertRaises(exc_class, func, self.ctxt, self.ctxt.user_id, name, *args) self.assertEqual(expected_message, six.text_type(exc)) def assertInvalidKeypair(self, expected_message, name): msg = 'Keypair data is invalid: %s' % expected_message self.assertKeypairRaises(exception.InvalidKeypair, msg, name) def test_name_too_short(self): msg = ('Keypair name must be string and between 1 ' 'and 255 characters long') self.assertInvalidKeypair(msg, '') def test_name_too_long(self): msg = ('Keypair name must be string and between 1 ' 'and 255 characters long') self.assertInvalidKeypair(msg, 'x' * 256) def test_invalid_chars(self): msg = "Keypair name contains unsafe characters" self.assertInvalidKeypair(msg, '* BAD CHARACTERS! *') def test_already_exists(self): def db_key_pair_create_duplicate(context, keypair): raise exception.KeyPairExists(key_name=keypair.get('name', '')) self.stub_out("nova.db.key_pair_create", db_key_pair_create_duplicate) msg = ("Key pair '%(key_name)s' already exists." 
% {'key_name': self.existing_key_name}) self.assertKeypairRaises(exception.KeyPairExists, msg, self.existing_key_name) def test_quota_limit(self): def fake_quotas_count(self, context, resource, *args, **kwargs): return CONF.quota_key_pairs self.stubs.Set(QUOTAS, "count", fake_quotas_count) msg = "Maximum number of key pairs exceeded" self.assertKeypairRaises(exception.KeypairLimitExceeded, msg, 'foo') class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn): func_name = 'create_key_pair' def _check_success(self): keypair, private_key = self.keypair_api.create_key_pair( self.ctxt, self.ctxt.user_id, 'foo', key_type=self.keypair_type) self.assertEqual('foo', keypair['name']) self.assertEqual(self.keypair_type, keypair['type']) self._check_notifications() def test_success_ssh(self): self._check_success() def test_success_x509(self): self.keypair_type = keypair_obj.KEYPAIR_TYPE_X509 self._check_success() def test_x509_subject_too_long(self): # X509 keypairs will fail if the Subject they're created with # is longer than 64 characters. The previous unit tests could not # detect the issue because the ctxt.user_id was too short. # This unit tests is added to prove this issue. 
class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
    """Tests for KeypairAPI.import_key_pair()."""

    func_name = 'import_key_pair'

    def _check_success(self):
        keypair = self.keypair_api.import_key_pair(self.ctxt,
                                                   self.ctxt.user_id,
                                                   'foo',
                                                   self.pub_key,
                                                   self.keypair_type)
        self.assertEqual('foo', keypair['name'])
        # NOTE: the original asserted keypair['type'] twice; the
        # duplicated assertion was removed.
        self.assertEqual(self.keypair_type, keypair['type'])
        self.assertEqual(self.fingerprint, keypair['fingerprint'])
        self.assertEqual(self.pub_key, keypair['public_key'])
        self._check_notifications(action='import')

    def test_success_ssh(self):
        self._check_success()

    def test_success_x509(self):
        self.keypair_type = keypair_obj.KEYPAIR_TYPE_X509
        certif, fingerprint = fake_crypto.get_x509_cert_and_fingerprint()
        self.pub_key = certif
        self.fingerprint = fingerprint
        self._check_success()

    def test_bad_key_data(self):
        exc = self.assertRaises(exception.InvalidKeypair,
                                self.keypair_api.import_key_pair,
                                self.ctxt, self.ctxt.user_id, 'foo',
                                'bad key data')
        msg = u'Keypair data is invalid: failed to generate fingerprint'
        self.assertEqual(msg, six.text_type(exc))


class GetKeypairTestCase(KeypairAPITestCase):
    """Tests for KeypairAPI.get_key_pair()."""

    def test_success(self):
        keypair = self.keypair_api.get_key_pair(self.ctxt,
                                                self.ctxt.user_id,
                                                self.existing_key_name)
        self.assertEqual(self.existing_key_name, keypair['name'])


class GetKeypairsTestCase(KeypairAPITestCase):
    """Tests for KeypairAPI.get_key_pairs()."""

    def test_success(self):
        keypairs = self.keypair_api.get_key_pairs(self.ctxt,
                                                  self.ctxt.user_id)
        self.assertEqual([self.existing_key_name],
                         [k['name'] for k in keypairs])


class DeleteKeypairTestCase(KeypairAPITestCase):
    """Tests for KeypairAPI.delete_key_pair()."""

    def test_success(self):
        # The key must exist before deletion and be gone afterwards.
        self.keypair_api.get_key_pair(self.ctxt, self.ctxt.user_id,
                                      self.existing_key_name)
        self.keypair_api.delete_key_pair(self.ctxt, self.ctxt.user_id,
                                         self.existing_key_name)
        self.assertRaises(exception.KeypairNotFound,
                          self.keypair_api.get_key_pair,
                          self.ctxt, self.ctxt.user_id,
                          self.existing_key_name)

        self._check_notifications(action='delete',
                                  key_name=self.existing_key_name)
# Resource totals reported by the fake virt driver; used both to build
# the tracker and as the baseline for the expected-resource dicts below.
_VIRT_DRIVER_AVAIL_RESOURCES = {
    'vcpus': 4,
    'memory_mb': 512,
    'local_gb': 6,
    'vcpus_used': 0,
    'memory_mb_used': 0,
    'local_gb_used': 0,
    'hypervisor_type': 'fake',
    'hypervisor_version': 0,
    'hypervisor_hostname': 'fakehost',
    'cpu_info': '',
    'numa_topology': None,
}

# A single ComputeNode record whose capacity mirrors the virt driver
# resources above (free values derived by subtraction).
_COMPUTE_NODE_FIXTURES = [
    objects.ComputeNode(
        id=1,
        host='fake-host',
        vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
        memory_mb=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'],
        local_gb=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'],
        vcpus_used=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus_used'],
        memory_mb_used=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used'],
        local_gb_used=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used'],
        hypervisor_type='fake',
        hypervisor_version=0,
        hypervisor_hostname='fake-host',
        free_ram_mb=(_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'] -
                     _VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used']),
        free_disk_gb=(_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'] -
                      _VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used']),
        current_workload=0,
        running_vms=0,
        cpu_info='{}',
        disk_available_least=0,
        host_ip='1.1.1.1',
        supported_hv_specs=[
            objects.HVSpec.from_list([arch.I686, hv_type.KVM, vm_mode.HVM])
        ],
        metrics=None,
        pci_device_pools=None,
        extra_resources=None,
        stats={},
        numa_topology=None,
        cpu_allocation_ratio=16.0,
        ram_allocation_ratio=1.5,
        disk_allocation_ratio=1.0,
    ),
]

# Flavor data as plain dicts, keyed by instance_type_id.
_INSTANCE_TYPE_FIXTURES = {
    1: {
        'id': 1,
        'flavorid': 'fakeid-1',
        'name': 'fake1.small',
        'memory_mb': 128,
        'vcpus': 1,
        'root_gb': 1,
        'ephemeral_gb': 0,
        'swap': 0,
        'rxtx_factor': 0,
        'vcpu_weight': 1,
        'extra_specs': {},
    },
    2: {
        'id': 2,
        'flavorid': 'fakeid-2',
        'name': 'fake1.medium',
        'memory_mb': 256,
        'vcpus': 2,
        'root_gb': 5,
        'ephemeral_gb': 0,
        'swap': 0,
        'rxtx_factor': 0,
        'vcpu_weight': 1,
        'extra_specs': {},
    },
}

# The same flavors as Flavor objects (for instance.flavor fields).
_INSTANCE_TYPE_OBJ_FIXTURES = {
    1: objects.Flavor(id=1, flavorid='fakeid-1', name='fake1.small',
                      memory_mb=128, vcpus=1, root_gb=1,
                      ephemeral_gb=0, swap=0, rxtx_factor=0,
                      vcpu_weight=1, extra_specs={}),
    2: objects.Flavor(id=2, flavorid='fakeid-2', name='fake1.medium',
                      memory_mb=256, vcpus=2, root_gb=5,
                      ephemeral_gb=0, swap=0, rxtx_factor=0,
                      vcpu_weight=1, extra_specs={}),
}

# 2 MB expressed in the units NUMA cell memory uses (KiB-based math).
_2MB = 2 * units.Mi / units.Ki

_INSTANCE_NUMA_TOPOLOGIES = {
    '2mb': objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            id=0, cpuset=set([1]), memory=_2MB, pagesize=0),
        objects.InstanceNUMACell(
            id=1, cpuset=set([3]), memory=_2MB, pagesize=0)]),
}

_NUMA_LIMIT_TOPOLOGIES = {
    '2mb': objects.NUMATopologyLimits(id=0,
                                      cpu_allocation_ratio=1.0,
                                      ram_allocation_ratio=1.0),
}

_NUMA_PAGE_TOPOLOGIES = {
    '2kb*8': objects.NUMAPagesTopology(size_kb=2, total=8, used=0)
}

_NUMA_HOST_TOPOLOGIES = {
    '2mb': objects.NUMATopology(cells=[
        objects.NUMACell(id=0,
                         cpuset=set([1, 2]),
                         memory=_2MB,
                         cpu_usage=0,
                         memory_usage=0,
                         mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
                         siblings=[],
                         pinned_cpus=set([])),
        objects.NUMACell(id=1,
                         cpuset=set([3, 4]),
                         memory=_2MB,
                         cpu_usage=0,
                         memory_usage=0,
                         mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
                         siblings=[],
                         pinned_cpus=set([]))]),
}

# Two instances "on" the host: one ACTIVE (counted in usage) and one
# DELETED (ignored by the tracker's usage accounting).
_INSTANCE_FIXTURES = [
    objects.Instance(
        id=1,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
        memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
        numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
        instance_type_id=1,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=None,
        os_type='fake-os',  # Used by the stats collector.
        project_id='fake-project',  # Used by the stats collector.
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
    ),
    objects.Instance(
        id=2,
        host=None,
        node=None,
        uuid='33805b54-dea6-47b8-acb2-22aeb1b57919',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.DELETED,
        power_state=power_state.SHUTDOWN,
        task_state=None,
        os_type='fake-os',
        project_id='fake-project-2',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
    ),
]
# In-progress migrations relative to 'fake-host', keyed by the role this
# compute node plays in the migration.
_MIGRATION_FIXTURES = {
    # A migration that has only this compute node as the source host
    'source-only': objects.Migration(
        id=1,
        instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
        source_compute='fake-host',
        dest_compute='other-host',
        source_node='fake-node',
        dest_node='other-node',
        old_instance_type_id=1,
        new_instance_type_id=2,
        migration_type='resize',
        status='migrating'
    ),
    # A migration that has only this compute node as the dest host
    'dest-only': objects.Migration(
        id=2,
        instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
        source_compute='other-host',
        dest_compute='fake-host',
        source_node='other-node',
        dest_node='fake-node',
        old_instance_type_id=1,
        new_instance_type_id=2,
        migration_type='resize',
        status='migrating'
    ),
    # A migration that has this compute node as both the source and dest host
    'source-and-dest': objects.Migration(
        id=3,
        instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
        source_compute='fake-host',
        dest_compute='fake-host',
        source_node='fake-node',
        dest_node='fake-node',
        old_instance_type_id=1,
        new_instance_type_id=2,
        migration_type='resize',
        status='migrating'
    ),
    # A migration that has this compute node as destination and is an evac
    'dest-only-evac': objects.Migration(
        id=4,
        instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
        source_compute='other-host',
        dest_compute='fake-host',
        source_node='other-node',
        dest_node='fake-node',
        old_instance_type_id=2,
        new_instance_type_id=None,
        migration_type='evacuation',
        status='pre-migrating'
    ),
}

# Instances backing the migrations above, keyed by instance_uuid.
_MIGRATION_INSTANCE_FIXTURES = {
    # source-only
    'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.Instance(
        id=101,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
        memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
        numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
        instance_type_id=1,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.RESIZE_MIGRATING,
        system_metadata={},
        os_type='fake-os',
        project_id='fake-project',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
    ),
    # dest-only
    'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.Instance(
        id=102,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.RESIZE_MIGRATING,
        system_metadata={},
        os_type='fake-os',
        project_id='fake-project',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
    ),
    # source-and-dest
    # NOTE(review): id=3 breaks the 101/102 numbering used by the other
    # migration instances -- looks like a copy-paste leftover; confirm
    # nothing relies on these ids being distinct from _INSTANCE_FIXTURES.
    'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.Instance(
        id=3,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.RESIZE_MIGRATING,
        system_metadata={},
        os_type='fake-os',
        project_id='fake-project',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
    ),
    # dest-only-evac
    # NOTE(review): id=102 duplicates the dest-only instance's id above --
    # presumably a copy-paste slip; verify before relying on uniqueness.
    '077fb63a-bdc8-4330-90ef-f012082703dc': objects.Instance(
        id=102,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.REBUILDING,
        system_metadata={},
        os_type='fake-os',
        project_id='fake-project',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
    ),
}

# MigrationContext records keyed by instance_uuid.
_MIGRATION_CONTEXT_FIXTURES = {
    'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.MigrationContext(
        instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
        migration_id=3,
        new_numa_topology=None,
        old_numa_topology=None),
    'c17741a5-6f3d-44a8-ade8-773dc8c29124': objects.MigrationContext(
        instance_uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
        migration_id=3,
        new_numa_topology=None,
        old_numa_topology=None),
    'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.MigrationContext(
        instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
        migration_id=1,
        new_numa_topology=None,
        old_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb']),
    'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.MigrationContext(
        instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
        migration_id=2,
        new_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
        old_numa_topology=None),
    # NOTE(review): migration_id=2 here, but the dest-only-evac migration
    # fixture for this instance has id=4 -- looks like copy-paste; confirm
    # no test depends on this value before changing it.
    '077fb63a-bdc8-4330-90ef-f012082703dc': objects.MigrationContext(
        instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
        migration_id=2,
        new_numa_topology=None,
        old_numa_topology=None),
}


def overhead_zero(instance):
    # Emulate that the driver does not adjust the memory
    # of the instance...
    return {
        'memory_mb': 0
    }
""" sched_client_mock = mock.MagicMock() notifier_mock = mock.MagicMock() vd = mock.MagicMock() # Make sure we don't change any global fixtures during tests virt_resources = copy.deepcopy(virt_resources) vd.get_available_resource.return_value = virt_resources vd.estimate_instance_overhead.side_effect = estimate_overhead with test.nested( mock.patch('nova.scheduler.client.SchedulerClient', return_value=sched_client_mock), mock.patch('nova.rpc.get_notifier', return_value=notifier_mock)): rt = resource_tracker.ResourceTracker(hostname, vd, nodename) return (rt, sched_client_mock, vd) class BaseTestCase(test.NoDBTestCase): def setUp(self): super(BaseTestCase, self).setUp() self.rt = None self.flags(my_ip='1.1.1.1') def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES, estimate_overhead=overhead_zero): (self.rt, self.sched_client_mock, self.driver_mock) = setup_rt( 'fake-host', 'fake-node', virt_resources, estimate_overhead) class TestUpdateAvailableResources(BaseTestCase): def _update_available_resources(self): # We test RT._update separately, since the complexity # of the update_available_resource() function is high enough as # it is, we just want to focus here on testing the resources # parameter that update_available_resource() eventually passes # to _update(). 
class TestUpdateAvailableResources(BaseTestCase):
    """Tests for the resources passed from update_available_resource()
    through to RT._update().
    """

    def _update_available_resources(self):
        # We test RT._update separately, since the complexity
        # of the update_available_resource() function is high enough as
        # it is, we just want to focus here on testing the resources
        # parameter that update_available_resource() eventually passes
        # to _update().
        with mock.patch.object(self.rt, '_update') as update_mock:
            self.rt.update_available_resource(mock.sentinel.ctx)
        return update_mock

    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
                                                    get_cn_mock):
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self._setup_rt()

        get_mock.return_value = []
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]

        update_mock = self._update_available_resources()

        vd = self.driver_mock
        vd.get_available_resource.assert_called_once_with('fake-node')
        get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                         'fake-node',
                                         expected_attrs=[
                                             'system_metadata',
                                             'numa_topology',
                                             'flavor',
                                             'migration_context'])
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        migr_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                          'fake-node')

        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 6,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 512,
            'memory_mb_used': 0,
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 0,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))

    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_no_migrations_reserved_disk_and_ram(
            self, get_mock, migr_mock, get_cn_mock):
        self.flags(reserved_host_disk_mb=1024,
                   reserved_host_memory_mb=512)
        self._setup_rt()

        get_mock.return_value = []
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 5,  # 6GB avail - 1 GB reserved
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 0,  # 512MB avail - 512MB reserved
            'memory_mb_used': 512,  # 0MB used + 512MB reserved
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 1,  # 0GB used + 1 GB reserved
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))

    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_some_instances_no_migrations(self, get_mock, migr_mock,
                                          get_cn_mock):
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        # Setup virt resources to match used resources to number
        # of defined instances on the hypervisor
        virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        virt_resources.update(vcpus_used=1,
                              memory_mb_used=128,
                              local_gb_used=1)
        self._setup_rt(virt_resources=virt_resources)

        get_mock.return_value = _INSTANCE_FIXTURES
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 5,  # 6 - 1 used
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 384,  # 512 - 128 used
            'memory_mb_used': 128,
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 1,
            'hypervisor_type': 'fake',
            'local_gb_used': 1,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 1  # One active instance
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))

    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_orphaned_instances_no_migrations(self, get_mock, migr_mock,
                                              get_cn_mock):
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        # Setup virt resources to match used resources to number
        # of defined instances on the hypervisor
        virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        virt_resources.update(memory_mb_used=64)
        self._setup_rt(virt_resources=virt_resources)

        get_mock.return_value = []
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]

        # Orphaned instances are those that the virt driver has on
        # record as consuming resources on the compute node, but the
        # Nova database has no record of the instance being active
        # on the host. For some reason, the resource tracker only
        # considers orphaned instance's memory usage in its calculations
        # of free resources...
        orphaned_usages = {
            '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d': {
                # Yes, the return result format of get_per_instance_usage
                # is indeed this stupid and redundant. Also note that the
                # libvirt driver just returns an empty dict always for this
                # method and so who the heck knows whether this stuff
                # actually works.
                'uuid': '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d',
                'memory_mb': 64
            }
        }
        vd = self.driver_mock
        vd.get_per_instance_usage.return_value = orphaned_usages

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 6,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 448,  # 512 - 64 orphaned usage
            'memory_mb_used': 64,
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 0,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            # Yep, for some reason, orphaned instances are not counted
            # as running VMs...
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))

    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_source_migration(self, get_mock, get_inst_mock,
                                           migr_mock, get_cn_mock):
        # We test the behavior of update_available_resource() when
        # there is an active migration that involves this compute node
        # as the source host not the destination host, and the resource
        # tracker does not have any instances assigned to it. This is
        # the case when a migration from this compute host to another
        # has been completed, but the user has not confirmed the resize
        # yet, so the resource tracker must continue to keep the resources
        # for the original instance type available on the source compute
        # node in case of a revert of the resize.
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        # Setup virt resources to match used resources to number
        # of defined instances on the hypervisor
        virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        virt_resources.update(vcpus_used=4,
                              memory_mb_used=128,
                              local_gb_used=1)
        self._setup_rt(virt_resources=virt_resources)

        get_mock.return_value = []
        migr_obj = _MIGRATION_FIXTURES['source-only']
        migr_mock.return_value = [migr_obj]
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        # Migration.instance property is accessed in the migration
        # processing code, and this property calls
        # objects.Instance.get_by_uuid, so we have the migration return
        inst_uuid = migr_obj.instance_uuid
        instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
        get_inst_mock.return_value = instance
        instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 5,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 384,  # 512 total - 128 for possible revert of orig
            'memory_mb_used': 128,  # 128 possible revert amount
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 1,
            'hypervisor_type': 'fake',
            'local_gb_used': 1,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))

    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_dest_migration(self, get_mock, get_inst_mock,
                                         migr_mock, get_cn_mock):
        # We test the behavior of update_available_resource() when
        # there is an active migration that involves this compute node
        # as the destination host not the source host, and the resource
        # tracker does not yet have any instances assigned to it. This is
        # the case when a migration to this compute host from another host
        # is in progress, but the user has not confirmed the resize
        # yet, so the resource tracker must reserve the resources
        # for the possibly-to-be-confirmed instance's instance type
        # node in case of a confirm of the resize.
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        # Setup virt resources to match used resources to number
        # of defined instances on the hypervisor
        virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        virt_resources.update(vcpus_used=2,
                              memory_mb_used=256,
                              local_gb_used=5)
        self._setup_rt(virt_resources=virt_resources)

        get_mock.return_value = []
        migr_obj = _MIGRATION_FIXTURES['dest-only']
        migr_mock.return_value = [migr_obj]
        inst_uuid = migr_obj.instance_uuid
        instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
        get_inst_mock.return_value = instance
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 1,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 256,  # 512 total - 256 for possible confirm of new
            'memory_mb_used': 256,  # 256 possible confirmed amount
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 2,
            'hypervisor_type': 'fake',
            'local_gb_used': 5,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))

    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_dest_evacuation(self, get_mock, get_inst_mock,
                                          migr_mock, get_cn_mock):
        # We test the behavior of update_available_resource() when
        # there is an active evacuation that involves this compute node
        # as the destination host not the source host, and the resource
        # tracker does not yet have any instances assigned to it. This is
        # the case when a migration to this compute host from another host
        # is in progress, but not finished yet.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) # Setup virt resources to match used resources to number # of defined instances on the hypervisor virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) virt_resources.update(vcpus_used=2, memory_mb_used=256, local_gb_used=5) self._setup_rt(virt_resources=virt_resources) get_mock.return_value = [] migr_obj = _MIGRATION_FIXTURES['dest-only-evac'] migr_mock.return_value = [migr_obj] inst_uuid = migr_obj.instance_uuid instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone() get_inst_mock.return_value = instance get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0] instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid] update_mock = self._update_available_resources() get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected_resources.update({ # host is added in update_available_resources() # before calling _update() 'host': 'fake-host', 'host_ip': '1.1.1.1', 'numa_topology': None, 'metrics': '[]', 'cpu_info': '', 'hypervisor_hostname': 'fakehost', 'free_disk_gb': 1, 'hypervisor_version': 0, 'local_gb': 6, 'free_ram_mb': 256, # 512 total - 256 for possible confirm of new 'memory_mb_used': 256, # 256 possible confirmed amount 'pci_device_pools': objects.PciDevicePoolList(), 'vcpus_used': 2, 'hypervisor_type': 'fake', 'local_gb_used': 5, 'memory_mb': 512, 'current_workload': 0, 'vcpus': 4, 'running_vms': 0 }) update_mock.assert_called_once_with(mock.sentinel.ctx) self.assertTrue(obj_base.obj_equal_prims(expected_resources, self.rt.compute_node)) @mock.patch('nova.objects.MigrationContext.get_by_instance_uuid', return_value=None) @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.Instance.get_by_uuid') @mock.patch('nova.objects.InstanceList.get_by_host_and_node') def 
test_some_instances_source_and_dest_migration(self, get_mock, get_inst_mock, migr_mock, get_cn_mock, get_mig_ctxt_mock): # We test the behavior of update_available_resource() when # there is an active migration that involves this compute node # as the destination host AND the source host, and the resource # tracker has a few instances assigned to it, including the # instance that is resizing to this same compute node. The tracking # of resource amounts takes into account both the old and new # resize instance types as taking up space on the node. self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) # Setup virt resources to match used resources to number # of defined instances on the hypervisor virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) virt_resources.update(vcpus_used=4, memory_mb_used=512, local_gb_used=7) self._setup_rt(virt_resources=virt_resources) migr_obj = _MIGRATION_FIXTURES['source-and-dest'] migr_mock.return_value = [migr_obj] inst_uuid = migr_obj.instance_uuid # The resizing instance has already had its instance type # changed to the *new* instance type (the bigger one, instance type 2) resizing_instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone() resizing_instance.migration_context = ( _MIGRATION_CONTEXT_FIXTURES[resizing_instance.uuid]) all_instances = _INSTANCE_FIXTURES + [resizing_instance] get_mock.return_value = all_instances get_inst_mock.return_value = resizing_instance get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0] update_mock = self._update_available_resources() get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected_resources.update({ # host is added in update_available_resources() # before calling _update() 'host': 'fake-host', 'host_ip': '1.1.1.1', 'numa_topology': None, 'metrics': '[]', 'cpu_info': '', 'hypervisor_hostname': 'fakehost', # 6 total - 1G existing - 5G new flav - 1G old flav 
'free_disk_gb': -1, 'hypervisor_version': 0, 'local_gb': 6, # 512 total - 128 existing - 256 new flav - 128 old flav 'free_ram_mb': 0, 'memory_mb_used': 512, # 128 exist + 256 new flav + 128 old flav 'pci_device_pools': objects.PciDevicePoolList(), 'vcpus_used': 4, 'hypervisor_type': 'fake', 'local_gb_used': 7, # 1G existing, 5G new flav + 1 old flav 'memory_mb': 512, 'current_workload': 1, # One migrating instance... 'vcpus': 4, 'running_vms': 2 }) update_mock.assert_called_once_with(mock.sentinel.ctx) self.assertTrue(obj_base.obj_equal_prims(expected_resources, self.rt.compute_node)) class TestInitComputeNode(BaseTestCase): @mock.patch('nova.objects.ComputeNode.create') @mock.patch('nova.objects.Service.get_by_compute_host') @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') def test_no_op_init_compute_node(self, get_mock, service_mock, create_mock): self._setup_rt() resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) self.rt.compute_node = compute_node self.rt._init_compute_node(mock.sentinel.ctx, resources) self.assertFalse(service_mock.called) self.assertFalse(get_mock.called) self.assertFalse(create_mock.called) self.assertFalse(self.rt.disabled) @mock.patch('nova.objects.ComputeNode.create') @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') def test_compute_node_loaded(self, get_mock, create_mock): self._setup_rt() def fake_get_node(_ctx, host, node): res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) return res get_mock.side_effect = fake_get_node resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) self.rt._init_compute_node(mock.sentinel.ctx, resources) get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') self.assertFalse(create_mock.called) self.assertFalse(self.rt.disabled) @mock.patch('nova.objects.ComputeNode.create') @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') def test_compute_node_created_on_empty(self, get_mock, 
create_mock): self._setup_rt() get_mock.side_effect = exc.NotFound cpu_alloc_ratio = 1.0 ram_alloc_ratio = 1.0 disk_alloc_ratio = 1.0 resources = { 'host_ip': '1.1.1.1', 'numa_topology': None, 'metrics': '[]', 'cpu_info': '', 'hypervisor_hostname': 'fakehost', 'free_disk_gb': 6, 'hypervisor_version': 0, 'local_gb': 6, 'free_ram_mb': 512, 'memory_mb_used': 0, 'pci_device_pools': [], 'vcpus_used': 0, 'hypervisor_type': 'fake', 'local_gb_used': 0, 'memory_mb': 512, 'current_workload': 0, 'vcpus': 4, 'running_vms': 0, 'pci_passthrough_devices': '[]' } # The expected compute represents the initial values used # when creating a compute node. expected_compute = objects.ComputeNode( host_ip=resources['host_ip'], vcpus=resources['vcpus'], memory_mb=resources['memory_mb'], local_gb=resources['local_gb'], cpu_info=resources['cpu_info'], vcpus_used=resources['vcpus_used'], memory_mb_used=resources['memory_mb_used'], local_gb_used=resources['local_gb_used'], numa_topology=resources['numa_topology'], hypervisor_type=resources['hypervisor_type'], hypervisor_version=resources['hypervisor_version'], hypervisor_hostname=resources['hypervisor_hostname'], # NOTE(sbauza): ResourceTracker adds host field host='fake-host', # NOTE(sbauza): ResourceTracker adds CONF allocation ratios ram_allocation_ratio=ram_alloc_ratio, cpu_allocation_ratio=cpu_alloc_ratio, disk_allocation_ratio=disk_alloc_ratio, ) # Forcing the flags to the values we know self.rt.ram_allocation_ratio = ram_alloc_ratio self.rt.cpu_allocation_ratio = cpu_alloc_ratio self.rt.disk_allocation_ratio = disk_alloc_ratio self.rt._init_compute_node(mock.sentinel.ctx, resources) self.assertFalse(self.rt.disabled) get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') create_mock.assert_called_once_with() self.assertTrue(obj_base.obj_equal_prims(expected_compute, self.rt.compute_node)) def test_copy_resources_adds_allocation_ratios(self): self.flags(cpu_allocation_ratio=4.0, ram_allocation_ratio=3.0, 
disk_allocation_ratio=2.0) self._setup_rt() resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) self.rt.compute_node = compute_node self.rt._copy_resources(resources) self.assertEqual(4.0, self.rt.compute_node.cpu_allocation_ratio) self.assertEqual(3.0, self.rt.compute_node.ram_allocation_ratio) self.assertEqual(2.0, self.rt.compute_node.disk_allocation_ratio) class TestUpdateComputeNode(BaseTestCase): @mock.patch('nova.objects.Service.get_by_compute_host') def test_existing_compute_node_updated_same_resources(self, service_mock): self._setup_rt() # This is the same set of resources as the fixture, deliberately. We # are checking below to see that update_resource_stats() is not # needlessly called when the resources don't actually change. compute = objects.ComputeNode( host_ip='1.1.1.1', numa_topology=None, metrics='[]', cpu_info='', hypervisor_hostname='fakehost', free_disk_gb=6, hypervisor_version=0, local_gb=6, free_ram_mb=512, memory_mb_used=0, pci_device_pools=objects.PciDevicePoolList(), vcpus_used=0, hypervisor_type='fake', local_gb_used=0, memory_mb=512, current_workload=0, vcpus=4, running_vms=0, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0, ) self.rt.compute_node = compute self.rt._update(mock.sentinel.ctx) self.assertFalse(self.rt.disabled) self.assertFalse(service_mock.called) # The above call to _update() will populate the # RT.old_resources collection with the resources. 
Here, we check that # if we call _update() again with the same resources, that # the scheduler client won't be called again to update those # (unchanged) resources for the compute node self.sched_client_mock.reset_mock() urs_mock = self.sched_client_mock.update_resource_stats self.rt._update(mock.sentinel.ctx) self.assertFalse(urs_mock.called) @mock.patch('nova.objects.Service.get_by_compute_host') def test_existing_compute_node_updated_new_resources(self, service_mock): self._setup_rt() # Deliberately changing local_gb_used, vcpus_used, and memory_mb_used # below to be different from the compute node fixture's base usages. # We want to check that the code paths update the stored compute node # usage records with what is supplied to _update(). compute = objects.ComputeNode( host='fake-host', host_ip='1.1.1.1', numa_topology=None, metrics='[]', cpu_info='', hypervisor_hostname='fakehost', free_disk_gb=2, hypervisor_version=0, local_gb=6, free_ram_mb=384, memory_mb_used=128, pci_device_pools=objects.PciDevicePoolList(), vcpus_used=2, hypervisor_type='fake', local_gb_used=4, memory_mb=512, current_workload=0, vcpus=4, running_vms=0, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0, ) self.rt.compute_node = compute self.rt._update(mock.sentinel.ctx) self.assertFalse(self.rt.disabled) self.assertFalse(service_mock.called) urs_mock = self.sched_client_mock.update_resource_stats urs_mock.assert_called_once_with(self.rt.compute_node) class TestInstanceClaim(BaseTestCase): def setUp(self): super(TestInstanceClaim, self).setUp() self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self._setup_rt() self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) # not using mock.sentinel.ctx because instance_claim calls #elevated self.ctx = mock.MagicMock() self.elevated = mock.MagicMock() self.ctx.elevated.return_value = self.elevated self.instance = _INSTANCE_FIXTURES[0].obj_clone() def assertEqualNUMAHostTopology(self, expected, got): 
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage') if None in (expected, got): if expected != got: raise AssertionError("Topologies don't match. Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) else: return if len(expected) != len(got): raise AssertionError("Topologies don't match due to different " "number of cells. Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) for exp_cell, got_cell in zip(expected.cells, got.cells): for attr in attrs: if getattr(exp_cell, attr) != getattr(got_cell, attr): raise AssertionError("Topologies don't match. Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) def test_claim_disabled(self): self.rt.compute_node = None self.assertTrue(self.rt.disabled) with mock.patch.object(self.instance, 'save'): claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance, None) self.assertEqual(self.rt.host, self.instance.host) self.assertEqual(self.rt.host, self.instance.launched_on) self.assertEqual(self.rt.nodename, self.instance.node) self.assertIsInstance(claim, claims.NopClaim) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_update_usage_with_claim(self, migr_mock, pci_mock): # Test that RT.update_usage() only changes the compute node # resources if there has been a claim first. 
pci_mock.return_value = objects.InstancePCIRequests(requests=[]) expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) self.rt.update_usage(self.ctx, self.instance) self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) disk_used = self.instance.root_gb + self.instance.ephemeral_gb expected.update({ 'local_gb_used': disk_used, 'memory_mb_used': self.instance.memory_mb, 'free_disk_gb': expected['local_gb'] - disk_used, "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb, 'running_vms': 1, 'vcpus_used': 1, 'pci_device_pools': objects.PciDevicePoolList(), }) with mock.patch.object(self.rt, '_update') as update_mock: with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, None) update_mock.assert_called_once_with(self.elevated) self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_update_usage_removed(self, migr_mock, pci_mock): # Test that RT.update_usage() removes the instance when update is # called in a removed state pci_mock.return_value = objects.InstancePCIRequests(requests=[]) expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) disk_used = self.instance.root_gb + self.instance.ephemeral_gb expected.update({ 'local_gb_used': disk_used, 'memory_mb_used': self.instance.memory_mb, 'free_disk_gb': expected['local_gb'] - disk_used, "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb, 'running_vms': 1, 'vcpus_used': 1, 'pci_device_pools': objects.PciDevicePoolList(), }) with mock.patch.object(self.rt, '_update') as update_mock: with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, None) update_mock.assert_called_once_with(self.elevated) self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) expected_updated = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) 
expected_updated['pci_device_pools'] = objects.PciDevicePoolList() self.instance.vm_state = vm_states.SHELVED_OFFLOADED with mock.patch.object(self.rt, '_update') as update_mock: self.rt.update_usage(self.ctx, self.instance) self.assertTrue(obj_base.obj_equal_prims(expected_updated, self.rt.compute_node)) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim(self, migr_mock, pci_mock): self.assertFalse(self.rt.disabled) pci_mock.return_value = objects.InstancePCIRequests(requests=[]) disk_used = self.instance.root_gb + self.instance.ephemeral_gb expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected.update({ 'local_gb_used': disk_used, 'memory_mb_used': self.instance.memory_mb, 'free_disk_gb': expected['local_gb'] - disk_used, "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb, 'running_vms': 1, 'vcpus_used': 1, 'pci_device_pools': objects.PciDevicePoolList(), }) with mock.patch.object(self.rt, '_update') as update_mock: with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, None) update_mock.assert_called_once_with(self.elevated) self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) @mock.patch('nova.pci.stats.PciDeviceStats.support_requests', return_value=True) @mock.patch('nova.pci.manager.PciDevTracker.claim_instance') @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim_with_pci(self, migr_mock, pci_mock, pci_manager_mock, pci_stats_mock): # Test that a claim involving PCI requests correctly claims # PCI devices on the host and sends an updated pci_device_pools # attribute of the ComputeNode object. self.assertFalse(self.rt.disabled) # TODO(jaypipes): Remove once the PCI tracker is always created # upon the resource tracker being initialized... 
self.rt.pci_tracker = pci_manager.PciDevTracker(mock.sentinel.ctx) pci_pools = objects.PciDevicePoolList() pci_manager_mock.return_value = pci_pools request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': 'v', 'product_id': 'p'}]) pci_mock.return_value = objects.InstancePCIRequests(requests=[request]) disk_used = self.instance.root_gb + self.instance.ephemeral_gb expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected.update({ 'local_gb_used': disk_used, 'memory_mb_used': self.instance.memory_mb, 'free_disk_gb': expected['local_gb'] - disk_used, "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb, 'running_vms': 1, 'vcpus_used': 1, 'pci_device_pools': pci_pools }) with mock.patch.object(self.rt, '_update') as update_mock: with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, None) update_mock.assert_called_once_with(self.elevated) pci_manager_mock.assert_called_once_with(mock.ANY, # context... self.instance) self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim_abort_context_manager(self, migr_mock, pci_mock): pci_mock.return_value = objects.InstancePCIRequests(requests=[]) self.assertEqual(0, self.rt.compute_node.local_gb_used) self.assertEqual(0, self.rt.compute_node.memory_mb_used) self.assertEqual(0, self.rt.compute_node.running_vms) mock_save = mock.MagicMock() mock_clear_numa = mock.MagicMock() @mock.patch.object(self.instance, 'save', mock_save) @mock.patch.object(self.instance, 'clear_numa_topology', mock_clear_numa) @mock.patch.object(objects.Instance, 'obj_clone', return_value=self.instance) def _doit(mock_clone): with self.rt.instance_claim(self.ctx, self.instance, None): # Raise an exception. 
Just make sure below that the abort() # method of the claim object was called (and the resulting # resources reset to the pre-claimed amounts) raise test.TestingException() self.assertRaises(test.TestingException, _doit) self.assertEqual(2, mock_save.call_count) mock_clear_numa.assert_called_once_with() self.assertIsNone(self.instance.host) self.assertIsNone(self.instance.node) # Assert that the resources claimed by the Claim() constructor # are returned to the resource tracker due to the claim's abort() # method being called when triggered by the exception raised above. self.assertEqual(0, self.rt.compute_node.local_gb_used) self.assertEqual(0, self.rt.compute_node.memory_mb_used) self.assertEqual(0, self.rt.compute_node.running_vms) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim_abort(self, migr_mock, pci_mock): pci_mock.return_value = objects.InstancePCIRequests(requests=[]) disk_used = self.instance.root_gb + self.instance.ephemeral_gb @mock.patch.object(objects.Instance, 'obj_clone', return_value=self.instance) @mock.patch.object(self.instance, 'save') def _claim(mock_save, mock_clone): return self.rt.instance_claim(self.ctx, self.instance, None) claim = _claim() self.assertEqual(disk_used, self.rt.compute_node.local_gb_used) self.assertEqual(self.instance.memory_mb, self.rt.compute_node.memory_mb_used) self.assertEqual(1, self.rt.compute_node.running_vms) mock_save = mock.MagicMock() mock_clear_numa = mock.MagicMock() @mock.patch.object(self.instance, 'save', mock_save) @mock.patch.object(self.instance, 'clear_numa_topology', mock_clear_numa) def _abort(): claim.abort() _abort() mock_save.assert_called_once_with() mock_clear_numa.assert_called_once_with() self.assertIsNone(self.instance.host) self.assertIsNone(self.instance.node) self.assertEqual(0, self.rt.compute_node.local_gb_used) self.assertEqual(0, self.rt.compute_node.memory_mb_used) 
self.assertEqual(0, self.rt.compute_node.running_vms) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim_limits(self, migr_mock, pci_mock): self.assertFalse(self.rt.disabled) pci_mock.return_value = objects.InstancePCIRequests(requests=[]) good_limits = { 'memory_mb': _COMPUTE_NODE_FIXTURES[0]['memory_mb'], 'disk_gb': _COMPUTE_NODE_FIXTURES[0]['local_gb'], 'vcpu': _COMPUTE_NODE_FIXTURES[0]['vcpus'], } for key in good_limits.keys(): bad_limits = copy.deepcopy(good_limits) bad_limits[key] = 0 self.assertRaises(exc.ComputeResourcesUnavailable, self.rt.instance_claim, self.ctx, self.instance, bad_limits) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim_numa(self, migr_mock, pci_mock): self.assertFalse(self.rt.disabled) pci_mock.return_value = objects.InstancePCIRequests(requests=[]) self.instance.numa_topology = _INSTANCE_NUMA_TOPOLOGIES['2mb'] host_topology = _NUMA_HOST_TOPOLOGIES['2mb'] self.rt.compute_node['numa_topology'] = host_topology._to_json() limits = {'numa_topology': _NUMA_LIMIT_TOPOLOGIES['2mb']} expected_numa = copy.deepcopy(host_topology) for cell in expected_numa.cells: cell.memory_usage += _2MB cell.cpu_usage += 1 with mock.patch.object(self.rt, '_update') as update_mock: with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, limits) update_mock.assert_called_once_with(self.ctx.elevated()) updated_compute_node = self.rt.compute_node new_numa = updated_compute_node['numa_topology'] new_numa = objects.NUMATopology.obj_from_db_obj(new_numa) self.assertEqualNUMAHostTopology(expected_numa, new_numa) @mock.patch('nova.objects.Instance.save') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.Instance.get_by_uuid') 
@mock.patch('nova.objects.InstanceList.get_by_host_and_node') @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') class TestMoveClaim(BaseTestCase): def setUp(self): super(TestMoveClaim, self).setUp() self._setup_rt() self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) self.instance = _INSTANCE_FIXTURES[0].obj_clone() self.flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1] self.limits = {} # not using mock.sentinel.ctx because resize_claim calls #elevated self.ctx = mock.MagicMock() self.elevated = mock.MagicMock() self.ctx.elevated.return_value = self.elevated # Initialise extensible resource trackers self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) with test.nested( mock.patch('nova.objects.InstanceList.get_by_host_and_node'), mock.patch('nova.objects.MigrationList.' 'get_in_progress_by_host_and_node') ) as (inst_list_mock, migr_mock): inst_list_mock.return_value = objects.InstanceList(objects=[]) migr_mock.return_value = objects.MigrationList(objects=[]) self.rt.update_available_resource(self.ctx) def register_mocks(self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): pci_mock.return_value = objects.InstancePCIRequests(requests=[]) self.inst_list_mock = inst_list_mock self.inst_by_uuid = inst_by_uuid self.migr_mock = migr_mock self.inst_save_mock = inst_save_mock def audit(self, rt, instances, migrations, migr_inst): self.inst_list_mock.return_value = \ objects.InstanceList(objects=instances) self.migr_mock.return_value = \ objects.MigrationList(objects=migrations) self.inst_by_uuid.return_value = migr_inst rt.update_available_resource(self.ctx) def assertEqual(self, expected, actual): if type(expected) != dict or type(actual) != dict: super(TestMoveClaim, self).assertEqual(expected, actual) return fail = False for k, e in expected.items(): a = actual[k] if e != a: print("%s: %s != %s" % (k, e, a)) fail = True if fail: self.fail() def adjust_expected(self, expected, flavor): disk_used = flavor['root_gb'] + 
flavor['ephemeral_gb'] expected.free_disk_gb -= disk_used expected.local_gb_used += disk_used expected.free_ram_mb -= flavor['memory_mb'] expected.memory_mb_used += flavor['memory_mb'] expected.vcpus_used += flavor['vcpus'] @mock.patch('nova.objects.Flavor.get_by_id') def test_claim(self, flavor_mock, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): """Resize self.instance and check that the expected quantities of each resource have been consumed. """ self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock) self.driver_mock.get_host_ip_addr.return_value = "fake-ip" flavor_mock.return_value = objects.Flavor(**self.flavor) mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid] self.instance.migration_context = mig_context_obj expected = copy.deepcopy(self.rt.compute_node) self.adjust_expected(expected, self.flavor) create_mig_mock = mock.patch.object(self.rt, '_create_migration') mig_ctxt_mock = mock.patch('nova.objects.MigrationContext', return_value=mig_context_obj) with create_mig_mock as migr_mock, mig_ctxt_mock as ctxt_mock: migr_mock.return_value = _MIGRATION_FIXTURES['source-only'] claim = self.rt.resize_claim( self.ctx, self.instance, self.flavor, None) self.assertEqual(1, ctxt_mock.call_count) self.assertIsInstance(claim, claims.MoveClaim) inst_save_mock.assert_called_once_with() self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) def test_claim_abort(self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): # Resize self.instance and check that the expected quantities of each # resource have been consumed. The abort the resize claim and check # that the resources have been set back to their original values. 
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock) self.driver_mock.get_host_ip_addr.return_value = "fake-host" migr_obj = _MIGRATION_FIXTURES['dest-only'] self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']] mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid] self.instance.migration_context = mig_context_obj self.flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2] with mock.patch.object(self.rt, '_create_migration') as migr_mock: migr_mock.return_value = migr_obj claim = self.rt.resize_claim( self.ctx, self.instance, self.flavor, None) self.assertIsInstance(claim, claims.MoveClaim) self.assertEqual(5, self.rt.compute_node.local_gb_used) self.assertEqual(256, self.rt.compute_node.memory_mb_used) self.assertEqual(1, len(self.rt.tracked_migrations)) with mock.patch('nova.objects.Instance.' 'drop_migration_context') as drop_migr_mock: claim.abort() drop_migr_mock.assert_called_once_with() self.assertEqual(0, self.rt.compute_node.local_gb_used) self.assertEqual(0, self.rt.compute_node.memory_mb_used) self.assertEqual(0, len(self.rt.tracked_migrations)) def test_same_host(self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): """Resize self.instance to the same host but with a different flavor. Then abort the claim. Check that the same amount of resources are available afterwards as we started with. 
""" self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock) migr_obj = _MIGRATION_FIXTURES['source-and-dest'] self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']] self.instance._context = self.ctx mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid] self.instance.migration_context = mig_context_obj with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, None) expected = copy.deepcopy(self.rt.compute_node) create_mig_mock = mock.patch.object(self.rt, '_create_migration') mig_ctxt_mock = mock.patch('nova.objects.MigrationContext', return_value=mig_context_obj) with create_mig_mock as migr_mock, mig_ctxt_mock as ctxt_mock: migr_mock.return_value = migr_obj claim = self.rt.resize_claim(self.ctx, self.instance, _INSTANCE_TYPE_OBJ_FIXTURES[1], None) self.assertEqual(1, ctxt_mock.call_count) self.audit(self.rt, [self.instance], [migr_obj], self.instance) inst_save_mock.assert_called_once_with() self.assertNotEqual(expected, self.rt.compute_node) claim.instance.migration_context = mig_context_obj with mock.patch('nova.objects.MigrationContext._destroy') as destroy_m: claim.abort() self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) destroy_m.assert_called_once_with(self.ctx, claim.instance.uuid) def test_revert_reserve_source( self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): """Check that the source node of an instance migration reserves resources until the migration has completed, even if the migration is reverted. 
""" self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock) # Get our migrations, instances and itypes in a row src_migr = _MIGRATION_FIXTURES['source-only'] src_instance = ( _MIGRATION_INSTANCE_FIXTURES[src_migr['instance_uuid']].obj_clone() ) src_instance.migration_context = ( _MIGRATION_CONTEXT_FIXTURES[src_instance.uuid]) old_itype = _INSTANCE_TYPE_FIXTURES[src_migr['old_instance_type_id']] dst_migr = _MIGRATION_FIXTURES['dest-only'] dst_instance = ( _MIGRATION_INSTANCE_FIXTURES[dst_migr['instance_uuid']].obj_clone() ) new_itype = _INSTANCE_TYPE_FIXTURES[dst_migr['new_instance_type_id']] dst_instance.migration_context = ( _MIGRATION_CONTEXT_FIXTURES[dst_instance.uuid]) # Set up the destination resource tracker # update_available_resource to initialise extensible resource trackers src_rt = self.rt (dst_rt, _, _) = setup_rt("other-host", "other-node") dst_rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) inst_list_mock.return_value = objects.InstanceList(objects=[]) dst_rt.update_available_resource(self.ctx) # Register the instance with dst_rt expected = copy.deepcopy(dst_rt.compute_node) with mock.patch.object(dst_instance, 'save'): dst_rt.instance_claim(self.ctx, dst_instance) self.adjust_expected(expected, new_itype) expected.stats = {'num_task_resize_migrating': 1, 'io_workload': 1, 'num_instances': 1, 'num_proj_fake-project': 1, 'num_vm_active': 1, 'num_os_type_fake-os': 1} expected.current_workload = 1 expected.running_vms = 1 self.assertTrue(obj_base.obj_equal_prims(expected, dst_rt.compute_node)) # Provide the migration via a mock, then audit dst_rt to check that # the instance + migration resources are not double-counted self.audit(dst_rt, [dst_instance], [dst_migr], dst_instance) self.assertTrue(obj_base.obj_equal_prims(expected, dst_rt.compute_node)) # Audit src_rt with src_migr expected = copy.deepcopy(src_rt.compute_node) self.adjust_expected(expected, old_itype) self.audit(src_rt, [], [src_migr], src_instance) 
self.assertTrue(obj_base.obj_equal_prims(expected, src_rt.compute_node)) # Flag the instance as reverting and re-audit src_instance['vm_state'] = vm_states.RESIZED src_instance['task_state'] = task_states.RESIZE_REVERTING self.audit(src_rt, [], [src_migr], src_instance) self.assertTrue(obj_base.obj_equal_prims(expected, src_rt.compute_node)) def test_update_available_resources_migration_no_context(self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): """When migrating onto older nodes - it is possible for the migration_context record to be missing. Confirm resource audit works regardless. """ self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock) migr_obj = _MIGRATION_FIXTURES['source-and-dest'] self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']] self.instance.migration_context = None expected = copy.deepcopy(self.rt.compute_node) self.adjust_expected(expected, self.flavor) self.audit(self.rt, [], [migr_obj], self.instance) self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) def test_dupe_filter(self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock) migr_obj = _MIGRATION_FIXTURES['source-and-dest'] # This is good enough to prevent a lazy-load; value is unimportant migr_obj['updated_at'] = None self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']] self.instance.migration_context = ( _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]) self.audit(self.rt, [], [migr_obj, migr_obj], self.instance) self.assertEqual(1, len(self.rt.tracked_migrations)) class TestInstanceInResizeState(test.NoDBTestCase): def test_active_suspending(self): instance = objects.Instance(vm_state=vm_states.ACTIVE, task_state=task_states.SUSPENDING) self.assertFalse(resource_tracker._instance_in_resize_state(instance)) def test_resized_suspending(self): instance = 
objects.Instance(vm_state=vm_states.RESIZED, task_state=task_states.SUSPENDING) self.assertTrue(resource_tracker._instance_in_resize_state(instance)) def test_resized_resize_migrating(self): instance = objects.Instance(vm_state=vm_states.RESIZED, task_state=task_states.RESIZE_MIGRATING) self.assertTrue(resource_tracker._instance_in_resize_state(instance)) def test_resized_resize_finish(self): instance = objects.Instance(vm_state=vm_states.RESIZED, task_state=task_states.RESIZE_FINISH) self.assertTrue(resource_tracker._instance_in_resize_state(instance)) nova-13.0.0/nova/tests/unit/compute/test_resources.py0000664000567000056710000002143712701407773024101 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for the compute extra resources framework.""" from oslo_config import cfg from stevedore import extension from stevedore import named from nova.compute import resources from nova.compute.resources import base from nova import context from nova.objects import flavor as flavor_obj from nova import test CONF = cfg.CONF class FakeResourceHandler(resources.ResourceHandler): def __init__(self, extensions): self._mgr = \ named.NamedExtensionManager.make_test_instance(extensions) class FakeResource(base.Resource): def __init__(self): self.total_res = 0 self.used_res = 0 def _get_requested(self, usage): if 'extra_specs' not in usage: return if self.resource_name not in usage['extra_specs']: return req = usage['extra_specs'][self.resource_name] return int(req) def _get_limit(self, limits): if self.resource_name not in limits: return limit = limits[self.resource_name] return int(limit) def reset(self, resources, driver): self.total_res = 0 self.used_res = 0 def test(self, usage, limits): requested = self._get_requested(usage) if not requested: return limit = self._get_limit(limits) if not limit: return free = limit - self.used_res if requested <= free: return else: return ('Free %(free)d < requested %(requested)d ' % {'free': free, 'requested': requested}) def add_instance(self, usage): requested = self._get_requested(usage) if requested: self.used_res += requested def remove_instance(self, usage): requested = self._get_requested(usage) if requested: self.used_res -= requested def write(self, resources): pass def report_free(self): return "Free %s" % (self.total_res - self.used_res) class ResourceA(FakeResource): def reset(self, resources, driver): # ResourceA uses a configuration option self.total_res = int(CONF.resA) self.used_res = 0 self.resource_name = 'resource:resA' def write(self, resources): resources['resA'] = self.total_res resources['used_resA'] = self.used_res class ResourceB(FakeResource): def reset(self, resources, driver): # ResourceB uses resource 
details passed in parameter resources self.total_res = resources['resB'] self.used_res = 0 self.resource_name = 'resource:resB' def write(self, resources): resources['resB'] = self.total_res resources['used_resB'] = self.used_res def fake_flavor_obj(**updates): flavor = flavor_obj.Flavor() flavor.id = 1 flavor.name = 'fakeflavor' flavor.memory_mb = 8000 flavor.vcpus = 3 flavor.root_gb = 11 flavor.ephemeral_gb = 4 flavor.swap = 0 flavor.rxtx_factor = 1.0 flavor.vcpu_weight = 1 if updates: flavor.update(updates) return flavor class BaseTestCase(test.NoDBTestCase): def _initialize_used_res_counter(self): # Initialize the value for the used resource for ext in self.r_handler._mgr.extensions: ext.obj.used_res = 0 def setUp(self): super(BaseTestCase, self).setUp() # initialize flavors and stub get_by_id to # get flavors from here self._flavors = {} self.ctxt = context.get_admin_context() # Create a flavor without extra_specs defined _flavor_id = 1 _flavor = fake_flavor_obj(id=_flavor_id) self._flavors[_flavor_id] = _flavor # Create a flavor with extra_specs defined _flavor_id = 2 requested_resA = 5 requested_resB = 7 requested_resC = 7 _extra_specs = {'resource:resA': requested_resA, 'resource:resB': requested_resB, 'resource:resC': requested_resC} _flavor = fake_flavor_obj(id=_flavor_id, extra_specs=_extra_specs) self._flavors[_flavor_id] = _flavor # create fake resource extensions and resource handler _extensions = [ extension.Extension('resA', None, ResourceA, ResourceA()), extension.Extension('resB', None, ResourceB, ResourceB()), ] self.r_handler = FakeResourceHandler(_extensions) # Resources details can be passed to each plugin or can be specified as # configuration options driver_resources = {'resB': 5} CONF.resA = '10' # initialise the resources self.r_handler.reset_resources(driver_resources, None) def test_update_from_instance_with_extra_specs(self): # Flavor with extra_specs _flavor_id = 2 sign = 1 self.r_handler.update_from_instance(self._flavors[_flavor_id], 
sign) expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA'] expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB'] self.assertEqual(int(expected_resA), self.r_handler._mgr['resA'].obj.used_res) self.assertEqual(int(expected_resB), self.r_handler._mgr['resB'].obj.used_res) def test_update_from_instance_without_extra_specs(self): # Flavor id without extra spec _flavor_id = 1 self._initialize_used_res_counter() self.r_handler.resource_list = [] sign = 1 self.r_handler.update_from_instance(self._flavors[_flavor_id], sign) self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res) self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res) def test_write_resources(self): self._initialize_used_res_counter() extra_resources = {} expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0} self.r_handler.write_resources(extra_resources) self.assertEqual(expected, extra_resources) def test_test_resources_without_extra_specs(self): limits = {} # Flavor id without extra_specs flavor = self._flavors[1] result = self.r_handler.test_resources(flavor, limits) self.assertEqual([None, None], result) def test_test_resources_with_limits_for_different_resource(self): limits = {'resource:resC': 20} # Flavor id with extra_specs flavor = self._flavors[2] result = self.r_handler.test_resources(flavor, limits) self.assertEqual([None, None], result) def test_passing_test_resources(self): limits = {'resource:resA': 10, 'resource:resB': 20} # Flavor id with extra_specs flavor = self._flavors[2] self._initialize_used_res_counter() result = self.r_handler.test_resources(flavor, limits) self.assertEqual([None, None], result) def test_failing_test_resources_for_single_resource(self): limits = {'resource:resA': 4, 'resource:resB': 20} # Flavor id with extra_specs flavor = self._flavors[2] self._initialize_used_res_counter() result = self.r_handler.test_resources(flavor, limits) expected = ['Free 4 < requested 5 ', None] self.assertEqual(sorted(expected, 
key=str), sorted(result, key=str)) def test_empty_resource_handler(self): """An empty resource handler has no resource extensions, should have no effect, and should raise no exceptions. """ empty_r_handler = FakeResourceHandler([]) resources = {} empty_r_handler.reset_resources(resources, None) flavor = self._flavors[1] sign = 1 empty_r_handler.update_from_instance(flavor, sign) limits = {} test_result = empty_r_handler.test_resources(flavor, limits) self.assertEqual([], test_result) sign = -1 empty_r_handler.update_from_instance(flavor, sign) extra_resources = {} expected_extra_resources = extra_resources empty_r_handler.write_resources(extra_resources) self.assertEqual(expected_extra_resources, extra_resources) empty_r_handler.report_free_resources() nova-13.0.0/nova/tests/unit/compute/test_multiple_nodes.py0000664000567000056710000001600512701410011025060 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for compute service with multiple compute nodes.""" from oslo_utils import importutils import nova.conf from nova import context from nova import objects from nova import test from nova.tests import uuidsentinel from nova.virt import fake CONF = nova.conf.CONF CONF.import_opt('compute_manager', 'nova.service') class BaseTestCase(test.TestCase): def tearDown(self): fake.restore_nodes() super(BaseTestCase, self).tearDown() class FakeDriverSingleNodeTestCase(BaseTestCase): def setUp(self): super(FakeDriverSingleNodeTestCase, self).setUp() self.driver = fake.FakeDriver(virtapi=None) fake.set_nodes(['xyz']) def test_get_available_resource(self): res = self.driver.get_available_resource('xyz') self.assertEqual(res['hypervisor_hostname'], 'xyz') class FakeDriverMultiNodeTestCase(BaseTestCase): def setUp(self): super(FakeDriverMultiNodeTestCase, self).setUp() self.driver = fake.FakeDriver(virtapi=None) fake.set_nodes(['aaa', 'bbb']) def test_get_available_resource(self): res_a = self.driver.get_available_resource('aaa') self.assertEqual(res_a['hypervisor_hostname'], 'aaa') res_b = self.driver.get_available_resource('bbb') self.assertEqual(res_b['hypervisor_hostname'], 'bbb') res_x = self.driver.get_available_resource('xxx') self.assertEqual(res_x, {}) class MultiNodeComputeTestCase(BaseTestCase): def setUp(self): super(MultiNodeComputeTestCase, self).setUp() self.flags(compute_driver='nova.virt.fake.FakeDriver') self.compute = importutils.import_object(CONF.compute_manager) self.flags(use_local=True, group='conductor') self.conductor = self.start_service('conductor', manager=CONF.conductor.manager) def fake_get_compute_nodes_in_db(context, use_slave=False): fake_compute_nodes = [{'local_gb': 259, 'uuid': uuidsentinel.fake_compute, 'vcpus_used': 0, 'deleted': 0, 'hypervisor_type': 'powervm', 'created_at': '2013-04-01T00:27:06.000000', 'local_gb_used': 0, 'updated_at': '2013-04-03T00:35:41.000000', 'hypervisor_hostname': 'fake_phyp1', 'memory_mb_used': 512, 
'memory_mb': 131072, 'current_workload': 0, 'vcpus': 16, 'cpu_info': 'ppc64,powervm,3940', 'running_vms': 0, 'free_disk_gb': 259, 'service_id': 7, 'hypervisor_version': 7, 'disk_available_least': 265856, 'deleted_at': None, 'free_ram_mb': 130560, 'metrics': '', 'numa_topology': '', 'stats': '', 'id': 2, 'host': 'fake_phyp1', 'cpu_allocation_ratio': None, 'ram_allocation_ratio': None, 'disk_allocation_ratio': None, 'host_ip': '127.0.0.1'}] return [objects.ComputeNode._from_db_object( context, objects.ComputeNode(), cn) for cn in fake_compute_nodes] def fake_compute_node_delete(context, compute_node_id): self.assertEqual(2, compute_node_id) self.stubs.Set(self.compute, '_get_compute_nodes_in_db', fake_get_compute_nodes_in_db) self.stub_out('nova.db.compute_node_delete', fake_compute_node_delete) def test_update_available_resource_add_remove_node(self): ctx = context.get_admin_context() fake.set_nodes(['A', 'B', 'C']) self.compute.update_available_resource(ctx) self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ['A', 'B', 'C']) fake.set_nodes(['A', 'B']) self.compute.update_available_resource(ctx) self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ['A', 'B']) fake.set_nodes(['A', 'B', 'C']) self.compute.update_available_resource(ctx) self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ['A', 'B', 'C']) def test_compute_manager_removes_deleted_node(self): ctx = context.get_admin_context() fake.set_nodes(['A', 'B']) fake_compute_nodes = [ objects.ComputeNode( context=ctx, hypervisor_hostname='A', id=2), objects.ComputeNode( context=ctx, hypervisor_hostname='B', id=3), ] def fake_get_compute_nodes_in_db(context, use_slave=False): return fake_compute_nodes def fake_compute_node_delete(context, compute_node_id): for cn in fake_compute_nodes: if compute_node_id == cn.id: fake_compute_nodes.remove(cn) return self.stubs.Set(self.compute, '_get_compute_nodes_in_db', fake_get_compute_nodes_in_db) 
self.stub_out('nova.db.compute_node_delete', fake_compute_node_delete) self.compute.update_available_resource(ctx) # Verify nothing is deleted if driver and db compute nodes match self.assertEqual(len(fake_compute_nodes), 2) self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ['A', 'B']) fake.set_nodes(['A']) self.compute.update_available_resource(ctx) # Verify B gets deleted since now only A is reported by driver self.assertEqual(len(fake_compute_nodes), 1) self.assertEqual(fake_compute_nodes[0]['hypervisor_hostname'], 'A') self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ['A']) nova-13.0.0/nova/tests/unit/compute/test_stats.py0000664000567000056710000002232312701407773023220 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for compute node stats.""" from nova.compute import stats from nova.compute import task_states from nova.compute import vm_states from nova import test from nova.tests.unit import fake_instance from nova.tests import uuidsentinel as uuids class StatsTestCase(test.NoDBTestCase): def setUp(self): super(StatsTestCase, self).setUp() self.stats = stats.Stats() def _fake_object(self, updates): return fake_instance.fake_instance_obj(None, **updates) def _create_instance(self, values=None): instance = { "os_type": "Linux", "project_id": "1234", "task_state": None, "vm_state": vm_states.BUILDING, "vcpus": 1, "uuid": uuids.stats_linux_instance_1, } if values: instance.update(values) return self._fake_object(instance) def test_os_type_count(self): os_type = "Linux" self.assertEqual(0, self.stats.num_os_type(os_type)) self.stats._increment("num_os_type_" + os_type) self.stats._increment("num_os_type_" + os_type) self.stats._increment("num_os_type_Vax") self.assertEqual(2, self.stats.num_os_type(os_type)) self.stats["num_os_type_" + os_type] -= 1 self.assertEqual(1, self.stats.num_os_type(os_type)) def test_update_project_count(self): proj_id = "1234" def _get(): return self.stats.num_instances_for_project(proj_id) self.assertEqual(0, _get()) self.stats._increment("num_proj_" + proj_id) self.assertEqual(1, _get()) self.stats["num_proj_" + proj_id] -= 1 self.assertEqual(0, _get()) def test_instance_count(self): self.assertEqual(0, self.stats.num_instances) for i in range(5): self.stats._increment("num_instances") self.stats["num_instances"] -= 1 self.assertEqual(4, self.stats.num_instances) def test_add_stats_for_instance(self): instance = { "os_type": "Linux", "project_id": "1234", "task_state": None, "vm_state": vm_states.BUILDING, "vcpus": 3, "uuid": uuids.stats_linux_instance_1, } self.stats.update_stats_for_instance(self._fake_object(instance)) instance = { "os_type": "FreeBSD", "project_id": "1234", "task_state": task_states.SCHEDULING, "vm_state": None, "vcpus": 
1, "uuid": uuids.stats_freebsd_instance, } self.stats.update_stats_for_instance(self._fake_object(instance)) instance = { "os_type": "Linux", "project_id": "2345", "task_state": task_states.SCHEDULING, "vm_state": vm_states.BUILDING, "vcpus": 2, "uuid": uuids.stats_linux_instance_2, } self.stats.update_stats_for_instance(self._fake_object(instance)) instance = { "os_type": "Linux", "project_id": "2345", "task_state": task_states.RESCUING, "vm_state": vm_states.ACTIVE, "vcpus": 2, "uuid": uuids.stats_linux_instance_3, } self.stats.update_stats_for_instance(self._fake_object(instance)) instance = { "os_type": "Linux", "project_id": "2345", "task_state": task_states.UNSHELVING, "vm_state": vm_states.ACTIVE, "vcpus": 2, "uuid": uuids.stats_linux_instance_4, } self.stats.update_stats_for_instance(self._fake_object(instance)) self.assertEqual(4, self.stats.num_os_type("Linux")) self.assertEqual(1, self.stats.num_os_type("FreeBSD")) self.assertEqual(2, self.stats.num_instances_for_project("1234")) self.assertEqual(3, self.stats.num_instances_for_project("2345")) self.assertEqual(1, self.stats["num_task_None"]) self.assertEqual(2, self.stats["num_task_" + task_states.SCHEDULING]) self.assertEqual(1, self.stats["num_task_" + task_states.UNSHELVING]) self.assertEqual(1, self.stats["num_task_" + task_states.RESCUING]) self.assertEqual(1, self.stats["num_vm_None"]) self.assertEqual(2, self.stats["num_vm_" + vm_states.BUILDING]) def test_calculate_workload(self): self.stats._increment("num_task_None") self.stats._increment("num_task_" + task_states.SCHEDULING) self.stats._increment("num_task_" + task_states.SCHEDULING) self.assertEqual(2, self.stats.calculate_workload()) def test_update_stats_for_instance_no_change(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) self.stats.update_stats_for_instance(instance) # no change self.assertEqual(1, self.stats.num_instances) self.assertEqual(1, self.stats.num_instances_for_project("1234")) 
self.assertEqual(1, self.stats["num_os_type_Linux"]) self.assertEqual(1, self.stats["num_task_None"]) self.assertEqual(1, self.stats["num_vm_" + vm_states.BUILDING]) def test_update_stats_for_instance_vm_change(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) instance["vm_state"] = vm_states.PAUSED self.stats.update_stats_for_instance(instance) self.assertEqual(1, self.stats.num_instances) self.assertEqual(1, self.stats.num_instances_for_project(1234)) self.assertEqual(1, self.stats["num_os_type_Linux"]) self.assertEqual(0, self.stats["num_vm_%s" % vm_states.BUILDING]) self.assertEqual(1, self.stats["num_vm_%s" % vm_states.PAUSED]) def test_update_stats_for_instance_task_change(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) instance["task_state"] = task_states.REBUILDING self.stats.update_stats_for_instance(instance) self.assertEqual(1, self.stats.num_instances) self.assertEqual(1, self.stats.num_instances_for_project("1234")) self.assertEqual(1, self.stats["num_os_type_Linux"]) self.assertEqual(0, self.stats["num_task_None"]) self.assertEqual(1, self.stats["num_task_%s" % task_states.REBUILDING]) def test_update_stats_for_instance_deleted(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) self.assertEqual(1, self.stats.num_instances_for_project("1234")) instance["vm_state"] = vm_states.DELETED self.stats.update_stats_for_instance(instance) self.assertEqual(0, self.stats.num_instances) self.assertEqual(0, self.stats.num_instances_for_project("1234")) self.assertEqual(0, self.stats.num_os_type("Linux")) self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING]) def test_update_stats_for_instance_offloaded(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) self.assertEqual(1, self.stats.num_instances_for_project("1234")) instance["vm_state"] = vm_states.SHELVED_OFFLOADED 
self.stats.update_stats_for_instance(instance) self.assertEqual(0, self.stats.num_instances) self.assertEqual(0, self.stats.num_instances_for_project("1234")) self.assertEqual(0, self.stats.num_os_type("Linux")) self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING]) def test_io_workload(self): vms = [vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED] tasks = [task_states.RESIZE_MIGRATING, task_states.REBUILDING, task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT, task_states.IMAGE_BACKUP, task_states.RESCUING, task_states.UNSHELVING, task_states.SHELVING] for state in vms: self.stats._increment("num_vm_" + state) for state in tasks: self.stats._increment("num_task_" + state) self.assertEqual(8, self.stats.io_workload) def test_io_workload_saved_to_stats(self): values = {'task_state': task_states.RESIZE_MIGRATING} instance = self._create_instance(values) self.stats.update_stats_for_instance(instance) self.assertEqual(2, self.stats["io_workload"]) def test_clear(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) self.assertNotEqual(0, len(self.stats)) self.assertEqual(1, len(self.stats.states)) self.stats.clear() self.assertEqual(0, len(self.stats)) self.assertEqual(0, len(self.stats.states)) nova-13.0.0/nova/tests/unit/compute/test_compute_api.py0000664000567000056710000050373312701410011024353 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for compute API.""" import copy import datetime import iso8601 import mock from mox3 import mox from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from oslo_utils import fixture as utils_fixture from oslo_utils import timeutils from oslo_utils import uuidutils from nova.compute import api as compute_api from nova.compute import arch from nova.compute import cells_api as compute_cells_api from nova.compute import flavors from nova.compute import instance_actions from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_mode from nova.compute import vm_states from nova import conductor from nova import context from nova import db from nova import exception from nova import objects from nova.objects import base as obj_base from nova.objects import fields as fields_obj from nova.objects import quotas as quotas_obj from nova import policy from nova import quota from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import fake_volume from nova.tests.unit.image import fake as fake_image from nova.tests.unit import matchers from nova.tests.unit.objects import test_flavor from nova.tests.unit.objects import test_migration from nova.tests.unit.objects import test_service from nova.tests import uuidsentinel as uuids from nova import utils from nova.volume import cinder FAKE_IMAGE_REF = 'fake-image-ref' NODENAME = 'fakenode1' SHELVED_IMAGE = 'fake-shelved-image' SHELVED_IMAGE_NOT_FOUND = 'fake-shelved-image-notfound' SHELVED_IMAGE_NOT_AUTHORIZED = 'fake-shelved-image-not-authorized' SHELVED_IMAGE_EXCEPTION = 'fake-shelved-image-exception' class _ComputeAPIUnitTestMixIn(object): def setUp(self): super(_ComputeAPIUnitTestMixIn, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.compute_api = compute_api.API() self.context = 
context.RequestContext(self.user_id, self.project_id) def _get_vm_states(self, exclude_states=None): vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED, vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED, vm_states.SHELVED_OFFLOADED]) if not exclude_states: exclude_states = set() return vm_state - exclude_states def _create_flavor(self, **updates): flavor = {'id': 1, 'flavorid': 1, 'name': 'm1.tiny', 'memory_mb': 512, 'vcpus': 1, 'vcpu_weight': None, 'root_gb': 1, 'ephemeral_gb': 0, 'rxtx_factor': 1, 'swap': 0, 'deleted': 0, 'disabled': False, 'is_public': True, 'deleted_at': None, 'created_at': datetime.datetime(2012, 1, 19, 18, 49, 30, 877329), 'updated_at': None, } if updates: flavor.update(updates) return objects.Flavor._from_db_object(self.context, objects.Flavor(), flavor) def _create_instance_obj(self, params=None, flavor=None): """Create a test instance.""" if not params: params = {} if flavor is None: flavor = self._create_flavor() now = timeutils.utcnow() instance = objects.Instance() instance.metadata = {} instance.metadata.update(params.pop('metadata', {})) instance.system_metadata = params.pop('system_metadata', {}) instance._context = self.context instance.id = 1 instance.uuid = uuidutils.generate_uuid() instance.cell_name = 'api!child' instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.image_ref = FAKE_IMAGE_REF instance.reservation_id = 'r-fakeres' instance.user_id = self.user_id instance.project_id = self.project_id instance.host = 'fake_host' instance.node = NODENAME instance.instance_type_id = flavor.id instance.ami_launch_index = 0 instance.memory_mb = 0 instance.vcpus = 0 instance.root_gb = 0 instance.ephemeral_gb = 0 instance.architecture = arch.X86_64 instance.os_type = 'Linux' instance.locked = False instance.created_at = now instance.updated_at = now instance.launched_at = now 
instance.disable_terminate = False instance.info_cache = objects.InstanceInfoCache() instance.flavor = flavor instance.old_flavor = instance.new_flavor = None if params: instance.update(params) instance.obj_reset_changes() return instance def _obj_to_list_obj(self, list_obj, obj): list_obj.objects = [] list_obj.objects.append(obj) list_obj._context = self.context list_obj.obj_reset_changes() return list_obj def test_create_quota_exceeded_messages(self): image_href = "image_href" image_id = 0 instance_type = self._create_flavor() self.mox.StubOutWithMock(self.compute_api, "_get_image") self.mox.StubOutWithMock(quota.QUOTAS, "limit_check") self.mox.StubOutWithMock(quota.QUOTAS, "reserve") quotas = {'instances': 1, 'cores': 1, 'ram': 1} usages = {r: {'in_use': 1, 'reserved': 1} for r in ['instances', 'cores', 'ram']} quota_exception = exception.OverQuota(quotas=quotas, usages=usages, overs=['instances']) for _unused in range(2): self.compute_api._get_image(self.context, image_href).AndReturn( (image_id, {})) quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int), project_id=mox.IgnoreArg(), user_id=mox.IgnoreArg()) quota.QUOTAS.reserve(self.context, instances=40, cores=mox.IsA(int), expire=mox.IgnoreArg(), project_id=mox.IgnoreArg(), user_id=mox.IgnoreArg(), ram=mox.IsA(int)).AndRaise(quota_exception) self.mox.ReplayAll() for min_count, message in [(20, '20-40'), (40, '40')]: try: self.compute_api.create(self.context, instance_type, "image_href", min_count=min_count, max_count=40) except exception.TooManyInstances as e: self.assertEqual(message, e.kwargs['req']) else: self.fail("Exception not raised") def _test_create_max_net_count(self, max_net_count, min_count, max_count): with test.nested( mock.patch.object(self.compute_api, '_get_image', return_value=(None, {})), mock.patch.object(self.compute_api, '_check_auto_disk_config'), mock.patch.object(self.compute_api, '_validate_and_build_base_options', return_value=({}, max_net_count)) ) as ( get_image, 
check_auto_disk_config, validate_and_build_base_options ): self.assertRaises(exception.PortLimitExceeded, self.compute_api.create, self.context, 'fake_flavor', 'image_id', min_count=min_count, max_count=max_count) def test_max_net_count_zero(self): # Test when max_net_count is zero. max_net_count = 0 min_count = 2 max_count = 3 self._test_create_max_net_count(max_net_count, min_count, max_count) def test_max_net_count_less_than_min_count(self): # Test when max_net_count is nonzero but less than min_count. max_net_count = 1 min_count = 2 max_count = 3 self._test_create_max_net_count(max_net_count, min_count, max_count) def test_specified_port_and_multiple_instances_neutronv2(self): # Tests that if port is specified there is only one instance booting # (i.e max_count == 1) as we can't share the same port across multiple # instances. self.flags(use_neutron=True) port = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' address = '10.0.0.1' min_count = 1 max_count = 2 requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(address=address, port_id=port)]) self.assertRaises(exception.MultiplePortsNotApplicable, self.compute_api.create, self.context, 'fake_flavor', 'image_id', min_count=min_count, max_count=max_count, requested_networks=requested_networks) def _test_specified_ip_and_multiple_instances_helper(self, requested_networks): # Tests that if ip is specified there is only one instance booting # (i.e max_count == 1) min_count = 1 max_count = 2 self.assertRaises(exception.InvalidFixedIpAndMaxCountRequest, self.compute_api.create, self.context, "fake_flavor", 'image_id', min_count=min_count, max_count=max_count, requested_networks=requested_networks) def test_specified_ip_and_multiple_instances(self): network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' address = '10.0.0.1' requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=network, address=address)]) self._test_specified_ip_and_multiple_instances_helper( 
requested_networks) def test_specified_ip_and_multiple_instances_neutronv2(self): self.flags(use_neutron=True) network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' address = '10.0.0.1' requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=network, address=address)]) self._test_specified_ip_and_multiple_instances_helper( requested_networks) @mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name') def test_create_volume_bdm_call_reserve_dev_name(self, mock_reserve): bdm = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'volume_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': 1, })) mock_reserve.return_value = bdm instance = self._create_instance_obj() result = self.compute_api._create_volume_bdm(self.context, instance, 'vda', '1', None, None) self.assertTrue(mock_reserve.called) self.assertEqual(result, bdm) @mock.patch.object(objects.BlockDeviceMapping, 'create') def test_create_volume_bdm_local_creation(self, bdm_create): instance = self._create_instance_obj() volume_id = 'fake-vol-id' bdm = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'instance_uuid': instance.uuid, 'volume_id': volume_id, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': None, 'disk_bus': None, 'device_type': None })) result = self.compute_api._create_volume_bdm(self.context, instance, '/dev/vda', volume_id, None, None, is_local_creation=True) self.assertEqual(result.instance_uuid, bdm.instance_uuid) self.assertIsNone(result.device_name) self.assertEqual(result.volume_id, bdm.volume_id) self.assertTrue(bdm_create.called) @mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name') @mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume') def test_attach_volume(self, mock_attach, mock_reserve): instance = self._create_instance_obj() volume = fake_volume.fake_volume(1, 
'test-vol', 'test-vol', None, None, None, None, None) fake_bdm = mock.MagicMock(spec=objects.BlockDeviceMapping) mock_reserve.return_value = fake_bdm mock_volume_api = mock.patch.object(self.compute_api, 'volume_api', mock.MagicMock(spec=cinder.API)) with mock_volume_api as mock_v_api: mock_v_api.get.return_value = volume self.compute_api.attach_volume( self.context, instance, volume['id']) mock_v_api.check_attach.assert_called_once_with(self.context, volume, instance=instance) mock_v_api.reserve_volume.assert_called_once_with(self.context, volume['id']) mock_attach.assert_called_once_with(self.context, instance, fake_bdm) @mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name') @mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume') def test_attach_volume_reserve_fails(self, mock_attach, mock_reserve): instance = self._create_instance_obj() volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol', None, None, None, None, None) fake_bdm = mock.MagicMock(spec=objects.BlockDeviceMapping) mock_reserve.return_value = fake_bdm mock_volume_api = mock.patch.object(self.compute_api, 'volume_api', mock.MagicMock(spec=cinder.API)) with mock_volume_api as mock_v_api: mock_v_api.get.return_value = volume mock_v_api.reserve_volume.side_effect = test.TestingException() self.assertRaises(test.TestingException, self.compute_api.attach_volume, self.context, instance, volume['id']) mock_v_api.check_attach.assert_called_once_with(self.context, volume, instance=instance) mock_v_api.reserve_volume.assert_called_once_with(self.context, volume['id']) self.assertEqual(0, mock_attach.call_count) fake_bdm.destroy.assert_called_once_with() def test_suspend(self): # Ensure instance can be suspended. 
instance = self._create_instance_obj() self.assertEqual(instance.vm_state, vm_states.ACTIVE) self.assertIsNone(instance.task_state) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'suspend_instance') instance.save(expected_task_state=[None]) self.compute_api._record_action_start(self.context, instance, instance_actions.SUSPEND) rpcapi.suspend_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.suspend(self.context, instance) self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertEqual(task_states.SUSPENDING, instance.task_state) def _test_suspend_fails(self, vm_state): params = dict(vm_state=vm_state) instance = self._create_instance_obj(params=params) self.assertIsNone(instance.task_state) self.assertRaises(exception.InstanceInvalidState, self.compute_api.suspend, self.context, instance) def test_suspend_fails_invalid_states(self): invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE])) for state in invalid_vm_states: self._test_suspend_fails(state) def test_resume(self): # Ensure instance can be resumed (if suspended). 
instance = self._create_instance_obj( params=dict(vm_state=vm_states.SUSPENDED)) self.assertEqual(instance.vm_state, vm_states.SUSPENDED) self.assertIsNone(instance.task_state) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'resume_instance') instance.save(expected_task_state=[None]) self.compute_api._record_action_start(self.context, instance, instance_actions.RESUME) rpcapi.resume_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.resume(self.context, instance) self.assertEqual(vm_states.SUSPENDED, instance.vm_state) self.assertEqual(task_states.RESUMING, instance.task_state) def test_start(self): params = dict(vm_state=vm_states.STOPPED) instance = self._create_instance_obj(params=params) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') instance.save(expected_task_state=[None]) self.compute_api._record_action_start(self.context, instance, instance_actions.START) if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'start_instance') rpcapi.start_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.start(self.context, instance) self.assertEqual(task_states.POWERING_ON, instance.task_state) def test_start_invalid_state(self): instance = self._create_instance_obj() self.assertEqual(instance.vm_state, vm_states.ACTIVE) self.assertRaises(exception.InstanceInvalidState, self.compute_api.start, self.context, instance) def test_start_no_host(self): params = dict(vm_state=vm_states.STOPPED, host='') instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceNotReady, self.compute_api.start, self.context, instance) def _test_stop(self, vm_state, 
force=False, clean_shutdown=True): # Make sure 'progress' gets reset params = dict(task_state=None, progress=99, vm_state=vm_state) instance = self._create_instance_obj(params=params) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') instance.save(expected_task_state=[None]) self.compute_api._record_action_start(self.context, instance, instance_actions.STOP) if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'stop_instance') rpcapi.stop_instance(self.context, instance, do_cast=True, clean_shutdown=clean_shutdown) self.mox.ReplayAll() if force: self.compute_api.force_stop(self.context, instance, clean_shutdown=clean_shutdown) else: self.compute_api.stop(self.context, instance, clean_shutdown=clean_shutdown) self.assertEqual(task_states.POWERING_OFF, instance.task_state) self.assertEqual(0, instance.progress) def test_stop(self): self._test_stop(vm_states.ACTIVE) def test_stop_stopped_instance_with_bypass(self): self._test_stop(vm_states.STOPPED, force=True) def test_stop_forced_shutdown(self): self._test_stop(vm_states.ACTIVE, force=True) def test_stop_without_clean_shutdown(self): self._test_stop(vm_states.ACTIVE, clean_shutdown=False) def test_stop_forced_without_clean_shutdown(self): self._test_stop(vm_states.ACTIVE, force=True, clean_shutdown=False) def _test_stop_invalid_state(self, vm_state): params = dict(vm_state=vm_state) instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceInvalidState, self.compute_api.stop, self.context, instance) def test_stop_fails_invalid_states(self): invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE, vm_states.ERROR])) for state in invalid_vm_states: self._test_stop_invalid_state(state) def test_stop_a_stopped_inst(self): params = {'vm_state': vm_states.STOPPED} instance = self._create_instance_obj(params=params) 
self.assertRaises(exception.InstanceInvalidState, self.compute_api.stop, self.context, instance) def test_stop_no_host(self): params = {'host': ''} instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceNotReady, self.compute_api.stop, self.context, instance) @mock.patch('nova.compute.api.API._record_action_start') @mock.patch('nova.compute.rpcapi.ComputeAPI.trigger_crash_dump') def test_trigger_crash_dump(self, trigger_crash_dump, _record_action_start): instance = self._create_instance_obj() self.compute_api.trigger_crash_dump(self.context, instance) _record_action_start.assert_called_once_with(self.context, instance, instance_actions.TRIGGER_CRASH_DUMP) if self.cell_type == 'api': # cell api has not been implemented. pass else: trigger_crash_dump.assert_called_once_with(self.context, instance) self.assertIsNone(instance.task_state) def test_trigger_crash_dump_invalid_state(self): params = dict(vm_state=vm_states.STOPPED) instance = self._create_instance_obj(params) self.assertRaises(exception.InstanceInvalidState, self.compute_api.trigger_crash_dump, self.context, instance) def test_trigger_crash_dump_no_host(self): params = dict(host='') instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceNotReady, self.compute_api.trigger_crash_dump, self.context, instance) def test_trigger_crash_dump_locked(self): params = dict(locked=True) instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceIsLocked, self.compute_api.trigger_crash_dump, self.context, instance) def _test_shelve(self, vm_state=vm_states.ACTIVE, boot_from_volume=False, clean_shutdown=True): params = dict(task_state=None, vm_state=vm_state, display_name='fake-name') instance = self._create_instance_obj(params=params) with test.nested( mock.patch.object(self.compute_api, 'is_volume_backed_instance', return_value=boot_from_volume), mock.patch.object(self.compute_api, '_create_image', 
return_value=dict(id='fake-image-id')), mock.patch.object(instance, 'save'), mock.patch.object(self.compute_api, '_record_action_start'), mock.patch.object(self.compute_api.compute_rpcapi, 'shelve_instance'), mock.patch.object(self.compute_api.compute_rpcapi, 'shelve_offload_instance') ) as ( volume_backed_inst, create_image, instance_save, record_action_start, rpcapi_shelve_instance, rpcapi_shelve_offload_instance ): self.compute_api.shelve(self.context, instance, clean_shutdown=clean_shutdown) # assert field values set on the instance object self.assertEqual(task_states.SHELVING, instance.task_state) # assert our mock calls volume_backed_inst.assert_called_once_with( self.context, instance) instance_save.assert_called_once_with(expected_task_state=[None]) record_action_start.assert_called_once_with( self.context, instance, instance_actions.SHELVE) if boot_from_volume: rpcapi_shelve_offload_instance.assert_called_once_with( self.context, instance=instance, clean_shutdown=clean_shutdown) else: rpcapi_shelve_instance.assert_called_once_with( self.context, instance=instance, image_id='fake-image-id', clean_shutdown=clean_shutdown) def test_shelve(self): self._test_shelve() def test_shelve_stopped(self): self._test_shelve(vm_state=vm_states.STOPPED) def test_shelve_paused(self): self._test_shelve(vm_state=vm_states.PAUSED) def test_shelve_suspended(self): self._test_shelve(vm_state=vm_states.SUSPENDED) def test_shelve_boot_from_volume(self): self._test_shelve(boot_from_volume=True) def test_shelve_forced_shutdown(self): self._test_shelve(clean_shutdown=False) def test_shelve_boot_from_volume_forced_shutdown(self): self._test_shelve(boot_from_volume=True, clean_shutdown=False) def _test_shelve_invalid_state(self, vm_state): params = dict(vm_state=vm_state) instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceInvalidState, self.compute_api.shelve, self.context, instance) def test_shelve_fails_invalid_states(self): invalid_vm_states = 
self._get_vm_states(set([vm_states.ACTIVE, vm_states.STOPPED, vm_states.PAUSED, vm_states.SUSPENDED])) for state in invalid_vm_states: self._test_shelve_invalid_state(state) def _test_shelve_offload(self, clean_shutdown=True): params = dict(task_state=None, vm_state=vm_states.SHELVED) instance = self._create_instance_obj(params=params) with test.nested( mock.patch.object(instance, 'save'), mock.patch.object(self.compute_api.compute_rpcapi, 'shelve_offload_instance') ) as ( instance_save, rpcapi_shelve_offload_instance ): self.compute_api.shelve_offload(self.context, instance, clean_shutdown=clean_shutdown) # assert field values set on the instance object self.assertEqual(task_states.SHELVING_OFFLOADING, instance.task_state) instance_save.assert_called_once_with(expected_task_state=[None]) rpcapi_shelve_offload_instance.assert_called_once_with( self.context, instance=instance, clean_shutdown=clean_shutdown) def test_shelve_offload(self): self._test_shelve_offload() def test_shelve_offload_forced_shutdown(self): self._test_shelve_offload(clean_shutdown=False) def _test_shelve_offload_invalid_state(self, vm_state): params = dict(vm_state=vm_state) instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceInvalidState, self.compute_api.shelve_offload, self.context, instance) def test_shelve_offload_fails_invalid_states(self): invalid_vm_states = self._get_vm_states(set([vm_states.SHELVED])) for state in invalid_vm_states: self._test_shelve_offload_invalid_state(state) def _test_reboot_type(self, vm_state, reboot_type, task_state=None): # Ensure instance can be soft rebooted. 
inst = self._create_instance_obj() inst.vm_state = vm_state inst.task_state = task_state self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(inst, 'save') expected_task_state = [None] if reboot_type == 'HARD': expected_task_state.extend([task_states.REBOOTING, task_states.REBOOT_PENDING, task_states.REBOOT_STARTED, task_states.REBOOTING_HARD, task_states.RESUMING, task_states.UNPAUSING, task_states.SUSPENDING]) inst.save(expected_task_state=expected_task_state) self.compute_api._record_action_start(self.context, inst, instance_actions.REBOOT) if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'reboot_instance') rpcapi.reboot_instance(self.context, instance=inst, block_device_info=None, reboot_type=reboot_type) self.mox.ReplayAll() self.compute_api.reboot(self.context, inst, reboot_type) def _test_reboot_type_fails(self, reboot_type, **updates): inst = self._create_instance_obj() inst.update(updates) self.assertRaises(exception.InstanceInvalidState, self.compute_api.reboot, self.context, inst, reboot_type) def test_reboot_hard_active(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD') def test_reboot_hard_error(self): self._test_reboot_type(vm_states.ERROR, 'HARD') def test_reboot_hard_rebooting(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.REBOOTING) def test_reboot_hard_reboot_started(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.REBOOT_STARTED) def test_reboot_hard_reboot_pending(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.REBOOT_PENDING) def test_reboot_hard_rescued(self): self._test_reboot_type_fails('HARD', vm_state=vm_states.RESCUED) def test_reboot_hard_resuming(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.RESUMING) def 
test_reboot_hard_pausing(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.PAUSING) def test_reboot_hard_unpausing(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.UNPAUSING) def test_reboot_hard_suspending(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.SUSPENDING) def test_reboot_hard_error_not_launched(self): self._test_reboot_type_fails('HARD', vm_state=vm_states.ERROR, launched_at=None) def test_reboot_soft(self): self._test_reboot_type(vm_states.ACTIVE, 'SOFT') def test_reboot_soft_error(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR) def test_reboot_soft_paused(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.PAUSED) def test_reboot_soft_stopped(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.STOPPED) def test_reboot_soft_suspended(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.SUSPENDED) def test_reboot_soft_rebooting(self): self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING) def test_reboot_soft_rebooting_hard(self): self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING_HARD) def test_reboot_soft_reboot_started(self): self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOT_STARTED) def test_reboot_soft_reboot_pending(self): self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOT_PENDING) def test_reboot_soft_rescued(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.RESCUED) def test_reboot_soft_error_not_launched(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR, launched_at=None) def test_reboot_soft_resuming(self): self._test_reboot_type_fails('SOFT', task_state=task_states.RESUMING) def test_reboot_soft_pausing(self): self._test_reboot_type_fails('SOFT', task_state=task_states.PAUSING) def test_reboot_soft_unpausing(self): self._test_reboot_type_fails('SOFT', task_state=task_states.UNPAUSING) def 
test_reboot_soft_suspending(self): self._test_reboot_type_fails('SOFT', task_state=task_states.SUSPENDING) def _test_delete_resizing_part(self, inst, deltas): old_flavor = inst.old_flavor deltas['cores'] = -old_flavor.vcpus deltas['ram'] = -old_flavor.memory_mb def _test_delete_resized_part(self, inst): migration = objects.Migration._from_db_object( self.context, objects.Migration(), test_migration.fake_db_migration()) self.mox.StubOutWithMock(objects.Migration, 'get_by_instance_and_status') self.context.elevated().AndReturn(self.context) objects.Migration.get_by_instance_and_status( self.context, inst.uuid, 'finished').AndReturn(migration) compute_utils.downsize_quota_delta(self.context, inst).AndReturn('deltas') fake_quotas = objects.Quotas.from_reservations(self.context, ['rsvs']) compute_utils.reserve_quota_delta(self.context, 'deltas', inst).AndReturn(fake_quotas) self.compute_api._record_action_start( self.context, inst, instance_actions.CONFIRM_RESIZE) self.compute_api.compute_rpcapi.confirm_resize( self.context, inst, migration, migration['source_compute'], fake_quotas.reservations, cast=False) def _test_delete_shelved_part(self, inst): image_api = self.compute_api.image_api self.mox.StubOutWithMock(image_api, 'delete') snapshot_id = inst.system_metadata.get('shelved_image_id') if snapshot_id == SHELVED_IMAGE: image_api.delete(self.context, snapshot_id).AndReturn(True) elif snapshot_id == SHELVED_IMAGE_NOT_FOUND: image_api.delete(self.context, snapshot_id).AndRaise( exception.ImageNotFound(image_id=snapshot_id)) elif snapshot_id == SHELVED_IMAGE_NOT_AUTHORIZED: image_api.delete(self.context, snapshot_id).AndRaise( exception.ImageNotAuthorized(image_id=snapshot_id)) elif snapshot_id == SHELVED_IMAGE_EXCEPTION: image_api.delete(self.context, snapshot_id).AndRaise( test.TestingException("Unexpected error")) def _test_downed_host_part(self, inst, updates, delete_time, delete_type): compute_utils.notify_about_instance_usage( self.compute_api.notifier, 
self.context, inst, '%s.start' % delete_type) self.context.elevated().AndReturn(self.context) self.compute_api.network_api.deallocate_for_instance( self.context, inst) state = ('soft' in delete_type and vm_states.SOFT_DELETED or vm_states.DELETED) updates.update({'vm_state': state, 'task_state': None, 'terminated_at': delete_time}) inst.save() updates.update({'deleted_at': delete_time, 'deleted': True}) fake_inst = fake_instance.fake_db_instance(**updates) self.compute_api._local_cleanup_bdm_volumes([], inst, self.context) db.instance_destroy(self.context, inst.uuid, constraint=None).AndReturn(fake_inst) compute_utils.notify_about_instance_usage( self.compute_api.notifier, self.context, inst, '%s.end' % delete_type, system_metadata=inst.system_metadata) def _test_delete(self, delete_type, **attrs): reservations = ['fake-resv'] inst = self._create_instance_obj() inst.update(attrs) inst._context = self.context deltas = {'instances': -1, 'cores': -inst.vcpus, 'ram': -inst.memory_mb} delete_time = datetime.datetime(1955, 11, 5, 9, 30, tzinfo=iso8601.iso8601.Utc()) self.useFixture(utils_fixture.TimeFixture(delete_time)) task_state = (delete_type == 'soft_delete' and task_states.SOFT_DELETING or task_states.DELETING) updates = {'progress': 0, 'task_state': task_state} if delete_type == 'soft_delete': updates['deleted_at'] = delete_time self.mox.StubOutWithMock(inst, 'save') self.mox.StubOutWithMock(objects.BlockDeviceMappingList, 'get_by_instance_uuid') self.mox.StubOutWithMock(quota.QUOTAS, 'reserve') self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(db, 'service_get_by_compute_host') self.mox.StubOutWithMock(self.compute_api.servicegroup_api, 'service_is_up') self.mox.StubOutWithMock(compute_utils, 'downsize_quota_delta') self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') 
self.mox.StubOutWithMock(self.compute_api.network_api, 'deallocate_for_instance') self.mox.StubOutWithMock(db, 'instance_system_metadata_get') self.mox.StubOutWithMock(db, 'instance_destroy') self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage') self.mox.StubOutWithMock(quota.QUOTAS, 'commit') rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'confirm_resize') if (inst.vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED)): self._test_delete_shelved_part(inst) if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi self.mox.StubOutWithMock(rpcapi, 'terminate_instance') self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance') objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, inst.uuid).AndReturn([]) inst.save() if inst.task_state == task_states.RESIZE_FINISH: self._test_delete_resizing_part(inst, deltas) quota.QUOTAS.reserve(self.context, project_id=inst.project_id, user_id=inst.user_id, expire=mox.IgnoreArg(), **deltas).AndReturn(reservations) # NOTE(comstud): This is getting messy. But what we are wanting # to test is: # If cells is enabled and we're the API cell: # * Cast to cells_rpcapi. with reservations=None # * Commit reservations # Otherwise: # * Check for downed host # * If downed host: # * Clean up instance, destroying it, sending notifications. # (Tested in _test_downed_host_part()) # * Commit reservations # * If not downed host: # * Record the action start. # * Cast to compute_rpcapi. 
with the reservations cast = True commit_quotas = True soft_delete = False if self.cell_type != 'api': if inst.vm_state == vm_states.RESIZED: self._test_delete_resized_part(inst) if inst.vm_state == vm_states.SOFT_DELETED: soft_delete = True if inst.vm_state != vm_states.SHELVED_OFFLOADED: self.context.elevated().AndReturn(self.context) db.service_get_by_compute_host( self.context, inst.host).AndReturn( test_service.fake_service) self.compute_api.servicegroup_api.service_is_up( mox.IsA(objects.Service)).AndReturn( inst.host != 'down-host') if (inst.host == 'down-host' or inst.vm_state == vm_states.SHELVED_OFFLOADED): self._test_downed_host_part(inst, updates, delete_time, delete_type) cast = False else: # Happens on the manager side commit_quotas = False if cast: if self.cell_type != 'api': self.compute_api._record_action_start(self.context, inst, instance_actions.DELETE) if commit_quotas or soft_delete: cast_reservations = None else: cast_reservations = reservations if delete_type == 'soft_delete': rpcapi.soft_delete_instance(self.context, inst, reservations=cast_reservations) elif delete_type in ['delete', 'force_delete']: rpcapi.terminate_instance(self.context, inst, [], reservations=cast_reservations, delete_type=delete_type) if commit_quotas: # Local delete or when we're testing API cell. 
quota.QUOTAS.commit(self.context, reservations, project_id=inst.project_id, user_id=inst.user_id) self.mox.ReplayAll() getattr(self.compute_api, delete_type)(self.context, inst) for k, v in updates.items(): self.assertEqual(inst[k], v) self.mox.UnsetStubs() def test_delete(self): self._test_delete('delete') def test_delete_if_not_launched(self): self._test_delete('delete', launched_at=None) def test_delete_in_resizing(self): old_flavor = objects.Flavor(vcpus=1, memory_mb=512, extra_specs={}) self._test_delete('delete', task_state=task_states.RESIZE_FINISH, old_flavor=old_flavor) def test_delete_in_resized(self): self._test_delete('delete', vm_state=vm_states.RESIZED) def test_delete_shelved(self): fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE} self._test_delete('delete', vm_state=vm_states.SHELVED, system_metadata=fake_sys_meta) def test_delete_shelved_offloaded(self): fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE} self._test_delete('delete', vm_state=vm_states.SHELVED_OFFLOADED, system_metadata=fake_sys_meta) def test_delete_shelved_image_not_found(self): fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_FOUND} self._test_delete('delete', vm_state=vm_states.SHELVED_OFFLOADED, system_metadata=fake_sys_meta) def test_delete_shelved_image_not_authorized(self): fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_AUTHORIZED} self._test_delete('delete', vm_state=vm_states.SHELVED_OFFLOADED, system_metadata=fake_sys_meta) def test_delete_shelved_exception(self): fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_EXCEPTION} self._test_delete('delete', vm_state=vm_states.SHELVED, system_metadata=fake_sys_meta) def test_delete_with_down_host(self): self._test_delete('delete', host='down-host') def test_delete_soft_with_down_host(self): self._test_delete('soft_delete', host='down-host') def test_delete_soft(self): self._test_delete('soft_delete') def test_delete_forced(self): fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE} for vm_state in 
self._get_vm_states():
            # (continuation of a force_delete test whose header is above this
            # chunk) shelved instances also carry the fake system metadata.
            if vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED):
                self._test_delete('force_delete', vm_state=vm_state,
                                  system_metadata=fake_sys_meta)
            self._test_delete('force_delete', vm_state=vm_state)

    def test_delete_fast_if_host_not_set(self):
        # An instance with no host should be destroyed locally (fast path)
        # without going through the compute RPC terminate flow.
        inst = self._create_instance_obj()
        inst.host = ''
        quotas = quotas_obj.Quotas(self.context)
        updates = {'progress': 0, 'task_state': task_states.DELETING}

        self.mox.StubOutWithMock(inst, 'save')
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(db, 'constraint')
        self.mox.StubOutWithMock(db, 'instance_destroy')
        self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
        self.mox.StubOutWithMock(compute_utils,
                                 'notify_about_instance_usage')
        # NOTE(review): this mixin runs in several modes; the api cell routes
        # deletes through the cells rpcapi instead of the compute rpcapi.
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'terminate_instance')

        db.block_device_mapping_get_all_by_instance(self.context,
                                                    inst.uuid).AndReturn([])
        inst.save()
        self.compute_api._create_reservations(self.context,
                                              inst, inst.task_state,
                                              inst.project_id, inst.user_id
                                              ).AndReturn(quotas)

        if self.cell_type == 'api':
            rpcapi.terminate_instance(
                    self.context, inst,
                    mox.IsA(objects.BlockDeviceMappingList),
                    reservations=None, delete_type='delete')
        else:
            # Local-delete path: notify, destroy the DB row under a host
            # constraint, and send the end notification.
            compute_utils.notify_about_instance_usage(
                    self.compute_api.notifier, self.context,
                    inst, 'delete.start')
            db.constraint(host=mox.IgnoreArg()).AndReturn('constraint')
            delete_time = datetime.datetime(1955, 11, 5, 9, 30,
                                            tzinfo=iso8601.iso8601.Utc())
            updates['deleted_at'] = delete_time
            updates['deleted'] = True
            fake_inst = fake_instance.fake_db_instance(**updates)
            db.instance_destroy(self.context, inst.uuid,
                                constraint='constraint').AndReturn(fake_inst)
            compute_utils.notify_about_instance_usage(
                    self.compute_api.notifier, self.context, inst,
                    'delete.end', system_metadata=inst.system_metadata)

        self.mox.ReplayAll()

        self.compute_api.delete(self.context, inst)
        # The instance object must reflect every recorded DB update.
        for k, v in updates.items():
            self.assertEqual(inst[k], v)

    # NOTE(review): 'rservations' looks like a typo for 'reservations', but
    # this is a stand-in callback whose arguments are ignored, so it is
    # harmless; left as-is to stay byte-identical.
    def _fake_do_delete(context, instance, bdms,
                        rservations=None, local=False):
        pass

    def test_local_delete_with_deleted_volume(self):
        # Local delete must tolerate a volume that is already gone
        # (VolumeNotFound from terminate_connection) and still destroy
        # the BDM and the instance.
        bdms = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 42, 'volume_id': 'volume_id',
                     'source_type': 'volume', 'destination_type': 'volume',
                     'delete_on_termination': False}))]

        inst = self._create_instance_obj()
        inst._context = self.context
        self.mox.StubOutWithMock(inst, 'destroy')
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(self.compute_api.network_api,
                                 'deallocate_for_instance')
        self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
        self.mox.StubOutWithMock(compute_utils,
                                 'notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute_api.volume_api,
                                 'terminate_connection')
        self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'destroy')

        compute_utils.notify_about_instance_usage(
                self.compute_api.notifier, self.context,
                inst, 'delete.start')
        self.context.elevated().MultipleTimes().AndReturn(self.context)
        # Network deallocation happens everywhere except the api cell.
        if self.cell_type != 'api':
            self.compute_api.network_api.deallocate_for_instance(
                    self.context, inst)
        self.compute_api.volume_api.terminate_connection(
                mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
                AndRaise(exception.VolumeNotFound('volume_id'))
        bdms[0].destroy()
        inst.destroy()
        compute_utils.notify_about_instance_usage(
                self.compute_api.notifier, self.context,
                inst, 'delete.end', system_metadata=inst.system_metadata)

        self.mox.ReplayAll()
        self.compute_api._local_delete(self.context, inst, bdms,
                                       'delete', self._fake_do_delete)

    def test_local_delete_without_info_cache(self):
        # _local_delete must not blow up when instance.info_cache is None.
        inst = self._create_instance_obj()

        with test.nested(
            mock.patch.object(inst, 'destroy'),
            mock.patch.object(self.context, 'elevated'),
            mock.patch.object(self.compute_api.network_api,
                              'deallocate_for_instance'),
            mock.patch.object(db, 'instance_system_metadata_get'),
            mock.patch.object(compute_utils,
                              'notify_about_instance_usage')
        ) as (
            inst_destroy, context_elevated,
            net_api_deallocate_for_instance,
            db_instance_system_metadata_get,
            notify_about_instance_usage
        ):
            compute_utils.notify_about_instance_usage(
                    self.compute_api.notifier, self.context,
                    inst, 'delete.start')
            self.context.elevated().MultipleTimes().AndReturn(self.context)
            if self.cell_type != 'api':
                self.compute_api.network_api.deallocate_for_instance(
                        self.context, inst)
            inst.destroy()
            compute_utils.notify_about_instance_usage(
                    self.compute_api.notifier, self.context,
                    inst, 'delete.end',
                    system_metadata=inst.system_metadata)
            inst.info_cache = None
            self.compute_api._local_delete(self.context, inst, [],
                                           'delete', self._fake_do_delete)

    def test_delete_disabled(self):
        # disable_terminate must make delete() a no-op (no DB update runs;
        # the stub is never replayed with expectations).
        inst = self._create_instance_obj()
        inst.disable_terminate = True
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.ReplayAll()
        self.compute_api.delete(self.context, inst)

    def test_delete_soft_rollback(self):
        # If instance.save() fails during soft delete the exception
        # propagates to the caller.
        inst = self._create_instance_obj()
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(inst, 'save')

        delete_time = datetime.datetime(1955, 11, 5)
        self.useFixture(utils_fixture.TimeFixture(delete_time))

        db.block_device_mapping_get_all_by_instance(
            self.context, inst.uuid).AndReturn([])
        inst.save().AndRaise(test.TestingException)

        self.mox.ReplayAll()

        self.assertRaises(test.TestingException,
                          self.compute_api.soft_delete, self.context, inst)

    def _test_confirm_resize(self, mig_ref_passed=False):
        # Exercise confirm_resize(); with mig_ref_passed the migration
        # object is handed in by the caller instead of looked up.
        params = dict(vm_state=vm_states.RESIZED)
        fake_inst = self._create_instance_obj(params=params)
        fake_mig = objects.Migration._from_db_object(
                self.context, objects.Migration(),
                test_migration.fake_db_migration())

        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')
        self.mox.StubOutWithMock(compute_utils, 'downsize_quota_delta')
        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
        self.mox.StubOutWithMock(fake_mig, 'save')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'confirm_resize')

        self.context.elevated().AndReturn(self.context)
        if not mig_ref_passed:
            objects.Migration.get_by_instance_and_status(
                    self.context, fake_inst['uuid'], 'finished').AndReturn(
                            fake_mig)
        compute_utils.downsize_quota_delta(self.context,
                                           fake_inst).AndReturn('deltas')

        resvs = ['resvs']
        fake_quotas = objects.Quotas.from_reservations(self.context, resvs)

        compute_utils.reserve_quota_delta(self.context, 'deltas',
                                          fake_inst).AndReturn(fake_quotas)

        def _check_mig(expected_task_state=None):
            # Migration status must be flipped before save() is called.
            self.assertEqual('confirming', fake_mig.status)

        fake_mig.save().WithSideEffects(_check_mig)

        # In a cells deployment quotas are committed up front and no
        # reservations are passed down to the compute rpcapi.
        if self.cell_type:
            fake_quotas.commit()

        self.compute_api._record_action_start(self.context, fake_inst,
                                              'confirmResize')

        self.compute_api.compute_rpcapi.confirm_resize(
                self.context, fake_inst, fake_mig, 'compute-source',
                [] if self.cell_type else fake_quotas.reservations)

        self.mox.ReplayAll()

        if mig_ref_passed:
            self.compute_api.confirm_resize(self.context, fake_inst,
                                            migration=fake_mig)
        else:
            self.compute_api.confirm_resize(self.context, fake_inst)

    def test_confirm_resize(self):
        self._test_confirm_resize()

    def test_confirm_resize_with_migration_ref(self):
        self._test_confirm_resize(mig_ref_passed=True)

    def _test_revert_resize(self):
        # Exercise the happy path of revert_resize(): task state is set to
        # RESIZE_REVERTING, the migration flips to 'reverting', and the
        # compute rpcapi is invoked with the right reservations.
        params = dict(vm_state=vm_states.RESIZED)
        fake_inst = self._create_instance_obj(params=params)
        fake_mig = objects.Migration._from_db_object(
                self.context, objects.Migration(),
                test_migration.fake_db_migration())

        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')
        self.mox.StubOutWithMock(compute_utils,
                                 'reverse_upsize_quota_delta')
        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
        self.mox.StubOutWithMock(fake_inst, 'save')
        self.mox.StubOutWithMock(fake_mig, 'save')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'revert_resize')

        self.context.elevated().AndReturn(self.context)
        objects.Migration.get_by_instance_and_status(
                self.context, fake_inst['uuid'], 'finished').AndReturn(
                        fake_mig)

        compute_utils.reverse_upsize_quota_delta(
                self.context, fake_mig).AndReturn('deltas')

        resvs = ['resvs']
        fake_quotas = objects.Quotas.from_reservations(self.context, resvs)

        compute_utils.reserve_quota_delta(self.context, 'deltas',
                                          fake_inst).AndReturn(fake_quotas)

        def _check_state(expected_task_state=None):
            self.assertEqual(task_states.RESIZE_REVERTING,
                             fake_inst.task_state)

        fake_inst.save(expected_task_state=[None]).WithSideEffects(
                _check_state)

        def _check_mig(expected_task_state=None):
            self.assertEqual('reverting', fake_mig.status)

        fake_mig.save().WithSideEffects(_check_mig)

        if self.cell_type:
            fake_quotas.commit()

        self.compute_api._record_action_start(self.context, fake_inst,
                                              'revertResize')

        self.compute_api.compute_rpcapi.revert_resize(
                self.context, fake_inst, fake_mig, 'compute-dest',
                [] if self.cell_type else fake_quotas.reservations)

        self.mox.ReplayAll()

        self.compute_api.revert_resize(self.context, fake_inst)

    def test_revert_resize(self):
        self._test_revert_resize()

    def test_revert_resize_concurrent_fail(self):
        # A concurrent task-state change makes save() raise
        # UnexpectedTaskStateError; the quota reservation must be rolled
        # back and the error re-raised.
        params = dict(vm_state=vm_states.RESIZED)
        fake_inst = self._create_instance_obj(params=params)
        fake_mig = objects.Migration._from_db_object(
                self.context, objects.Migration(),
                test_migration.fake_db_migration())

        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')
        self.mox.StubOutWithMock(compute_utils,
                                 'reverse_upsize_quota_delta')
        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
        self.mox.StubOutWithMock(fake_inst, 'save')

        self.context.elevated().AndReturn(self.context)
        objects.Migration.get_by_instance_and_status(
                self.context, fake_inst['uuid'],
                'finished').AndReturn(fake_mig)

        delta = ['delta']
        compute_utils.reverse_upsize_quota_delta(
            self.context, fake_mig).AndReturn(delta)
        resvs = ['resvs']
        fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
        compute_utils.reserve_quota_delta(
            self.context, delta, fake_inst).AndReturn(fake_quotas)

        exc = exception.UnexpectedTaskStateError(
            instance_uuid=fake_inst['uuid'],
            actual={'task_state': task_states.RESIZE_REVERTING},
            expected={'task_state': [None]})
        fake_inst.save(expected_task_state=[None]).AndRaise(exc)

        fake_quotas.rollback()

        self.mox.ReplayAll()
        self.assertRaises(exception.UnexpectedTaskStateError,
                          self.compute_api.revert_resize,
                          self.context,
                          fake_inst)

    def _test_resize(self, flavor_id_passed=True,
                     same_host=False, allow_same_host=False,
                     project_id=None,
                     extra_kwargs=None,
                     same_flavor=False,
                     clean_shutdown=True):
        # Parameterized driver for the resize/migrate test matrix below.
        # flavor_id_passed=False models a cold migration (same flavor).
        if extra_kwargs is None:
            extra_kwargs = {}

        self.flags(allow_resize_to_same_host=allow_same_host)

        params = {}
        if project_id is not None:
            # To test instance w/ different project id than context (admin)
            params['project_id'] = project_id
        fake_inst = self._create_instance_obj(params=params)

        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        self.mox.StubOutWithMock(compute_utils, 'upsize_quota_delta')
        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
        self.mox.StubOutWithMock(fake_inst, 'save')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'resize_instance')

        current_flavor = fake_inst.get_flavor()
        if flavor_id_passed:
            new_flavor = self._create_flavor(id=200, flavorid='new-flavor-id',
                                             name='new_flavor',
                                             disabled=False)
            if same_flavor:
                new_flavor.id = current_flavor.id
            flavors.get_flavor_by_flavor_id(
                    'new-flavor-id', read_deleted='no').AndReturn(new_flavor)
        else:
            new_flavor = current_flavor

        # Resizing to the same flavor is rejected early (except in the
        # compute cell), so no quota/task expectations are recorded then.
        if (self.cell_type == 'compute' or
                not (flavor_id_passed and same_flavor)):
            resvs = ['resvs']
            project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                               fake_inst)
            fake_quotas = objects.Quotas.from_reservations(self.context,
                                                           resvs)
            if flavor_id_passed:
                compute_utils.upsize_quota_delta(
                    self.context, mox.IsA(objects.Flavor),
                    mox.IsA(objects.Flavor)).AndReturn('deltas')
                compute_utils.reserve_quota_delta(
                    self.context, 'deltas', fake_inst).AndReturn(fake_quotas)

            def _check_state(expected_task_state=None):
                self.assertEqual(task_states.RESIZE_PREP,
                                 fake_inst.task_state)
                self.assertEqual(fake_inst.progress, 0)
                for key, value in extra_kwargs.items():
                    self.assertEqual(value, getattr(fake_inst, key))

            fake_inst.save(expected_task_state=[None]).WithSideEffects(
                    _check_state)

            if allow_same_host:
                filter_properties = {'ignore_hosts': []}
            else:
                filter_properties = {'ignore_hosts': [fake_inst['host']]}

            if flavor_id_passed:
                expected_reservations = fake_quotas.reservations
            else:
                expected_reservations = []
            if self.cell_type == 'api':
                # The api cell commits quotas itself and creates the
                # Migration record locally; verify its fields on create().
                fake_quotas.commit()
                expected_reservations = []
                mig = objects.Migration()

                def _get_migration(context=None):
                    return mig

                def _check_mig():
                    self.assertEqual(fake_inst.uuid, mig.instance_uuid)
                    self.assertEqual(current_flavor.id,
                                     mig.old_instance_type_id)
                    self.assertEqual(new_flavor.id,
                                     mig.new_instance_type_id)
                    self.assertEqual('finished', mig.status)
                    if new_flavor.id != current_flavor.id:
                        self.assertEqual('resize', mig.migration_type)
                    else:
                        self.assertEqual('migration', mig.migration_type)

                self.stubs.Set(objects, 'Migration', _get_migration)
                self.mox.StubOutWithMock(self.context, 'elevated')
                self.mox.StubOutWithMock(mig, 'create')

                self.context.elevated().AndReturn(self.context)
                mig.create().WithSideEffects(_check_mig)

            if flavor_id_passed:
                self.compute_api._record_action_start(self.context,
                                                      fake_inst, 'resize')
            else:
                self.compute_api._record_action_start(self.context,
                                                      fake_inst, 'migrate')

            scheduler_hint = {'filter_properties': filter_properties}

            self.compute_api.compute_task_api.resize_instance(
                    self.context, fake_inst, extra_kwargs,
                    scheduler_hint=scheduler_hint,
                    flavor=mox.IsA(objects.Flavor),
                    reservations=expected_reservations,
                    clean_shutdown=clean_shutdown)

        self.mox.ReplayAll()

        if flavor_id_passed:
            self.compute_api.resize(self.context, fake_inst,
                                    flavor_id='new-flavor-id',
                                    clean_shutdown=clean_shutdown,
                                    **extra_kwargs)
        else:
            self.compute_api.resize(self.context, fake_inst,
                                    clean_shutdown=clean_shutdown,
                                    **extra_kwargs)

    def _test_migrate(self, *args, **kwargs):
        # A migrate is a resize without a target flavor.
        self._test_resize(*args, flavor_id_passed=False, **kwargs)

    def test_resize(self):
        self._test_resize()

    def test_resize_with_kwargs(self):
        self._test_resize(extra_kwargs=dict(cow='moo'))

    def test_resize_same_host_and_allowed(self):
        self._test_resize(same_host=True, allow_same_host=True)

    def test_resize_same_host_and_not_allowed(self):
        self._test_resize(same_host=True, allow_same_host=False)

    def test_resize_different_project_id(self):
        self._test_resize(project_id='different')

    def test_resize_forced_shutdown(self):
        self._test_resize(clean_shutdown=False)

    def test_migrate(self):
        self._test_migrate()

    def test_migrate_with_kwargs(self):
        self._test_migrate(extra_kwargs=dict(cow='moo'))

    def test_migrate_same_host_and_allowed(self):
        self._test_migrate(same_host=True, allow_same_host=True)

    def test_migrate_same_host_and_not_allowed(self):
        self._test_migrate(same_host=True, allow_same_host=False)

    def test_migrate_different_project_id(self):
        self._test_migrate(project_id='different')

    def test_resize_invalid_flavor_fails(self):
        # Unknown flavor id aborts the resize before the instance is saved.
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        # Should never reach these.
        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'resize_instance')

        fake_inst = self._create_instance_obj()
        exc = exception.FlavorNotFound(flavor_id='flavor-id')

        flavors.get_flavor_by_flavor_id('flavor-id',
                                        read_deleted='no').AndRaise(exc)

        self.mox.ReplayAll()

        with mock.patch.object(fake_inst, 'save') as mock_save:
            self.assertRaises(exception.FlavorNotFound,
                              self.compute_api.resize, self.context,
                              fake_inst, flavor_id='flavor-id')
            self.assertFalse(mock_save.called)

    def test_resize_disabled_flavor_fails(self):
        # A disabled flavor is reported to the user as FlavorNotFound.
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        # Should never reach these.
        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'resize_instance')

        fake_inst = self._create_instance_obj()
        fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
                                          name='foo', disabled=True)

        flavors.get_flavor_by_flavor_id(
                'flavor-id', read_deleted='no').AndReturn(fake_flavor)

        self.mox.ReplayAll()

        with mock.patch.object(fake_inst, 'save') as mock_save:
            self.assertRaises(exception.FlavorNotFound,
                              self.compute_api.resize, self.context,
                              fake_inst, flavor_id='flavor-id')
            self.assertFalse(mock_save.called)

    @mock.patch.object(flavors, 'get_flavor_by_flavor_id')
    def test_resize_to_zero_disk_flavor_fails(self, get_flavor_by_flavor_id):
        # A root_gb=0 flavor is only valid for volume-backed instances.
        fake_inst = self._create_instance_obj()
        fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
                                          name='foo', root_gb=0)

        get_flavor_by_flavor_id.return_value = fake_flavor

        with mock.patch.object(self.compute_api,
                               'is_volume_backed_instance',
                               return_value=False):
            self.assertRaises(exception.CannotResizeDisk,
                              self.compute_api.resize, self.context,
                              fake_inst, flavor_id='flavor-id')

    @mock.patch('nova.compute.api.API._record_action_start')
    @mock.patch('nova.compute.api.API._resize_cells_support')
    @mock.patch('nova.conductor.conductor_api.ComputeTaskAPI.resize_instance')
    @mock.patch.object(flavors, 'get_flavor_by_flavor_id')
    def test_resize_to_zero_disk_flavor_volume_backed(self,
                                                      get_flavor_by_flavor_id,
                                                      resize_instance_mock,
                                                      cells_support_mock,
                                                      record_mock):
        # Volume-backed instances may resize to a zero-disk flavor.
        params = dict(image_ref='')
        fake_inst = self._create_instance_obj(params=params)
        fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
                                          name='foo', root_gb=0)
        get_flavor_by_flavor_id.return_value = fake_flavor

        @mock.patch.object(self.compute_api, 'is_volume_backed_instance',
                           return_value=True)
        @mock.patch.object(fake_inst, 'save')
        def do_test(mock_save, mock_volume):
            self.compute_api.resize(self.context, fake_inst,
                                    flavor_id='flavor-id')
            mock_volume.assert_called_once_with(self.context, fake_inst)

        do_test()

    def test_resize_quota_exceeds_fails(self):
        # OverQuota during reservation is translated to TooManyInstances
        # and the instance is never saved.
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        self.mox.StubOutWithMock(compute_utils, 'upsize_quota_delta')
        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
        # Should never reach these.
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'resize_instance')

        fake_inst = self._create_instance_obj()
        fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
                                          name='foo', disabled=False)
        flavors.get_flavor_by_flavor_id(
                'flavor-id', read_deleted='no').AndReturn(fake_flavor)
        deltas = dict(resource=0)
        compute_utils.upsize_quota_delta(
            self.context, mox.IsA(objects.Flavor),
            mox.IsA(objects.Flavor)).AndReturn(deltas)
        usage = dict(in_use=0, reserved=0)
        quotas = {'resource': 0}
        usages = {'resource': usage}
        overs = ['resource']
        over_quota_args = dict(quotas=quotas,
                               usages=usages,
                               overs=overs)

        compute_utils.reserve_quota_delta(self.context, deltas,
                                          fake_inst).AndRaise(
            exception.OverQuota(**over_quota_args))

        self.mox.ReplayAll()

        with mock.patch.object(fake_inst, 'save') as mock_save:
            self.assertRaises(exception.TooManyInstances,
                              self.compute_api.resize, self.context,
                              fake_inst, flavor_id='flavor-id')
            self.assertFalse(mock_save.called)

    def test_check_instance_quota_exceeds_with_multiple_resources(self):
        # All exceeded resources must be reported in one TooManyInstances.
        quotas = {'cores': 1, 'instances': 1, 'ram': 512}
        usages = {'cores': dict(in_use=1, reserved=0),
                  'instances': dict(in_use=1, reserved=0),
                  'ram': dict(in_use=512, reserved=0)}
        overs = ['cores', 'instances', 'ram']
        over_quota_args = dict(quotas=quotas,
                               usages=usages,
                               overs=overs)
        e = exception.OverQuota(**over_quota_args)
        fake_flavor = self._create_flavor()
        instance_num = 1
        with mock.patch.object(objects.Quotas, 'reserve', side_effect=e):
            try:
                self.compute_api._check_num_instances_quota(self.context,
                                                            fake_flavor,
                                                            instance_num,
                                                            instance_num)
            except exception.TooManyInstances as e:
                self.assertEqual('cores, instances, ram', e.kwargs['overs'])
                self.assertEqual('1, 1, 512', e.kwargs['req'])
                self.assertEqual('1, 1, 512', e.kwargs['used'])
                self.assertEqual('1, 1, 512', e.kwargs['allowed'])
            else:
                self.fail("Exception not raised")

    @mock.patch.object(flavors, 'get_flavor_by_flavor_id')
    @mock.patch.object(objects.Quotas, 'reserve')
    def test_resize_instance_quota_exceeds_with_multiple_resources(
            self, mock_reserve, mock_get_flavor):
        # Same multi-resource reporting, but through the resize() path.
        quotas = {'cores': 1, 'ram': 512}
        usages = {'cores': dict(in_use=1, reserved=0),
                  'ram': dict(in_use=512, reserved=0)}
        overs = ['cores', 'ram']
        over_quota_args = dict(quotas=quotas,
                               usages=usages,
                               overs=overs)

        mock_reserve.side_effect = exception.OverQuota(**over_quota_args)
        mock_get_flavor.return_value = self._create_flavor(id=333,
                                                           vcpus=3,
                                                           memory_mb=1536)
        try:
            self.compute_api.resize(self.context,
                                    self._create_instance_obj(),
                                    'fake_flavor_id')
        except exception.TooManyInstances as e:
            self.assertEqual('cores, ram', e.kwargs['overs'])
            self.assertEqual('2, 1024', e.kwargs['req'])
            self.assertEqual('1, 512', e.kwargs['used'])
            self.assertEqual('1, 512', e.kwargs['allowed'])
            mock_get_flavor.assert_called_once_with('fake_flavor_id',
                                                    read_deleted="no")
        else:
            self.fail("Exception not raised")

    def test_pause(self):
        # Ensure instance can be paused.
        instance = self._create_instance_obj()
        self.assertEqual(instance.vm_state, vm_states.ACTIVE)
        self.assertIsNone(instance.task_state)

        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_record_action_start')
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'pause_instance')

        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                                              instance,
                                              instance_actions.PAUSE)
        rpcapi.pause_instance(self.context, instance)

        self.mox.ReplayAll()

        self.compute_api.pause(self.context, instance)
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
        self.assertEqual(task_states.PAUSING, instance.task_state)

    def _test_pause_fails(self, vm_state):
        # Pause from any non-ACTIVE vm_state is an invalid transition.
        params = dict(vm_state=vm_state)
        instance = self._create_instance_obj(params=params)
        self.assertIsNone(instance.task_state)
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.pause,
                          self.context, instance)

    def test_pause_fails_invalid_states(self):
        invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
        for state in invalid_vm_states:
            self._test_pause_fails(state)

    def test_unpause(self):
        # Ensure instance can be unpaused.
        params = dict(vm_state=vm_states.PAUSED)
        instance = self._create_instance_obj(params=params)
        self.assertEqual(instance.vm_state, vm_states.PAUSED)
        self.assertIsNone(instance.task_state)

        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_record_action_start')
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'unpause_instance')

        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                                              instance,
                                              instance_actions.UNPAUSE)
        rpcapi.unpause_instance(self.context, instance)

        self.mox.ReplayAll()

        self.compute_api.unpause(self.context, instance)
        self.assertEqual(vm_states.PAUSED, instance.vm_state)
        self.assertEqual(task_states.UNPAUSING, instance.task_state)

    def test_live_migrate_active_vm_state(self):
        instance = self._create_instance_obj()
        self._live_migrate_instance(instance)

    def test_live_migrate_paused_vm_state(self):
        paused_state = dict(vm_state=vm_states.PAUSED)
        instance = self._create_instance_obj(params=paused_state)
        self._live_migrate_instance(instance)

    @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(objects.InstanceAction, 'action_start')
    def _live_migrate_instance(self, instance, _save, _action, get_spec):
        # TODO(gilliard): This logic is upside-down (different
        # behaviour depending on which class this method is mixed-into. Once
        # we have cellsv2 we can remove this kind of logic from this test
        # NOTE(review): in the non-api branch a class (not an instance) is
        # patched — mock.patch.object handles both, so this works; confirm
        # if refactoring.
        if self.cell_type == 'api':
            api = self.compute_api.cells_rpcapi
        else:
            api = conductor.api.ComputeTaskAPI
        fake_spec = objects.RequestSpec()
        get_spec.return_value = fake_spec
        with mock.patch.object(api, 'live_migrate_instance') as task:
            self.compute_api.live_migrate(self.context, instance,
                                          block_migration=True,
                                          disk_over_commit=True,
                                          host_name='fake_dest_host')
            self.assertEqual(task_states.MIGRATING, instance.task_state)
            task.assert_called_once_with(self.context, instance,
                                         'fake_dest_host',
                                         block_migration=True,
                                         disk_over_commit=True,
                                         request_spec=fake_spec)

    def test_swap_volume_volume_api_usage(self):
        # This test ensures that volume_id arguments are passed to volume_api
        # and that volumes return to previous states in case of error.
        def fake_vol_api_begin_detaching(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            volumes[volume_id]['status'] = 'detaching'

        def fake_vol_api_roll_detaching(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            if volumes[volume_id]['status'] == 'detaching':
                volumes[volume_id]['status'] = 'in-use'

        def fake_vol_api_reserve(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            self.assertEqual(volumes[volume_id]['status'], 'available')
            volumes[volume_id]['status'] = 'attaching'

        def fake_vol_api_unreserve(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            if volumes[volume_id]['status'] == 'attaching':
                volumes[volume_id]['status'] = 'available'

        def fake_swap_volume_exc(context, instance, old_volume_id,
                                 new_volume_id):
            raise AttributeError  # Random exception

        # Should fail if VM state is not valid
        instance = fake_instance.fake_instance_obj(None, **{
                   'vm_state': vm_states.BUILDING,
                   'launched_at': timeutils.utcnow(),
                   'locked': False,
                   'availability_zone': 'fake_az',
                   'uuid': uuids.vol_instance})
        volumes = {}
        old_volume_id = uuidutils.generate_uuid()
        volumes[old_volume_id] = {'id': old_volume_id,
                                  'display_name': 'old_volume',
                                  'attach_status': 'attached',
                                  'size': 5,
                                  'status': 'in-use',
                                  'multiattach': False,
                                  'attachments': {uuids.vol_instance: {
                                                  'attachment_id': 'fakeid'
                                                  }
                                                  }
                                  }
        new_volume_id = uuidutils.generate_uuid()
        volumes[new_volume_id] = {'id': new_volume_id,
                                  'display_name': 'new_volume',
                                  'attach_status': 'detached',
                                  'size': 5,
                                  'status': 'available',
                                  'multiattach': False}
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.swap_volume, self.context,
                          instance, volumes[old_volume_id],
                          volumes[new_volume_id])
        instance['vm_state'] = vm_states.ACTIVE
        instance['task_state'] = None

        # Should fail if old volume is not attached
        volumes[old_volume_id]['attach_status'] = 'detached'
        self.assertRaises(exception.VolumeUnattached,
                          self.compute_api.swap_volume, self.context,
                          instance, volumes[old_volume_id],
                          volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        volumes[old_volume_id]['attach_status'] = 'attached'

        # Should fail if old volume's instance_uuid is not that of the instance
        volumes[old_volume_id]['attachments'] = {uuids.vol_instance_2:
                                                 {'attachment_id': 'fakeid'}}
        self.assertRaises(exception.InvalidVolume,
                          self.compute_api.swap_volume, self.context,
                          instance, volumes[old_volume_id],
                          volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        volumes[old_volume_id]['attachments'] = {uuids.vol_instance:
                                                 {'attachment_id': 'fakeid'}}

        # Should fail if new volume is attached
        volumes[new_volume_id]['attach_status'] = 'attached'
        self.assertRaises(exception.InvalidVolume,
                          self.compute_api.swap_volume, self.context,
                          instance, volumes[old_volume_id],
                          volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        volumes[new_volume_id]['attach_status'] = 'detached'

        # Should fail if new volume is smaller than the old volume
        volumes[new_volume_id]['size'] = 4
        self.assertRaises(exception.InvalidVolume,
                          self.compute_api.swap_volume, self.context,
                          instance, volumes[old_volume_id],
                          volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        volumes[new_volume_id]['size'] = 5

        # Fail call to swap_volume
        self.stubs.Set(self.compute_api.volume_api, 'begin_detaching',
                       fake_vol_api_begin_detaching)
        self.stubs.Set(self.compute_api.volume_api, 'roll_detaching',
                       fake_vol_api_roll_detaching)
        self.stubs.Set(self.compute_api.volume_api, 'reserve_volume',
                       fake_vol_api_reserve)
        self.stubs.Set(self.compute_api.volume_api, 'unreserve_volume',
                       fake_vol_api_unreserve)
        self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
                       fake_swap_volume_exc)
        self.assertRaises(AttributeError,
                          self.compute_api.swap_volume, self.context,
                          instance, volumes[old_volume_id],
                          volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')

        # Should succeed
        self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
                       lambda c, instance, old_volume_id,
                       new_volume_id: True)
        self.compute_api.swap_volume(self.context, instance,
                                     volumes[old_volume_id],
                                     volumes[new_volume_id])

    def _test_snapshot_and_backup(self, is_snapshot=True,
                                  with_base_ref=False, min_ram=None,
                                  min_disk=None,
                                  create_fails=False,
                                  instance_vm_state=vm_states.ACTIVE):
        # Common driver for snapshot() and backup() tests; verifies the
        # image metadata sent to the image API and the follow-up RPC.
        params = dict(locked=True)
        instance = self._create_instance_obj(params=params)
        instance.vm_state = instance_vm_state

        # 'cache_in_nova' is for testing non-inheritable properties
        # 'user_id' should also not be carried from sys_meta into
        # image property...since it should be set explicitly by
        # _create_image() in compute api.
        fake_image_meta = {
            'is_public': True,
            'name': 'base-name',
            'properties': {
                'user_id': 'meow',
                'foo': 'bar',
                'blah': 'bug?',
                'cache_in_nova': 'dropped',
            },
        }
        image_type = is_snapshot and 'snapshot' or 'backup'
        sent_meta = {
            'is_public': False,
            'name': 'fake-name',
            'properties': {
                'user_id': self.context.user_id,
                'instance_uuid': instance.uuid,
                'image_type': image_type,
                'foo': 'bar',
                'blah': 'bug?',
                'cow': 'moo',
                'cat': 'meow',
            },
        }
        if is_snapshot:
            if min_ram is not None:
                fake_image_meta['min_ram'] = min_ram
                sent_meta['min_ram'] = min_ram
            if min_disk is not None:
                fake_image_meta['min_disk'] = min_disk
                sent_meta['min_disk'] = min_disk
        else:
            sent_meta['properties']['backup_type'] = 'fake-backup-type'

        extra_props = dict(cow='moo', cat='meow')

        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(self.compute_api.image_api, 'create')
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'snapshot_instance')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'backup_instance')

        if not is_snapshot:
            # backup() additionally rejects volume-backed instances.
            self.mox.StubOutWithMock(self.compute_api,
                                     'is_volume_backed_instance')

            self.compute_api.is_volume_backed_instance(self.context,
                instance).AndReturn(False)

        utils.get_image_from_system_metadata(
            instance.system_metadata).AndReturn(fake_image_meta)

        fake_image = dict(id='fake-image-id')
        mock_method = self.compute_api.image_api.create(
                self.context, sent_meta)
        if create_fails:
            mock_method.AndRaise(test.TestingException())
        else:
            mock_method.AndReturn(fake_image)

        def check_state(expected_task_state=None):
            expected_state = (is_snapshot and
                              task_states.IMAGE_SNAPSHOT_PENDING or
                              task_states.IMAGE_BACKUP)
            self.assertEqual(expected_state, instance.task_state)

        if not create_fails:
            instance.save(expected_task_state=[None]).WithSideEffects(
                    check_state)

            if is_snapshot:
                self.compute_api.compute_rpcapi.snapshot_instance(
                        self.context, instance, fake_image['id'])
            else:
                self.compute_api.compute_rpcapi.backup_instance(
                        self.context, instance, fake_image['id'],
                        'fake-backup-type', 'fake-rotation')

        self.mox.ReplayAll()

        got_exc = False
        try:
            if is_snapshot:
                res = self.compute_api.snapshot(self.context, instance,
                                                'fake-name',
                                                extra_properties=extra_props)
            else:
                res = self.compute_api.backup(self.context, instance,
                                              'fake-name',
                                              'fake-backup-type',
                                              'fake-rotation',
                                              extra_properties=extra_props)
            self.assertEqual(fake_image, res)
        except test.TestingException:
            got_exc = True
        self.assertEqual(create_fails, got_exc)
        # Unset stubs explicitly since this helper is called in a loop
        # by test_backup().
        self.mox.UnsetStubs()

    def test_snapshot(self):
        self._test_snapshot_and_backup()

    def test_snapshot_fails(self):
        self._test_snapshot_and_backup(create_fails=True)

    def test_snapshot_invalid_state(self):
        # Snapshot is rejected while another snapshot/backup is in
        # progress, or while the instance is still building.
        instance = self._create_instance_obj()
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_SNAPSHOT
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.snapshot,
                          self.context, instance, 'fake-name')
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_BACKUP
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.snapshot,
                          self.context, instance, 'fake-name')
        instance.vm_state = vm_states.BUILDING
        instance.task_state = None
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.snapshot,
                          self.context, instance, 'fake-name')

    def test_snapshot_with_base_image_ref(self):
        self._test_snapshot_and_backup(with_base_ref=True)

    def test_snapshot_min_ram(self):
        self._test_snapshot_and_backup(min_ram=42)

    def test_snapshot_min_disk(self):
        self._test_snapshot_and_backup(min_disk=42)

    def test_backup(self):
        # Backup is allowed from all of these vm_states.
        for state in [vm_states.ACTIVE, vm_states.STOPPED,
                      vm_states.PAUSED, vm_states.SUSPENDED]:
            self._test_snapshot_and_backup(is_snapshot=False,
                                           instance_vm_state=state)

    def test_backup_fails(self):
        self._test_snapshot_and_backup(is_snapshot=False, create_fails=True)

    def test_backup_invalid_state(self):
        instance = self._create_instance_obj()
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_SNAPSHOT
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.backup,
                          self.context, instance, 'fake-name',
                          'fake', 'fake')
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_BACKUP
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.backup,
                          self.context, instance, 'fake-name',
                          'fake', 'fake')
        instance.vm_state = vm_states.BUILDING
        instance.task_state = None
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.backup,
                          self.context, instance, 'fake-name',
                          'fake', 'fake')

    def test_backup_with_base_image_ref(self):
        self._test_snapshot_and_backup(is_snapshot=False,
                                       with_base_ref=True)

    def test_backup_volume_backed_instance(self):
        # Backup of a volume-backed instance is an InvalidRequest.
        instance = self._create_instance_obj()

        with mock.patch.object(self.compute_api,
                               'is_volume_backed_instance',
                               return_value=True) as mock_is_volume_backed:
            self.assertRaises(exception.InvalidRequest,
                              self.compute_api.backup, self.context,
                              instance, 'fake-name', 'weekly', 3,
                              extra_properties={})
            mock_is_volume_backed.assert_called_once_with(self.context,
                                                          instance)

    def _test_snapshot_volume_backed(self, quiesce_required, quiesce_fails,
                                     vm_state=vm_states.ACTIVE):
        # Driver for snapshot_volume_backed() tests; the helper continues
        # past the end of this chunk.
        fake_sys_meta = {'image_min_ram': '11',
                         'image_min_disk': '22',
                         'image_container_format': 'ami',
                         'image_disk_format': 'ami',
                         'image_ram_disk': 'fake_ram_disk_id',
                         'image_bdm_v2': 'True',
                         'image_block_device_mapping': '[]',
                         'image_mappings': '[]',
                         'image_cache_in_nova': 'True'}
        if quiesce_required:
            fake_sys_meta['image_os_require_quiesce'] = 'yes'
        params = dict(locked=True, vm_state=vm_state,
                      system_metadata=fake_sys_meta)
        instance = self._create_instance_obj(params=params)
        instance['root_device_name'] = 'vda'

        instance_bdms = []

        expect_meta = {
            'name': 'test-snapshot',
            'properties': {'root_device_name': 'vda',
                           'ram_disk': 'fake_ram_disk_id'},
            'size': 0,
            'min_disk': '22',
            'is_public': False,
            'min_ram': '11',
        }
        if quiesce_required:
            expect_meta['properties']['os_require_quiesce'] = 'yes'

        quiesced = [False, False]
        quiesce_expected = not quiesce_fails and vm_state == vm_states.ACTIVE

        def fake_get_all_by_instance(context, instance, use_slave=False):
            return copy.deepcopy(instance_bdms)

        def fake_image_create(context, image_meta, data=None):
            self.assertThat(image_meta,
                            matchers.DictMatches(expect_meta))

        def fake_volume_get(context, volume_id):
            return {'id': volume_id, 'display_description': ''}

        def fake_volume_create_snapshot(context, volume_id, name,
                                        description):
            return {'id': '%s-snapshot' % volume_id}

        def fake_quiesce_instance(context, instance):
            if quiesce_fails:
                raise exception.InstanceQuiesceNotSupported(
                    instance_id=instance['uuid'], reason='test')
            quiesced[0] = True

        def fake_unquiesce_instance(context, instance, mapping=None):
            quiesced[1] = True

        self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
                      fake_get_all_by_instance)
        self.stubs.Set(self.compute_api.image_api, 'create',
                       fake_image_create)
        self.stubs.Set(self.compute_api.volume_api, 'get',
                       fake_volume_get)
        self.stubs.Set(self.compute_api.volume_api, 'create_snapshot_force',
                       fake_volume_create_snapshot)
        self.stubs.Set(self.compute_api.compute_rpcapi, 'quiesce_instance',
                       fake_quiesce_instance)
        self.stubs.Set(self.compute_api.compute_rpcapi, 'unquiesce_instance',
                       fake_unquiesce_instance)
        fake_image.stub_out_image_service(self)

        # No block devices defined
        self.compute_api.snapshot_volume_backed(
            self.context, instance, 'test-snapshot')

        bdm = fake_block_device.FakeDbBlockDeviceDict(
                {'no_device': False, 'volume_id': '1', 'boot_index': 0,
                 'connection_info': 'inf', 'device_name': '/dev/vda',
                 'source_type': 'volume', 'destination_type': 'volume'})
        instance_bdms.append(bdm)

        expect_meta['properties']['bdm_v2'] = True
        expect_meta['properties']['block_device_mapping'] = []
        expect_meta['properties']['block_device_mapping'].append(
            {'guest_format': None,
             'boot_index': 0,
             'no_device': None,
             'image_id': None,
             'volume_id': None,
             'disk_bus':
None, 'volume_size': None, 'source_type': 'snapshot', 'device_type': None, 'snapshot_id': '1-snapshot', 'device_name': '/dev/vda', 'destination_type': 'volume', 'delete_on_termination': False}) # All the db_only fields and the volume ones are removed self.compute_api.snapshot_volume_backed( self.context, instance, 'test-snapshot') self.assertEqual(quiesce_expected, quiesced[0]) self.assertEqual(quiesce_expected, quiesced[1]) instance.system_metadata['image_mappings'] = jsonutils.dumps( [{'virtual': 'ami', 'device': 'vda'}, {'device': 'vda', 'virtual': 'ephemeral0'}, {'device': 'vdb', 'virtual': 'swap'}, {'device': 'vdc', 'virtual': 'ephemeral1'}])[:255] instance.system_metadata['image_block_device_mapping'] = ( jsonutils.dumps( [{'source_type': 'snapshot', 'destination_type': 'volume', 'guest_format': None, 'device_type': 'disk', 'boot_index': 1, 'disk_bus': 'ide', 'device_name': '/dev/vdf', 'delete_on_termination': True, 'snapshot_id': 'snapshot-2', 'volume_id': None, 'volume_size': 100, 'image_id': None, 'no_device': None}])[:255]) bdm = fake_block_device.FakeDbBlockDeviceDict( {'no_device': False, 'volume_id': None, 'boot_index': -1, 'connection_info': 'inf', 'device_name': '/dev/vdh', 'source_type': 'blank', 'destination_type': 'local', 'guest_format': 'swap', 'delete_on_termination': True}) instance_bdms.append(bdm) expect_meta['properties']['block_device_mapping'].append( {'guest_format': 'swap', 'boot_index': -1, 'no_device': False, 'image_id': None, 'volume_id': None, 'disk_bus': None, 'volume_size': None, 'source_type': 'blank', 'device_type': None, 'snapshot_id': None, 'device_name': '/dev/vdh', 'destination_type': 'local', 'delete_on_termination': True}) quiesced = [False, False] # Check that the mappgins from the image properties are not included self.compute_api.snapshot_volume_backed( self.context, instance, 'test-snapshot') self.assertEqual(quiesce_expected, quiesced[0]) self.assertEqual(quiesce_expected, quiesced[1]) def 
test_snapshot_volume_backed(self): self._test_snapshot_volume_backed(False, False) def test_snapshot_volume_backed_with_quiesce(self): self._test_snapshot_volume_backed(True, False) def test_snapshot_volume_backed_with_quiesce_skipped(self): self._test_snapshot_volume_backed(False, True) def test_snapshot_volume_backed_with_quiesce_exception(self): self.assertRaises(exception.NovaException, self._test_snapshot_volume_backed, True, True) def test_snapshot_volume_backed_with_quiesce_stopped(self): self._test_snapshot_volume_backed(True, True, vm_state=vm_states.STOPPED) def test_volume_snapshot_create(self): volume_id = '1' create_info = {'id': 'eyedee'} fake_bdm = fake_block_device.FakeDbBlockDeviceDict({ 'id': 123, 'device_name': '/dev/sda2', 'source_type': 'volume', 'destination_type': 'volume', 'connection_info': "{'fake': 'connection_info'}", 'volume_id': 1, 'boot_index': -1}) fake_bdm['instance'] = fake_instance.fake_db_instance() fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid'] fake_bdm = objects.BlockDeviceMapping._from_db_object( self.context, objects.BlockDeviceMapping(), fake_bdm, expected_attrs=['instance']) self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'get_by_volume') self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'volume_snapshot_create') objects.BlockDeviceMapping.get_by_volume( self.context, volume_id, expected_attrs=['instance']).AndReturn(fake_bdm) self.compute_api.compute_rpcapi.volume_snapshot_create(self.context, fake_bdm['instance'], volume_id, create_info) self.mox.ReplayAll() snapshot = self.compute_api.volume_snapshot_create(self.context, volume_id, create_info) expected_snapshot = { 'snapshot': { 'id': create_info['id'], 'volumeId': volume_id, }, } self.assertEqual(snapshot, expected_snapshot) def test_volume_snapshot_delete(self): volume_id = '1' snapshot_id = '2' fake_bdm = fake_block_device.FakeDbBlockDeviceDict({ 'id': 123, 'device_name': '/dev/sda2', 'source_type': 'volume', 'destination_type': 'volume', 
'connection_info': "{'fake': 'connection_info'}", 'volume_id': 1, 'boot_index': -1}) fake_bdm['instance'] = fake_instance.fake_db_instance() fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid'] fake_bdm = objects.BlockDeviceMapping._from_db_object( self.context, objects.BlockDeviceMapping(), fake_bdm, expected_attrs=['instance']) self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'get_by_volume') self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'volume_snapshot_delete') objects.BlockDeviceMapping.get_by_volume( self.context, volume_id, expected_attrs=['instance']).AndReturn(fake_bdm) self.compute_api.compute_rpcapi.volume_snapshot_delete(self.context, fake_bdm['instance'], volume_id, snapshot_id, {}) self.mox.ReplayAll() self.compute_api.volume_snapshot_delete(self.context, volume_id, snapshot_id, {}) def _test_boot_volume_bootable(self, is_bootable=False): def get_vol_data(*args, **kwargs): return {'bootable': is_bootable} block_device_mapping = [{ 'id': 1, 'device_name': 'vda', 'no_device': None, 'virtual_name': None, 'snapshot_id': None, 'volume_id': '1', 'delete_on_termination': False, }] expected_meta = {'min_disk': 0, 'min_ram': 0, 'properties': {}, 'size': 0, 'status': 'active'} with mock.patch.object(self.compute_api.volume_api, 'get', side_effect=get_vol_data): if not is_bootable: self.assertRaises(exception.InvalidBDMVolumeNotBootable, self.compute_api._get_bdm_image_metadata, self.context, block_device_mapping) else: meta = self.compute_api._get_bdm_image_metadata(self.context, block_device_mapping) self.assertEqual(expected_meta, meta) def test_boot_volume_non_bootable(self): self._test_boot_volume_bootable(False) def test_boot_volume_bootable(self): self._test_boot_volume_bootable(True) def test_boot_volume_basic_property(self): block_device_mapping = [{ 'id': 1, 'device_name': 'vda', 'no_device': None, 'virtual_name': None, 'snapshot_id': None, 'volume_id': '1', 'delete_on_termination': False, }] fake_volume = {"volume_image_metadata": 
{"min_ram": 256, "min_disk": 128, "foo": "bar"}} with mock.patch.object(self.compute_api.volume_api, 'get', return_value=fake_volume): meta = self.compute_api._get_bdm_image_metadata( self.context, block_device_mapping) self.assertEqual(256, meta['min_ram']) self.assertEqual(128, meta['min_disk']) self.assertEqual('active', meta['status']) self.assertEqual('bar', meta['properties']['foo']) def test_boot_volume_snapshot_basic_property(self): block_device_mapping = [{ 'id': 1, 'device_name': 'vda', 'no_device': None, 'virtual_name': None, 'snapshot_id': '2', 'volume_id': None, 'delete_on_termination': False, }] fake_volume = {"volume_image_metadata": {"min_ram": 256, "min_disk": 128, "foo": "bar"}} fake_snapshot = {"volume_id": "1"} with test.nested( mock.patch.object(self.compute_api.volume_api, 'get', return_value=fake_volume), mock.patch.object(self.compute_api.volume_api, 'get_snapshot', return_value=fake_snapshot)) as ( volume_get, volume_get_snapshot): meta = self.compute_api._get_bdm_image_metadata( self.context, block_device_mapping) self.assertEqual(256, meta['min_ram']) self.assertEqual(128, meta['min_disk']) self.assertEqual('active', meta['status']) self.assertEqual('bar', meta['properties']['foo']) volume_get_snapshot.assert_called_once_with(self.context, block_device_mapping[0]['snapshot_id']) volume_get.assert_called_once_with(self.context, fake_snapshot['volume_id']) def _create_instance_with_disabled_disk_config(self, object=False): sys_meta = {"image_auto_disk_config": "Disabled"} params = {"system_metadata": sys_meta} instance = self._create_instance_obj(params=params) if object: return instance return obj_base.obj_to_primitive(instance) def _setup_fake_image_with_disabled_disk_config(self): self.fake_image = { 'id': 1, 'name': 'fake_name', 'status': 'active', 'properties': {"auto_disk_config": "Disabled"}, } def fake_show(obj, context, image_id, **kwargs): return self.fake_image fake_image.stub_out_image_service(self) 
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) return self.fake_image['id'] def test_resize_with_disabled_auto_disk_config_fails(self): fake_inst = self._create_instance_with_disabled_disk_config( object=True) self.assertRaises(exception.AutoDiskConfigDisabledByImage, self.compute_api.resize, self.context, fake_inst, auto_disk_config=True) def test_create_with_disabled_auto_disk_config_fails(self): image_id = self._setup_fake_image_with_disabled_disk_config() self.assertRaises(exception.AutoDiskConfigDisabledByImage, self.compute_api.create, self.context, "fake_flavor", image_id, auto_disk_config=True) def test_rebuild_with_disabled_auto_disk_config_fails(self): fake_inst = self._create_instance_with_disabled_disk_config( object=True) image_id = self._setup_fake_image_with_disabled_disk_config() self.assertRaises(exception.AutoDiskConfigDisabledByImage, self.compute_api.rebuild, self.context, fake_inst, image_id, "new password", auto_disk_config=True) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.Instance, 'get_flavor') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(compute_api.API, '_get_image') @mock.patch.object(compute_api.API, '_check_auto_disk_config') @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild') @mock.patch.object(compute_api.API, '_record_action_start') def test_rebuild(self, _record_action_start, _checks_for_create_and_rebuild, _check_auto_disk_config, _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save): orig_system_metadata = {} instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell', launched_at=timeutils.utcnow(), system_metadata=orig_system_metadata, image_ref='foo', expected_attrs=['system_metadata']) get_flavor.return_value = test_flavor.fake_flavor flavor = instance.get_flavor() image_href = 'foo' image = {"min_ram": 10, "min_disk": 1, "properties": {'architecture': 
arch.X86_64}} admin_pass = '' files_to_inject = [] bdms = objects.BlockDeviceMappingList() _get_image.return_value = (None, image) bdm_get_by_instance_uuid.return_value = bdms with mock.patch.object(self.compute_api.compute_task_api, 'rebuild_instance') as rebuild_instance: self.compute_api.rebuild(self.context, instance, image_href, admin_pass, files_to_inject) rebuild_instance.assert_called_once_with(self.context, instance=instance, new_pass=admin_pass, injected_files=files_to_inject, image_ref=image_href, orig_image_ref=image_href, orig_sys_metadata=orig_system_metadata, bdms=bdms, preserve_ephemeral=False, host=instance.host, kwargs={}) _check_auto_disk_config.assert_called_once_with(image=image) _checks_for_create_and_rebuild.assert_called_once_with(self.context, None, image, flavor, {}, [], None) self.assertNotEqual(orig_system_metadata, instance.system_metadata) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.Instance, 'get_flavor') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(compute_api.API, '_get_image') @mock.patch.object(compute_api.API, '_check_auto_disk_config') @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild') @mock.patch.object(compute_api.API, '_record_action_start') def test_rebuild_change_image(self, _record_action_start, _checks_for_create_and_rebuild, _check_auto_disk_config, _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save): orig_system_metadata = {} get_flavor.return_value = test_flavor.fake_flavor orig_image_href = 'orig_image' orig_image = {"min_ram": 10, "min_disk": 1, "properties": {'architecture': arch.X86_64, 'vm_mode': 'hvm'}} new_image_href = 'new_image' new_image = {"min_ram": 10, "min_disk": 1, "properties": {'architecture': arch.X86_64, 'vm_mode': 'xen'}} admin_pass = '' files_to_inject = [] bdms = objects.BlockDeviceMappingList() instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, 
cell_name='fake-cell', launched_at=timeutils.utcnow(), system_metadata=orig_system_metadata, expected_attrs=['system_metadata'], image_ref=orig_image_href, vm_mode=vm_mode.HVM) flavor = instance.get_flavor() def get_image(context, image_href): if image_href == new_image_href: return (None, new_image) if image_href == orig_image_href: return (None, orig_image) _get_image.side_effect = get_image bdm_get_by_instance_uuid.return_value = bdms with mock.patch.object(self.compute_api.compute_task_api, 'rebuild_instance') as rebuild_instance: self.compute_api.rebuild(self.context, instance, new_image_href, admin_pass, files_to_inject) rebuild_instance.assert_called_once_with(self.context, instance=instance, new_pass=admin_pass, injected_files=files_to_inject, image_ref=new_image_href, orig_image_ref=orig_image_href, orig_sys_metadata=orig_system_metadata, bdms=bdms, preserve_ephemeral=False, host=instance.host, kwargs={}) _check_auto_disk_config.assert_called_once_with(image=new_image) _checks_for_create_and_rebuild.assert_called_once_with(self.context, None, new_image, flavor, {}, [], None) self.assertEqual(vm_mode.XEN, instance.vm_mode) def _test_check_injected_file_quota_onset_file_limit_exceeded(self, side_effect): injected_files = [ { "path": "/etc/banner.txt", "contents": "foo" } ] with mock.patch.object(quota.QUOTAS, 'limit_check', side_effect=side_effect): self.compute_api._check_injected_file_quota( self.context, injected_files) def test_check_injected_file_quota_onset_file_limit_exceeded(self): # This is the first call to limit_check. side_effect = exception.OverQuota(overs='injected_files') self.assertRaises(exception.OnsetFileLimitExceeded, self._test_check_injected_file_quota_onset_file_limit_exceeded, side_effect) def test_check_injected_file_quota_onset_file_path_limit(self): # This is the second call to limit_check. 
side_effect = (mock.DEFAULT, exception.OverQuota(overs='injected_file_path_bytes')) self.assertRaises(exception.OnsetFilePathLimitExceeded, self._test_check_injected_file_quota_onset_file_limit_exceeded, side_effect) def test_check_injected_file_quota_onset_file_content_limit(self): # This is the second call to limit_check but with different overs. side_effect = (mock.DEFAULT, exception.OverQuota(overs='injected_file_content_bytes')) self.assertRaises(exception.OnsetFileContentLimitExceeded, self._test_check_injected_file_quota_onset_file_limit_exceeded, side_effect) @mock.patch('nova.objects.Quotas.commit') @mock.patch('nova.objects.Quotas.reserve') @mock.patch('nova.objects.Instance.save') @mock.patch('nova.objects.InstanceAction.action_start') def test_restore_by_admin(self, action_start, instance_save, quota_reserve, quota_commit): admin_context = context.RequestContext('admin_user', 'admin_project', True) instance = self._create_instance_obj() instance.vm_state = vm_states.SOFT_DELETED instance.task_state = None instance.save() with mock.patch.object(self.compute_api, 'compute_rpcapi') as rpc: self.compute_api.restore(admin_context, instance) rpc.restore_instance.assert_called_once_with(admin_context, instance) self.assertEqual(instance.task_state, task_states.RESTORING) self.assertEqual(1, quota_commit.call_count) quota_reserve.assert_called_once_with(instances=1, cores=instance.flavor.vcpus, ram=instance.flavor.memory_mb, project_id=instance.project_id, user_id=instance.user_id) @mock.patch('nova.objects.Quotas.commit') @mock.patch('nova.objects.Quotas.reserve') @mock.patch('nova.objects.Instance.save') @mock.patch('nova.objects.InstanceAction.action_start') def test_restore_by_instance_owner(self, action_start, instance_save, quota_reserve, quota_commit): instance = self._create_instance_obj() instance.vm_state = vm_states.SOFT_DELETED instance.task_state = None instance.save() with mock.patch.object(self.compute_api, 'compute_rpcapi') as rpc: 
self.compute_api.restore(self.context, instance) rpc.restore_instance.assert_called_once_with(self.context, instance) self.assertEqual(instance.project_id, self.context.project_id) self.assertEqual(instance.task_state, task_states.RESTORING) self.assertEqual(1, quota_commit.call_count) quota_reserve.assert_called_once_with(instances=1, cores=instance.flavor.vcpus, ram=instance.flavor.memory_mb, project_id=instance.project_id, user_id=instance.user_id) def test_external_instance_event(self): instances = [ objects.Instance(uuid=uuids.instance_1, host='host1'), objects.Instance(uuid=uuids.instance_2, host='host1'), objects.Instance(uuid=uuids.instance_3, host='host2'), ] events = [ objects.InstanceExternalEvent( instance_uuid=uuids.instance_1), objects.InstanceExternalEvent( instance_uuid=uuids.instance_2), objects.InstanceExternalEvent( instance_uuid=uuids.instance_3), ] self.compute_api.compute_rpcapi = mock.MagicMock() self.compute_api.external_instance_event(self.context, instances, events) method = self.compute_api.compute_rpcapi.external_instance_event method.assert_any_call(self.context, instances[0:2], events[0:2]) method.assert_any_call(self.context, instances[2:], events[2:]) self.assertEqual(2, method.call_count) def test_volume_ops_invalid_task_state(self): instance = self._create_instance_obj() self.assertEqual(instance.vm_state, vm_states.ACTIVE) instance.task_state = 'Any' volume_id = uuidutils.generate_uuid() self.assertRaises(exception.InstanceInvalidState, self.compute_api.attach_volume, self.context, instance, volume_id) self.assertRaises(exception.InstanceInvalidState, self.compute_api.detach_volume, self.context, instance, volume_id) new_volume_id = uuidutils.generate_uuid() self.assertRaises(exception.InstanceInvalidState, self.compute_api.swap_volume, self.context, instance, volume_id, new_volume_id) @mock.patch.object(cinder.API, 'get', side_effect=exception.CinderConnectionFailed(reason='error')) def 
test_get_bdm_image_metadata_with_cinder_down(self, mock_get): bdms = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'volume_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', }))] self.assertRaises(exception.CinderConnectionFailed, self.compute_api._get_bdm_image_metadata, self.context, bdms, legacy_bdm=True) @mock.patch.object(cinder.API, 'get') @mock.patch.object(cinder.API, 'check_attach', side_effect=exception.InvalidVolume(reason='error')) def test_validate_bdm_with_error_volume(self, mock_check_attach, mock_get): # Tests that an InvalidVolume exception raised from # volume_api.check_attach due to the volume status not being # 'available' results in _validate_bdm re-raising InvalidVolume. instance = self._create_instance_obj() instance_type = self._create_flavor() volume_id = 'e856840e-9f5b-4894-8bde-58c6e29ac1e8' volume_info = {'status': 'error', 'attach_status': 'detached', 'id': volume_id} mock_get.return_value = volume_info bdms = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'boot_index': 0, 'volume_id': volume_id, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', }))] self.assertRaises(exception.InvalidVolume, self.compute_api._validate_bdm, self.context, instance, instance_type, bdms) mock_get.assert_called_once_with(self.context, volume_id) mock_check_attach.assert_called_once_with( self.context, volume_info, instance=instance) @mock.patch.object(cinder.API, 'get_snapshot', side_effect=exception.CinderConnectionFailed(reason='error')) @mock.patch.object(cinder.API, 'get', side_effect=exception.CinderConnectionFailed(reason='error')) def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot): instance = self._create_instance_obj() instance_type = self._create_flavor() bdm = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'volume_id': 1, 'source_type': 'volume', 'destination_type': 
'volume', 'device_name': 'vda', 'boot_index': 0, }))] bdms = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'snapshot_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': 0, }))] self.assertRaises(exception.CinderConnectionFailed, self.compute_api._validate_bdm, self.context, instance, instance_type, bdm) self.assertRaises(exception.CinderConnectionFailed, self.compute_api._validate_bdm, self.context, instance, instance_type, bdms) def _test_create_db_entry_for_new_instance_with_cinder_error(self, expected_exception): @mock.patch.object(objects.Instance, 'create') @mock.patch.object(compute_api.SecurityGroupAPI, 'ensure_default') @mock.patch.object(compute_api.API, '_populate_instance_names') @mock.patch.object(compute_api.API, '_populate_instance_for_create') def do_test(self, mock_create, mock_names, mock_ensure, mock_inst_create): instance = self._create_instance_obj() instance['display_name'] = 'FAKE_DISPLAY_NAME' instance['shutdown_terminate'] = False instance_type = self._create_flavor() fake_image = { 'id': 'fake-image-id', 'properties': {'mappings': []}, 'status': 'fake-status', 'location': 'far-away'} fake_security_group = None fake_num_instances = 1 fake_index = 1 bdm = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'volume_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': 0, }))] with mock.patch.object(instance, "destroy") as destroy: self.assertRaises(expected_exception, self.compute_api. create_db_entry_for_new_instance, self.context, instance_type, fake_image, instance, fake_security_group, bdm, fake_num_instances, fake_index) destroy.assert_called_once_with() # We use a nested method so we can decorate with the mocks. 
do_test(self) @mock.patch.object(cinder.API, 'get', side_effect=exception.CinderConnectionFailed(reason='error')) def test_create_db_entry_for_new_instancewith_cinder_down(self, mock_get): self._test_create_db_entry_for_new_instance_with_cinder_error( expected_exception=exception.CinderConnectionFailed) @mock.patch.object(cinder.API, 'get', return_value={'id': 1, 'status': 'error', 'attach_status': 'detached'}) def test_create_db_entry_for_new_instancewith_error_volume(self, mock_get): self._test_create_db_entry_for_new_instance_with_cinder_error( expected_exception=exception.InvalidVolume) def test_provision_instances_creates_request_spec(self): @mock.patch.object(self.compute_api, '_check_num_instances_quota') @mock.patch.object(objects.Instance, 'create') @mock.patch.object(self.compute_api.security_group_api, 'ensure_default') @mock.patch.object(self.compute_api, '_validate_bdm') @mock.patch.object(self.compute_api, '_create_block_device_mapping') @mock.patch.object(objects.RequestSpec, 'from_components') @mock.patch.object(objects, 'BuildRequest') def do_test(_mock_build_req, mock_req_spec_from_components, _mock_create_bdm, _mock_validate_bdm, _mock_ensure_default, _mock_create, mock_check_num_inst_quota): quota_mock = mock.MagicMock() req_spec_mock = mock.MagicMock() mock_check_num_inst_quota.return_value = (1, quota_mock) mock_req_spec_from_components.return_value = req_spec_mock ctxt = context.RequestContext('fake-user', 'fake-project') flavor = self._create_flavor() min_count = max_count = 1 boot_meta = { 'id': 'fake-image-id', 'properties': {'mappings': []}, 'status': 'fake-status', 'location': 'far-away'} base_options = {'image_ref': 'fake-ref', 'display_name': 'fake-name', 'project_id': 'fake-project', 'availability_zone': None, 'metadata': {}, 'access_ip_v4': None, 'access_ip_v6': None, 'config_drive': None, 'key_name': None, 'numa_topology': None, 'pci_requests': None} security_groups = {} block_device_mapping = [objects.BlockDeviceMapping( 
**fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'volume_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': 0, }))] shutdown_terminate = True instance_group = None check_server_group_quota = False filter_properties = {'scheduler_hints': None, 'instance_type': flavor} instances = self.compute_api._provision_instances(ctxt, flavor, min_count, max_count, base_options, boot_meta, security_groups, block_device_mapping, shutdown_terminate, instance_group, check_server_group_quota, filter_properties) self.assertTrue(uuidutils.is_uuid_like(instances[0].uuid)) mock_req_spec_from_components.assert_called_once_with(ctxt, mock.ANY, boot_meta, flavor, base_options['numa_topology'], base_options['pci_requests'], filter_properties, instance_group, base_options['availability_zone']) req_spec_mock.create.assert_called_once_with() do_test() def test_provision_instances_creates_destroys_build_request(self): @mock.patch.object(self.compute_api, '_check_num_instances_quota') @mock.patch.object(objects.Instance, 'create') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(self.compute_api.security_group_api, 'ensure_default') @mock.patch.object(self.compute_api, '_validate_bdm') @mock.patch.object(self.compute_api, '_create_block_device_mapping') @mock.patch.object(objects.RequestSpec, 'from_components') @mock.patch.object(objects, 'BuildRequest') def do_test(mock_build_req, mock_req_spec_from_components, _mock_create_bdm, _mock_validate_bdm, _mock_ensure_default, _mock_inst_create, _mock_inst_save, mock_check_num_inst_quota): quota_mock = mock.MagicMock() req_spec_mock = mock.MagicMock() build_req_mock = mock.MagicMock() mock_check_num_inst_quota.return_value = (2, quota_mock) mock_req_spec_from_components.return_value = req_spec_mock mock_build_req.return_value = build_req_mock ctxt = context.RequestContext('fake-user', 'fake-project') flavor = self._create_flavor() min_count = 1 max_count = 2 boot_meta = { 'id': 
'fake-image-id', 'properties': {'mappings': []}, 'status': 'fake-status', 'location': 'far-away'} base_options = {'image_ref': 'fake-ref', 'display_name': 'fake-name', 'project_id': 'fake-project', 'availability_zone': None, 'metadata': {}, 'access_ip_v4': None, 'access_ip_v6': None, 'config_drive': None, 'key_name': None, 'numa_topology': None, 'pci_requests': None} security_groups = {} block_device_mapping = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'volume_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': 0, }))] shutdown_terminate = True instance_group = None check_server_group_quota = False filter_properties = {'scheduler_hints': None, 'instance_type': flavor} instances = self.compute_api._provision_instances(ctxt, flavor, min_count, max_count, base_options, boot_meta, security_groups, block_device_mapping, shutdown_terminate, instance_group, check_server_group_quota, filter_properties) self.assertTrue(uuidutils.is_uuid_like(instances[0].uuid)) display_names = ['fake-name-1', 'fake-name-2'] build_req_calls = [ mock.call(ctxt, request_spec=req_spec_mock, project_id=ctxt.project_id, user_id=ctxt.user_id, display_name=display_names[0], instance_metadata=base_options['metadata'], progress=0, vm_state=vm_states.BUILDING, task_state=task_states.SCHEDULING, image_ref=base_options['image_ref'], access_ip_v4=base_options['access_ip_v4'], access_ip_v6=base_options['access_ip_v6'], info_cache=mock.ANY, security_groups=mock.ANY, config_drive=False, key_name=base_options['config_drive'], locked_by=None), mock.call().create(), mock.call().destroy(), mock.call(ctxt, request_spec=req_spec_mock, project_id=ctxt.project_id, user_id=ctxt.user_id, display_name=display_names[1], instance_metadata=base_options['metadata'], progress=0, vm_state=vm_states.BUILDING, task_state=task_states.SCHEDULING, image_ref=base_options['image_ref'], access_ip_v4=base_options['access_ip_v4'], 
access_ip_v6=base_options['access_ip_v6'], info_cache=mock.ANY, security_groups=mock.ANY, config_drive=False, key_name=base_options['config_drive'], locked_by=None), mock.call().create(), mock.call().destroy() ] mock_build_req.assert_has_calls(build_req_calls) do_test() def _test_rescue(self, vm_state=vm_states.ACTIVE, rescue_password=None, rescue_image=None, clean_shutdown=True): instance = self._create_instance_obj(params={'vm_state': vm_state}) bdms = [] with test.nested( mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid', return_value=bdms), mock.patch.object(self.compute_api, 'is_volume_backed_instance', return_value=False), mock.patch.object(instance, 'save'), mock.patch.object(self.compute_api, '_record_action_start'), mock.patch.object(self.compute_api.compute_rpcapi, 'rescue_instance') ) as ( bdm_get_by_instance_uuid, volume_backed_inst, instance_save, record_action_start, rpcapi_rescue_instance ): self.compute_api.rescue(self.context, instance, rescue_password=rescue_password, rescue_image_ref=rescue_image, clean_shutdown=clean_shutdown) # assert field values set on the instance object self.assertEqual(task_states.RESCUING, instance.task_state) # assert our mock calls bdm_get_by_instance_uuid.assert_called_once_with( self.context, instance.uuid) volume_backed_inst.assert_called_once_with( self.context, instance, bdms) instance_save.assert_called_once_with(expected_task_state=[None]) record_action_start.assert_called_once_with( self.context, instance, instance_actions.RESCUE) rpcapi_rescue_instance.assert_called_once_with( self.context, instance=instance, rescue_password=rescue_password, rescue_image_ref=rescue_image, clean_shutdown=clean_shutdown) def test_rescue_active(self): self._test_rescue() def test_rescue_stopped(self): self._test_rescue(vm_state=vm_states.STOPPED) def test_rescue_error(self): self._test_rescue(vm_state=vm_states.ERROR) def test_rescue_with_password(self): self._test_rescue(rescue_password='fake-password') def 
test_rescue_with_image(self): self._test_rescue(rescue_image='fake-image') def test_rescue_forced_shutdown(self): self._test_rescue(clean_shutdown=False) def test_unrescue(self): instance = self._create_instance_obj( params={'vm_state': vm_states.RESCUED}) with test.nested( mock.patch.object(instance, 'save'), mock.patch.object(self.compute_api, '_record_action_start'), mock.patch.object(self.compute_api.compute_rpcapi, 'unrescue_instance') ) as ( instance_save, record_action_start, rpcapi_unrescue_instance ): self.compute_api.unrescue(self.context, instance) # assert field values set on the instance object self.assertEqual(task_states.UNRESCUING, instance.task_state) # assert our mock calls instance_save.assert_called_once_with(expected_task_state=[None]) record_action_start.assert_called_once_with( self.context, instance, instance_actions.UNRESCUE) rpcapi_unrescue_instance.assert_called_once_with( self.context, instance=instance) def test_set_admin_password_invalid_state(self): # Tests that InstanceInvalidState is raised when not ACTIVE. instance = self._create_instance_obj({'vm_state': vm_states.STOPPED}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.set_admin_password, self.context, instance) def test_set_admin_password(self): # Ensure instance can have its admin password set. 
instance = self._create_instance_obj() @mock.patch.object(objects.Instance, 'save') @mock.patch.object(self.compute_api, '_record_action_start') @mock.patch.object(self.compute_api.compute_rpcapi, 'set_admin_password') def do_test(compute_rpcapi_mock, record_mock, instance_save_mock): # call the API self.compute_api.set_admin_password(self.context, instance) # make our assertions instance_save_mock.assert_called_once_with( expected_task_state=[None]) record_mock.assert_called_once_with( self.context, instance, instance_actions.CHANGE_PASSWORD) compute_rpcapi_mock.assert_called_once_with( self.context, instance=instance, new_pass=None) do_test() def _test_attach_interface_invalid_state(self, state): instance = self._create_instance_obj( params={'vm_state': state}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.attach_interface, self.context, instance, '', '', '', []) def test_attach_interface_invalid_state(self): for state in [vm_states.BUILDING, vm_states.DELETED, vm_states.ERROR, vm_states.RESCUED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.SUSPENDED, vm_states.SHELVED, vm_states.SHELVED_OFFLOADED]: self._test_attach_interface_invalid_state(state) def _test_detach_interface_invalid_state(self, state): instance = self._create_instance_obj( params={'vm_state': state}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.detach_interface, self.context, instance, '', '', '', []) def test_detach_interface_invalid_state(self): for state in [vm_states.BUILDING, vm_states.DELETED, vm_states.ERROR, vm_states.RESCUED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.SUSPENDED, vm_states.SHELVED, vm_states.SHELVED_OFFLOADED]: self._test_detach_interface_invalid_state(state) def _test_check_and_transform_bdm(self, block_device_mapping): instance_type = self._create_flavor() base_options = {'uuid': uuids.bdm_instance, 'image_ref': 'fake_image_ref', 'metadata': {}} image_meta = {'status': 'active', 'name': 'image_name', 'deleted': 
False, 'container_format': 'bare', 'id': 'image_id'} legacy_bdm = False block_device_mapping = block_device_mapping self.assertRaises(exception.InvalidRequest, self.compute_api._check_and_transform_bdm, self.context, base_options, instance_type, image_meta, 1, 1, block_device_mapping, legacy_bdm) def test_check_and_transform_bdm_source_volume(self): block_device_mapping = [{'boot_index': 0, 'device_name': None, 'image_id': 'image_id', 'source_type': 'image'}, {'device_name': '/dev/vda', 'source_type': 'volume', 'destination_type': 'volume', 'device_type': None, 'volume_id': 'volume_id'}] self._test_check_and_transform_bdm(block_device_mapping) def test_check_and_transform_bdm_source_snapshot(self): block_device_mapping = [{'boot_index': 0, 'device_name': None, 'image_id': 'image_id', 'source_type': 'image'}, {'device_name': '/dev/vda', 'source_type': 'snapshot', 'destination_type': 'volume', 'device_type': None, 'volume_id': 'volume_id'}] self._test_check_and_transform_bdm(block_device_mapping) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.InstanceAction, 'action_start') @mock.patch.object(compute_rpcapi.ComputeAPI, 'pause_instance') @mock.patch.object(objects.Instance, 'get_by_uuid') @mock.patch.object(compute_api.API, '_get_instances_by_filters', return_value=[]) @mock.patch.object(compute_api.API, '_create_instance') def test_skip_policy_check(self, mock_create, mock_get_ins_by_filters, mock_get, mock_pause, mock_action, mock_save): policy.reset() rules = {'compute:pause': '!', 'compute:get': '!', 'compute:get_all': '!', 'compute:create': '!'} policy.set_rules(oslo_policy.Rules.from_dict(rules)) instance = self._create_instance_obj() mock_get.return_value = instance self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.pause, self.context, instance) api = compute_api.API(skip_policy_check=True) api.pause(self.context, instance) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.get, self.context, 
instance.uuid) api = compute_api.API(skip_policy_check=True) api.get(self.context, instance.uuid) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.get_all, self.context) api = compute_api.API(skip_policy_check=True) api.get_all(self.context) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.create, self.context, None, None) api = compute_api.API(skip_policy_check=True) api.create(self.context, None, None) @mock.patch.object(compute_api.API, '_get_instances_by_filters') def test_tenant_to_project_conversion(self, mock_get): mock_get.return_value = [] api = compute_api.API() api.get_all(self.context, search_opts={'tenant_id': 'foo'}) filters = mock_get.call_args_list[0][0][1] self.assertEqual({'project_id': 'foo'}, filters) def test_metadata_invalid_return_empty_object(self): api = compute_api.API() ret = api.get_all(self.context, want_objects=True, search_opts={'metadata': 'foo'}) self.assertIsInstance(ret, objects.InstanceList) self.assertEqual(0, len(ret)) def test_metadata_invalid_return_empty_list(self): api = compute_api.API() ret = api.get_all(self.context, want_objects=False, search_opts={'metadata': 'foo'}) self.assertIsInstance(ret, list) self.assertEqual(0, len(ret)) def test_populate_instance_names_host_name(self): params = dict(display_name="vm1") instance = self._create_instance_obj(params=params) self.compute_api._populate_instance_names(instance, 1) self.assertEqual('vm1', instance.hostname) def test_populate_instance_names_host_name_is_empty(self): params = dict(display_name=u'\u865a\u62df\u673a\u662f\u4e2d\u6587') instance = self._create_instance_obj(params=params) self.compute_api._populate_instance_names(instance, 1) self.assertEqual('Server-%s' % instance.uuid, instance.hostname) def test_populate_instance_names_host_name_multi(self): params = dict(display_name="vm") instance = self._create_instance_obj(params=params) with mock.patch.object(instance, 'save'): 
self.compute_api._apply_instance_name_template(self.context, instance, 1) self.assertEqual('vm-2', instance.hostname) def test_populate_instance_names_host_name_is_empty_multi(self): params = dict(display_name=u'\u865a\u62df\u673a\u662f\u4e2d\u6587') instance = self._create_instance_obj(params=params) with mock.patch.object(instance, 'save'): self.compute_api._apply_instance_name_template(self.context, instance, 1) self.assertEqual('Server-%s' % instance.uuid, instance.hostname) def test_host_statuses(self): instances = [ objects.Instance(uuid=uuids.instance_1, host='host1', services= self._obj_to_list_obj(objects.ServiceList( self.context), objects.Service(id=0, host='host1', disabled=True, forced_down=True, binary='nova-compute'))), objects.Instance(uuid=uuids.instance_2, host='host2', services= self._obj_to_list_obj(objects.ServiceList( self.context), objects.Service(id=0, host='host2', disabled=True, forced_down=False, binary='nova-compute'))), objects.Instance(uuid=uuids.instance_3, host='host3', services= self._obj_to_list_obj(objects.ServiceList( self.context), objects.Service(id=0, host='host3', disabled=False, last_seen_up=timeutils.utcnow() - datetime.timedelta(minutes=5), forced_down=False, binary='nova-compute'))), objects.Instance(uuid=uuids.instance_4, host='host4', services= self._obj_to_list_obj(objects.ServiceList( self.context), objects.Service(id=0, host='host4', disabled=False, last_seen_up=timeutils.utcnow(), forced_down=False, binary='nova-compute'))), objects.Instance(uuid=uuids.instance_5, host='host5', services= objects.ServiceList()), objects.Instance(uuid=uuids.instance_6, host=None, services= self._obj_to_list_obj(objects.ServiceList( self.context), objects.Service(id=0, host='host6', disabled=True, forced_down=False, binary='nova-compute'))), objects.Instance(uuid=uuids.instance_7, host='host2', services= self._obj_to_list_obj(objects.ServiceList( self.context), objects.Service(id=0, host='host2', disabled=True, forced_down=False, 
binary='nova-compute'))) ] host_statuses = self.compute_api.get_instances_host_statuses( instances) expect_statuses = {uuids.instance_1: fields_obj.HostStatus.DOWN, uuids.instance_2: fields_obj.HostStatus.MAINTENANCE, uuids.instance_3: fields_obj.HostStatus.UNKNOWN, uuids.instance_4: fields_obj.HostStatus.UP, uuids.instance_5: fields_obj.HostStatus.NONE, uuids.instance_6: fields_obj.HostStatus.NONE, uuids.instance_7: fields_obj.HostStatus.MAINTENANCE} for instance in instances: self.assertEqual(expect_statuses[instance.uuid], host_statuses[instance.uuid]) @mock.patch.object(objects.Migration, 'get_by_id_and_instance') @mock.patch.object(objects.InstanceAction, 'action_start') def test_live_migrate_force_complete_succeeded( self, action_start, get_by_id_and_instance): if self.cell_type == 'api': # cell api has not been implemented. return rpcapi = self.compute_api.compute_rpcapi instance = self._create_instance_obj() instance.task_state = task_states.MIGRATING migration = objects.Migration() migration.id = 0 migration.status = 'running' get_by_id_and_instance.return_value = migration with mock.patch.object( rpcapi, 'live_migration_force_complete') as lm_force_complete: self.compute_api.live_migrate_force_complete( self.context, instance, migration.id) lm_force_complete.assert_called_once_with(self.context, instance, 0) action_start.assert_called_once_with( self.context, instance.uuid, 'live_migration_force_complete', want_result=False) @mock.patch.object(objects.Migration, 'get_by_id_and_instance') def test_live_migrate_force_complete_invalid_migration_state( self, get_by_id_and_instance): instance = self._create_instance_obj() instance.task_state = task_states.MIGRATING migration = objects.Migration() migration.id = 0 migration.status = 'error' get_by_id_and_instance.return_value = migration self.assertRaises(exception.InvalidMigrationState, self.compute_api.live_migrate_force_complete, self.context, instance, migration.id) def 
test_live_migrate_force_complete_invalid_vm_state(self): instance = self._create_instance_obj() instance.task_state = None self.assertRaises(exception.InstanceInvalidState, self.compute_api.live_migrate_force_complete, self.context, instance, '1') def _get_migration(self, migration_id, status, migration_type): migration = objects.Migration() migration.id = migration_id migration.status = status migration.migration_type = migration_type return migration @mock.patch('nova.compute.api.API._record_action_start') @mock.patch.object(compute_rpcapi.ComputeAPI, 'live_migration_abort') @mock.patch.object(objects.Migration, 'get_by_id_and_instance') def test_live_migrate_abort_succeeded(self, mock_get_migration, mock_lm_abort, mock_rec_action): instance = self._create_instance_obj() instance.task_state = task_states.MIGRATING migration = self._get_migration(21, 'running', 'live-migration') mock_get_migration.return_value = migration self.compute_api.live_migrate_abort(self.context, instance, migration.id) mock_rec_action.assert_called_once_with(self.context, instance, instance_actions.LIVE_MIGRATION_CANCEL) mock_lm_abort.called_once_with(self.context, instance, migration.id) @mock.patch.object(objects.Migration, 'get_by_id_and_instance') def test_live_migration_abort_wrong_migration_status(self, mock_get_migration): instance = self._create_instance_obj() instance.task_state = task_states.MIGRATING migration = self._get_migration(21, 'completed', 'live-migration') mock_get_migration.return_value = migration self.assertRaises(exception.InvalidMigrationState, self.compute_api.live_migrate_abort, self.context, instance, migration.id) class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase): def setUp(self): super(ComputeAPIUnitTestCase, self).setUp() self.compute_api = compute_api.API() self.cell_type = None def test_resize_same_flavor_fails(self): self.assertRaises(exception.CannotResizeToSameFlavor, self._test_resize, same_flavor=True) class 
ComputeAPIAPICellUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase): def setUp(self): super(ComputeAPIAPICellUnitTestCase, self).setUp() self.flags(cell_type='api', enable=True, group='cells') self.compute_api = compute_cells_api.ComputeCellsAPI() self.cell_type = 'api' def test_resize_same_flavor_fails(self): self.assertRaises(exception.CannotResizeToSameFlavor, self._test_resize, same_flavor=True) @mock.patch.object(compute_cells_api, 'ComputeRPCAPIRedirect') def test_create_volume_bdm_call_reserve_dev_name(self, mock_reserve): instance = self._create_instance_obj() # In the cells rpcapi there isn't the call for the # reserve_block_device_name so the volume_bdm returned # by the _create_volume_bdm is None result = self.compute_api._create_volume_bdm(self.context, instance, 'vda', '1', None, None) self.assertIsNone(result, None) @mock.patch.object(compute_cells_api.ComputeCellsAPI, '_call_to_cells') def test_attach_volume(self, mock_attach): instance = self._create_instance_obj() volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol', None, None, None, None, None) mock_volume_api = mock.patch.object(self.compute_api, 'volume_api', mock.MagicMock(spec=cinder.API)) with mock_volume_api as mock_v_api: mock_v_api.get.return_value = volume self.compute_api.attach_volume( self.context, instance, volume['id']) mock_v_api.check_attach.assert_called_once_with(self.context, volume, instance=instance) mock_attach.assert_called_once_with(self.context, instance, 'attach_volume', volume['id'], None, None, None) def test_attach_volume_reserve_fails(self): self.skipTest("Reserve is never done in the API cell.") class ComputeAPIComputeCellUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase): def setUp(self): super(ComputeAPIComputeCellUnitTestCase, self).setUp() self.flags(cell_type='compute', enable=True, group='cells') self.compute_api = compute_api.API() self.cell_type = 'compute' def test_resize_same_flavor_passes(self): self._test_resize(same_flavor=True) 
class DiffDictTestCase(test.NoDBTestCase): """Unit tests for _diff_dict().""" def test_no_change(self): old = dict(a=1, b=2, c=3) new = dict(a=1, b=2, c=3) diff = compute_api._diff_dict(old, new) self.assertEqual(diff, {}) def test_new_key(self): old = dict(a=1, b=2, c=3) new = dict(a=1, b=2, c=3, d=4) diff = compute_api._diff_dict(old, new) self.assertEqual(diff, dict(d=['+', 4])) def test_changed_key(self): old = dict(a=1, b=2, c=3) new = dict(a=1, b=4, c=3) diff = compute_api._diff_dict(old, new) self.assertEqual(diff, dict(b=['+', 4])) def test_removed_key(self): old = dict(a=1, b=2, c=3) new = dict(a=1, c=3) diff = compute_api._diff_dict(old, new) self.assertEqual(diff, dict(b=['-'])) class SecurityGroupAPITest(test.NoDBTestCase): def setUp(self): super(SecurityGroupAPITest, self).setUp() self.secgroup_api = compute_api.SecurityGroupAPI() self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) def test_get_instance_security_groups(self): groups = objects.SecurityGroupList() groups.objects = [objects.SecurityGroup(name='foo'), objects.SecurityGroup(name='bar')] instance = objects.Instance(security_groups=groups) names = self.secgroup_api.get_instance_security_groups(self.context, instance) self.assertEqual(sorted([{'name': 'bar'}, {'name': 'foo'}], key=str), sorted(names, key=str)) @mock.patch('nova.objects.security_group.make_secgroup_list') def test_populate_security_groups(self, mock_msl): r = self.secgroup_api.populate_security_groups([mock.sentinel.group]) mock_msl.assert_called_once_with([mock.sentinel.group]) self.assertEqual(r, mock_msl.return_value) nova-13.0.0/nova/tests/unit/compute/test_flavors.py0000664000567000056710000000443312701407773023540 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for flavor basic functions""" from nova.compute import flavors from nova import exception from nova import test class ExtraSpecTestCase(test.NoDBTestCase): def _flavor_validate_extra_spec_keys_invalid_input(self, key_name_list): self.assertRaises(exception.InvalidInput, flavors.validate_extra_spec_keys, key_name_list) def test_flavor_validate_extra_spec_keys_invalid_input(self): lists = [['', ], ['*', ], ['+', ]] for x in lists: self._flavor_validate_extra_spec_keys_invalid_input(x) def test_flavor_validate_extra_spec_keys(self): key_name_list = ['abc', 'ab c', 'a-b-c', 'a_b-c', 'a:bc'] flavors.validate_extra_spec_keys(key_name_list) class CreateFlavorTestCase(test.NoDBTestCase): def test_create_flavor_ram_error(self): args = ("ram_test", "9999999999", "1", "10", "1") try: flavors.create(*args) self.fail("Be sure this will never be executed.") except exception.InvalidInput as e: self.assertIn("ram", e.message) def test_create_flavor_disk_error(self): args = ("disk_test", "1024", "1", "9999999999", "1") try: flavors.create(*args) self.fail("Be sure this will never be executed.") except exception.InvalidInput as e: self.assertIn("disk", e.message) def test_create_flavor_ephemeral_error(self): args = ("ephemeral_test", "1024", "1", "10", "9999999999") try: flavors.create(*args) self.fail("Be sure this will never be executed.") except exception.InvalidInput as e: self.assertIn("ephemeral", e.message) nova-13.0.0/nova/tests/unit/compute/fake_resource_tracker.py0000664000567000056710000000155312701407773025355 0ustar jenkinsjenkins00000000000000# Copyright (c) 
2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.compute import resource_tracker class FakeResourceTracker(resource_tracker.ResourceTracker): """Version without a DB requirement.""" def _update(self, context): self._write_ext_resources(self.compute_node) nova-13.0.0/nova/tests/unit/compute/eventlet_utils.py0000664000567000056710000000146712701407773024077 0ustar jenkinsjenkins00000000000000# Rackspace Hosting 2014 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet class SyncPool(eventlet.GreenPool): """Synchronous pool for testing threaded code without adding sleep waits. """ def spawn_n(self, func, *args, **kwargs): func(*args, **kwargs) nova-13.0.0/nova/tests/unit/compute/test_arch.py0000664000567000056710000000403512701407773022777 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from nova.compute import arch from nova import exception from nova import test class ArchTest(test.NoDBTestCase): @mock.patch.object(os, "uname") def test_host(self, mock_uname): os.uname.return_value = ( 'Linux', 'localhost.localdomain', '3.14.8-200.fc20.x86_64', '#1 SMP Mon Jun 16 21:57:53 UTC 2014', 'i686' ) self.assertEqual(arch.I686, arch.from_host()) def test_valid_string(self): self.assertTrue(arch.is_valid("x86_64")) def test_valid_constant(self): self.assertTrue(arch.is_valid(arch.X86_64)) def test_valid_bogus(self): self.assertFalse(arch.is_valid("x86_64wibble")) def test_canonicalize_i386(self): self.assertEqual(arch.I686, arch.canonicalize("i386")) def test_canonicalize_amd64(self): self.assertEqual(arch.X86_64, arch.canonicalize("amd64")) def test_canonicalize_case(self): self.assertEqual(arch.X86_64, arch.canonicalize("X86_64")) def test_canonicalize_compat_xen1(self): self.assertEqual(arch.I686, arch.canonicalize("x86_32")) def test_canonicalize_compat_xen2(self): self.assertEqual(arch.I686, arch.canonicalize("x86_32p")) def test_canonicalize_bogus(self): self.assertRaises(exception.InvalidArchitectureName, arch.canonicalize, "x86_64wibble") nova-13.0.0/nova/tests/unit/conf_fixture.py0000664000567000056710000000671712701410011022026 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space 
Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import fixture as config_fixture from oslo_policy import opts as policy_opts import nova.conf from nova import config from nova import ipv6 from nova import paths from nova.tests.unit import utils CONF = nova.conf.CONF CONF.import_opt('use_ipv6', 'nova.netconf') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('fake_network', 'nova.network.linux_net') CONF.import_opt('network_size', 'nova.network.manager') CONF.import_opt('num_networks', 'nova.network.manager') CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips') CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips') class ConfFixture(config_fixture.Config): """Fixture to manage global conf settings.""" def setUp(self): super(ConfFixture, self).setUp() self.conf.set_default('api_paste_config', paths.state_path_def('etc/nova/api-paste.ini')) self.conf.set_default('host', 'fake-mini') self.conf.set_default('compute_driver', 'nova.virt.fake.SmallFakeDriver') self.conf.set_default('fake_network', True) self.conf.set_default('flat_network_bridge', 'br100') self.conf.set_default('floating_ip_dns_manager', 'nova.tests.unit.utils.dns_manager') self.conf.set_default('instance_dns_manager', 'nova.tests.unit.utils.dns_manager') self.conf.set_default('network_size', 8) self.conf.set_default('num_networks', 2) self.conf.set_default('use_ipv6', True) self.conf.set_default('vlan_interface', 'eth0') 
self.conf.set_default('auth_strategy', 'noauth2') config.parse_args([], default_config_files=[], configure_db=False, init_rpc=False) self.conf.set_default('connection', "sqlite://", group='database') self.conf.set_default('connection', "sqlite://", group='api_database') self.conf.set_default('sqlite_synchronous', False, group='database') self.conf.set_default('sqlite_synchronous', False, group='api_database') self.conf.set_default('fatal_exception_format_errors', True) self.conf.set_default('enabled', True, 'osapi_v21') # TODO(sdague): this makes our project_id match 'fake' as well. # We should fix the tests to use real # UUIDs then drop this work around. self.conf.set_default('project_id_regex', '[0-9a-fk\-]+', 'osapi_v21') self.conf.set_default('force_dhcp_release', False) self.conf.set_default('periodic_enable', False) policy_opts.set_defaults(self.conf) self.addCleanup(utils.cleanup_dns_managers) self.addCleanup(ipv6.api.reset_backend) nova-13.0.0/nova/tests/unit/test_context.py0000664000567000056710000002341212701410011022045 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_context import context as o_context from oslo_context import fixture as o_fixture from nova import context from nova import objects from nova import test class ContextTestCase(test.NoDBTestCase): def setUp(self): super(ContextTestCase, self).setUp() self.useFixture(o_fixture.ClearRequestContext()) def test_request_context_elevated(self): user_ctxt = context.RequestContext('111', '222', admin=False) self.assertFalse(user_ctxt.is_admin) admin_ctxt = user_ctxt.elevated() self.assertTrue(admin_ctxt.is_admin) self.assertIn('admin', admin_ctxt.roles) self.assertFalse(user_ctxt.is_admin) self.assertNotIn('admin', user_ctxt.roles) def test_request_context_sets_is_admin(self): ctxt = context.RequestContext('111', '222', roles=['admin', 'weasel']) self.assertTrue(ctxt.is_admin) def test_request_context_sets_is_admin_by_role(self): ctxt = context.RequestContext('111', '222', roles=['administrator']) self.assertTrue(ctxt.is_admin) def test_request_context_sets_is_admin_upcase(self): ctxt = context.RequestContext('111', '222', roles=['Admin', 'weasel']) self.assertTrue(ctxt.is_admin) def test_request_context_read_deleted(self): ctxt = context.RequestContext('111', '222', read_deleted='yes') self.assertEqual('yes', ctxt.read_deleted) ctxt.read_deleted = 'no' self.assertEqual('no', ctxt.read_deleted) def test_request_context_read_deleted_invalid(self): self.assertRaises(ValueError, context.RequestContext, '111', '222', read_deleted=True) ctxt = context.RequestContext('111', '222') self.assertRaises(ValueError, setattr, ctxt, 'read_deleted', True) def test_extra_args_to_context_get_logged(self): info = {} def fake_warn(log_msg, *args): if args: log_msg = log_msg % args info['log_msg'] = log_msg self.stub_out('nova.context.LOG.warning', fake_warn) c = context.RequestContext('user', 'project', extra_arg1='meow', extra_arg2='wuff') self.assertTrue(c) self.assertIn("'extra_arg1': 'meow'", info['log_msg']) self.assertIn("'extra_arg2': 'wuff'", info['log_msg']) def 
test_service_catalog_default(self): ctxt = context.RequestContext('111', '222') self.assertEqual([], ctxt.service_catalog) ctxt = context.RequestContext('111', '222', service_catalog=[]) self.assertEqual([], ctxt.service_catalog) ctxt = context.RequestContext('111', '222', service_catalog=None) self.assertEqual([], ctxt.service_catalog) def test_service_catalog_cinder_only(self): service_catalog = [ {u'type': u'compute', u'name': u'nova'}, {u'type': u's3', u'name': u's3'}, {u'type': u'image', u'name': u'glance'}, {u'type': u'volume', u'name': u'cinder'}, {u'type': u'ec2', u'name': u'ec2'}, {u'type': u'object-store', u'name': u'swift'}, {u'type': u'identity', u'name': u'keystone'}, {u'type': None, u'name': u'S_withouttype'}, {u'type': u'vo', u'name': u'S_partofvolume'}] volume_catalog = [{u'type': u'volume', u'name': u'cinder'}] ctxt = context.RequestContext('111', '222', service_catalog=service_catalog) self.assertEqual(volume_catalog, ctxt.service_catalog) def test_to_dict_from_dict_no_log(self): warns = [] def stub_warn(msg, *a, **kw): if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): a = a[0] warns.append(str(msg) % a) self.stub_out('nova.context.LOG.warn', stub_warn) ctxt = context.RequestContext('111', '222', roles=['admin', 'weasel']) context.RequestContext.from_dict(ctxt.to_dict()) self.assertEqual(0, len(warns), warns) def test_store_when_no_overwrite(self): # If no context exists we store one even if overwrite is false # (since we are not overwriting anything). ctx = context.RequestContext('111', '222', overwrite=False) self.assertIs(o_context.get_current(), ctx) def test_no_overwrite(self): # If there is already a context in the cache a new one will # not overwrite it if overwrite=False. 
ctx1 = context.RequestContext('111', '222', overwrite=True) context.RequestContext('333', '444', overwrite=False) self.assertIs(o_context.get_current(), ctx1) def test_admin_no_overwrite(self): # If there is already a context in the cache creating an admin # context will not overwrite it. ctx1 = context.RequestContext('111', '222', overwrite=True) context.get_admin_context() self.assertIs(o_context.get_current(), ctx1) def test_convert_from_rc_to_dict(self): ctx = context.RequestContext( 111, 222, request_id='req-679033b7-1755-4929-bf85-eb3bfaef7e0b', timestamp='2015-03-02T22:31:56.641629') values2 = ctx.to_dict() expected_values = {'auth_token': None, 'domain': None, 'instance_lock_checked': False, 'is_admin': False, 'project_id': 222, 'project_domain': None, 'project_name': None, 'quota_class': None, 'read_deleted': 'no', 'read_only': False, 'remote_address': None, 'request_id': 'req-679033b7-1755-4929-bf85-eb3bfaef7e0b', 'resource_uuid': None, 'roles': [], 'service_catalog': [], 'show_deleted': False, 'tenant': 222, 'timestamp': '2015-03-02T22:31:56.641629', 'user': 111, 'user_domain': None, 'user_id': 111, 'user_identity': '111 222 - - -', 'user_name': None} self.assertEqual(expected_values, values2) def test_convert_from_dict_then_to_dict(self): values = {'user': '111', 'user_id': '111', 'tenant': '222', 'project_id': '222', 'domain': None, 'project_domain': None, 'auth_token': None, 'resource_uuid': None, 'read_only': False, 'user_identity': '111 222 - - -', 'instance_lock_checked': False, 'user_name': None, 'project_name': None, 'timestamp': '2015-03-02T20:03:59.416299', 'remote_address': None, 'quota_class': None, 'is_admin': True, 'service_catalog': [], 'read_deleted': 'no', 'show_deleted': False, 'roles': [], 'request_id': 'req-956637ad-354a-4bc5-b969-66fd1cc00f50', 'user_domain': None} ctx = context.RequestContext.from_dict(values) self.assertEqual('111', ctx.user) self.assertEqual('222', ctx.tenant) self.assertEqual('111', ctx.user_id) 
self.assertEqual('222', ctx.project_id) values2 = ctx.to_dict() self.assertEqual(values, values2) @mock.patch('nova.db.create_context_manager') def test_target_cell(self, mock_create_ctxt_mgr): mock_create_ctxt_mgr.return_value = mock.sentinel.cm ctxt = context.RequestContext('111', '222', roles=['admin', 'weasel']) # Verify the existing db_connection, if any, is restored ctxt.db_connection = mock.sentinel.db_conn mapping = objects.CellMapping(database_connection='fake://') with context.target_cell(ctxt, mapping): self.assertEqual(ctxt.db_connection, mock.sentinel.cm) self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection) nova-13.0.0/nova/tests/unit/keymgr/0000775000567000056710000000000012701410205020251 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/keymgr/__init__.py0000664000567000056710000000000012701407773022370 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/keymgr/test_key_mgr.py0000664000567000056710000000172212701407773023341 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the key manager. 
""" from nova import test class KeyManagerTestCase(test.NoDBTestCase): def _create_key_manager(self): raise NotImplementedError() def setUp(self): super(KeyManagerTestCase, self).setUp() self.key_mgr = self._create_key_manager() nova-13.0.0/nova/tests/unit/keymgr/test_single_key_mgr.py0000664000567000056710000000462012701407773024702 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the single key manager. 
""" import array from nova import exception from nova.keymgr import key from nova.keymgr import single_key_mgr from nova.tests.unit.keymgr import test_mock_key_mgr class SingleKeyManagerTestCase(test_mock_key_mgr.MockKeyManagerTestCase): def _create_key_manager(self): return single_key_mgr.SingleKeyManager() def setUp(self): super(SingleKeyManagerTestCase, self).setUp() self.key_id = '00000000-0000-0000-0000-000000000000' encoded = array.array('B', ('0' * 64).decode('hex')).tolist() self.key = key.SymmetricKey('AES', encoded) def test___init__(self): self.assertEqual(self.key, self.key_mgr.get_key(self.ctxt, self.key_id)) def test_create_key(self): key_id_1 = self.key_mgr.create_key(self.ctxt) key_id_2 = self.key_mgr.create_key(self.ctxt) # ensure that the UUIDs are the same self.assertEqual(key_id_1, key_id_2) def test_create_key_with_length(self): pass def test_store_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr.store_key, None, self.key) def test_copy_key(self): key_id = self.key_mgr.create_key(self.ctxt) key = self.key_mgr.get_key(self.ctxt, key_id) copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id) copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id) self.assertEqual(key_id, copied_key_id) self.assertEqual(key, copied_key) def test_delete_key(self): pass def test_delete_unknown_key(self): self.assertRaises(exception.KeyManagerError, self.key_mgr.delete_key, self.ctxt, None) nova-13.0.0/nova/tests/unit/keymgr/test_barbican.py0000664000567000056710000002457512701407773023460 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the barbican key manager. """ import array import binascii import mock from nova import exception from nova.keymgr import barbican from nova.keymgr import key as keymgr_key from nova.tests.unit.keymgr import test_key_mgr class BarbicanKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): def _create_key_manager(self): return barbican.BarbicanKeyManager() def setUp(self): super(BarbicanKeyManagerTestCase, self).setUp() # Create fake auth_token self.ctxt = mock.MagicMock() self.ctxt.auth_token = "fake_token" self.ctxt.project = "fake_project" # Create mock barbican client self._build_mock_barbican() # Create a key_id, secret_ref, pre_hex, and hex to use self.key_id = "d152fa13-2b41-42ca-a934-6c21566c0f40" self.secret_ref = ("http://host:9311/v1/secrets/" + self.key_id) self.pre_hex = "AIDxQp2++uAbKaTVDMXFYIu8PIugJGqkK0JLqkU0rhY=" self.hex = ("0080f1429dbefae01b29a4d50cc5c5608bbc3c8ba0246aa42b424baa4" "534ae16") self.key_mgr._current_context = self.ctxt self.key_mgr._base_url = "http://host:9311/v1" self.addCleanup(self._restore) def _restore(self): if hasattr(self, 'original_key'): keymgr_key.SymmetricKey = self.original_key def _build_mock_barbican(self): self.mock_barbican = mock.MagicMock(name='mock_barbican') # Set commonly used methods self.get = self.mock_barbican.secrets.get self.delete = self.mock_barbican.secrets.delete self.store = self.mock_barbican.secrets.store self.create = self.mock_barbican.secrets.create self.key_mgr._barbican_client = self.mock_barbican def _build_mock_symKey(self): self.mock_symKey = mock.Mock() def 
fake_sym_key(alg, key): self.mock_symKey.get_encoded.return_value = key self.mock_symKey.get_algorithm.return_value = alg return self.mock_symKey self.original_key = keymgr_key.SymmetricKey keymgr_key.SymmetricKey = fake_sym_key def test_copy_key(self): # Create metadata for original secret original_secret_metadata = mock.Mock() original_secret_metadata.algorithm = mock.sentinel.alg original_secret_metadata.bit_length = mock.sentinel.bit original_secret_metadata.name = mock.sentinel.name original_secret_metadata.expiration = mock.sentinel.expiration original_secret_metadata.mode = mock.sentinel.mode content_types = {'default': 'fake_type'} original_secret_metadata.content_types = content_types original_secret_data = mock.Mock() original_secret_metadata.payload = original_secret_data # Create href for copied secret copied_secret = mock.Mock() copied_secret.store.return_value = 'http://test/uuid' # Set get and create return values self.get.return_value = original_secret_metadata self.create.return_value = copied_secret # Create the mock key self._build_mock_symKey() # Copy the original self.key_mgr.copy_key(self.ctxt, self.key_id) # Assert proper methods were called self.get.assert_called_once_with(self.secret_ref) self.create.assert_called_once_with( mock.sentinel.name, self.mock_symKey.get_encoded(), content_types['default'], 'base64', mock.sentinel.alg, mock.sentinel.bit, mock.sentinel.mode, mock.sentinel.expiration) copied_secret.store.assert_called_once_with() def test_copy_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.Forbidden, self.key_mgr.copy_key, None, self.key_id) def test_create_key(self): # Create order_ref_url and assign return value order_ref_url = ("http://localhost:9311/v1/None/orders/" "4fe939b7-72bc-49aa-bd1e-e979589858af") key_order = mock.Mock() self.mock_barbican.orders.create_key.return_value = key_order key_order.submit.return_value = order_ref_url # Create order and assign return value order = 
mock.Mock() order.secret_ref = self.secret_ref self.mock_barbican.orders.get.return_value = order # Create the key, get the UUID returned_uuid = self.key_mgr.create_key(self.ctxt) self.mock_barbican.orders.get.assert_called_once_with(order_ref_url) self.assertEqual(returned_uuid, self.key_id) def test_create_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.Forbidden, self.key_mgr.create_key, None) def test_delete_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.Forbidden, self.key_mgr.delete_key, None, self.key_id) def test_delete_key(self): self.key_mgr.delete_key(self.ctxt, self.key_id) self.delete.assert_called_once_with(self.secret_ref) def test_delete_unknown_key(self): self.assertRaises(exception.KeyManagerError, self.key_mgr.delete_key, self.ctxt, None) @mock.patch('base64.b64encode') def test_get_key(self, b64_mock): b64_mock.return_value = self.pre_hex content_type = 'application/octet-stream' key = self.key_mgr.get_key(self.ctxt, self.key_id, content_type) self.get.assert_called_once_with(self.secret_ref) encoded = array.array('B', binascii.unhexlify(self.hex)).tolist() self.assertEqual(key.get_encoded(), encoded) def test_get_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.Forbidden, self.key_mgr.get_key, None, self.key_id) def test_get_unknown_key(self): self.assertRaises(exception.KeyManagerError, self.key_mgr.get_key, self.ctxt, None) def test_store_key_base64(self): # Create Key to store secret_key = array.array('B', [0x01, 0x02, 0xA0, 0xB3]).tolist() _key = keymgr_key.SymmetricKey('AES', secret_key) # Define the return values secret = mock.Mock() self.create.return_value = secret secret.store.return_value = self.secret_ref # Store the Key returned_uuid = self.key_mgr.store_key(self.ctxt, _key, bit_length=32) self.create.assert_called_once_with('Nova Compute Key', 'AQKgsw==', 'application/octet-stream', 'base64', 'AES', 32, 'CBC', None) 
self.assertEqual(returned_uuid, self.key_id) def test_store_key_plaintext(self): # Create the plaintext key secret_key_text = "This is a test text key." _key = keymgr_key.SymmetricKey('AES', secret_key_text) # Store the Key self.key_mgr.store_key(self.ctxt, _key, payload_content_type='text/plain', payload_content_encoding=None) self.create.assert_called_once_with('Nova Compute Key', secret_key_text, 'text/plain', None, 'AES', 256, 'CBC', None) self.assertEqual(self.store.call_count, 0) def test_store_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.Forbidden, self.key_mgr.store_key, None, None) @mock.patch('keystoneauth1.session.Session') @mock.patch('barbicanclient.client.Client') def test_get_barbican_client_new(self, mock_barbican, mock_keystone): manager = self._create_key_manager() manager._get_barbican_client(self.ctxt) self.assertEqual(mock_barbican.call_count, 1) @mock.patch('keystoneauth1.session.Session') @mock.patch('barbicanclient.client.Client') def test_get_barbican_client_reused(self, mock_barbican, mock_keystone): manager = self._create_key_manager() manager._get_barbican_client(self.ctxt) self.assertEqual(mock_barbican.call_count, 1) manager._get_barbican_client(self.ctxt) self.assertEqual(mock_barbican.call_count, 1) @mock.patch('keystoneauth1.session.Session') @mock.patch('barbicanclient.client.Client') def test_get_barbican_client_not_reused(self, mock_barbican, mock_keystone): manager = self._create_key_manager() manager._get_barbican_client(self.ctxt) self.assertEqual(mock_barbican.call_count, 1) ctxt2 = mock.MagicMock() ctxt2.auth_token = "fake_token2" ctxt2.project = "fake_project2" manager._get_barbican_client(ctxt2) self.assertEqual(mock_barbican.call_count, 2) def test_get_barbican_client_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr._get_barbican_client, None) def test_get_barbican_client_missing_project(self): del(self.ctxt.project_id) 
self.assertRaises(exception.KeyManagerError, self.key_mgr._get_barbican_client, self.ctxt) def test_get_barbican_client_none_project(self): self.ctxt.project_id = None self.assertRaises(exception.KeyManagerError, self.key_mgr._get_barbican_client, self.ctxt) nova-13.0.0/nova/tests/unit/keymgr/test_not_implemented_key_mgr.py0000664000567000056710000000324112701407773026602 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the not implemented key manager. 
""" from nova.keymgr import not_implemented_key_mgr from nova.tests.unit.keymgr import test_key_mgr class NotImplementedKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): def _create_key_manager(self): return not_implemented_key_mgr.NotImplementedKeyManager() def test_create_key(self): self.assertRaises(NotImplementedError, self.key_mgr.create_key, None) def test_store_key(self): self.assertRaises(NotImplementedError, self.key_mgr.store_key, None, None) def test_copy_key(self): self.assertRaises(NotImplementedError, self.key_mgr.copy_key, None, None) def test_get_key(self): self.assertRaises(NotImplementedError, self.key_mgr.get_key, None, None) def test_delete_key(self): self.assertRaises(NotImplementedError, self.key_mgr.delete_key, None, None) nova-13.0.0/nova/tests/unit/keymgr/fake.py0000664000567000056710000000146012701407773021552 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a fake key manager.""" from nova.keymgr import mock_key_mgr def fake_api(): return mock_key_mgr.MockKeyManager() nova-13.0.0/nova/tests/unit/keymgr/test_conf_key_mgr.py0000664000567000056710000000376212701407773024354 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the conf key manager. """ import array from oslo_config import cfg from nova.keymgr import conf_key_mgr from nova.keymgr import key from nova.tests.unit.keymgr import test_single_key_mgr CONF = cfg.CONF CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr') class ConfKeyManagerTestCase(test_single_key_mgr.SingleKeyManagerTestCase): def __init__(self, *args, **kwargs): super(ConfKeyManagerTestCase, self).__init__(*args, **kwargs) self._hex_key = '0' * 64 def _create_key_manager(self): CONF.set_default('fixed_key', default=self._hex_key, group='keymgr') return conf_key_mgr.ConfKeyManager() def setUp(self): super(ConfKeyManagerTestCase, self).setUp() encoded_key = array.array('B', self._hex_key.decode('hex')).tolist() self.key = key.SymmetricKey('AES', encoded_key) def test_init(self): key_manager = self._create_key_manager() self.assertEqual(self._hex_key, key_manager._hex_key) def test_init_value_error(self): CONF.set_default('fixed_key', default=None, group='keymgr') self.assertRaises(ValueError, conf_key_mgr.ConfKeyManager) def test_generate_hex_key(self): key_manager = self._create_key_manager() self.assertEqual(self._hex_key, key_manager._generate_hex_key()) nova-13.0.0/nova/tests/unit/keymgr/test_mock_key_mgr.py0000664000567000056710000000703712701407773024357 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the mock key manager. """ import array from nova import context from nova import exception from nova.keymgr import key as keymgr_key from nova.keymgr import mock_key_mgr from nova.tests.unit.keymgr import test_key_mgr class MockKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): def _create_key_manager(self): return mock_key_mgr.MockKeyManager() def setUp(self): super(MockKeyManagerTestCase, self).setUp() self.ctxt = context.RequestContext('fake', 'fake') def test_create_key(self): key_id_1 = self.key_mgr.create_key(self.ctxt) key_id_2 = self.key_mgr.create_key(self.ctxt) # ensure that the UUIDs are unique self.assertNotEqual(key_id_1, key_id_2) def test_create_key_with_length(self): for length in [64, 128, 256]: key_id = self.key_mgr.create_key(self.ctxt, key_length=length) key = self.key_mgr.get_key(self.ctxt, key_id) self.assertEqual(length / 8, len(key.get_encoded())) def test_create_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr.create_key, None) def test_store_key(self): secret_key = array.array('B', ('0' * 64).decode('hex')).tolist() _key = keymgr_key.SymmetricKey('AES', secret_key) key_id = self.key_mgr.store_key(self.ctxt, _key) actual_key = self.key_mgr.get_key(self.ctxt, key_id) self.assertEqual(_key, actual_key) def test_store_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr.store_key, None, None) def test_copy_key(self): key_id = 
self.key_mgr.create_key(self.ctxt) key = self.key_mgr.get_key(self.ctxt, key_id) copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id) copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id) self.assertNotEqual(key_id, copied_key_id) self.assertEqual(key, copied_key) def test_copy_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr.copy_key, None, None) def test_get_key(self): pass def test_get_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr.get_key, None, None) def test_get_unknown_key(self): self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None) def test_delete_key(self): key_id = self.key_mgr.create_key(self.ctxt) self.key_mgr.delete_key(self.ctxt, key_id) self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, key_id) def test_delete_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr.delete_key, None, None) def test_delete_unknown_key(self): self.assertRaises(KeyError, self.key_mgr.delete_key, self.ctxt, None) nova-13.0.0/nova/tests/unit/keymgr/test_key.py0000664000567000056710000000356412701407773022502 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the key classes. 
""" import array from nova.keymgr import key from nova import test class KeyTestCase(test.NoDBTestCase): def _create_key(self): raise NotImplementedError() def setUp(self): super(KeyTestCase, self).setUp() self.key = self._create_key() class SymmetricKeyTestCase(KeyTestCase): def _create_key(self): return key.SymmetricKey(self.algorithm, self.encoded) def setUp(self): self.algorithm = 'AES' self.encoded = array.array('B', ('0' * 64).decode('hex')).tolist() super(SymmetricKeyTestCase, self).setUp() def test_get_algorithm(self): self.assertEqual(self.key.get_algorithm(), self.algorithm) def test_get_format(self): self.assertEqual(self.key.get_format(), 'RAW') def test_get_encoded(self): self.assertEqual(self.key.get_encoded(), self.encoded) def test___eq__(self): self.assertTrue(self.key == self.key) self.assertFalse(self.key is None) self.assertFalse(None == self.key) def test___ne__(self): self.assertFalse(self.key != self.key) self.assertTrue(self.key is not None) self.assertTrue(None != self.key) nova-13.0.0/nova/tests/unit/test_metadata.py0000664000567000056710000014671512701407773022202 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for metadata service.""" import base64 import hashlib import hmac import re try: import cPickle as pickle except ImportError: import pickle import mock from oslo_config import cfg from oslo_serialization import jsonutils import six import webob from nova.api.metadata import base from nova.api.metadata import handler from nova.api.metadata import password from nova import block_device from nova.compute import flavors from nova.conductor import api as conductor_api from nova import context from nova import exception from nova.network import api as network_api from nova.network import model as network_model from nova.network.neutronv2 import api as neutronapi from nova.network.security_group import openstack_driver from nova import objects from nova import test from nova.tests.unit.api.openstack import fakes from nova.tests.unit import fake_block_device from nova.tests.unit import fake_network from nova.virt import netutils CONF = cfg.CONF USER_DATA_STRING = (b"This is an encoded string") ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING) FAKE_SEED = '7qtD24mpMR2' def fake_inst_obj(context): inst = objects.Instance( context=context, id=1, user_id='fake_user', uuid='b65cee2f-8c69-4aeb-be2f-f79742548fc2', project_id='test', key_name="key", key_data="ssh-rsa AAAAB3Nzai....N3NtHw== someuser@somehost", host='test', launch_index=1, reservation_id='r-xxxxxxxx', user_data=ENCODE_USER_DATA_STRING, image_ref=7, kernel_id=None, ramdisk_id=None, vcpus=1, fixed_ips=[], root_device_name='/dev/sda1', hostname='test.novadomain', display_name='my_displayname', metadata={}, default_ephemeral_device=None, default_swap_device=None, system_metadata={}, security_groups=objects.SecurityGroupList(), availability_zone=None) nwinfo = network_model.NetworkInfo([]) inst.info_cache = objects.InstanceInfoCache(context=context, instance_uuid=inst.uuid, network_info=nwinfo) inst.flavor = flavors.get_default_flavor() return inst def fake_keypair_obj(name, data): return 
objects.KeyPair(name=name, type='fake_type', public_key=data) def return_non_existing_address(*args, **kwarg): raise exception.NotFound() def fake_InstanceMetadata(stubs, inst_data, address=None, sgroups=None, content=None, extra_md=None, vd_driver=None, network_info=None, network_metadata=None): content = content or [] extra_md = extra_md or {} if sgroups is None: sgroups = [{'name': 'default'}] def sg_get(*args, **kwargs): return sgroups secgroup_api = openstack_driver.get_openstack_security_group_driver() stubs.Set(secgroup_api.__class__, 'get_instance_security_groups', sg_get) return base.InstanceMetadata(inst_data, address=address, content=content, extra_md=extra_md, vd_driver=vd_driver, network_info=network_info, network_metadata=network_metadata) def fake_request(stubs, mdinst, relpath, address="127.0.0.1", fake_get_metadata=None, headers=None, fake_get_metadata_by_instance_id=None, app=None): def get_metadata_by_remote_address(address): return mdinst if app is None: app = handler.MetadataRequestHandler() if fake_get_metadata is None: fake_get_metadata = get_metadata_by_remote_address if stubs: stubs.Set(app, 'get_metadata_by_remote_address', fake_get_metadata) if fake_get_metadata_by_instance_id: stubs.Set(app, 'get_metadata_by_instance_id', fake_get_metadata_by_instance_id) request = webob.Request.blank(relpath) request.remote_addr = address if headers is not None: request.headers.update(headers) response = request.get_response(app) return response class MetadataTestCase(test.TestCase): def setUp(self): super(MetadataTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.instance = fake_inst_obj(self.context) self.flags(use_local=True, group='conductor') self.keypair = fake_keypair_obj(self.instance.key_name, self.instance.key_data) fake_network.stub_out_nw_api_get_instance_nw_info(self) def test_can_pickle_metadata(self): # Make sure that InstanceMetadata is possible to pickle. 
This is # required for memcache backend to work correctly. md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone()) pickle.dumps(md, protocol=0) def test_user_data(self): inst = self.instance.obj_clone() inst['user_data'] = base64.b64encode("happy") md = fake_InstanceMetadata(self.stubs, inst) self.assertEqual( md.get_ec2_metadata(version='2009-04-04')['user-data'], "happy") def test_no_user_data(self): inst = self.instance.obj_clone() inst.user_data = None md = fake_InstanceMetadata(self.stubs, inst) obj = object() self.assertEqual( md.get_ec2_metadata(version='2009-04-04').get('user-data', obj), obj) def _test_security_groups(self): inst = self.instance.obj_clone() sgroups = [{'name': name} for name in ('default', 'other')] expected = ['default', 'other'] md = fake_InstanceMetadata(self.stubs, inst, sgroups=sgroups) data = md.get_ec2_metadata(version='2009-04-04') self.assertEqual(data['meta-data']['security-groups'], expected) def test_security_groups(self): self._test_security_groups() def test_neutron_security_groups(self): self.flags(security_group_api='neutron') self._test_security_groups() def test_local_hostname_fqdn(self): md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone()) data = md.get_ec2_metadata(version='2009-04-04') self.assertEqual(data['meta-data']['local-hostname'], "%s.%s" % (self.instance['hostname'], CONF.dhcp_domain)) def test_format_instance_mapping(self): # Make sure that _format_instance_mappings works. 
instance_ref0 = objects.Instance(**{'id': 0, 'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85', 'root_device_name': None, 'default_ephemeral_device': None, 'default_swap_device': None}) instance_ref1 = objects.Instance(**{'id': 0, 'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2', 'root_device_name': '/dev/sda1', 'default_ephemeral_device': None, 'default_swap_device': None}) def fake_bdm_get(ctxt, uuid): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': 87654321, 'snapshot_id': None, 'no_device': None, 'source_type': 'volume', 'destination_type': 'volume', 'delete_on_termination': True, 'device_name': '/dev/sdh'}), fake_block_device.FakeDbBlockDeviceDict( {'volume_id': None, 'snapshot_id': None, 'no_device': None, 'source_type': 'blank', 'destination_type': 'local', 'guest_format': 'swap', 'delete_on_termination': None, 'device_name': '/dev/sdc'}), fake_block_device.FakeDbBlockDeviceDict( {'volume_id': None, 'snapshot_id': None, 'no_device': None, 'source_type': 'blank', 'destination_type': 'local', 'guest_format': None, 'delete_on_termination': None, 'device_name': '/dev/sdb'})] self.stub_out('nova.db.block_device_mapping_get_all_by_instance', fake_bdm_get) expected = {'ami': 'sda1', 'root': '/dev/sda1', 'ephemeral0': '/dev/sdb', 'swap': '/dev/sdc', 'ebs0': '/dev/sdh'} conductor_api.LocalAPI() self.assertEqual(base._format_instance_mapping(self.context, instance_ref0), block_device._DEFAULT_MAPPINGS) self.assertEqual(base._format_instance_mapping(self.context, instance_ref1), expected) def test_pubkey(self): md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone()) pubkey_ent = md.lookup("/2009-04-04/meta-data/public-keys") self.assertEqual(base.ec2_md_print(pubkey_ent), "0=%s" % self.instance['key_name']) self.assertEqual(base.ec2_md_print(pubkey_ent['0']['openssh-key']), self.instance['key_data']) def test_image_type_ramdisk(self): inst = self.instance.obj_clone() inst['ramdisk_id'] = 'ari-853667c0' md = fake_InstanceMetadata(self.stubs, inst) 
data = md.lookup("/latest/meta-data/ramdisk-id") self.assertIsNotNone(data) self.assertTrue(re.match('ari-[0-9a-f]{8}', data)) def test_image_type_kernel(self): inst = self.instance.obj_clone() inst['kernel_id'] = 'aki-c2e26ff2' md = fake_InstanceMetadata(self.stubs, inst) data = md.lookup("/2009-04-04/meta-data/kernel-id") self.assertTrue(re.match('aki-[0-9a-f]{8}', data)) self.assertEqual( md.lookup("/ec2/2009-04-04/meta-data/kernel-id"), data) def test_image_type_no_kernel_raises(self): inst = self.instance.obj_clone() md = fake_InstanceMetadata(self.stubs, inst) self.assertRaises(base.InvalidMetadataPath, md.lookup, "/2009-04-04/meta-data/kernel-id") def test_check_version(self): inst = self.instance.obj_clone() md = fake_InstanceMetadata(self.stubs, inst) self.assertTrue(md._check_version('1.0', '2009-04-04')) self.assertFalse(md._check_version('2009-04-04', '1.0')) self.assertFalse(md._check_version('2009-04-04', '2008-09-01')) self.assertTrue(md._check_version('2008-09-01', '2009-04-04')) self.assertTrue(md._check_version('2009-04-04', '2009-04-04')) @mock.patch('nova.virt.netutils.get_injected_network_template') def test_InstanceMetadata_uses_passed_network_info(self, mock_get): network_info = [] mock_get.return_value = False base.InstanceMetadata(fake_inst_obj(self.context), network_info=network_info) mock_get.assert_called_once_with(network_info) @mock.patch.object(netutils, "get_network_metadata", autospec=True) def test_InstanceMetadata_gets_network_metadata(self, mock_netutils): network_data = {'links': [], 'networks': [], 'services': []} mock_netutils.return_value = network_data md = base.InstanceMetadata(fake_inst_obj(self.context)) self.assertEqual(network_data, md.network_metadata) def test_InstanceMetadata_invoke_metadata_for_config_drive(self): fakes.stub_out_key_pair_funcs(self.stubs) inst = self.instance.obj_clone() inst_md = base.InstanceMetadata(inst) expected_paths = [ 'ec2/2009-04-04/user-data', 'ec2/2009-04-04/meta-data.json', 
'ec2/latest/user-data', 'ec2/latest/meta-data.json', 'openstack/2012-08-10/meta_data.json', 'openstack/2012-08-10/user_data', 'openstack/2013-04-04/meta_data.json', 'openstack/2013-04-04/user_data', 'openstack/2013-10-17/meta_data.json', 'openstack/2013-10-17/user_data', 'openstack/2013-10-17/vendor_data.json', 'openstack/2015-10-15/meta_data.json', 'openstack/2015-10-15/user_data', 'openstack/2015-10-15/vendor_data.json', 'openstack/2015-10-15/network_data.json', 'openstack/latest/meta_data.json', 'openstack/latest/user_data', 'openstack/latest/vendor_data.json', 'openstack/latest/network_data.json', ] actual_paths = [] for (path, value) in inst_md.metadata_for_config_drive(): actual_paths.append(path) self.assertIsNotNone(path) self.assertEqual(expected_paths, actual_paths) @mock.patch('nova.virt.netutils.get_injected_network_template') def test_InstanceMetadata_queries_network_API_when_needed(self, mock_get): network_info_from_api = [] mock_get.return_value = False base.InstanceMetadata(fake_inst_obj(self.context)) mock_get.assert_called_once_with(network_info_from_api) def test_local_ipv4(self): nw_info = fake_network.fake_get_instance_nw_info(self, num_networks=2) expected_local = "192.168.1.100" md = fake_InstanceMetadata(self.stubs, self.instance, network_info=nw_info, address="fake") data = md.get_ec2_metadata(version='2009-04-04') self.assertEqual(expected_local, data['meta-data']['local-ipv4']) def test_local_ipv4_from_nw_info(self): nw_info = fake_network.fake_get_instance_nw_info(self, num_networks=2) expected_local = "192.168.1.100" md = fake_InstanceMetadata(self.stubs, self.instance, network_info=nw_info) data = md.get_ec2_metadata(version='2009-04-04') self.assertEqual(data['meta-data']['local-ipv4'], expected_local) def test_local_ipv4_from_address(self): expected_local = "fake" md = fake_InstanceMetadata(self.stubs, self.instance, network_info=[], address="fake") data = md.get_ec2_metadata(version='2009-04-04') 
self.assertEqual(data['meta-data']['local-ipv4'], expected_local) @mock.patch.object(base64, 'b64encode', lambda data: FAKE_SEED) @mock.patch('nova.cells.rpcapi.CellsAPI.get_keypair_at_top') @mock.patch.object(objects.KeyPair, 'get_by_name') @mock.patch.object(jsonutils, 'dump_as_bytes') def _test_as_json_with_options(self, mock_json_dump_as_bytes, mock_keypair, mock_cells_keypair, is_cells=False, os_version=base.GRIZZLY): if is_cells: self.flags(enable=True, group='cells') self.flags(cell_type='compute', group='cells') mock_keypair = mock_cells_keypair instance = self.instance keypair = self.keypair md = fake_InstanceMetadata(self.stubs, instance) expected_metadata = { 'uuid': md.uuid, 'hostname': md._get_hostname(), 'name': instance.display_name, 'launch_index': instance.launch_index, 'availability_zone': md.availability_zone, } if md.launch_metadata: expected_metadata['meta'] = md.launch_metadata if md.files: expected_metadata['files'] = md.files if md.extra_md: expected_metadata['extra_md'] = md.extra_md if md.network_config: expected_metadata['network_config'] = md.network_config if instance.key_name: expected_metadata['public_keys'] = { keypair.name: keypair.public_key } expected_metadata['keys'] = [{'type': keypair.type, 'data': keypair.public_key, 'name': keypair.name}] if md._check_os_version(base.GRIZZLY, os_version): expected_metadata['random_seed'] = FAKE_SEED if md._check_os_version(base.LIBERTY, os_version): expected_metadata['project_id'] = instance.project_id mock_keypair.return_value = keypair md._metadata_as_json(os_version, 'non useless path parameter') if instance.key_name: mock_keypair.assert_called_once_with(mock.ANY, instance.user_id, instance.key_name) self.assertIsInstance(mock_keypair.call_args[0][0], context.RequestContext) self.assertEqual(md.md_mimetype, base.MIME_TYPE_APPLICATION_JSON) mock_json_dump_as_bytes.assert_called_once_with(expected_metadata) def test_as_json(self): for os_version in base.OPENSTACK_VERSIONS: 
self._test_as_json_with_options(os_version=os_version) def test_as_json_with_cells_mode(self): for os_version in base.OPENSTACK_VERSIONS: self._test_as_json_with_options(is_cells=True, os_version=os_version) class OpenStackMetadataTestCase(test.TestCase): def setUp(self): super(OpenStackMetadataTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.instance = fake_inst_obj(self.context) self.flags(use_local=True, group='conductor') fake_network.stub_out_nw_api_get_instance_nw_info(self) def test_top_level_listing(self): # request for /openstack// should show metadata.json inst = self.instance.obj_clone() mdinst = fake_InstanceMetadata(self.stubs, inst) result = mdinst.lookup("/openstack") # trailing / should not affect anything self.assertEqual(result, mdinst.lookup("/openstack/")) # the 'content' should not show up in directory listing self.assertNotIn(base.CONTENT_DIR, result) self.assertIn('2012-08-10', result) self.assertIn('latest', result) def test_version_content_listing(self): # request for /openstack// should show metadata.json inst = self.instance.obj_clone() mdinst = fake_InstanceMetadata(self.stubs, inst) listing = mdinst.lookup("/openstack/2012-08-10") self.assertIn("meta_data.json", listing) def test_returns_apis_supported_in_liberty_version(self): mdinst = fake_InstanceMetadata(self.stubs, self.instance) liberty_supported_apis = mdinst.lookup("/openstack/2015-10-15") self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME, base.VD_JSON_NAME, base.NW_JSON_NAME], liberty_supported_apis) def test_returns_apis_supported_in_havana_version(self): mdinst = fake_InstanceMetadata(self.stubs, self.instance) havana_supported_apis = mdinst.lookup("/openstack/2013-10-17") self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME, base.VD_JSON_NAME], havana_supported_apis) def test_returns_apis_supported_in_folsom_version(self): mdinst = fake_InstanceMetadata(self.stubs, self.instance) folsom_supported_apis = 
mdinst.lookup("/openstack/2012-08-10") self.assertEqual([base.MD_JSON_NAME, base.UD_NAME], folsom_supported_apis) def test_returns_apis_supported_in_grizzly_version(self): mdinst = fake_InstanceMetadata(self.stubs, self.instance) grizzly_supported_apis = mdinst.lookup("/openstack/2013-04-04") self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME], grizzly_supported_apis) def test_metadata_json(self): fakes.stub_out_key_pair_funcs(self.stubs) inst = self.instance.obj_clone() content = [ ('/etc/my.conf', "content of my.conf"), ('/root/hello', "content of /root/hello"), ] mdinst = fake_InstanceMetadata(self.stubs, inst, content=content) mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json") mdjson = mdinst.lookup("/openstack/latest/meta_data.json") mddict = jsonutils.loads(mdjson) self.assertEqual(mddict['uuid'], self.instance['uuid']) self.assertIn('files', mddict) self.assertIn('public_keys', mddict) self.assertEqual(mddict['public_keys'][self.instance['key_name']], self.instance['key_data']) self.assertIn('launch_index', mddict) self.assertEqual(mddict['launch_index'], self.instance['launch_index']) # verify that each of the things we put in content # resulted in an entry in 'files', that their content # there is as expected, and that /content lists them. for (path, content) in content: fent = [f for f in mddict['files'] if f['path'] == path] self.assertEqual(1, len(fent)) fent = fent[0] found = mdinst.lookup("/openstack%s" % fent['content_path']) self.assertEqual(found, content) def test_x509_keypair(self): # check if the x509 content is set, if the keypair type is x509. fakes.stub_out_key_pair_funcs(self.stubs, type='x509') inst = self.instance.obj_clone() mdinst = fake_InstanceMetadata(self.stubs, inst) mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json") mddict = jsonutils.loads(mdjson) # keypair is stubbed-out, so it's public_key is 'public_key'. 
        # Tail of test_x509_keypair: the key-pair stubs return the literal
        # string 'public_key' as the key data, so exactly one entry with
        # type 'x509' is expected in the rendered 'keys' list.
        expected = {'name': self.instance['key_name'],
                    'type': 'x509',
                    'data': 'public_key'}
        self.assertEqual([expected], mddict['keys'])

    def test_extra_md(self):
        # make sure extra_md makes it through to metadata
        fakes.stub_out_key_pair_funcs(self.stubs)
        inst = self.instance.obj_clone()
        extra = {'foo': 'bar',
                 'mylist': [1, 2, 3],
                 'mydict': {"one": 1, "two": 2}}
        mdinst = fake_InstanceMetadata(self.stubs, inst, extra_md=extra)
        mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
        mddict = jsonutils.loads(mdjson)

        # every extra_md key/value pair must appear verbatim at the top
        # level of the rendered meta_data.json document
        for key, val in six.iteritems(extra):
            self.assertEqual(mddict[key], val)

    def test_password(self):
        # NOTE(review): the original comment here was a copy/paste from
        # test_extra_md; this test actually verifies that the 'password'
        # path resolves to the password.handle_password callable rather
        # than to a rendered document.
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)

        result = mdinst.lookup("/openstack/latest/password")
        self.assertEqual(result, password.handle_password)

    def test_userdata(self):
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)

        userdata_found = mdinst.lookup("/openstack/2012-08-10/user_data")
        self.assertEqual(USER_DATA_STRING, userdata_found)

        # since we had user-data in this instance, it should be in listing
        self.assertIn('user_data', mdinst.lookup("/openstack/2012-08-10"))

        # rebuild the metadata view without user_data to check the
        # negative case below
        inst.user_data = None
        mdinst = fake_InstanceMetadata(self.stubs, inst)

        # since this instance had no user-data it should not be there.
        self.assertNotIn('user_data', mdinst.lookup("/openstack/2012-08-10"))

        # a direct lookup of the missing file must raise, not return empty
        self.assertRaises(base.InvalidMetadataPath,
                          mdinst.lookup, "/openstack/2012-08-10/user_data")

    def test_random_seed(self):
        fakes.stub_out_key_pair_funcs(self.stubs)
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)

        # verify that 2013-04-04 has the 'random' field
        mdjson = mdinst.lookup("/openstack/2013-04-04/meta_data.json")
        mddict = jsonutils.loads(mdjson)

        self.assertIn("random_seed", mddict)
        # the seed is base64-encoded; decoded it must be exactly 512 bytes
        self.assertEqual(len(base64.b64decode(mddict["random_seed"])), 512)

        # verify that older versions do not have it
        mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
        self.assertNotIn("random_seed", jsonutils.loads(mdjson))

    def test_project_id(self):
        fakes.stub_out_key_pair_funcs(self.stubs)
        mdinst = fake_InstanceMetadata(self.stubs, self.instance)

        # verify that 2015-10-15 has the 'project_id' field
        mdjson = mdinst.lookup("/openstack/2015-10-15/meta_data.json")
        mddict = jsonutils.loads(mdjson)

        self.assertIn("project_id", mddict)
        self.assertEqual(mddict["project_id"], self.instance.project_id)

        # verify that older versions do not have it
        mdjson = mdinst.lookup("/openstack/2013-10-17/meta_data.json")
        self.assertNotIn("project_id", jsonutils.loads(mdjson))

    def test_no_dashes_in_metadata(self):
        # top level entries in meta_data should not contain '-' in their name
        fakes.stub_out_key_pair_funcs(self.stubs)
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)
        mdjson = jsonutils.loads(
            mdinst.lookup("/openstack/latest/meta_data.json"))

        self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1])

    def test_vendor_data_presence(self):
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)

        # verify that 2013-10-17 has the vendor_data.json file
        result = mdinst.lookup("/openstack/2013-10-17")
        self.assertIn('vendor_data.json', result)

        # verify that older versions do not have it
        result = mdinst.lookup("/openstack/2013-04-04")
self.assertNotIn('vendor_data.json', result) def test_vendor_data_response(self): inst = self.instance.obj_clone() mydata = {'mykey1': 'value1', 'mykey2': 'value2'} class myVdriver(base.VendorDataDriver): def __init__(self, *args, **kwargs): super(myVdriver, self).__init__(*args, **kwargs) data = mydata.copy() uuid = kwargs['instance']['uuid'] data.update({'inst_uuid': uuid}) self.data = data def get(self): return self.data mdinst = fake_InstanceMetadata(self.stubs, inst, vd_driver=myVdriver) # verify that 2013-10-17 has the vendor_data.json file vdpath = "/openstack/2013-10-17/vendor_data.json" vd = jsonutils.loads(mdinst.lookup(vdpath)) # the instance should be passed through, and our class copies the # uuid through to 'inst_uuid'. self.assertEqual(vd['inst_uuid'], inst['uuid']) # check the other expected values for k, v in mydata.items(): self.assertEqual(vd[k], v) def test_network_data_presence(self): inst = self.instance.obj_clone() mdinst = fake_InstanceMetadata(self.stubs, inst) # verify that 2015-10-15 has the network_data.json file result = mdinst.lookup("/openstack/2015-10-15") self.assertIn('network_data.json', result) # verify that older version do not have it result = mdinst.lookup("/openstack/2013-10-17") self.assertNotIn('network_data.json', result) def test_network_data_response(self): inst = self.instance.obj_clone() nw_data = { "links": [{"ethernet_mac_address": "aa:aa:aa:aa:aa:aa", "id": "nic0", "type": "ethernet", "vif_id": 1, "mtu": 1500}], "networks": [{"id": "network0", "ip_address": "10.10.0.2", "link": "nic0", "netmask": "255.255.255.0", "network_id": "00000000-0000-0000-0000-000000000000", "routes": [], "type": "ipv4"}], "services": [{'address': '1.2.3.4', 'type': 'dns'}]} mdinst = fake_InstanceMetadata(self.stubs, inst, network_metadata=nw_data) # verify that 2015-10-15 has the network_data.json file nwpath = "/openstack/2015-10-15/network_data.json" nw = jsonutils.loads(mdinst.lookup(nwpath)) # check the other expected values for k, v in 
nw_data.items(): self.assertEqual(nw[k], v) class MetadataHandlerTestCase(test.TestCase): """Test that metadata is returning proper values.""" def setUp(self): super(MetadataHandlerTestCase, self).setUp() fake_network.stub_out_nw_api_get_instance_nw_info(self) self.context = context.RequestContext('fake', 'fake') self.instance = fake_inst_obj(self.context) self.flags(use_local=True, group='conductor') self.mdinst = fake_InstanceMetadata(self.stubs, self.instance, address=None, sgroups=None) def test_callable(self): def verify(req, meta_data): self.assertIsInstance(meta_data, CallableMD) return "foo" class CallableMD(object): def lookup(self, path_info): return verify response = fake_request(self.stubs, CallableMD(), "/bar") self.assertEqual(response.status_int, 200) self.assertEqual(response.body, "foo") def test_root(self): expected = "\n".join(base.VERSIONS) + "\nlatest" response = fake_request(self.stubs, self.mdinst, "/") self.assertEqual(response.body, expected) response = fake_request(self.stubs, self.mdinst, "/foo/../") self.assertEqual(response.body, expected) def test_root_metadata_proxy_enabled(self): self.flags(service_metadata_proxy=True, group='neutron') expected = "\n".join(base.VERSIONS) + "\nlatest" response = fake_request(self.stubs, self.mdinst, "/") self.assertEqual(response.body, expected) response = fake_request(self.stubs, self.mdinst, "/foo/../") self.assertEqual(response.body, expected) def test_version_root(self): response = fake_request(self.stubs, self.mdinst, "/2009-04-04") response_ctype = response.headers['Content-Type'] self.assertTrue(response_ctype.startswith("text/plain")) self.assertEqual(response.body, 'meta-data/\nuser-data') response = fake_request(self.stubs, self.mdinst, "/9999-99-99") self.assertEqual(response.status_int, 404) def test_json_data(self): fakes.stub_out_key_pair_funcs(self.stubs) response = fake_request(self.stubs, self.mdinst, "/openstack/latest/meta_data.json") response_ctype = 
response.headers['Content-Type'] self.assertTrue(response_ctype.startswith("application/json")) response = fake_request(self.stubs, self.mdinst, "/openstack/latest/vendor_data.json") response_ctype = response.headers['Content-Type'] self.assertTrue(response_ctype.startswith("application/json")) def test_user_data_non_existing_fixed_address(self): self.stub_out('nova.network.api.get_fixed_ip_by_address', return_non_existing_address) response = fake_request(None, self.mdinst, "/2009-04-04/user-data", "127.1.1.1") self.assertEqual(response.status_int, 404) def test_fixed_address_none(self): response = fake_request(None, self.mdinst, relpath="/2009-04-04/user-data", address=None) self.assertEqual(response.status_int, 500) def test_invalid_path_is_404(self): response = fake_request(self.stubs, self.mdinst, relpath="/2009-04-04/user-data-invalid") self.assertEqual(response.status_int, 404) def test_user_data_with_use_forwarded_header(self): expected_addr = "192.192.192.2" def fake_get_metadata(address): if address == expected_addr: return self.mdinst else: raise Exception("Expected addr of %s, got %s" % (expected_addr, address)) self.flags(use_forwarded_for=True) response = fake_request(self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="168.168.168.1", fake_get_metadata=fake_get_metadata, headers={'X-Forwarded-For': expected_addr}) self.assertEqual(response.status_int, 200) response_ctype = response.headers['Content-Type'] self.assertTrue(response_ctype.startswith("text/plain")) self.assertEqual(response.body, base64.b64decode(self.instance['user_data'])) response = fake_request(self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="168.168.168.1", fake_get_metadata=fake_get_metadata, headers=None) self.assertEqual(response.status_int, 500) @mock.patch('oslo_utils.secretutils.constant_time_compare') def test_by_instance_id_uses_constant_time_compare(self, mock_compare): mock_compare.side_effect = test.TestingException req = 
webob.Request.blank('/') hnd = handler.MetadataRequestHandler() req.headers['X-Instance-ID'] = 'fake-inst' req.headers['X-Instance-ID-Signature'] = 'fake-sig' req.headers['X-Tenant-ID'] = 'fake-proj' self.assertRaises(test.TestingException, hnd._handle_instance_id_request, req) self.assertEqual(1, mock_compare.call_count) def _fake_x_get_metadata(self, instance_id, remote_address): if remote_address is None: raise Exception('Expected X-Forwared-For header') elif instance_id == self.expected_instance_id: return self.mdinst else: # raise the exception to aid with 500 response code test raise Exception("Expected instance_id of %s, got %s" % (self.expected_instance_id, instance_id)) def test_user_data_with_neutron_instance_id(self): self.expected_instance_id = 'a-b-c-d' signed = hmac.new( CONF.neutron.metadata_proxy_shared_secret, self.expected_instance_id, hashlib.sha256).hexdigest() # try a request with service disabled response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", headers={'X-Instance-ID': 'a-b-c-d', 'X-Tenant-ID': 'test', 'X-Instance-ID-Signature': signed}) self.assertEqual(response.status_int, 200) # now enable the service self.flags(service_metadata_proxy=True, group='neutron') response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata_by_instance_id=self._fake_x_get_metadata, headers={'X-Forwarded-For': '192.192.192.2', 'X-Instance-ID': 'a-b-c-d', 'X-Tenant-ID': 'test', 'X-Instance-ID-Signature': signed}) self.assertEqual(response.status_int, 200) response_ctype = response.headers['Content-Type'] self.assertTrue(response_ctype.startswith("text/plain")) self.assertEqual(response.body, base64.b64decode(self.instance['user_data'])) # mismatched signature response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata_by_instance_id=self._fake_x_get_metadata, 
headers={'X-Forwarded-For': '192.192.192.2', 'X-Instance-ID': 'a-b-c-d', 'X-Tenant-ID': 'test', 'X-Instance-ID-Signature': ''}) self.assertEqual(response.status_int, 403) # missing X-Tenant-ID from request response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata_by_instance_id=self._fake_x_get_metadata, headers={'X-Forwarded-For': '192.192.192.2', 'X-Instance-ID': 'a-b-c-d', 'X-Instance-ID-Signature': signed}) self.assertEqual(response.status_int, 400) # mismatched X-Tenant-ID response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata_by_instance_id=self._fake_x_get_metadata, headers={'X-Forwarded-For': '192.192.192.2', 'X-Instance-ID': 'a-b-c-d', 'X-Tenant-ID': 'FAKE', 'X-Instance-ID-Signature': signed}) self.assertEqual(response.status_int, 404) # without X-Forwarded-For response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata_by_instance_id=self._fake_x_get_metadata, headers={'X-Instance-ID': 'a-b-c-d', 'X-Tenant-ID': 'test', 'X-Instance-ID-Signature': signed}) self.assertEqual(response.status_int, 500) # unexpected Instance-ID signed = hmac.new( CONF.neutron.metadata_proxy_shared_secret, 'z-z-z-z', hashlib.sha256).hexdigest() response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata_by_instance_id=self._fake_x_get_metadata, headers={'X-Forwarded-For': '192.192.192.2', 'X-Instance-ID': 'z-z-z-z', 'X-Tenant-ID': 'test', 'X-Instance-ID-Signature': signed}) self.assertEqual(response.status_int, 500) def test_get_metadata(self): def _test_metadata_path(relpath): # recursively confirm a http 200 from all meta-data elements # available at relpath. 
response = fake_request(self.stubs, self.mdinst, relpath=relpath) for item in response.body.split('\n'): if 'public-keys' in relpath: # meta-data/public-keys/0=keyname refers to # meta-data/public-keys/0 item = item.split('=')[0] if item.endswith('/'): path = relpath + '/' + item _test_metadata_path(path) continue path = relpath + '/' + item response = fake_request(self.stubs, self.mdinst, relpath=path) self.assertEqual(response.status_int, 200, message=path) _test_metadata_path('/2009-04-04/meta-data') def _metadata_handler_with_instance_id(self, hnd): expected_instance_id = 'a-b-c-d' signed = hmac.new( CONF.neutron.metadata_proxy_shared_secret, expected_instance_id, hashlib.sha256).hexdigest() self.flags(service_metadata_proxy=True, group='neutron') response = fake_request( None, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata=False, app=hnd, headers={'X-Forwarded-For': '192.192.192.2', 'X-Instance-ID': 'a-b-c-d', 'X-Tenant-ID': 'test', 'X-Instance-ID-Signature': signed}) self.assertEqual(200, response.status_int) self.assertEqual(base64.b64decode(self.instance['user_data']), response.body) @mock.patch.object(base, 'get_metadata_by_instance_id') def test_metadata_handler_with_instance_id(self, get_by_uuid): # test twice to ensure that the cache works get_by_uuid.return_value = self.mdinst self.flags(metadata_cache_expiration=15) hnd = handler.MetadataRequestHandler() self._metadata_handler_with_instance_id(hnd) self._metadata_handler_with_instance_id(hnd) self.assertEqual(1, get_by_uuid.call_count) @mock.patch.object(base, 'get_metadata_by_instance_id') def test_metadata_handler_with_instance_id_no_cache(self, get_by_uuid): # test twice to ensure that disabling the cache works get_by_uuid.return_value = self.mdinst self.flags(metadata_cache_expiration=0) hnd = handler.MetadataRequestHandler() self._metadata_handler_with_instance_id(hnd) self._metadata_handler_with_instance_id(hnd) self.assertEqual(2, 
get_by_uuid.call_count) def _metadata_handler_with_remote_address(self, hnd): response = fake_request( None, self.mdinst, fake_get_metadata=False, app=hnd, relpath="/2009-04-04/user-data", address="192.192.192.2") self.assertEqual(200, response.status_int) self.assertEqual(base64.b64decode(self.instance.user_data), response.body) @mock.patch.object(base, 'get_metadata_by_address') def test_metadata_handler_with_remote_address(self, get_by_uuid): # test twice to ensure that the cache works get_by_uuid.return_value = self.mdinst self.flags(metadata_cache_expiration=15) hnd = handler.MetadataRequestHandler() self._metadata_handler_with_remote_address(hnd) self._metadata_handler_with_remote_address(hnd) self.assertEqual(1, get_by_uuid.call_count) @mock.patch.object(base, 'get_metadata_by_address') def test_metadata_handler_with_remote_address_no_cache(self, get_by_uuid): # test twice to ensure that disabling the cache works get_by_uuid.return_value = self.mdinst self.flags(metadata_cache_expiration=0) hnd = handler.MetadataRequestHandler() self._metadata_handler_with_remote_address(hnd) self._metadata_handler_with_remote_address(hnd) self.assertEqual(2, get_by_uuid.call_count) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_metadata_lb_proxy(self, mock_get_client): self.flags(service_metadata_proxy=True, group='neutron') self.expected_instance_id = 'a-b-c-d' # with X-Metadata-Provider proxy_lb_id = 'edge-x' mock_client = mock_get_client() mock_client.list_ports.return_value = { 'ports': [{'device_id': 'a-b-c-d', 'tenant_id': 'test'}]} mock_client.list_subnets.return_value = { 'subnets': [{'network_id': 'f-f-f-f'}]} response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata_by_instance_id=self._fake_x_get_metadata, headers={'X-Forwarded-For': '192.192.192.2', 'X-Metadata-Provider': proxy_lb_id}) self.assertEqual(200, response.status_int) @mock.patch.object(neutronapi, 
'get_client', return_value=mock.Mock()) def test_metadata_lb_proxy_chain(self, mock_get_client): self.flags(service_metadata_proxy=True, group='neutron') self.expected_instance_id = 'a-b-c-d' # with X-Metadata-Provider proxy_lb_id = 'edge-x' def fake_list_ports(ctx, **kwargs): if kwargs.get('fixed_ips') == 'ip_address=192.192.192.2': return { 'ports': [{ 'device_id': 'a-b-c-d', 'tenant_id': 'test'}]} else: return {'ports': []} mock_client = mock_get_client() mock_client.list_ports.side_effect = fake_list_ports mock_client.list_subnets.return_value = { 'subnets': [{'network_id': 'f-f-f-f'}]} response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="10.10.10.10", fake_get_metadata_by_instance_id=self._fake_x_get_metadata, headers={'X-Forwarded-For': '192.192.192.2, 10.10.10.10', 'X-Metadata-Provider': proxy_lb_id}) self.assertEqual(200, response.status_int) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_metadata_lb_proxy_signed(self, mock_get_client): shared_secret = "testing1234" self.flags( metadata_proxy_shared_secret=shared_secret, service_metadata_proxy=True, group='neutron') self.expected_instance_id = 'a-b-c-d' # with X-Metadata-Provider proxy_lb_id = 'edge-x' signature = hmac.new( shared_secret, proxy_lb_id, hashlib.sha256).hexdigest() mock_client = mock_get_client() mock_client.list_ports.return_value = { 'ports': [{'device_id': 'a-b-c-d', 'tenant_id': 'test'}]} mock_client.list_subnets.return_value = { 'subnets': [{'network_id': 'f-f-f-f'}]} response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata_by_instance_id=self._fake_x_get_metadata, headers={'X-Forwarded-For': '192.192.192.2', 'X-Metadata-Provider': proxy_lb_id, 'X-Metadata-Provider-Signature': signature}) self.assertEqual(200, response.status_int) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_metadata_lb_proxy_signed_fail(self, 
mock_get_client): shared_secret = "testing1234" bad_secret = "testing3468" self.flags( metadata_proxy_shared_secret=shared_secret, service_metadata_proxy=True, group='neutron') self.expected_instance_id = 'a-b-c-d' # with X-Metadata-Provider proxy_lb_id = 'edge-x' signature = hmac.new( bad_secret, proxy_lb_id, hashlib.sha256).hexdigest() mock_client = mock_get_client() mock_client.list_ports.return_value = { 'ports': [{'device_id': 'a-b-c-d', 'tenant_id': 'test'}]} mock_client.list_subnets.return_value = { 'subnets': [{'network_id': 'f-f-f-f'}]} response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata_by_instance_id=self._fake_x_get_metadata, headers={'X-Forwarded-For': '192.192.192.2', 'X-Metadata-Provider': proxy_lb_id, 'X-Metadata-Provider-Signature': signature}) self.assertEqual(403, response.status_int) @mock.patch.object(context, 'get_admin_context') @mock.patch.object(network_api, 'API') def test_get_metadata_by_address(self, mock_net_api, mock_get_context): mock_get_context.return_value = 'CONTEXT' api = mock.Mock() fixed_ip = objects.FixedIP( instance_uuid='2bfd8d71-6b69-410c-a2f5-dbca18d02966') api.get_fixed_ip_by_address.return_value = fixed_ip mock_net_api.return_value = api with mock.patch.object(base, 'get_metadata_by_instance_id') as gmd: base.get_metadata_by_address('foo') api.get_fixed_ip_by_address.assert_called_once_with( 'CONTEXT', 'foo') gmd.assert_called_once_with(fixed_ip.instance_uuid, 'foo', 'CONTEXT') @mock.patch.object(context, 'get_admin_context') @mock.patch.object(objects.Instance, 'get_by_uuid') def test_get_metadata_by_instance_id(self, mock_uuid, mock_context): inst = objects.Instance() mock_uuid.return_value = inst with mock.patch.object(base, 'InstanceMetadata') as imd: base.get_metadata_by_instance_id('foo', 'bar', ctxt='CONTEXT') self.assertFalse(mock_context.called, "get_admin_context() should not" "have been called, the context was given") 
        # tail of test_get_metadata_by_instance_id: the given context must
        # be forwarded verbatim to Instance.get_by_uuid with the full set
        # of expected_attrs the metadata view needs
        mock_uuid.assert_called_once_with('CONTEXT', 'foo',
            expected_attrs=['ec2_ids', 'flavor', 'info_cache', 'metadata',
                            'system_metadata', 'security_groups'])
        imd.assert_called_once_with(inst, 'bar')

    @mock.patch.object(context, 'get_admin_context')
    @mock.patch.object(objects.Instance, 'get_by_uuid')
    def test_get_metadata_by_instance_id_null_context(self,
                                                      mock_uuid,
                                                      mock_context):
        inst = objects.Instance()
        mock_uuid.return_value = inst
        mock_context.return_value = 'CONTEXT'

        with mock.patch.object(base, 'InstanceMetadata') as imd:
            base.get_metadata_by_instance_id('foo', 'bar')

        # with no context supplied, an admin context must be created and
        # used for the instance lookup
        mock_context.assert_called_once_with()
        mock_uuid.assert_called_once_with('CONTEXT', 'foo',
            expected_attrs=['ec2_ids', 'flavor', 'info_cache', 'metadata',
                            'system_metadata', 'security_groups'])
        imd.assert_called_once_with(inst, 'bar')


class MetadataPasswordTestCase(test.TestCase):
    """Tests for the metadata password get/set handler."""

    def setUp(self):
        super(MetadataPasswordTestCase, self).setUp()
        fake_network.stub_out_nw_api_get_instance_nw_info(self)
        self.context = context.RequestContext('fake', 'fake')
        self.instance = fake_inst_obj(self.context)
        self.flags(use_local=True, group='conductor')
        self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
            address=None, sgroups=None)
        # NOTE(review): this flags() call duplicates the one above and
        # looks like a copy/paste leftover — candidate for removal.
        self.flags(use_local=True, group='conductor')

    def test_get_password(self):
        request = webob.Request.blank('')
        self.mdinst.password = 'foo'
        result = password.handle_password(request, self.mdinst)
        self.assertEqual(result, 'foo')

    def test_bad_method(self):
        # only GET and POST are accepted by the password handler
        request = webob.Request.blank('')
        request.method = 'PUT'
        self.assertRaises(webob.exc.HTTPBadRequest,
                          password.handle_password, request, self.mdinst)

    @mock.patch('nova.objects.Instance.get_by_uuid')
    def _try_set_password(self, get_by_uuid, val='bar'):
        # helper: POST *val* as the new password and assert it is chunked
        # into the instance's system_metadata and saved
        request = webob.Request.blank('')
        request.method = 'POST'
        request.body = val
        get_by_uuid.return_value = self.instance

        with mock.patch.object(self.instance, 'save') as save:
            password.handle_password(request, self.mdinst)
            save.assert_called_once_with()

        self.assertIn('password_0',
self.instance.system_metadata) def test_set_password(self): self.mdinst.password = '' self._try_set_password() def test_conflict(self): self.mdinst.password = 'foo' self.assertRaises(webob.exc.HTTPConflict, self._try_set_password) def test_too_large(self): self.mdinst.password = '' self.assertRaises(webob.exc.HTTPBadRequest, self._try_set_password, val=('a' * (password.MAX_SIZE + 1))) nova-13.0.0/nova/tests/unit/cells/0000775000567000056710000000000012701410205020055 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/cells/test_cells_scheduler.py0000664000567000056710000005671712701407773024666 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For CellsScheduler """ import copy import time import mock from oslo_utils import uuidutils from nova import block_device from nova.cells import filters from nova.cells import weights from nova.compute import vm_states import nova.conf from nova import context from nova import db from nova import exception from nova import objects from nova.scheduler import utils as scheduler_utils from nova import test from nova.tests.unit.cells import fakes from nova.tests.unit import fake_block_device from nova.tests import uuidsentinel from nova import utils CONF = nova.conf.CONF class FakeFilterClass1(filters.BaseCellFilter): pass class FakeFilterClass2(filters.BaseCellFilter): pass class FakeWeightClass1(weights.BaseCellWeigher): def _weigh_object(self, obj, weight_properties): pass class FakeWeightClass2(weights.BaseCellWeigher): def _weigh_object(self, obj, weight_properties): pass class CellsSchedulerTestCase(test.TestCase): """Test case for CellsScheduler class.""" def setUp(self): super(CellsSchedulerTestCase, self).setUp() self.flags(scheduler_filter_classes=[], scheduler_weight_classes=[], group='cells') self._init_cells_scheduler() def _init_cells_scheduler(self): fakes.init(self) self.msg_runner = fakes.get_message_runner('api-cell') self.scheduler = self.msg_runner.scheduler self.state_manager = self.msg_runner.state_manager self.my_cell_state = self.state_manager.get_my_state() self.ctxt = context.RequestContext('fake', 'fake') instance_uuids = [] for x in range(3): instance_uuids.append(uuidutils.generate_uuid()) self.instance_uuids = instance_uuids self.instances = [objects.Instance(uuid=uuid, id=id) for id, uuid in enumerate(instance_uuids)] self.request_spec = { 'num_instances': len(instance_uuids), 'instance_properties': self.instances[0], 'instance_type': 'fake_type', 'image': 'fake_image'} self.build_inst_kwargs = { 'instances': self.instances, 'image': 'fake_image', 'filter_properties': {'instance_type': 'fake_type'}, 'security_groups': 
'fake_sec_groups', 'block_device_mapping': 'fake_bdm'} def test_create_instances_here(self): # Just grab the first instance type inst_type = objects.Flavor.get_by_id(self.ctxt, 1) image = {'properties': {}} instance_uuids = self.instance_uuids instance_props = {'id': 'removed', 'security_groups': 'removed', 'info_cache': 'removed', 'name': 'instance-00000001', 'hostname': 'meow', 'display_name': 'moo', 'image_ref': 'fake_image_ref', 'user_id': self.ctxt.user_id, # Test these as lists 'metadata': {'moo': 'cow'}, 'system_metadata': {'meow': 'cat'}, 'flavor': inst_type, 'project_id': self.ctxt.project_id} call_info = {'uuids': []} block_device_mapping = [ objects.BlockDeviceMapping(context=self.ctxt, **fake_block_device.FakeDbBlockDeviceDict( block_device.create_image_bdm('fake_image_ref'), anon=True)) ] def _fake_instance_update_at_top(_ctxt, instance): call_info['uuids'].append(instance['uuid']) self.stubs.Set(self.msg_runner, 'instance_update_at_top', _fake_instance_update_at_top) self.scheduler._create_instances_here(self.ctxt, instance_uuids, instance_props, inst_type, image, ['default'], block_device_mapping) self.assertEqual(instance_uuids, call_info['uuids']) for count, instance_uuid in enumerate(instance_uuids): instance = db.instance_get_by_uuid(self.ctxt, instance_uuid) meta = utils.instance_meta(instance) self.assertEqual('cow', meta['moo']) sys_meta = utils.instance_sys_meta(instance) self.assertEqual('cat', sys_meta['meow']) self.assertEqual('meow', instance['hostname']) self.assertEqual('moo-%d' % (count + 1), instance['display_name']) self.assertEqual('fake_image_ref', instance['image_ref']) @mock.patch('nova.objects.Instance.update') def test_create_instances_here_pops_problematic_properties(self, mock_update): values = { 'uuid': uuidsentinel.instance, 'metadata': [], 'id': 1, 'name': 'foo', 'info_cache': 'bar', 'security_groups': 'not secure', 'flavor': 'chocolate', 'pci_requests': 'no thanks', 'ec2_ids': 'prime', } 
        @mock.patch.object(self.scheduler.compute_api,
                           'create_db_entry_for_new_instance')
        def test(mock_create_db):
            # Drive the code under test with the problematic values dict.
            self.scheduler._create_instances_here(
                self.ctxt, [uuidsentinel.instance], values, objects.Flavor(),
                'foo', [], [])

        test()

        # NOTE(danms): Make sure that only the expected properties
        # are applied to the instance object. The complex ones that
        # would have been mangled over RPC should be removed.
        mock_update.assert_called_once_with(
            {'uuid': uuidsentinel.instance, 'metadata': {}})

    def test_build_instances_selects_child_cell(self):
        """With no capacity info for our cell, scheduling should pick
        one of the child cells and forward build_instances to it.
        """
        # Make sure there's no capacity info so we're sure to
        # select a child cell
        our_cell_info = self.state_manager.get_my_state()
        our_cell_info.capacities = {}

        call_info = {'times': 0}

        orig_fn = self.msg_runner.build_instances

        def msg_runner_build_instances(ctxt, target_cell, build_inst_kwargs):
            # This gets called twice.  Once for our running it
            # in this cell.. and then it'll get called when the
            # child cell is picked.  So, first time.. just run it
            # like normal.
            if not call_info['times']:
                call_info['times'] += 1
                return orig_fn(ctxt, target_cell, build_inst_kwargs)
            # Second call: the scheduler picked a target cell; record it.
            call_info['ctxt'] = ctxt
            call_info['target_cell'] = target_cell
            call_info['build_inst_kwargs'] = build_inst_kwargs

        def fake_build_request_spec(ctxt, image, instances):
            # Minimal request spec; the real one is built from the image
            # and instance list by scheduler_utils.
            request_spec = {
                    'num_instances': len(instances),
                    'image': image}
            return request_spec

        self.stubs.Set(self.msg_runner, 'build_instances',
                msg_runner_build_instances)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                fake_build_request_spec)

        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                self.build_inst_kwargs)

        self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.build_inst_kwargs,
                call_info['build_inst_kwargs'])
        # The selected target must be one of our child cells.
        child_cells = self.state_manager.get_child_cells()
        self.assertIn(call_info['target_cell'], child_cells)

    def test_build_instances_selects_current_cell(self):
        """With no child cells, the current cell schedules locally:
        instances are created here and handed to the compute task API.
        """
        # Make sure there's no child cells so that we will be
        # selected
        self.state_manager.child_cells = {}

        call_info = {}
        build_inst_kwargs = copy.deepcopy(self.build_inst_kwargs)

        def fake_create_instances_here(ctxt, instance_uuids,
                instance_properties, instance_type, image, security_groups,
                block_device_mapping):
            # Capture everything _create_instances_here receives.
            call_info['ctxt'] = ctxt
            call_info['instance_uuids'] = instance_uuids
            call_info['instance_properties'] = instance_properties
            call_info['instance_type'] = instance_type
            call_info['image'] = image
            call_info['security_groups'] = security_groups
            call_info['block_device_mapping'] = block_device_mapping
            return self.instances

        def fake_rpc_build_instances(ctxt, **build_inst_kwargs):
            call_info['build_inst_kwargs'] = build_inst_kwargs

        def fake_build_request_spec(ctxt, image, instances):
            request_spec = {
                    'num_instances': len(instances),
                    'image': image}
            return request_spec

        self.stubs.Set(self.scheduler, '_create_instances_here',
                fake_create_instances_here)
        self.stubs.Set(self.scheduler.compute_task_api,
                'build_instances', fake_rpc_build_instances)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
fake_build_request_spec)

        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                build_inst_kwargs)

        # Everything forwarded to _create_instances_here and the compute
        # task API must match what was passed into build_instances.
        self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
        self.assertEqual(self.build_inst_kwargs['instances'][0]['id'],
                call_info['instance_properties']['id'])
        self.assertEqual(
                self.build_inst_kwargs['filter_properties']['instance_type'],
                call_info['instance_type'])
        self.assertEqual(self.build_inst_kwargs['image'], call_info['image'])
        self.assertEqual(self.build_inst_kwargs['security_groups'],
                call_info['security_groups'])
        self.assertEqual(self.build_inst_kwargs['block_device_mapping'],
                call_info['block_device_mapping'])
        self.assertEqual(build_inst_kwargs,
                call_info['build_inst_kwargs'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])

    def test_build_instances_retries_when_no_cells_avail(self):
        """NoCellsAvailable retries scheduler_retries times, then errors
        the instances (retries + 1 total attempts).
        """
        self.flags(scheduler_retries=7, group='cells')

        call_info = {'num_tries': 0,
                     'errored_uuids': []}

        def fake_grab_target_cells(filter_properties):
            call_info['num_tries'] += 1
            raise exception.NoCellsAvailable()

        def fake_sleep(_secs):
            # Skip the real inter-retry delay to keep the test fast.
            return

        def fake_instance_save(inst):
            # Each instance should end up in ERROR state after retries
            # are exhausted.
            self.assertEqual(vm_states.ERROR, inst.vm_state)
            call_info['errored_uuids'].append(inst.uuid)

        def fake_build_request_spec(ctxt, image, instances):
            request_spec = {
                    'num_instances': len(instances),
                    'image': image}
            return request_spec

        self.stubs.Set(self.scheduler, '_grab_target_cells',
                fake_grab_target_cells)
        self.stubs.Set(time, 'sleep', fake_sleep)
        self.stubs.Set(objects.Instance, 'save', fake_instance_save)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                fake_build_request_spec)

        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                self.build_inst_kwargs)

        # 1 initial attempt + 7 retries.
        self.assertEqual(8, call_info['num_tries'])
        self.assertEqual(self.instance_uuids, call_info['errored_uuids'])

    def test_schedule_method_on_random_exception(self):
        self.flags(scheduler_retries=7, group='cells')

        instances = [objects.Instance(uuid=uuid) for uuid in
self.instance_uuids]
        method_kwargs = {
                'image': 'fake_image',
                'instances': instances,
                'filter_properties': {}}
        call_info = {'num_tries': 0,
                     'errored_uuids1': [],
                     'errored_uuids2': []}

        def fake_grab_target_cells(filter_properties):
            call_info['num_tries'] += 1
            # An unexpected (non-NoCellsAvailable) failure.
            raise test.TestingException()

        def fake_instance_save(inst):
            self.assertEqual(vm_states.ERROR, inst.vm_state)
            call_info['errored_uuids1'].append(inst.uuid)

        def fake_instance_update_at_top(ctxt, instance):
            self.assertEqual(vm_states.ERROR, instance['vm_state'])
            call_info['errored_uuids2'].append(instance['uuid'])

        def fake_build_request_spec(ctxt, image, instances):
            request_spec = {
                    'num_instances': len(instances),
                    'image': image}
            return request_spec

        self.stubs.Set(self.scheduler, '_grab_target_cells',
                fake_grab_target_cells)
        self.stubs.Set(objects.Instance, 'save', fake_instance_save)
        self.stubs.Set(self.msg_runner, 'instance_update_at_top',
                fake_instance_update_at_top)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                fake_build_request_spec)

        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                method_kwargs)
        # Shouldn't retry
        self.assertEqual(1, call_info['num_tries'])
        # Instances are errored both locally and up at the top cell.
        self.assertEqual(self.instance_uuids, call_info['errored_uuids1'])
        self.assertEqual(self.instance_uuids, call_info['errored_uuids2'])

    def test_filter_schedule_skipping(self):
        # if a filter handles scheduling, short circuit
        def _grab(filter_properties):
            # A filter returning None means it took over scheduling.
            return None

        self.stubs.Set(self.scheduler, '_grab_target_cells', _grab)

        def _test(self, *args):
            raise test.TestingException("shouldn't be called")

        try:
            self.scheduler._schedule_build_to_cells(None, None, None, _test,
                                                    None)
        except test.TestingException:
            self.fail("Scheduling did not properly short circuit")

    def test_cells_filter_args_correct(self):
        """Configured filter classes are loaded and invoked with the
        expected filter properties when scheduling locally.
        """
        # Re-init our fakes with some filters.
        our_path = 'nova.tests.unit.cells.test_cells_scheduler'
        cls_names = [our_path + '.' + 'FakeFilterClass1',
                     our_path + '.'
+ 'FakeFilterClass2']
        self.flags(scheduler_filter_classes=cls_names, group='cells')
        self._init_cells_scheduler()

        # Make sure there's no child cells so that we will be
        # selected.  Makes stubbing easier.
        self.state_manager.child_cells = {}

        call_info = {}

        def fake_create_instances_here(ctxt, instance_uuids,
                instance_properties, instance_type, image, security_groups,
                block_device_mapping):
            # Capture everything _create_instances_here receives.
            call_info['ctxt'] = ctxt
            call_info['instance_uuids'] = instance_uuids
            call_info['instance_properties'] = instance_properties
            call_info['instance_type'] = instance_type
            call_info['image'] = image
            call_info['security_groups'] = security_groups
            call_info['block_device_mapping'] = block_device_mapping

        def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
            call_info['host_sched_kwargs'] = host_sched_kwargs

        def fake_get_filtered_objs(filters, cells, filt_properties):
            # Capture the filter handler arguments; pass all cells
            # through unfiltered.
            call_info['filt_objects'] = filters
            call_info['filt_cells'] = cells
            call_info['filt_props'] = filt_properties
            return cells

        def fake_build_request_spec(ctxt, image, instances):
            request_spec = {
                    'num_instances': len(instances),
                    'instance_properties': instances[0],
                    'image': image,
                    'instance_type': 'fake_type'}
            return request_spec

        self.stubs.Set(self.scheduler, '_create_instances_here',
                fake_create_instances_here)
        self.stubs.Set(self.scheduler.compute_task_api,
                'build_instances', fake_rpc_build_instances)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                fake_build_request_spec)
        filter_handler = self.scheduler.filter_handler
        self.stubs.Set(filter_handler, 'get_filtered_objects',
                fake_get_filtered_objs)

        host_sched_kwargs = {'image': 'fake_image',
                             'instances': self.instances,
                             'filter_properties':
                                    {'instance_type': 'fake_type'},
                             'security_groups': 'fake_sec_groups',
                             'block_device_mapping': 'fake_bdm'}

        self.msg_runner.build_instances(self.ctxt,
                self.my_cell_state, host_sched_kwargs)
        # Our cell was selected.
self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
        self.assertEqual(self.request_spec['instance_properties']['id'],
                call_info['instance_properties']['id'])
        self.assertEqual(self.request_spec['instance_type'],
                call_info['instance_type'])
        self.assertEqual(self.request_spec['image'], call_info['image'])
        self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
        # Filter args are correct
        expected_filt_props = {'context': self.ctxt,
                               'scheduler': self.scheduler,
                               'routing_path': self.my_cell_state.name,
                               'host_sched_kwargs': host_sched_kwargs,
                               'request_spec': self.request_spec,
                               'instance_type': 'fake_type'}
        self.assertEqual(expected_filt_props, call_info['filt_props'])
        # The two configured fake filter classes, in configured order.
        self.assertEqual([FakeFilterClass1, FakeFilterClass2],
                         [obj.__class__ for obj in call_info['filt_objects']])
        self.assertEqual([self.my_cell_state], call_info['filt_cells'])

    def test_cells_filter_returning_none(self):
        """A filter returning None means it handled scheduling itself,
        so no instances should be created here.
        """
        # Re-init our fakes with some filters.
        our_path = 'nova.tests.unit.cells.test_cells_scheduler'
        cls_names = [our_path + '.' + 'FakeFilterClass1',
                     our_path + '.' + 'FakeFilterClass2']
        self.flags(scheduler_filter_classes=cls_names, group='cells')
        self._init_cells_scheduler()

        # Make sure there's no child cells so that we will be
        # selected.  Makes stubbing easier.
        self.state_manager.child_cells = {}

        call_info = {'scheduled': False}

        def fake_create_instances_here(ctxt, request_spec):
            # Should not be called
            call_info['scheduled'] = True

        def fake_get_filtered_objs(filter_classes, cells, filt_properties):
            # Should cause scheduling to be skipped.  Means that the
            # filter did it.
return None

        self.stubs.Set(self.scheduler, '_create_instances_here',
                fake_create_instances_here)
        filter_handler = self.scheduler.filter_handler
        self.stubs.Set(filter_handler, 'get_filtered_objects',
                fake_get_filtered_objs)

        self.msg_runner.build_instances(self.ctxt, self.my_cell_state, {})

        # The filter short-circuited scheduling, so nothing was created.
        self.assertFalse(call_info['scheduled'])

    def test_cells_weight_args_correct(self):
        """Configured weigher classes are loaded and invoked with the
        expected weighing properties when scheduling locally.
        """
        # Re-init our fakes with some filters.
        our_path = 'nova.tests.unit.cells.test_cells_scheduler'
        cls_names = [our_path + '.' + 'FakeWeightClass1',
                     our_path + '.' + 'FakeWeightClass2']
        self.flags(scheduler_weight_classes=cls_names, group='cells')
        self._init_cells_scheduler()

        # Make sure there's no child cells so that we will be
        # selected.  Makes stubbing easier.
        self.state_manager.child_cells = {}

        call_info = {}

        def fake_create_instances_here(ctxt, instance_uuids,
                instance_properties, instance_type, image, security_groups,
                block_device_mapping):
            # Capture everything _create_instances_here receives.
            call_info['ctxt'] = ctxt
            call_info['instance_uuids'] = instance_uuids
            call_info['instance_properties'] = instance_properties
            call_info['instance_type'] = instance_type
            call_info['image'] = image
            call_info['security_groups'] = security_groups
            call_info['block_device_mapping'] = block_device_mapping

        def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
            call_info['host_sched_kwargs'] = host_sched_kwargs

        def fake_get_weighed_objs(weighers, cells, filt_properties):
            # Capture the weigher arguments; return the first cell as
            # the (only) weighed choice.
            call_info['weighers'] = weighers
            call_info['weight_cells'] = cells
            call_info['weight_props'] = filt_properties
            return [weights.WeightedCell(cells[0], 0.0)]

        def fake_build_request_spec(ctxt, image, instances):
            request_spec = {
                    'num_instances': len(instances),
                    'instance_properties': instances[0],
                    'image': image,
                    'instance_type': 'fake_type'}
            return request_spec

        self.stubs.Set(self.scheduler, '_create_instances_here',
                fake_create_instances_here)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                fake_build_request_spec)
        self.stubs.Set(self.scheduler.compute_task_api, 'build_instances',
fake_rpc_build_instances) weight_handler = self.scheduler.weight_handler self.stubs.Set(weight_handler, 'get_weighed_objects', fake_get_weighed_objs) host_sched_kwargs = {'image': 'fake_image', 'instances': self.instances, 'filter_properties': {'instance_type': 'fake_type'}, 'security_groups': 'fake_sec_groups', 'block_device_mapping': 'fake_bdm'} self.msg_runner.build_instances(self.ctxt, self.my_cell_state, host_sched_kwargs) # Our cell was selected. self.assertEqual(self.ctxt, call_info['ctxt']) self.assertEqual(self.instance_uuids, call_info['instance_uuids']) self.assertEqual(self.request_spec['instance_properties']['id'], call_info['instance_properties']['id']) self.assertEqual(self.request_spec['instance_type'], call_info['instance_type']) self.assertEqual(self.request_spec['image'], call_info['image']) self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs']) # Weight args are correct expected_filt_props = {'context': self.ctxt, 'scheduler': self.scheduler, 'routing_path': self.my_cell_state.name, 'host_sched_kwargs': host_sched_kwargs, 'request_spec': self.request_spec, 'instance_type': 'fake_type'} self.assertEqual(expected_filt_props, call_info['weight_props']) self.assertEqual([FakeWeightClass1, FakeWeightClass2], [obj.__class__ for obj in call_info['weighers']]) self.assertEqual([self.my_cell_state], call_info['weight_cells']) nova-13.0.0/nova/tests/unit/cells/__init__.py0000664000567000056710000000000012701407773022174 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/cells/test_cells_manager.py0000664000567000056710000012371412701407773024312 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For CellsManager """ import copy import datetime import mock from oslo_utils import timeutils from six.moves import range from nova.cells import messaging from nova.cells import utils as cells_utils import nova.conf from nova import context from nova import objects from nova import test from nova.tests.unit.cells import fakes from nova.tests.unit import fake_instance from nova.tests.unit import fake_server_actions from nova.tests.unit.objects import test_flavor CONF = nova.conf.CONF CONF.import_opt('compute_topic', 'nova.compute.rpcapi') FAKE_COMPUTE_NODES = [dict(id=1, host='host1'), dict(id=2, host='host2')] FAKE_SERVICES = [dict(id=1, host='host1'), dict(id=2, host='host2'), dict(id=3, host='host3')] FAKE_TASK_LOGS = [dict(id=1, host='host1'), dict(id=2, host='host2')] class CellsManagerClassTestCase(test.NoDBTestCase): """Test case for CellsManager class.""" def setUp(self): super(CellsManagerClassTestCase, self).setUp() fakes.init(self) # pick a child cell to use for tests. 
self.our_cell = 'grandchild-cell1' self.cells_manager = fakes.get_cells_manager(self.our_cell) self.msg_runner = self.cells_manager.msg_runner self.state_manager = fakes.get_state_manager(self.our_cell) self.driver = self.cells_manager.driver self.ctxt = 'fake_context' def _get_fake_response(self, raw_response=None, exc=False): if exc: return messaging.Response(self.ctxt, 'fake', test.TestingException(), True) if raw_response is None: raw_response = 'fake-response' return messaging.Response(self.ctxt, 'fake', raw_response, False) def test_get_cell_info_for_neighbors(self): self.mox.StubOutWithMock(self.cells_manager.state_manager, 'get_cell_info_for_neighbors') self.cells_manager.state_manager.get_cell_info_for_neighbors() self.mox.ReplayAll() self.cells_manager.get_cell_info_for_neighbors(self.ctxt) def test_post_start_hook_child_cell(self): self.mox.StubOutWithMock(self.driver, 'start_servers') self.mox.StubOutWithMock(context, 'get_admin_context') self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents') self.driver.start_servers(self.msg_runner) context.get_admin_context().AndReturn(self.ctxt) self.cells_manager._update_our_parents(self.ctxt) self.mox.ReplayAll() self.cells_manager.post_start_hook() def test_post_start_hook_middle_cell(self): cells_manager = fakes.get_cells_manager('child-cell2') msg_runner = cells_manager.msg_runner driver = cells_manager.driver self.mox.StubOutWithMock(driver, 'start_servers') self.mox.StubOutWithMock(context, 'get_admin_context') self.mox.StubOutWithMock(msg_runner, 'ask_children_for_capabilities') self.mox.StubOutWithMock(msg_runner, 'ask_children_for_capacities') driver.start_servers(msg_runner) context.get_admin_context().AndReturn(self.ctxt) msg_runner.ask_children_for_capabilities(self.ctxt) msg_runner.ask_children_for_capacities(self.ctxt) self.mox.ReplayAll() cells_manager.post_start_hook() def test_update_our_parents(self): self.mox.StubOutWithMock(self.msg_runner, 'tell_parents_our_capabilities') 
self.mox.StubOutWithMock(self.msg_runner, 'tell_parents_our_capacities') self.msg_runner.tell_parents_our_capabilities(self.ctxt) self.msg_runner.tell_parents_our_capacities(self.ctxt) self.mox.ReplayAll() self.cells_manager._update_our_parents(self.ctxt) def test_build_instances(self): build_inst_kwargs = {'instances': [objects.Instance(), objects.Instance()]} self.mox.StubOutWithMock(self.msg_runner, 'build_instances') our_cell = self.msg_runner.state_manager.get_my_state() self.msg_runner.build_instances(self.ctxt, our_cell, build_inst_kwargs) self.mox.ReplayAll() self.cells_manager.build_instances(self.ctxt, build_inst_kwargs=build_inst_kwargs) def test_build_instances_old_flavor(self): flavor_dict = test_flavor.fake_flavor args = {'filter_properties': {'instance_type': flavor_dict}, 'instances': [objects.Instance()]} with mock.patch.object(self.msg_runner, 'build_instances') as mock_bi: self.cells_manager.build_instances(self.ctxt, build_inst_kwargs=args) filter_properties = mock_bi.call_args[0][2]['filter_properties'] self.assertIsInstance(filter_properties['instance_type'], objects.Flavor) def test_build_instances_old_instances(self): args = {'instances': [fake_instance.fake_db_instance()]} with mock.patch.object(self.msg_runner, 'build_instances') as mock_bi: self.cells_manager.build_instances(self.ctxt, build_inst_kwargs=args) self.assertIsInstance(mock_bi.call_args[0][2]['instances'][0], objects.Instance) def test_run_compute_api_method(self): # Args should just be silently passed through cell_name = 'fake-cell-name' method_info = 'fake-method-info' self.mox.StubOutWithMock(self.msg_runner, 'run_compute_api_method') fake_response = self._get_fake_response() self.msg_runner.run_compute_api_method(self.ctxt, cell_name, method_info, True).AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.run_compute_api_method( self.ctxt, cell_name=cell_name, method_info=method_info, call=True) self.assertEqual('fake-response', response) def 
test_instance_update_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_update_at_top') self.msg_runner.instance_update_at_top(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.instance_update_at_top(self.ctxt, instance='fake-instance') def test_instance_destroy_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_destroy_at_top') self.msg_runner.instance_destroy_at_top(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.instance_destroy_at_top(self.ctxt, instance='fake-instance') def test_instance_delete_everywhere(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_delete_everywhere') self.msg_runner.instance_delete_everywhere(self.ctxt, 'fake-instance', 'fake-type') self.mox.ReplayAll() self.cells_manager.instance_delete_everywhere( self.ctxt, instance='fake-instance', delete_type='fake-type') def test_instance_fault_create_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_fault_create_at_top') self.msg_runner.instance_fault_create_at_top(self.ctxt, 'fake-fault') self.mox.ReplayAll() self.cells_manager.instance_fault_create_at_top( self.ctxt, instance_fault='fake-fault') def test_bw_usage_update_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'bw_usage_update_at_top') self.msg_runner.bw_usage_update_at_top(self.ctxt, 'fake-bw-info') self.mox.ReplayAll() self.cells_manager.bw_usage_update_at_top( self.ctxt, bw_update_info='fake-bw-info') def test_heal_instances(self): self.flags(instance_updated_at_threshold=1000, instance_update_num_instances=2, group='cells') fake_context = context.RequestContext('fake', 'fake') stalled_time = timeutils.utcnow() updated_since = stalled_time - datetime.timedelta(seconds=1000) def utcnow(): return stalled_time call_info = {'get_instances': 0, 'sync_instances': []} instances = ['instance1', 'instance2', 'instance3'] def get_instances_to_sync(context, **kwargs): self.assertEqual(fake_context, context) call_info['shuffle'] = 
kwargs.get('shuffle') call_info['project_id'] = kwargs.get('project_id') call_info['updated_since'] = kwargs.get('updated_since') call_info['get_instances'] += 1 return iter(instances) @staticmethod def instance_get_by_uuid(context, uuid): return instances[int(uuid[-1]) - 1] def sync_instance(context, instance): self.assertEqual(fake_context, context) call_info['sync_instances'].append(instance) self.stubs.Set(cells_utils, 'get_instances_to_sync', get_instances_to_sync) self.stubs.Set(objects.Instance, 'get_by_uuid', instance_get_by_uuid) self.stubs.Set(self.cells_manager, '_sync_instance', sync_instance) self.stubs.Set(timeutils, 'utcnow', utcnow) self.cells_manager._heal_instances(fake_context) self.assertTrue(call_info['shuffle']) self.assertIsNone(call_info['project_id']) self.assertEqual(updated_since, call_info['updated_since']) self.assertEqual(1, call_info['get_instances']) # Only first 2 self.assertEqual(instances[:2], call_info['sync_instances']) call_info['sync_instances'] = [] self.cells_manager._heal_instances(fake_context) self.assertTrue(call_info['shuffle']) self.assertIsNone(call_info['project_id']) self.assertEqual(updated_since, call_info['updated_since']) self.assertEqual(2, call_info['get_instances']) # Now the last 1 and the first 1 self.assertEqual([instances[-1], instances[0]], call_info['sync_instances']) def test_sync_instances(self): self.mox.StubOutWithMock(self.msg_runner, 'sync_instances') self.msg_runner.sync_instances(self.ctxt, 'fake-project', 'fake-time', 'fake-deleted') self.mox.ReplayAll() self.cells_manager.sync_instances(self.ctxt, project_id='fake-project', updated_since='fake-time', deleted='fake-deleted') def test_service_get_all(self): responses = [] expected_response = [] # 3 cells... so 3 responses. Each response is a list of services. # Manager should turn these into a single list of responses. 
for i in range(3): cell_name = 'path!to!cell%i' % i services = [] for service in FAKE_SERVICES: fake_service = objects.Service(**service) services.append(fake_service) expected_service = cells_utils.ServiceProxy(fake_service, cell_name) expected_response.append( (cell_name, expected_service, fake_service)) response = messaging.Response(self.ctxt, cell_name, services, False) responses.append(response) self.mox.StubOutWithMock(self.msg_runner, 'service_get_all') self.mox.StubOutWithMock(cells_utils, 'add_cell_to_service') self.msg_runner.service_get_all(self.ctxt, 'fake-filters').AndReturn(responses) # Calls are done by cells, so we need to sort the list by the cell name expected_response.sort(key=lambda k: k[0]) for cell_name, service_proxy, service in expected_response: cells_utils.add_cell_to_service( service, cell_name).AndReturn(service_proxy) self.mox.ReplayAll() response = self.cells_manager.service_get_all(self.ctxt, filters='fake-filters') self.assertEqual([proxy for cell, proxy, service in expected_response], response) def test_service_get_by_compute_host(self): fake_cell = 'fake-cell' fake_service = objects.Service(**FAKE_SERVICES[0]) fake_response = messaging.Response(self.ctxt, fake_cell, fake_service, False) expected_response = cells_utils.ServiceProxy(fake_service, fake_cell) cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host') self.mox.StubOutWithMock(self.msg_runner, 'service_get_by_compute_host') self.mox.StubOutWithMock(cells_utils, 'add_cell_to_service') self.msg_runner.service_get_by_compute_host(self.ctxt, fake_cell, 'fake-host').AndReturn(fake_response) cells_utils.add_cell_to_service(fake_service, fake_cell).AndReturn( expected_response) self.mox.ReplayAll() response = self.cells_manager.service_get_by_compute_host(self.ctxt, host_name=cell_and_host) self.assertEqual(expected_response, response) def test_get_host_uptime(self): fake_cell = 'parent!fake-cell' fake_host = 'fake-host' fake_cell_and_host = 
cells_utils.cell_with_item(fake_cell, fake_host) host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:" " 0.20, 0.12, 0.14") fake_response = messaging.Response(self.ctxt, fake_cell, host_uptime, False) self.mox.StubOutWithMock(self.msg_runner, 'get_host_uptime') self.msg_runner.get_host_uptime(self.ctxt, fake_cell, fake_host).\ AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.get_host_uptime(self.ctxt, fake_cell_and_host) self.assertEqual(host_uptime, response) def test_service_update(self): fake_cell = 'fake-cell' fake_service = objects.Service(**FAKE_SERVICES[0]) fake_response = messaging.Response( self.ctxt, fake_cell, fake_service, False) expected_response = cells_utils.ServiceProxy(fake_service, fake_cell) cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host') params_to_update = {'disabled': True} self.mox.StubOutWithMock(self.msg_runner, 'service_update') self.mox.StubOutWithMock(cells_utils, 'add_cell_to_service') self.msg_runner.service_update(self.ctxt, fake_cell, 'fake-host', 'nova-api', params_to_update).AndReturn(fake_response) cells_utils.add_cell_to_service(fake_service, fake_cell).AndReturn( expected_response) self.mox.ReplayAll() response = self.cells_manager.service_update( self.ctxt, host_name=cell_and_host, binary='nova-api', params_to_update=params_to_update) self.assertEqual(expected_response, response) def test_service_delete(self): fake_cell = 'fake-cell' service_id = '1' cell_service_id = cells_utils.cell_with_item(fake_cell, service_id) with mock.patch.object(self.msg_runner, 'service_delete') as service_delete: self.cells_manager.service_delete(self.ctxt, cell_service_id) service_delete.assert_called_once_with( self.ctxt, fake_cell, service_id) def test_proxy_rpc_to_manager(self): self.mox.StubOutWithMock(self.msg_runner, 'proxy_rpc_to_manager') fake_response = self._get_fake_response() cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host') topic = "%s.%s" % 
(CONF.compute_topic, cell_and_host) self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell', 'fake-host', topic, 'fake-rpc-msg', True, -1).AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.proxy_rpc_to_manager(self.ctxt, topic=topic, rpc_message='fake-rpc-msg', call=True, timeout=-1) self.assertEqual('fake-response', response) def _build_task_log_responses(self, num): responses = [] expected_response = [] # 3 cells... so 3 responses. Each response is a list of task log # entries. Manager should turn these into a single list of # task log entries. for i in range(num): cell_name = 'path!to!cell%i' % i task_logs = [] for task_log in FAKE_TASK_LOGS: task_logs.append(copy.deepcopy(task_log)) expected_task_log = copy.deepcopy(task_log) cells_utils.add_cell_to_task_log(expected_task_log, cell_name) expected_response.append(expected_task_log) response = messaging.Response(self.ctxt, cell_name, task_logs, False) responses.append(response) return expected_response, responses def test_task_log_get_all(self): expected_response, responses = self._build_task_log_responses(3) self.mox.StubOutWithMock(self.msg_runner, 'task_log_get_all') self.msg_runner.task_log_get_all(self.ctxt, None, 'fake-name', 'fake-begin', 'fake-end', host=None, state=None).AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.task_log_get_all(self.ctxt, task_name='fake-name', period_beginning='fake-begin', period_ending='fake-end') self.assertEqual(expected_response, response) def test_task_log_get_all_with_filters(self): expected_response, responses = self._build_task_log_responses(1) cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host') self.mox.StubOutWithMock(self.msg_runner, 'task_log_get_all') self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell', 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state').AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.task_log_get_all(self.ctxt, 
task_name='fake-name', period_beginning='fake-begin', period_ending='fake-end', host=cell_and_host, state='fake-state') self.assertEqual(expected_response, response) def test_task_log_get_all_with_cell_but_no_host_filters(self): expected_response, responses = self._build_task_log_responses(1) # Host filter only has cell name. cell_and_host = 'fake-cell' self.mox.StubOutWithMock(self.msg_runner, 'task_log_get_all') self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell', 'fake-name', 'fake-begin', 'fake-end', host=None, state='fake-state').AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.task_log_get_all(self.ctxt, task_name='fake-name', period_beginning='fake-begin', period_ending='fake-end', host=cell_and_host, state='fake-state') self.assertEqual(expected_response, response) def test_compute_node_get_all(self): responses = [] expected_response = [] # 3 cells... so 3 responses. Each response is a list of computes. # Manager should turn these into a single list of responses. 
for i in range(3): cell_name = 'path!to!cell%i' % i compute_nodes = [] for compute_node in FAKE_COMPUTE_NODES: fake_compute = objects.ComputeNode(**compute_node) fake_compute._cached_service = None compute_nodes.append(fake_compute) expected_compute_node = cells_utils.ComputeNodeProxy( fake_compute, cell_name) expected_response.append( (cell_name, expected_compute_node, fake_compute)) response = messaging.Response(self.ctxt, cell_name, compute_nodes, False) responses.append(response) self.mox.StubOutWithMock(self.msg_runner, 'compute_node_get_all') self.mox.StubOutWithMock(cells_utils, 'add_cell_to_compute_node') self.msg_runner.compute_node_get_all(self.ctxt, hypervisor_match='fake-match').AndReturn(responses) # Calls are done by cells, so we need to sort the list by the cell name expected_response.sort(key=lambda k: k[0]) for cell_name, compute_proxy, compute_node in expected_response: cells_utils.add_cell_to_compute_node( compute_node, cell_name).AndReturn(compute_proxy) self.mox.ReplayAll() response = self.cells_manager.compute_node_get_all(self.ctxt, hypervisor_match='fake-match') self.assertEqual([proxy for cell, proxy, compute in expected_response], response) def test_compute_node_stats(self): raw_resp1 = {'key1': 1, 'key2': 2} raw_resp2 = {'key2': 1, 'key3': 2} raw_resp3 = {'key3': 1, 'key4': 2} responses = [messaging.Response(self.ctxt, 'cell1', raw_resp1, False), messaging.Response(self.ctxt, 'cell2', raw_resp2, False), messaging.Response(self.ctxt, 'cell2', raw_resp3, False)] expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2} self.mox.StubOutWithMock(self.msg_runner, 'compute_node_stats') self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.compute_node_stats(self.ctxt) self.assertEqual(expected_resp, response) def test_compute_node_get(self): fake_cell = 'fake-cell' fake_compute = objects.ComputeNode(**FAKE_COMPUTE_NODES[0]) fake_compute._cached_service = None fake_response = 
messaging.Response(self.ctxt, fake_cell, fake_compute, False) expected_response = cells_utils.ComputeNodeProxy(fake_compute, fake_cell) cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id') self.mox.StubOutWithMock(self.msg_runner, 'compute_node_get') self.mox.StubOutWithMock(cells_utils, 'add_cell_to_compute_node') self.msg_runner.compute_node_get(self.ctxt, 'fake-cell', 'fake-id').AndReturn(fake_response) cells_utils.add_cell_to_compute_node( fake_compute, fake_cell).AndReturn(expected_response) self.mox.ReplayAll() response = self.cells_manager.compute_node_get(self.ctxt, compute_id=cell_and_id) self.assertEqual(expected_response, response) def test_actions_get(self): fake_uuid = fake_server_actions.FAKE_UUID fake_req_id = fake_server_actions.FAKE_REQUEST_ID1 fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id] fake_response = messaging.Response(self.ctxt, 'fake-cell', [fake_act], False) expected_response = [fake_act] self.mox.StubOutWithMock(self.msg_runner, 'actions_get') self.msg_runner.actions_get(self.ctxt, 'fake-cell', 'fake-uuid').AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.actions_get(self.ctxt, 'fake-cell', 'fake-uuid') self.assertEqual(expected_response, response) def test_action_get_by_request_id(self): fake_uuid = fake_server_actions.FAKE_UUID fake_req_id = fake_server_actions.FAKE_REQUEST_ID1 fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id] fake_response = messaging.Response(self.ctxt, 'fake-cell', fake_act, False) expected_response = fake_act self.mox.StubOutWithMock(self.msg_runner, 'action_get_by_request_id') self.msg_runner.action_get_by_request_id(self.ctxt, 'fake-cell', 'fake-uuid', 'req-fake').AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.action_get_by_request_id(self.ctxt, 'fake-cell', 'fake-uuid', 'req-fake') self.assertEqual(expected_response, response) def test_action_events_get(self): fake_action_id = 
fake_server_actions.FAKE_ACTION_ID1 fake_events = fake_server_actions.FAKE_EVENTS[fake_action_id] fake_response = messaging.Response(self.ctxt, 'fake-cell', fake_events, False) expected_response = fake_events self.mox.StubOutWithMock(self.msg_runner, 'action_events_get') self.msg_runner.action_events_get(self.ctxt, 'fake-cell', 'fake-action').AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.action_events_get(self.ctxt, 'fake-cell', 'fake-action') self.assertEqual(expected_response, response) def test_consoleauth_delete_tokens(self): instance_uuid = 'fake-instance-uuid' self.mox.StubOutWithMock(self.msg_runner, 'consoleauth_delete_tokens') self.msg_runner.consoleauth_delete_tokens(self.ctxt, instance_uuid) self.mox.ReplayAll() self.cells_manager.consoleauth_delete_tokens(self.ctxt, instance_uuid=instance_uuid) def test_get_capacities(self): cell_name = 'cell_name' response = {"ram_free": {"units_by_mb": {"64": 20, "128": 10}, "total_mb": 1491}} self.mox.StubOutWithMock(self.state_manager, 'get_capacities') self.state_manager.get_capacities(cell_name).AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, self.cells_manager.get_capacities(self.ctxt, cell_name)) def test_validate_console_port(self): instance_uuid = 'fake-instance-uuid' cell_name = 'fake-cell-name' instance = objects.Instance(cell_name=cell_name) console_port = 'fake-console-port' console_type = 'fake-console-type' self.mox.StubOutWithMock(self.msg_runner, 'validate_console_port') self.mox.StubOutWithMock(objects.Instance, 'get_by_uuid') fake_response = self._get_fake_response() objects.Instance.get_by_uuid(self.ctxt, instance_uuid).AndReturn(instance) self.msg_runner.validate_console_port(self.ctxt, cell_name, instance_uuid, console_port, console_type).AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.validate_console_port(self.ctxt, instance_uuid=instance_uuid, console_port=console_port, console_type=console_type) 
self.assertEqual('fake-response', response) def test_bdm_update_or_create_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'bdm_update_or_create_at_top') self.msg_runner.bdm_update_or_create_at_top(self.ctxt, 'fake-bdm', create='foo') self.mox.ReplayAll() self.cells_manager.bdm_update_or_create_at_top(self.ctxt, 'fake-bdm', create='foo') def test_bdm_destroy_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'bdm_destroy_at_top') self.msg_runner.bdm_destroy_at_top(self.ctxt, 'fake_instance_uuid', device_name='fake_device_name', volume_id='fake_volume_id') self.mox.ReplayAll() self.cells_manager.bdm_destroy_at_top(self.ctxt, 'fake_instance_uuid', device_name='fake_device_name', volume_id='fake_volume_id') def test_get_migrations(self): filters = {'status': 'confirmed'} cell1_migrations = [{'id': 123}] cell2_migrations = [{'id': 456}] fake_responses = [self._get_fake_response(cell1_migrations), self._get_fake_response(cell2_migrations)] self.mox.StubOutWithMock(self.msg_runner, 'get_migrations') self.msg_runner.get_migrations(self.ctxt, None, False, filters).\ AndReturn(fake_responses) self.mox.ReplayAll() response = self.cells_manager.get_migrations(self.ctxt, filters) self.assertEqual([cell1_migrations[0], cell2_migrations[0]], response) def test_get_migrations_for_a_given_cell(self): filters = {'status': 'confirmed', 'cell_name': 'ChildCell1'} target_cell = '%s%s%s' % (CONF.cells.name, '!', filters['cell_name']) migrations = [{'id': 123}] fake_responses = [self._get_fake_response(migrations)] self.mox.StubOutWithMock(self.msg_runner, 'get_migrations') self.msg_runner.get_migrations(self.ctxt, target_cell, False, filters).AndReturn(fake_responses) self.mox.ReplayAll() response = self.cells_manager.get_migrations(self.ctxt, filters) self.assertEqual(migrations, response) def test_instance_update_from_api(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_update_from_api') self.msg_runner.instance_update_from_api(self.ctxt, 'fake-instance', 
'exp_vm', 'exp_task', 'admin_reset') self.mox.ReplayAll() self.cells_manager.instance_update_from_api( self.ctxt, instance='fake-instance', expected_vm_state='exp_vm', expected_task_state='exp_task', admin_state_reset='admin_reset') def test_start_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'start_instance') self.msg_runner.start_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.start_instance(self.ctxt, instance='fake-instance') def test_stop_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'stop_instance') self.msg_runner.stop_instance(self.ctxt, 'fake-instance', do_cast='meow', clean_shutdown='purr') self.mox.ReplayAll() self.cells_manager.stop_instance(self.ctxt, instance='fake-instance', do_cast='meow', clean_shutdown='purr') def test_cell_create(self): values = 'values' response = 'created_cell' self.mox.StubOutWithMock(self.state_manager, 'cell_create') self.state_manager.cell_create(self.ctxt, values).\ AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, self.cells_manager.cell_create(self.ctxt, values)) def test_cell_update(self): cell_name = 'cell_name' values = 'values' response = 'updated_cell' self.mox.StubOutWithMock(self.state_manager, 'cell_update') self.state_manager.cell_update(self.ctxt, cell_name, values).\ AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, self.cells_manager.cell_update(self.ctxt, cell_name, values)) def test_cell_delete(self): cell_name = 'cell_name' response = 1 self.mox.StubOutWithMock(self.state_manager, 'cell_delete') self.state_manager.cell_delete(self.ctxt, cell_name).\ AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, self.cells_manager.cell_delete(self.ctxt, cell_name)) def test_cell_get(self): cell_name = 'cell_name' response = 'cell_info' self.mox.StubOutWithMock(self.state_manager, 'cell_get') self.state_manager.cell_get(self.ctxt, cell_name).\ AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, 
self.cells_manager.cell_get(self.ctxt, cell_name)) def test_reboot_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'reboot_instance') self.msg_runner.reboot_instance(self.ctxt, 'fake-instance', 'HARD') self.mox.ReplayAll() self.cells_manager.reboot_instance(self.ctxt, instance='fake-instance', reboot_type='HARD') def test_suspend_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'suspend_instance') self.msg_runner.suspend_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.suspend_instance(self.ctxt, instance='fake-instance') def test_resume_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'resume_instance') self.msg_runner.resume_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.resume_instance(self.ctxt, instance='fake-instance') def test_terminate_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'terminate_instance') self.msg_runner.terminate_instance(self.ctxt, 'fake-instance', delete_type='delete') self.mox.ReplayAll() self.cells_manager.terminate_instance(self.ctxt, instance='fake-instance', delete_type='delete') def test_soft_delete_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'soft_delete_instance') self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.soft_delete_instance(self.ctxt, instance='fake-instance') def _test_resize_instance(self, clean_shutdown=True): self.mox.StubOutWithMock(self.msg_runner, 'resize_instance') self.msg_runner.resize_instance(self.ctxt, 'fake-instance', 'fake-flavor', 'fake-updates', clean_shutdown=clean_shutdown) self.mox.ReplayAll() self.cells_manager.resize_instance( self.ctxt, instance='fake-instance', flavor='fake-flavor', extra_instance_updates='fake-updates', clean_shutdown=clean_shutdown) def test_resize_instance(self): self._test_resize_instance() def test_resize_instance_forced_shutdown(self): self._test_resize_instance(clean_shutdown=False) def 
test_live_migrate_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'live_migrate_instance') self.msg_runner.live_migrate_instance(self.ctxt, 'fake-instance', 'fake-block', 'fake-commit', 'fake-host') self.mox.ReplayAll() self.cells_manager.live_migrate_instance( self.ctxt, instance='fake-instance', block_migration='fake-block', disk_over_commit='fake-commit', host_name='fake-host') def test_revert_resize(self): self.mox.StubOutWithMock(self.msg_runner, 'revert_resize') self.msg_runner.revert_resize(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.revert_resize(self.ctxt, instance='fake-instance') def test_confirm_resize(self): self.mox.StubOutWithMock(self.msg_runner, 'confirm_resize') self.msg_runner.confirm_resize(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.confirm_resize(self.ctxt, instance='fake-instance') def test_reset_network(self): self.mox.StubOutWithMock(self.msg_runner, 'reset_network') self.msg_runner.reset_network(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.reset_network(self.ctxt, instance='fake-instance') def test_inject_network_info(self): self.mox.StubOutWithMock(self.msg_runner, 'inject_network_info') self.msg_runner.inject_network_info(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.inject_network_info(self.ctxt, instance='fake-instance') def test_snapshot_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'snapshot_instance') self.msg_runner.snapshot_instance(self.ctxt, 'fake-instance', 'fake-id') self.mox.ReplayAll() self.cells_manager.snapshot_instance(self.ctxt, instance='fake-instance', image_id='fake-id') def test_backup_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'backup_instance') self.msg_runner.backup_instance(self.ctxt, 'fake-instance', 'fake-id', 'backup-type', 'rotation') self.mox.ReplayAll() self.cells_manager.backup_instance(self.ctxt, instance='fake-instance', image_id='fake-id', backup_type='backup-type', 
rotation='rotation') def test_set_admin_password(self): with mock.patch.object(self.msg_runner, 'set_admin_password') as set_admin_password: self.cells_manager.set_admin_password(self.ctxt, instance='fake-instance', new_pass='fake-password') set_admin_password.assert_called_once_with(self.ctxt, 'fake-instance', 'fake-password') def test_get_keypair_at_top(self): keypairs = [self._get_fake_response('fake_keypair'), self._get_fake_response('fake_keypair2')] with mock.patch.object(self.msg_runner, 'get_keypair_at_top', return_value=keypairs) as fake_get_keypair: response = self.cells_manager.get_keypair_at_top(self.ctxt, 'fake_user_id', 'fake_name') fake_get_keypair.assert_called_once_with(self.ctxt, 'fake_user_id', 'fake_name') self.assertEqual('fake_keypair', response) def test_get_keypair_at_top_with_empty_responses(self): with mock.patch.object(self.msg_runner, 'get_keypair_at_top', return_value=[]) as fake_get_keypair: self.assertIsNone( self.cells_manager.get_keypair_at_top(self.ctxt, 'fake_user_id', 'fake_name')) fake_get_keypair.assert_called_once_with(self.ctxt, 'fake_user_id', 'fake_name') nova-13.0.0/nova/tests/unit/cells/test_cells_rpc_driver.py0000664000567000056710000002140312701407773025027 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For Cells RPC Communication Driver """ import mock from mox3 import mox import oslo_messaging from nova.cells import messaging from nova.cells import rpc_driver import nova.conf from nova import context from nova import rpc from nova import test from nova.tests.unit.cells import fakes CONF = nova.conf.CONF class CellsRPCDriverTestCase(test.NoDBTestCase): """Test case for Cells communication via RPC.""" def setUp(self): super(CellsRPCDriverTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self.driver = rpc_driver.CellsRPCDriver() def test_start_servers(self): self.flags(rpc_driver_queue_base='cells.intercell42', group='cells') fake_msg_runner = fakes.get_message_runner('api-cell') class FakeInterCellRPCDispatcher(object): def __init__(_self, msg_runner): self.assertEqual(fake_msg_runner, msg_runner) self.stubs.Set(rpc_driver, 'InterCellRPCDispatcher', FakeInterCellRPCDispatcher) self.mox.StubOutWithMock(rpc, 'get_server') for message_type in messaging.MessageRunner.get_message_types(): topic = 'cells.intercell42.' 
+ message_type target = oslo_messaging.Target(topic=topic, server=CONF.host) endpoints = [mox.IsA(FakeInterCellRPCDispatcher)] rpcserver = self.mox.CreateMockAnything() rpc.get_server(target, endpoints=endpoints).AndReturn(rpcserver) rpcserver.start() self.mox.ReplayAll() self.driver.start_servers(fake_msg_runner) def test_stop_servers(self): call_info = {'stopped': []} class FakeRPCServer(object): def stop(self): call_info['stopped'].append(self) fake_servers = [FakeRPCServer() for x in range(5)] self.driver.rpc_servers = fake_servers self.driver.stop_servers() self.assertEqual(fake_servers, call_info['stopped']) def test_create_transport_once(self): # should only construct each Transport once rpcapi = self.driver.intercell_rpcapi transport_url = 'amqp://fakeurl' next_hop = fakes.FakeCellState('cellname') next_hop.db_info['transport_url'] = transport_url # first call to _get_transport creates a oslo.messaging.Transport obj with mock.patch.object(oslo_messaging, 'get_transport') as get_trans: transport = rpcapi._get_transport(next_hop) get_trans.assert_called_once_with(rpc_driver.CONF, transport_url, rpc.TRANSPORT_ALIASES) self.assertIn(transport_url, rpcapi.transports) self.assertEqual(transport, rpcapi.transports[transport_url]) # subsequent calls should return the pre-created Transport obj transport2 = rpcapi._get_transport(next_hop) self.assertEqual(transport, transport2) def test_send_message_to_cell_cast(self): msg_runner = fakes.get_message_runner('api-cell') cell_state = fakes.get_cell_state('api-cell', 'child-cell2') message = messaging._TargetedMessage(msg_runner, self.ctxt, 'fake', {}, 'down', cell_state, fanout=False) expected_server_params = {'hostname': 'rpc_host2', 'password': 'password2', 'port': 3092, 'username': 'username2', 'virtual_host': 'rpc_vhost2'} expected_url = ('rabbit://%(username)s:%(password)s@' '%(hostname)s:%(port)d/%(virtual_host)s' % expected_server_params) def check_transport_url(cell_state): return 
cell_state.db_info['transport_url'] == expected_url rpcapi = self.driver.intercell_rpcapi rpcclient = self.mox.CreateMockAnything() self.mox.StubOutWithMock(rpcapi, '_get_client') rpcapi._get_client( mox.Func(check_transport_url), 'cells.intercell.targeted').AndReturn(rpcclient) rpcclient.cast(mox.IgnoreArg(), 'process_message', message=message.to_json()) self.mox.ReplayAll() self.driver.send_message_to_cell(cell_state, message) def test_send_message_to_cell_fanout_cast(self): msg_runner = fakes.get_message_runner('api-cell') cell_state = fakes.get_cell_state('api-cell', 'child-cell2') message = messaging._TargetedMessage(msg_runner, self.ctxt, 'fake', {}, 'down', cell_state, fanout=True) expected_server_params = {'hostname': 'rpc_host2', 'password': 'password2', 'port': 3092, 'username': 'username2', 'virtual_host': 'rpc_vhost2'} expected_url = ('rabbit://%(username)s:%(password)s@' '%(hostname)s:%(port)d/%(virtual_host)s' % expected_server_params) def check_transport_url(cell_state): return cell_state.db_info['transport_url'] == expected_url rpcapi = self.driver.intercell_rpcapi rpcclient = self.mox.CreateMockAnything() self.mox.StubOutWithMock(rpcapi, '_get_client') rpcapi._get_client( mox.Func(check_transport_url), 'cells.intercell.targeted').AndReturn(rpcclient) rpcclient.prepare(fanout=True).AndReturn(rpcclient) rpcclient.cast(mox.IgnoreArg(), 'process_message', message=message.to_json()) self.mox.ReplayAll() self.driver.send_message_to_cell(cell_state, message) def test_rpc_topic_uses_message_type(self): self.flags(rpc_driver_queue_base='cells.intercell42', group='cells') msg_runner = fakes.get_message_runner('api-cell') cell_state = fakes.get_cell_state('api-cell', 'child-cell2') message = messaging._BroadcastMessage(msg_runner, self.ctxt, 'fake', {}, 'down', fanout=True) message.message_type = 'fake-message-type' expected_server_params = {'hostname': 'rpc_host2', 'password': 'password2', 'port': 3092, 'username': 'username2', 'virtual_host': 'rpc_vhost2'} 
expected_url = ('rabbit://%(username)s:%(password)s@' '%(hostname)s:%(port)d/%(virtual_host)s' % expected_server_params) def check_transport_url(cell_state): return cell_state.db_info['transport_url'] == expected_url rpcapi = self.driver.intercell_rpcapi rpcclient = self.mox.CreateMockAnything() self.mox.StubOutWithMock(rpcapi, '_get_client') rpcapi._get_client( mox.Func(check_transport_url), 'cells.intercell42.fake-message-type').AndReturn(rpcclient) rpcclient.prepare(fanout=True).AndReturn(rpcclient) rpcclient.cast(mox.IgnoreArg(), 'process_message', message=message.to_json()) self.mox.ReplayAll() self.driver.send_message_to_cell(cell_state, message) def test_process_message(self): msg_runner = fakes.get_message_runner('api-cell') dispatcher = rpc_driver.InterCellRPCDispatcher(msg_runner) message = messaging._BroadcastMessage(msg_runner, self.ctxt, 'fake', {}, 'down', fanout=True) call_info = {} def _fake_message_from_json(json_message): call_info['json_message'] = json_message self.assertEqual(message.to_json(), json_message) return message def _fake_process(): call_info['process_called'] = True self.stubs.Set(msg_runner, 'message_from_json', _fake_message_from_json) self.stubs.Set(message, 'process', _fake_process) dispatcher.process_message(self.ctxt, message.to_json()) self.assertEqual(message.to_json(), call_info['json_message']) self.assertTrue(call_info['process_called']) nova-13.0.0/nova/tests/unit/cells/test_cells_weights.py0000664000567000056710000002145312701407773024347 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for testing the cells weight algorithms. Cells with higher weights should be given priority for new builds. """ import datetime from oslo_utils import fixture as utils_fixture from oslo_utils import timeutils from nova.cells import state from nova.cells import weights from nova import test class FakeCellState(state.CellState): def __init__(self, cell_name): super(FakeCellState, self).__init__(cell_name) self.capacities['ram_free'] = {'total_mb': 0, 'units_by_mb': {}} self.db_info = {} def _update_ram_free(self, *args): ram_free = self.capacities['ram_free'] for ram_size, units in args: ram_free['total_mb'] += units * ram_size ram_free['units_by_mb'][str(ram_size)] = units def _get_fake_cells(): cell1 = FakeCellState('cell1') cell1._update_ram_free((512, 1), (1024, 4), (2048, 3)) cell1.db_info['weight_offset'] = -200.0 cell2 = FakeCellState('cell2') cell2._update_ram_free((512, 2), (1024, 3), (2048, 4)) cell2.db_info['weight_offset'] = -200.1 cell3 = FakeCellState('cell3') cell3._update_ram_free((512, 3), (1024, 2), (2048, 1)) cell3.db_info['weight_offset'] = 400.0 cell4 = FakeCellState('cell4') cell4._update_ram_free((512, 4), (1024, 1), (2048, 2)) cell4.db_info['weight_offset'] = 300.0 return [cell1, cell2, cell3, cell4] class CellsWeightsTestCase(test.NoDBTestCase): """Makes sure the proper weighers are in the directory.""" def test_all_weighers(self): weighers = weights.all_weighers() # Check at least a couple that we expect are there self.assertTrue(len(weighers) >= 2) class_names = [cls.__name__ for cls in weighers] 
self.assertIn('WeightOffsetWeigher', class_names) self.assertIn('RamByInstanceTypeWeigher', class_names) class _WeigherTestClass(test.NoDBTestCase): """Base class for testing individual weigher plugins.""" weigher_cls_name = None def setUp(self): super(_WeigherTestClass, self).setUp() self.weight_handler = weights.CellWeightHandler() weigher_classes = self.weight_handler.get_matching_classes( [self.weigher_cls_name]) self.weighers = [cls() for cls in weigher_classes] def _get_weighed_cells(self, cells, weight_properties): return self.weight_handler.get_weighed_objects(self.weighers, cells, weight_properties) class RAMByInstanceTypeWeigherTestClass(_WeigherTestClass): weigher_cls_name = ('nova.cells.weights.ram_by_instance_type.' 'RamByInstanceTypeWeigher') def test_default_spreading(self): """Test that cells with more ram available return a higher weight.""" cells = _get_fake_cells() # Simulate building a new 512MB instance. instance_type = {'memory_mb': 512} weight_properties = {'request_spec': {'instance_type': instance_type}} weighed_cells = self._get_weighed_cells(cells, weight_properties) self.assertEqual(4, len(weighed_cells)) resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells] expected_cells = [cells[3], cells[2], cells[1], cells[0]] self.assertEqual(expected_cells, resulting_cells) # Simulate building a new 1024MB instance. instance_type = {'memory_mb': 1024} weight_properties = {'request_spec': {'instance_type': instance_type}} weighed_cells = self._get_weighed_cells(cells, weight_properties) self.assertEqual(4, len(weighed_cells)) resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells] expected_cells = [cells[0], cells[1], cells[2], cells[3]] self.assertEqual(expected_cells, resulting_cells) # Simulate building a new 2048MB instance. 
instance_type = {'memory_mb': 2048} weight_properties = {'request_spec': {'instance_type': instance_type}} weighed_cells = self._get_weighed_cells(cells, weight_properties) self.assertEqual(4, len(weighed_cells)) resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells] expected_cells = [cells[1], cells[0], cells[3], cells[2]] self.assertEqual(expected_cells, resulting_cells) def test_negative_multiplier(self): """Test that cells with less ram available return a higher weight.""" self.flags(ram_weight_multiplier=-1.0, group='cells') cells = _get_fake_cells() # Simulate building a new 512MB instance. instance_type = {'memory_mb': 512} weight_properties = {'request_spec': {'instance_type': instance_type}} weighed_cells = self._get_weighed_cells(cells, weight_properties) self.assertEqual(4, len(weighed_cells)) resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells] expected_cells = [cells[0], cells[1], cells[2], cells[3]] self.assertEqual(expected_cells, resulting_cells) # Simulate building a new 1024MB instance. instance_type = {'memory_mb': 1024} weight_properties = {'request_spec': {'instance_type': instance_type}} weighed_cells = self._get_weighed_cells(cells, weight_properties) self.assertEqual(4, len(weighed_cells)) resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells] expected_cells = [cells[3], cells[2], cells[1], cells[0]] self.assertEqual(expected_cells, resulting_cells) # Simulate building a new 2048MB instance. 
instance_type = {'memory_mb': 2048} weight_properties = {'request_spec': {'instance_type': instance_type}} weighed_cells = self._get_weighed_cells(cells, weight_properties) self.assertEqual(4, len(weighed_cells)) resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells] expected_cells = [cells[2], cells[3], cells[0], cells[1]] self.assertEqual(expected_cells, resulting_cells) class WeightOffsetWeigherTestClass(_WeigherTestClass): """Test the RAMWeigher class.""" weigher_cls_name = 'nova.cells.weights.weight_offset.WeightOffsetWeigher' def test_weight_offset(self): """Test that cells with higher weight_offsets return higher weights. """ cells = _get_fake_cells() weighed_cells = self._get_weighed_cells(cells, {}) self.assertEqual(4, len(weighed_cells)) expected_cells = [cells[2], cells[3], cells[0], cells[1]] resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells] self.assertEqual(expected_cells, resulting_cells) class MuteWeigherTestClass(_WeigherTestClass): weigher_cls_name = 'nova.cells.weights.mute_child.MuteChildWeigher' def setUp(self): super(MuteWeigherTestClass, self).setUp() self.flags(mute_weight_multiplier=-10.0, mute_child_interval=100, group='cells') self.now = timeutils.utcnow() self.useFixture(utils_fixture.TimeFixture(self.now)) self.cells = _get_fake_cells() for cell in self.cells: cell.last_seen = self.now def test_non_mute(self): weight_properties = {} weighed_cells = self._get_weighed_cells(self.cells, weight_properties) self.assertEqual(4, len(weighed_cells)) for weighed_cell in weighed_cells: self.assertEqual(0, weighed_cell.weight) def test_mutes(self): # make 2 of them mute: self.cells[0].last_seen = (self.cells[0].last_seen - datetime.timedelta(seconds=200)) self.cells[1].last_seen = (self.cells[1].last_seen - datetime.timedelta(seconds=200)) weight_properties = {} weighed_cells = self._get_weighed_cells(self.cells, weight_properties) self.assertEqual(4, len(weighed_cells)) for i in range(2): weighed_cell = 
weighed_cells.pop(0) self.assertEqual(0, weighed_cell.weight) self.assertIn(weighed_cell.obj.name, ['cell3', 'cell4']) for i in range(2): weighed_cell = weighed_cells.pop(0) self.assertEqual(-10.0, weighed_cell.weight) self.assertIn(weighed_cell.obj.name, ['cell1', 'cell2']) nova-13.0.0/nova/tests/unit/cells/test_cells_messaging.py0000664000567000056710000026652012701410011024633 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For Cells Messaging module """ import uuid import mock from mox3 import mox import oslo_messaging from oslo_serialization import jsonutils from oslo_utils import timeutils from oslo_utils import uuidutils from nova.cells import messaging from nova.cells import rpcapi as cells_rpcapi from nova.cells import utils as cells_utils from nova.compute import task_states from nova.compute import vm_states import nova.conf from nova import context from nova import db from nova import exception from nova import objects from nova.objects import base as objects_base from nova.objects import fields as objects_fields from nova import rpc from nova import test from nova.tests.unit.cells import fakes from nova.tests.unit import fake_instance from nova.tests.unit import fake_server_actions CONF = nova.conf.CONF class CellsMessageClassesTestCase(test.NoDBTestCase): """Test case for the main Cells Message classes.""" def setUp(self): super(CellsMessageClassesTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self.our_name = 'api-cell' self.msg_runner = fakes.get_message_runner(self.our_name) self.state_manager = self.msg_runner.state_manager def test_reverse_path(self): path = 'a!b!c!d' expected = 'd!c!b!a' rev_path = messaging._reverse_path(path) self.assertEqual(expected, rev_path) def test_response_cell_name_from_path(self): # test array with tuples of inputs/expected outputs test_paths = [('cell1', 'cell1'), ('cell1!cell2', 'cell2!cell1'), ('cell1!cell2!cell3', 'cell3!cell2!cell1')] for test_input, expected_output in test_paths: self.assertEqual(expected_output, messaging._response_cell_name_from_path(test_input)) def test_response_cell_name_from_path_neighbor_only(self): # test array with tuples of inputs/expected outputs test_paths = [('cell1', 'cell1'), ('cell1!cell2', 'cell2!cell1'), ('cell1!cell2!cell3', 'cell3!cell2')] for test_input, expected_output in test_paths: self.assertEqual(expected_output, 
messaging._response_cell_name_from_path(test_input, neighbor_only=True)) def test_response_to_json_and_from_json(self): fake_uuid = str(uuid.uuid4()) response = messaging.Response(self.ctxt, 'child-cell!api-cell', objects.Instance(id=1, uuid=fake_uuid), False) json_response = response.to_json() deserialized_response = messaging.Response.from_json(self.ctxt, json_response) obj = deserialized_response.value self.assertIsInstance(obj, objects.Instance) self.assertEqual(1, obj.id) self.assertEqual(fake_uuid, obj.uuid) def test_targeted_message(self): self.flags(max_hop_count=99, group='cells') target_cell = 'api-cell!child-cell2!grandchild-cell1' method = 'fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) self.assertEqual(self.ctxt, tgt_message.ctxt) self.assertEqual(method, tgt_message.method_name) self.assertEqual(method_kwargs, tgt_message.method_kwargs) self.assertEqual(direction, tgt_message.direction) self.assertEqual(target_cell, target_cell) self.assertFalse(tgt_message.fanout) self.assertFalse(tgt_message.need_response) self.assertEqual(self.our_name, tgt_message.routing_path) self.assertEqual(1, tgt_message.hop_count) self.assertEqual(99, tgt_message.max_hop_count) self.assertFalse(tgt_message.is_broadcast) # Correct next hop? 
next_hop = tgt_message._get_next_hop() child_cell = self.state_manager.get_child_cell('child-cell2') self.assertEqual(child_cell, next_hop) def test_create_targeted_message_with_response(self): self.flags(max_hop_count=99, group='cells') our_name = 'child-cell1' target_cell = 'child-cell1!api-cell' msg_runner = fakes.get_message_runner(our_name) method = 'fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'up' tgt_message = messaging._TargetedMessage(msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=True) self.assertEqual(self.ctxt, tgt_message.ctxt) self.assertEqual(method, tgt_message.method_name) self.assertEqual(method_kwargs, tgt_message.method_kwargs) self.assertEqual(direction, tgt_message.direction) self.assertEqual(target_cell, target_cell) self.assertFalse(tgt_message.fanout) self.assertTrue(tgt_message.need_response) self.assertEqual(our_name, tgt_message.routing_path) self.assertEqual(1, tgt_message.hop_count) self.assertEqual(99, tgt_message.max_hop_count) self.assertFalse(tgt_message.is_broadcast) # Correct next hop? next_hop = tgt_message._get_next_hop() parent_cell = msg_runner.state_manager.get_parent_cell('api-cell') self.assertEqual(parent_cell, next_hop) def test_targeted_message_when_target_is_cell_state(self): method = 'fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' target_cell = self.state_manager.get_child_cell('child-cell2') tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) self.assertEqual('api-cell!child-cell2', tgt_message.target_cell) # Correct next hop? 
next_hop = tgt_message._get_next_hop() self.assertEqual(target_cell, next_hop) def test_targeted_message_when_target_cell_state_is_me(self): method = 'fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' target_cell = self.state_manager.get_my_state() tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) self.assertEqual('api-cell', tgt_message.target_cell) # Correct next hop? next_hop = tgt_message._get_next_hop() self.assertEqual(target_cell, next_hop) def test_create_broadcast_message(self): self.flags(max_hop_count=99, group='cells') self.flags(name='api-cell', max_hop_count=99, group='cells') method = 'fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction) self.assertEqual(self.ctxt, bcast_message.ctxt) self.assertEqual(method, bcast_message.method_name) self.assertEqual(method_kwargs, bcast_message.method_kwargs) self.assertEqual(direction, bcast_message.direction) self.assertFalse(bcast_message.fanout) self.assertFalse(bcast_message.need_response) self.assertEqual(self.our_name, bcast_message.routing_path) self.assertEqual(1, bcast_message.hop_count) self.assertEqual(99, bcast_message.max_hop_count) self.assertTrue(bcast_message.is_broadcast) # Correct next hops? 
next_hops = bcast_message._get_next_hops() child_cells = self.state_manager.get_child_cells() self.assertEqual(child_cells, next_hops) def test_create_broadcast_message_with_response(self): self.flags(max_hop_count=99, group='cells') our_name = 'child-cell1' msg_runner = fakes.get_message_runner(our_name) method = 'fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'up' bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt, method, method_kwargs, direction, need_response=True) self.assertEqual(self.ctxt, bcast_message.ctxt) self.assertEqual(method, bcast_message.method_name) self.assertEqual(method_kwargs, bcast_message.method_kwargs) self.assertEqual(direction, bcast_message.direction) self.assertFalse(bcast_message.fanout) self.assertTrue(bcast_message.need_response) self.assertEqual(our_name, bcast_message.routing_path) self.assertEqual(1, bcast_message.hop_count) self.assertEqual(99, bcast_message.max_hop_count) self.assertTrue(bcast_message.is_broadcast) # Correct next hops? 
next_hops = bcast_message._get_next_hops() parent_cells = msg_runner.state_manager.get_parent_cells() self.assertEqual(parent_cells, next_hops) def test_self_targeted_message(self): target_cell = 'api-cell' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' call_info = {} def our_fake_method(message, **kwargs): call_info['context'] = message.ctxt call_info['routing_path'] = message.routing_path call_info['kwargs'] = kwargs fakes.stub_tgt_method(self, 'api-cell', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) tgt_message.process() self.assertEqual(self.ctxt, call_info['context']) self.assertEqual(method_kwargs, call_info['kwargs']) self.assertEqual(target_cell, call_info['routing_path']) def test_child_targeted_message(self): target_cell = 'api-cell!child-cell1' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' call_info = {} def our_fake_method(message, **kwargs): call_info['context'] = message.ctxt call_info['routing_path'] = message.routing_path call_info['kwargs'] = kwargs fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) tgt_message.process() self.assertEqual(self.ctxt, call_info['context']) self.assertEqual(method_kwargs, call_info['kwargs']) self.assertEqual(target_cell, call_info['routing_path']) def test_child_targeted_message_with_object(self): target_cell = 'api-cell!child-cell1' method = 'our_fake_method' direction = 'down' call_info = {} class CellsMsgingTestObject(objects_base.NovaObject): """Test object. We just need 1 field in order to test that this gets serialized properly. 
""" fields = {'test': objects_fields.StringField()} objects_base.NovaObjectRegistry.register(CellsMsgingTestObject) test_obj = CellsMsgingTestObject() test_obj.test = 'meow' method_kwargs = dict(obj=test_obj, arg1=1, arg2=2) def our_fake_method(message, **kwargs): call_info['context'] = message.ctxt call_info['routing_path'] = message.routing_path call_info['kwargs'] = kwargs fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) tgt_message.process() self.assertEqual(self.ctxt, call_info['context']) self.assertEqual(target_cell, call_info['routing_path']) self.assertEqual(3, len(call_info['kwargs'])) self.assertEqual(1, call_info['kwargs']['arg1']) self.assertEqual(2, call_info['kwargs']['arg2']) # Verify we get a new object with what we expect. obj = call_info['kwargs']['obj'] self.assertIsInstance(obj, CellsMsgingTestObject) self.assertNotEqual(id(test_obj), id(obj)) self.assertEqual(test_obj.test, obj.test) def test_grandchild_targeted_message(self): target_cell = 'api-cell!child-cell2!grandchild-cell1' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' call_info = {} def our_fake_method(message, **kwargs): call_info['context'] = message.ctxt call_info['routing_path'] = message.routing_path call_info['kwargs'] = kwargs fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) tgt_message.process() self.assertEqual(self.ctxt, call_info['context']) self.assertEqual(method_kwargs, call_info['kwargs']) self.assertEqual(target_cell, call_info['routing_path']) def test_grandchild_targeted_message_with_response(self): target_cell = 'api-cell!child-cell2!grandchild-cell1' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' 
call_info = {} def our_fake_method(message, **kwargs): call_info['context'] = message.ctxt call_info['routing_path'] = message.routing_path call_info['kwargs'] = kwargs return 'our_fake_response' fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=True) response = tgt_message.process() self.assertEqual(self.ctxt, call_info['context']) self.assertEqual(method_kwargs, call_info['kwargs']) self.assertEqual(target_cell, call_info['routing_path']) self.assertFalse(response.failure) self.assertEqual('our_fake_response', response.value_or_raise()) def test_grandchild_targeted_message_with_error(self): target_cell = 'api-cell!child-cell2!grandchild-cell1' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' def our_fake_method(message, **kwargs): raise test.TestingException('this should be returned') fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=True) response = tgt_message.process() self.assertTrue(response.failure) self.assertRaises(test.TestingException, response.value_or_raise) def test_grandchild_targeted_message_max_hops(self): self.flags(max_hop_count=2, group='cells') target_cell = 'api-cell!child-cell2!grandchild-cell1' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' def our_fake_method(message, **kwargs): raise test.TestingException('should not be reached') fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=True) response = tgt_message.process() self.assertTrue(response.failure) 
self.assertRaises(exception.CellMaxHopCountReached, response.value_or_raise) def test_targeted_message_invalid_cell(self): target_cell = 'api-cell!child-cell2!grandchild-cell4' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=True) response = tgt_message.process() self.assertTrue(response.failure) self.assertRaises(exception.CellRoutingInconsistency, response.value_or_raise) def test_targeted_message_invalid_cell2(self): target_cell = 'unknown-cell!child-cell2' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=True) response = tgt_message.process() self.assertTrue(response.failure) self.assertRaises(exception.CellRoutingInconsistency, response.value_or_raise) def test_targeted_message_target_cell_none(self): target_cell = None method = 'our_fake_method' method_kwargs = dict(arg=1, arg2=2) direction = 'down' self.assertRaises(exception.CellRoutingInconsistency, messaging._TargetedMessage, self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=False) def test_broadcast_routing(self): method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' cells = set() def our_fake_method(message, **kwargs): cells.add(message.routing_path) fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=True) bcast_message.process() # fakes creates 8 cells (including ourself). 
self.assertEqual(8, len(cells)) def test_broadcast_routing_up(self): method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'up' msg_runner = fakes.get_message_runner('grandchild-cell3') cells = set() def our_fake_method(message, **kwargs): cells.add(message.routing_path) fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=True) bcast_message.process() # Paths are reversed, since going 'up' expected = set(['grandchild-cell3', 'grandchild-cell3!child-cell3', 'grandchild-cell3!child-cell3!api-cell']) self.assertEqual(expected, cells) def test_broadcast_routing_without_ourselves(self): method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' cells = set() def our_fake_method(message, **kwargs): cells.add(message.routing_path) fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=False) bcast_message.process() # fakes creates 8 cells (including ourself). So we should see # only 7 here. 
self.assertEqual(7, len(cells)) def test_broadcast_routing_with_response(self): method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' def our_fake_method(message, **kwargs): return 'response-%s' % message.routing_path fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=True, need_response=True) responses = bcast_message.process() self.assertEqual(8, len(responses)) for response in responses: self.assertFalse(response.failure) self.assertEqual('response-%s' % response.cell_name, response.value_or_raise()) def test_broadcast_routing_with_response_max_hops(self): self.flags(max_hop_count=2, group='cells') method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' def our_fake_method(message, **kwargs): return 'response-%s' % message.routing_path fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=True, need_response=True) responses = bcast_message.process() # Should only get responses from our immediate children (and # ourselves) self.assertEqual(5, len(responses)) for response in responses: self.assertFalse(response.failure) self.assertEqual('response-%s' % response.cell_name, response.value_or_raise()) def test_broadcast_routing_with_all_erroring(self): method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' def our_fake_method(message, **kwargs): raise test.TestingException('fake failure') fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=True, need_response=True) responses = bcast_message.process() self.assertEqual(8, len(responses)) for response in responses: 
self.assertTrue(response.failure) self.assertRaises(test.TestingException, response.value_or_raise) def test_broadcast_routing_with_two_erroring(self): method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' def our_fake_method_failing(message, **kwargs): raise test.TestingException('fake failure') def our_fake_method(message, **kwargs): return 'response-%s' % message.routing_path fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) fakes.stub_bcast_method(self, 'child-cell2', 'our_fake_method', our_fake_method_failing) fakes.stub_bcast_method(self, 'grandchild-cell3', 'our_fake_method', our_fake_method_failing) bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=True, need_response=True) responses = bcast_message.process() self.assertEqual(8, len(responses)) failure_responses = [resp for resp in responses if resp.failure] success_responses = [resp for resp in responses if not resp.failure] self.assertEqual(2, len(failure_responses)) self.assertEqual(6, len(success_responses)) for response in success_responses: self.assertFalse(response.failure) self.assertEqual('response-%s' % response.cell_name, response.value_or_raise()) for response in failure_responses: self.assertIn(response.cell_name, ['api-cell!child-cell2', 'api-cell!child-cell3!grandchild-cell3']) self.assertTrue(response.failure) self.assertRaises(test.TestingException, response.value_or_raise) class CellsTargetedMethodsWithDatabaseTestCase(test.TestCase): """These tests access the database unlike the others.""" def setUp(self): super(CellsTargetedMethodsWithDatabaseTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self._setup_attrs('api-cell', 'api-cell!child-cell2') def _setup_attrs(self, source_cell, target_cell): self.tgt_cell_name = target_cell self.src_msg_runner = fakes.get_message_runner(source_cell) def test_service_delete(self): fake_service = 
dict(id=42, host='fake_host', binary='nova-compute', topic='compute') ctxt = self.ctxt.elevated() db.service_create(ctxt, fake_service) self.src_msg_runner.service_delete( ctxt, self.tgt_cell_name, fake_service['id']) self.assertRaises(exception.ServiceNotFound, db.service_get, ctxt, fake_service['id']) class CellsTargetedMethodsTestCase(test.NoDBTestCase): """Test case for _TargetedMessageMethods class. Most of these tests actually test the full path from the MessageRunner through to the functionality of the message method. Hits 2 birds with 1 stone, even though it's a little more than a unit test. """ def setUp(self): super(CellsTargetedMethodsTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self._setup_attrs('api-cell', 'api-cell!child-cell2') def _setup_attrs(self, source_cell, target_cell): self.tgt_cell_name = target_cell self.src_msg_runner = fakes.get_message_runner(source_cell) self.src_state_manager = self.src_msg_runner.state_manager tgt_shortname = target_cell.split('!')[-1] self.tgt_cell_mgr = fakes.get_cells_manager(tgt_shortname) self.tgt_msg_runner = self.tgt_cell_mgr.msg_runner self.tgt_scheduler = self.tgt_msg_runner.scheduler self.tgt_state_manager = self.tgt_msg_runner.state_manager methods_cls = self.tgt_msg_runner.methods_by_type['targeted'] self.tgt_methods_cls = methods_cls self.tgt_compute_api = methods_cls.compute_api self.tgt_host_api = methods_cls.host_api self.tgt_db_inst = methods_cls.db self.tgt_c_rpcapi = methods_cls.compute_rpcapi def test_build_instances(self): build_inst_kwargs = {'filter_properties': {}, 'key1': 'value1', 'key2': 'value2'} self.mox.StubOutWithMock(self.tgt_scheduler, 'build_instances') self.tgt_scheduler.build_instances(self.ctxt, build_inst_kwargs) self.mox.ReplayAll() self.src_msg_runner.build_instances(self.ctxt, self.tgt_cell_name, build_inst_kwargs) def _run_compute_api_method(self, method_name): instance = fake_instance.fake_instance_obj(self.ctxt) method_info = 
{'method': method_name, 'method_args': (instance.uuid, 2, 3), 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}} expected_attrs = ['metadata', 'system_metadata', 'security_groups', 'info_cache'] @mock.patch.object(self.tgt_compute_api, method_name, return_value='fake-result') @mock.patch.object(objects.Instance, 'get_by_uuid', return_value=instance) def run_method(mock_get_by_uuid, mock_method): response = self.src_msg_runner.run_compute_api_method( self.ctxt, self.tgt_cell_name, method_info, True) result = response.value_or_raise() self.assertEqual('fake-result', result) mock_get_by_uuid.assert_called_once_with(self.ctxt, instance.uuid, expected_attrs=expected_attrs) mock_method.assert_called_once_with(self.ctxt, instance, 2, 3, arg1='val1', arg2='val2') run_method() def test_run_compute_api_method_expects_obj(self): # Run compute_api start method self._run_compute_api_method('start') def test_run_compute_api_method_shelve_with_info_cache(self): # Run compute_api shelve method as it requires info_cache and # metadata to be present in instance object self._run_compute_api_method('shelve') def test_run_compute_api_method_unknown_instance(self): # Unknown instance should send a broadcast up that instance # is gone. 
instance = fake_instance.fake_instance_obj(self.ctxt) instance_uuid = instance.uuid method_info = {'method': 'reboot', 'method_args': (instance_uuid, 2, 3), 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}} self.mox.StubOutWithMock(objects.Instance, 'get_by_uuid') self.mox.StubOutWithMock(self.tgt_msg_runner, 'instance_destroy_at_top') objects.Instance.get_by_uuid(self.ctxt, instance.uuid, expected_attrs=['metadata', 'system_metadata', 'security_groups', 'info_cache']).AndRaise( exception.InstanceNotFound(instance_id=instance_uuid)) self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, mox.IsA(objects.Instance)) self.mox.ReplayAll() response = self.src_msg_runner.run_compute_api_method( self.ctxt, self.tgt_cell_name, method_info, True) self.assertRaises(exception.InstanceNotFound, response.value_or_raise) def test_update_capabilities(self): # Route up to API self._setup_attrs('child-cell2', 'child-cell2!api-cell') capabs = {'cap1': set(['val1', 'val2']), 'cap2': set(['val3'])} # The list(set([])) seems silly, but we can't assume the order # of the list... This behavior should match the code we're # testing... which is check that a set was converted to a list. 
expected_capabs = {'cap1': list(set(['val1', 'val2'])), 'cap2': ['val3']} self.mox.StubOutWithMock(self.src_state_manager, 'get_our_capabilities') self.mox.StubOutWithMock(self.tgt_state_manager, 'update_cell_capabilities') self.mox.StubOutWithMock(self.tgt_msg_runner, 'tell_parents_our_capabilities') self.src_state_manager.get_our_capabilities().AndReturn(capabs) self.tgt_state_manager.update_cell_capabilities('child-cell2', expected_capabs) self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt) self.mox.ReplayAll() self.src_msg_runner.tell_parents_our_capabilities(self.ctxt) def test_update_capacities(self): self._setup_attrs('child-cell2', 'child-cell2!api-cell') capacs = 'fake_capacs' self.mox.StubOutWithMock(self.src_state_manager, 'get_our_capacities') self.mox.StubOutWithMock(self.tgt_state_manager, 'update_cell_capacities') self.mox.StubOutWithMock(self.tgt_msg_runner, 'tell_parents_our_capacities') self.src_state_manager.get_our_capacities().AndReturn(capacs) self.tgt_state_manager.update_cell_capacities('child-cell2', capacs) self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt) self.mox.ReplayAll() self.src_msg_runner.tell_parents_our_capacities(self.ctxt) def test_announce_capabilities(self): self._setup_attrs('api-cell', 'api-cell!child-cell1') # To make this easier to test, make us only have 1 child cell. cell_state = self.src_state_manager.child_cells['child-cell1'] self.src_state_manager.child_cells = {'child-cell1': cell_state} self.mox.StubOutWithMock(self.tgt_msg_runner, 'tell_parents_our_capabilities') self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt) self.mox.ReplayAll() self.src_msg_runner.ask_children_for_capabilities(self.ctxt) def test_announce_capacities(self): self._setup_attrs('api-cell', 'api-cell!child-cell1') # To make this easier to test, make us only have 1 child cell. 
cell_state = self.src_state_manager.child_cells['child-cell1'] self.src_state_manager.child_cells = {'child-cell1': cell_state} self.mox.StubOutWithMock(self.tgt_msg_runner, 'tell_parents_our_capacities') self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt) self.mox.ReplayAll() self.src_msg_runner.ask_children_for_capacities(self.ctxt) def test_service_get_by_compute_host(self): fake_host_name = 'fake-host-name' self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') objects.Service.get_by_compute_host(self.ctxt, fake_host_name).AndReturn('fake-service') self.mox.ReplayAll() response = self.src_msg_runner.service_get_by_compute_host( self.ctxt, self.tgt_cell_name, fake_host_name) result = response.value_or_raise() self.assertEqual('fake-service', result) def test_service_update(self): binary = 'nova-compute' params_to_update = {'disabled': True, 'report_count': 13} fake_service = objects.Service(id=42, host='fake_host', binary='nova-compute', topic='compute') fake_service.compute_node = objects.ComputeNode(id=1, host='fake_host') self.mox.StubOutWithMock(objects.Service, 'get_by_args') self.mox.StubOutWithMock(objects.Service, 'save') objects.Service.get_by_args( self.ctxt, 'fake_host', 'nova-compute').AndReturn(fake_service) fake_service.save() self.mox.ReplayAll() response = self.src_msg_runner.service_update( self.ctxt, self.tgt_cell_name, 'fake_host', binary, params_to_update) result = response.value_or_raise() self.assertIsInstance(result, objects.Service) # NOTE(sbauza): As NovaObjects can't be comparated directly, we need to # check the fields by primitiving them first self.assertEqual(jsonutils.to_primitive(fake_service), jsonutils.to_primitive(result)) def test_proxy_rpc_to_manager_call(self): fake_topic = 'fake-topic' fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}} fake_host_name = 'fake-host-name' self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') objects.Service.get_by_compute_host(self.ctxt, fake_host_name) 
target = oslo_messaging.Target(topic='fake-topic') rpcclient = self.mox.CreateMockAnything() self.mox.StubOutWithMock(rpc, 'get_client') rpc.get_client(target).AndReturn(rpcclient) rpcclient.prepare(timeout=5).AndReturn(rpcclient) rpcclient.call(mox.IgnoreArg(), 'fake_rpc_method').AndReturn('fake_result') self.mox.ReplayAll() response = self.src_msg_runner.proxy_rpc_to_manager( self.ctxt, self.tgt_cell_name, fake_host_name, fake_topic, fake_rpc_message, True, timeout=5) result = response.value_or_raise() self.assertEqual('fake_result', result) def test_proxy_rpc_to_manager_cast(self): fake_topic = 'fake-topic' fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}} fake_host_name = 'fake-host-name' self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') objects.Service.get_by_compute_host(self.ctxt, fake_host_name) target = oslo_messaging.Target(topic='fake-topic') rpcclient = self.mox.CreateMockAnything() self.mox.StubOutWithMock(rpc, 'get_client') rpc.get_client(target).AndReturn(rpcclient) rpcclient.cast(mox.IgnoreArg(), 'fake_rpc_method') self.mox.ReplayAll() self.src_msg_runner.proxy_rpc_to_manager( self.ctxt, self.tgt_cell_name, fake_host_name, fake_topic, fake_rpc_message, False, timeout=None) def test_task_log_get_all_targeted(self): task_name = 'fake_task_name' begin = 'fake_begin' end = 'fake_end' host = 'fake_host' state = 'fake_state' self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all') self.tgt_db_inst.task_log_get_all(self.ctxt, task_name, begin, end, host=host, state=state).AndReturn(['fake_result']) self.mox.ReplayAll() response = self.src_msg_runner.task_log_get_all(self.ctxt, self.tgt_cell_name, task_name, begin, end, host=host, state=state) self.assertIsInstance(response, list) self.assertEqual(1, len(response)) result = response[0].value_or_raise() self.assertEqual(['fake_result'], result) def test_compute_node_get(self): compute_id = 'fake-id' self.mox.StubOutWithMock(objects.ComputeNode, 'get_by_id') 
objects.ComputeNode.get_by_id(self.ctxt, compute_id).AndReturn('fake_result') self.mox.ReplayAll() response = self.src_msg_runner.compute_node_get(self.ctxt, self.tgt_cell_name, compute_id) result = response.value_or_raise() self.assertEqual('fake_result', result) def test_actions_get(self): fake_uuid = fake_server_actions.FAKE_UUID fake_req_id = fake_server_actions.FAKE_REQUEST_ID1 fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id] self.mox.StubOutWithMock(self.tgt_db_inst, 'actions_get') self.tgt_db_inst.actions_get(self.ctxt, 'fake-uuid').AndReturn([fake_act]) self.mox.ReplayAll() response = self.src_msg_runner.actions_get(self.ctxt, self.tgt_cell_name, 'fake-uuid') result = response.value_or_raise() self.assertEqual([jsonutils.to_primitive(fake_act)], result) def test_action_get_by_request_id(self): fake_uuid = fake_server_actions.FAKE_UUID fake_req_id = fake_server_actions.FAKE_REQUEST_ID1 fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id] self.mox.StubOutWithMock(self.tgt_db_inst, 'action_get_by_request_id') self.tgt_db_inst.action_get_by_request_id(self.ctxt, 'fake-uuid', 'req-fake').AndReturn(fake_act) self.mox.ReplayAll() response = self.src_msg_runner.action_get_by_request_id(self.ctxt, self.tgt_cell_name, 'fake-uuid', 'req-fake') result = response.value_or_raise() self.assertEqual(jsonutils.to_primitive(fake_act), result) def test_action_events_get(self): fake_action_id = fake_server_actions.FAKE_ACTION_ID1 fake_events = fake_server_actions.FAKE_EVENTS[fake_action_id] self.mox.StubOutWithMock(self.tgt_db_inst, 'action_events_get') self.tgt_db_inst.action_events_get(self.ctxt, 'fake-action').AndReturn(fake_events) self.mox.ReplayAll() response = self.src_msg_runner.action_events_get(self.ctxt, self.tgt_cell_name, 'fake-action') result = response.value_or_raise() self.assertEqual(jsonutils.to_primitive(fake_events), result) def test_validate_console_port(self): instance_uuid = 'fake_instance_uuid' instance = 
objects.Instance(uuid=instance_uuid) console_port = 'fake-port' console_type = 'fake-type' @mock.patch.object(objects.Instance, 'get_by_uuid', return_value=instance) @mock.patch.object(self.tgt_c_rpcapi, 'validate_console_port', return_value='fake_result') def do_test(mock_validate, mock_get): response = self.src_msg_runner.validate_console_port(self.ctxt, self.tgt_cell_name, instance_uuid, console_port, console_type) result = response.value_or_raise() self.assertEqual('fake_result', result) mock_get.assert_called_once_with(self.ctxt, instance_uuid) mock_validate.assert_called_once_with(self.ctxt, instance, console_port, console_type) do_test() def test_get_migrations_for_a_given_cell(self): filters = {'cell_name': 'child-cell2', 'status': 'confirmed'} migrations_in_progress = [{'id': 123}] self.mox.StubOutWithMock(self.tgt_compute_api, 'get_migrations') self.tgt_compute_api.get_migrations(self.ctxt, filters).\ AndReturn(migrations_in_progress) self.mox.ReplayAll() responses = self.src_msg_runner.get_migrations( self.ctxt, self.tgt_cell_name, False, filters) result = responses[0].value_or_raise() self.assertEqual(migrations_in_progress, result) def test_get_migrations_for_an_invalid_cell(self): filters = {'cell_name': 'invalid_Cell', 'status': 'confirmed'} responses = self.src_msg_runner.get_migrations( self.ctxt, 'api_cell!invalid_cell', False, filters) self.assertEqual(0, len(responses)) def test_call_compute_api_with_obj(self): instance = objects.Instance() instance.uuid = uuidutils.generate_uuid() self.mox.StubOutWithMock(instance, 'refresh') # Using 'snapshot' for this test, because it # takes args and kwargs. 
self.mox.StubOutWithMock(self.tgt_compute_api, 'snapshot') instance.refresh() self.tgt_compute_api.snapshot( self.ctxt, instance, 'name', extra_properties='props').AndReturn('foo') self.mox.ReplayAll() result = self.tgt_methods_cls._call_compute_api_with_obj( self.ctxt, instance, 'snapshot', 'name', extra_properties='props') self.assertEqual('foo', result) def test_call_compute_api_with_obj_no_cache(self): instance = objects.Instance() instance.uuid = uuidutils.generate_uuid() error = exception.InstanceInfoCacheNotFound( instance_uuid=instance.uuid) with mock.patch.object(instance, 'refresh', side_effect=error): self.assertRaises(exception.InstanceInfoCacheNotFound, self.tgt_methods_cls._call_compute_api_with_obj, self.ctxt, instance, 'snapshot') def test_call_delete_compute_api_with_obj_no_cache(self): instance = objects.Instance() instance.uuid = uuidutils.generate_uuid() error = exception.InstanceInfoCacheNotFound( instance_uuid=instance.uuid) with test.nested( mock.patch.object(instance, 'refresh', side_effect=error), mock.patch.object(self.tgt_compute_api, 'delete')) as (inst, delete): self.tgt_methods_cls._call_compute_api_with_obj(self.ctxt, instance, 'delete') delete.assert_called_once_with(self.ctxt, instance) def test_call_compute_with_obj_unknown_instance(self): instance = objects.Instance() instance.uuid = uuidutils.generate_uuid() instance.vm_state = vm_states.ACTIVE instance.task_state = None self.mox.StubOutWithMock(instance, 'refresh') self.mox.StubOutWithMock(self.tgt_msg_runner, 'instance_destroy_at_top') instance.refresh().AndRaise( exception.InstanceNotFound(instance_id=instance.uuid)) self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, mox.IsA(objects.Instance)) self.mox.ReplayAll() self.assertRaises(exception.InstanceNotFound, self.tgt_methods_cls._call_compute_api_with_obj, self.ctxt, instance, 'snapshot', 'name') def _instance_update_helper(self, admin_state_reset): class FakeMessage(object): pass message = FakeMessage() message.ctxt = 
self.ctxt instance = objects.Instance() instance.cell_name = self.tgt_cell_name instance.obj_reset_changes() instance.task_state = 'meow' instance.vm_state = 'wuff' instance.user_data = 'foo' instance.metadata = {'meta': 'data'} instance.system_metadata = {'system': 'metadata'} self.assertEqual(set(['user_data', 'vm_state', 'task_state', 'metadata', 'system_metadata']), instance.obj_what_changed()) self.mox.StubOutWithMock(instance, 'save') def _check_object(*args, **kwargs): # task_state and vm_state changes should have been cleared # before calling save() if admin_state_reset: self.assertEqual( set(['user_data', 'vm_state', 'task_state']), instance.obj_what_changed()) else: self.assertEqual(set(['user_data']), instance.obj_what_changed()) instance.save(expected_task_state='exp_task', expected_vm_state='exp_vm').WithSideEffects( _check_object) self.mox.ReplayAll() self.tgt_methods_cls.instance_update_from_api( message, instance, expected_vm_state='exp_vm', expected_task_state='exp_task', admin_state_reset=admin_state_reset) def test_instance_update_from_api(self): self._instance_update_helper(False) def test_instance_update_from_api_admin_state_reset(self): self._instance_update_helper(True) def test_instance_update_from_api_calls_skip_cells_sync(self): self.flags(enable=True, cell_type='compute', group='cells') instance = fake_instance.fake_instance_obj(self.ctxt) instance.cell_name = self.tgt_cell_name instance.task_state = 'meow' instance.vm_state = 'wuff' instance.user_data = 'foo' message = '' @mock.patch.object(instance, 'save', side_effect=test.TestingException) @mock.patch.object(instance, 'skip_cells_sync') def _ensure_skip_cells_sync_called(mock_sync, mock_save): self.assertRaises(test.TestingException, self.tgt_methods_cls.instance_update_from_api, message, instance, expected_vm_state='exp_vm', expected_task_state='exp_task', admin_state_reset=False) mock_sync.assert_has_calls([mock.call()]) _ensure_skip_cells_sync_called() 
self.assertEqual(self.tgt_cell_name, instance.cell_name) @mock.patch.object(db, 'instance_update_and_get_original') def test_instance_update_from_api_skips_cell_sync(self, mock_db_update): self.flags(enable=True, cell_type='compute', group='cells') instance = fake_instance.fake_instance_obj(self.ctxt) instance.cell_name = self.tgt_cell_name instance.task_state = 'meow' instance.vm_state = 'wuff' instance.user_data = 'foo' message = '' inst_ref = dict(objects_base.obj_to_primitive(instance)) mock_db_update.return_value = (inst_ref, inst_ref) with mock.patch.object(cells_rpcapi.CellsAPI, 'instance_update_at_top') as inst_upd_at_top: self.tgt_methods_cls.instance_update_from_api(message, instance, expected_vm_state='exp_vm', expected_task_state='exp_task', admin_state_reset=False) self.assertEqual(0, inst_upd_at_top.call_count) def _test_instance_action_method(self, method, args, kwargs, expected_args, expected_kwargs, expect_result): class FakeMessage(object): pass message = FakeMessage() message.ctxt = self.ctxt message.need_response = expect_result meth_cls = self.tgt_methods_cls self.mox.StubOutWithMock(meth_cls, '_call_compute_api_with_obj') method_corrections = { 'terminate': 'delete', } api_method = method_corrections.get(method, method) meth_cls._call_compute_api_with_obj( self.ctxt, 'fake-instance', api_method, *expected_args, **expected_kwargs).AndReturn('meow') self.mox.ReplayAll() method_translations = {'revert_resize': 'revert_resize', 'confirm_resize': 'confirm_resize', 'reset_network': 'reset_network', 'inject_network_info': 'inject_network_info', 'set_admin_password': 'set_admin_password', } tgt_method = method_translations.get(method, '%s_instance' % method) result = getattr(meth_cls, tgt_method)( message, 'fake-instance', *args, **kwargs) if expect_result: self.assertEqual('meow', result) def test_start_instance(self): self._test_instance_action_method('start', (), {}, (), {}, False) def test_stop_instance_cast(self): 
self._test_instance_action_method('stop', (), {}, (), {'do_cast': True, 'clean_shutdown': True}, False) def test_stop_instance_call(self): self._test_instance_action_method('stop', (), {}, (), {'do_cast': False, 'clean_shutdown': True}, True) def test_reboot_instance(self): kwargs = dict(reboot_type='HARD') self._test_instance_action_method('reboot', (), kwargs, (), kwargs, False) def test_suspend_instance(self): self._test_instance_action_method('suspend', (), {}, (), {}, False) def test_resume_instance(self): self._test_instance_action_method('resume', (), {}, (), {}, False) def test_get_host_uptime(self): host_name = "fake-host" host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:" " 0.20, 0.12, 0.14") self.mox.StubOutWithMock(self.tgt_host_api, 'get_host_uptime') self.tgt_host_api.get_host_uptime(self.ctxt, host_name).\ AndReturn(host_uptime) self.mox.ReplayAll() response = self.src_msg_runner.get_host_uptime(self.ctxt, self.tgt_cell_name, host_name) expected_host_uptime = response.value_or_raise() self.assertEqual(host_uptime, expected_host_uptime) def test_terminate_instance(self): self._test_instance_action_method('terminate', (), {}, (), {}, False) def test_soft_delete_instance(self): self._test_instance_action_method('soft_delete', (), {}, (), {}, False) def test_pause_instance(self): self._test_instance_action_method('pause', (), {}, (), {}, False) def test_unpause_instance(self): self._test_instance_action_method('unpause', (), {}, (), {}, False) def _test_resize_instance(self, clean_shutdown=True): kwargs = dict(flavor=dict(id=42, flavorid='orangemocchafrappuccino'), extra_instance_updates=dict(cow='moo'), clean_shutdown=clean_shutdown) expected_kwargs = dict(flavor_id='orangemocchafrappuccino', cow='moo', clean_shutdown=clean_shutdown) self._test_instance_action_method('resize', (), kwargs, (), expected_kwargs, False) def test_resize_instance(self): self._test_resize_instance() def test_resize_instance_forced_shutdown(self): 
self._test_resize_instance(clean_shutdown=False) def test_live_migrate_instance(self): kwargs = dict(block_migration='fake-block-mig', disk_over_commit='fake-commit', host_name='fake-host') expected_args = ('fake-block-mig', 'fake-commit', 'fake-host') self._test_instance_action_method('live_migrate', (), kwargs, expected_args, {}, False) def test_revert_resize(self): self._test_instance_action_method('revert_resize', (), {}, (), {}, False) def test_confirm_resize(self): self._test_instance_action_method('confirm_resize', (), {}, (), {}, False) def test_reset_network(self): self._test_instance_action_method('reset_network', (), {}, (), {}, False) def test_inject_network_info(self): self._test_instance_action_method('inject_network_info', (), {}, (), {}, False) def test_snapshot_instance(self): inst = objects.Instance() meth_cls = self.tgt_methods_cls self.mox.StubOutWithMock(inst, 'refresh') self.mox.StubOutWithMock(inst, 'save') self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'snapshot_instance') def check_state(expected_task_state=None): self.assertEqual(task_states.IMAGE_SNAPSHOT_PENDING, inst.task_state) inst.refresh() inst.save(expected_task_state=[None]).WithSideEffects(check_state) meth_cls.compute_rpcapi.snapshot_instance(self.ctxt, inst, 'image-id') self.mox.ReplayAll() class FakeMessage(object): pass message = FakeMessage() message.ctxt = self.ctxt message.need_response = False meth_cls.snapshot_instance(message, inst, image_id='image-id') def test_backup_instance(self): inst = objects.Instance() meth_cls = self.tgt_methods_cls self.mox.StubOutWithMock(inst, 'refresh') self.mox.StubOutWithMock(inst, 'save') self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'backup_instance') def check_state(expected_task_state=None): self.assertEqual(task_states.IMAGE_BACKUP, inst.task_state) inst.refresh() inst.save(expected_task_state=[None]).WithSideEffects(check_state) meth_cls.compute_rpcapi.backup_instance(self.ctxt, inst, 'image-id', 'backup-type', 'rotation') 
self.mox.ReplayAll() class FakeMessage(object): pass message = FakeMessage() message.ctxt = self.ctxt message.need_response = False meth_cls.backup_instance(message, inst, image_id='image-id', backup_type='backup-type', rotation='rotation') def test_set_admin_password(self): args = ['fake-password'] self._test_instance_action_method('set_admin_password', args, {}, args, {}, False) class CellsBroadcastMethodsTestCase(test.NoDBTestCase): """Test case for _BroadcastMessageMethods class. Most of these tests actually test the full path from the MessageRunner through to the functionality of the message method. Hits 2 birds with 1 stone, even though it's a little more than a unit test. """ def setUp(self): super(CellsBroadcastMethodsTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self._setup_attrs() def _setup_attrs(self, up=True): mid_cell = 'child-cell2' if up: src_cell = 'grandchild-cell1' tgt_cell = 'api-cell' else: src_cell = 'api-cell' tgt_cell = 'grandchild-cell1' self.src_msg_runner = fakes.get_message_runner(src_cell) methods_cls = self.src_msg_runner.methods_by_type['broadcast'] self.src_methods_cls = methods_cls self.src_db_inst = methods_cls.db self.src_compute_api = methods_cls.compute_api self.src_ca_rpcapi = methods_cls.consoleauth_rpcapi if not up: # fudge things so we only have 1 child to broadcast to state_manager = self.src_msg_runner.state_manager for cell in state_manager.get_child_cells(): if cell.name != 'child-cell2': del state_manager.child_cells[cell.name] self.mid_msg_runner = fakes.get_message_runner(mid_cell) methods_cls = self.mid_msg_runner.methods_by_type['broadcast'] self.mid_methods_cls = methods_cls self.mid_db_inst = methods_cls.db self.mid_compute_api = methods_cls.compute_api self.mid_ca_rpcapi = methods_cls.consoleauth_rpcapi self.tgt_msg_runner = fakes.get_message_runner(tgt_cell) methods_cls = self.tgt_msg_runner.methods_by_type['broadcast'] self.tgt_methods_cls = methods_cls 
self.tgt_db_inst = methods_cls.db self.tgt_compute_api = methods_cls.compute_api self.tgt_ca_rpcapi = methods_cls.consoleauth_rpcapi def test_at_the_top(self): self.assertTrue(self.tgt_methods_cls._at_the_top()) self.assertFalse(self.mid_methods_cls._at_the_top()) self.assertFalse(self.src_methods_cls._at_the_top()) def test_apply_expected_states_building(self): instance_info = objects.Instance(vm_state=vm_states.BUILDING) expected = instance_info.obj_clone() expected.expected_vm_state = [vm_states.BUILDING, None] expected_vm_state = self.src_methods_cls._get_expected_vm_state( instance_info) self.assertEqual(expected.expected_vm_state, expected_vm_state) def test_apply_expected_states_resize_finish(self): instance_info = objects.Instance(task_state=task_states.RESIZE_FINISH) exp_states = [task_states.RESIZE_FINISH, task_states.RESIZE_MIGRATED, task_states.RESIZE_MIGRATING, task_states.RESIZE_PREP] expected = instance_info.obj_clone() expected.expected_task_state = exp_states expected_task_state = self.src_methods_cls._get_expected_task_state( instance_info) self.assertEqual(expected.expected_task_state, expected_task_state) def _test_instance_update_at_top(self, exists=True): fake_uuid = fake_server_actions.FAKE_UUID fake_info_cache = objects.InstanceInfoCache(instance_uuid='fake-uuid') fake_sys_metadata = {'key1': 'value1', 'key2': 'value2'} fake_attrs = {'uuid': fake_uuid, 'cell_name': 'fake', 'info_cache': fake_info_cache, 'system_metadata': fake_sys_metadata} fake_instance = objects.Instance(**fake_attrs) expected_cell_name = 'api-cell!child-cell2!grandchild-cell1' def fake_save(instance): self.assertEqual(fake_uuid, instance.uuid) self.assertEqual(expected_cell_name, instance.cell_name) self.assertEqual(fake_info_cache, instance.info_cache) self.assertEqual(fake_sys_metadata, instance.system_metadata) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.Instance, 'create') def do_test(mock_create, mock_save): if exists: 
mock_save.side_effect = fake_save else: error = exception.InstanceNotFound(instance_id='fake_uuid') mock_save.side_effect = error self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance) if exists: mock_save.assert_called_once_with(expected_vm_state=None, expected_task_state=None) self.assertFalse(mock_create.called) else: mock_save.assert_called_once_with(expected_vm_state=None, expected_task_state=None) mock_create.assert_called_once_with() do_test() def test_instance_update_at_top(self): self._test_instance_update_at_top() def test_instance_update_at_top_does_not_already_exist(self): self._test_instance_update_at_top(exists=False) def test_instance_update_at_top_with_building_state(self): fake_uuid = fake_server_actions.FAKE_UUID fake_info_cache = objects.InstanceInfoCache(instance_uuid='fake-uuid') fake_sys_metadata = {'key1': 'value1', 'key2': 'value2'} fake_attrs = {'uuid': fake_uuid, 'cell_name': 'fake', 'info_cache': fake_info_cache, 'system_metadata': fake_sys_metadata, 'vm_state': vm_states.BUILDING} fake_instance = objects.Instance(**fake_attrs) expected_cell_name = 'api-cell!child-cell2!grandchild-cell1' expected_vm_state = [vm_states.BUILDING, None] def fake_save(instance): self.assertEqual(fake_uuid, instance.uuid) self.assertEqual(expected_cell_name, instance.cell_name) self.assertEqual(fake_info_cache, instance.info_cache) self.assertEqual(fake_sys_metadata, instance.system_metadata) with mock.patch.object(objects.Instance, 'save', side_effect=fake_save) as mock_save: self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance) # Check that save is called with the right expected states. 
mock_save.assert_called_once_with( expected_vm_state=expected_vm_state, expected_task_state=None) def test_instance_destroy_at_top(self): fake_instance = objects.Instance(uuid='fake_uuid') with mock.patch.object(objects.Instance, 'destroy') as mock_destroy: self.src_msg_runner.instance_destroy_at_top(self.ctxt, fake_instance) mock_destroy.assert_called_once_with() def test_instance_destroy_at_top_incomplete_instance_obj(self): fake_instance = objects.Instance(uuid='fake_uuid') with mock.patch.object(objects.Instance, 'get_by_uuid') as mock_get: self.src_msg_runner.instance_destroy_at_top(self.ctxt, fake_instance) mock_get.assert_called_once_with(self.ctxt, fake_instance.uuid) def test_instance_hard_delete_everywhere(self): # Reset this, as this is a broadcast down. self._setup_attrs(up=False) instance = {'uuid': 'meow'} # Should not be called in src (API cell) self.mox.StubOutWithMock(self.src_compute_api, 'delete') self.mox.StubOutWithMock(self.mid_compute_api, 'delete') self.mox.StubOutWithMock(self.tgt_compute_api, 'delete') self.mid_compute_api.delete(self.ctxt, instance) self.tgt_compute_api.delete(self.ctxt, instance) self.mox.ReplayAll() self.src_msg_runner.instance_delete_everywhere(self.ctxt, instance, 'hard') def test_instance_soft_delete_everywhere(self): # Reset this, as this is a broadcast down. 
self._setup_attrs(up=False) instance = {'uuid': 'meow'} # Should not be called in src (API cell) self.mox.StubOutWithMock(self.src_compute_api, 'soft_delete') self.mox.StubOutWithMock(self.mid_compute_api, 'soft_delete') self.mox.StubOutWithMock(self.tgt_compute_api, 'soft_delete') self.mid_compute_api.soft_delete(self.ctxt, instance) self.tgt_compute_api.soft_delete(self.ctxt, instance) self.mox.ReplayAll() self.src_msg_runner.instance_delete_everywhere(self.ctxt, instance, 'soft') def test_instance_fault_create_at_top(self): fake_instance_fault = {'id': 1, 'message': 'fake-message', 'details': 'fake-details'} if_mock = mock.Mock(spec_set=objects.InstanceFault) def _check_create(): self.assertEqual('fake-message', if_mock.message) self.assertEqual('fake-details', if_mock.details) # Should not be set self.assertNotEqual(1, if_mock.id) if_mock.create.side_effect = _check_create with mock.patch.object(objects, 'InstanceFault') as if_obj_mock: if_obj_mock.return_value = if_mock self.src_msg_runner.instance_fault_create_at_top( self.ctxt, fake_instance_fault) if_obj_mock.assert_called_once_with(context=self.ctxt) if_mock.create.assert_called_once_with() def test_bw_usage_update_at_top(self): fake_bw_update_info = {'uuid': 'fake_uuid', 'mac': 'fake_mac', 'start_period': 'fake_start_period', 'bw_in': 'fake_bw_in', 'bw_out': 'fake_bw_out', 'last_ctr_in': 'fake_last_ctr_in', 'last_ctr_out': 'fake_last_ctr_out', 'last_refreshed': 'fake_last_refreshed'} # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'bw_usage_update') self.mox.StubOutWithMock(self.mid_db_inst, 'bw_usage_update') self.mox.StubOutWithMock(self.tgt_db_inst, 'bw_usage_update') self.tgt_db_inst.bw_usage_update(self.ctxt, **fake_bw_update_info) self.mox.ReplayAll() self.src_msg_runner.bw_usage_update_at_top(self.ctxt, fake_bw_update_info) def test_sync_instances(self): # Reset this, as this is a broadcast down. 
self._setup_attrs(up=False) project_id = 'fake_project_id' updated_since_raw = 'fake_updated_since_raw' updated_since_parsed = 'fake_updated_since_parsed' deleted = 'fake_deleted' instance1 = objects.Instance(uuid='fake_uuid1', deleted=False) instance2 = objects.Instance(uuid='fake_uuid2', deleted=True) fake_instances = [instance1, instance2] self.mox.StubOutWithMock(self.tgt_msg_runner, 'instance_update_at_top') self.mox.StubOutWithMock(self.tgt_msg_runner, 'instance_destroy_at_top') self.mox.StubOutWithMock(timeutils, 'parse_isotime') self.mox.StubOutWithMock(cells_utils, 'get_instances_to_sync') # Middle cell. timeutils.parse_isotime(updated_since_raw).AndReturn( updated_since_parsed) cells_utils.get_instances_to_sync(self.ctxt, updated_since=updated_since_parsed, project_id=project_id, deleted=deleted).AndReturn([]) # Bottom/Target cell timeutils.parse_isotime(updated_since_raw).AndReturn( updated_since_parsed) cells_utils.get_instances_to_sync(self.ctxt, updated_since=updated_since_parsed, project_id=project_id, deleted=deleted).AndReturn(fake_instances) self.tgt_msg_runner.instance_update_at_top(self.ctxt, instance1) self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance2) self.mox.ReplayAll() self.src_msg_runner.sync_instances(self.ctxt, project_id, updated_since_raw, deleted) def test_service_get_all_with_disabled(self): # Reset this, as this is a broadcast down. 
self._setup_attrs(up=False) ctxt = self.ctxt.elevated() self.mox.StubOutWithMock(objects.ServiceList, 'get_all') # Calls are made from grandchild-cell to api-cell objects.ServiceList.get_all( mox.IgnoreArg(), disabled=None).AndReturn([4, 5]) objects.ServiceList.get_all( mox.IgnoreArg(), disabled=None).AndReturn([3]) objects.ServiceList.get_all( mox.IgnoreArg(), disabled=None).AndReturn([1, 2]) self.mox.ReplayAll() responses = self.src_msg_runner.service_get_all(ctxt, filters={}) response_values = [(resp.cell_name, resp.value_or_raise()) for resp in responses] expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]), ('api-cell!child-cell2', [3]), ('api-cell', [1, 2])] self.assertEqual(expected, response_values) def test_service_get_all_without_disabled(self): # Reset this, as this is a broadcast down. self._setup_attrs(up=False) disabled = False filters = {'disabled': disabled} ctxt = self.ctxt.elevated() self.mox.StubOutWithMock(objects.ServiceList, 'get_all') # Calls are made from grandchild-cell to api-cell objects.ServiceList.get_all( mox.IgnoreArg(), disabled=disabled).AndReturn([4, 5]) objects.ServiceList.get_all( mox.IgnoreArg(), disabled=disabled).AndReturn([3]) objects.ServiceList.get_all( mox.IgnoreArg(), disabled=disabled).AndReturn([1, 2]) self.mox.ReplayAll() responses = self.src_msg_runner.service_get_all(ctxt, filters=filters) response_values = [(resp.cell_name, resp.value_or_raise()) for resp in responses] expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]), ('api-cell!child-cell2', [3]), ('api-cell', [1, 2])] self.assertEqual(expected, response_values) def test_task_log_get_all_broadcast(self): # Reset this, as this is a broadcast down. 
self._setup_attrs(up=False) task_name = 'fake_task_name' begin = 'fake_begin' end = 'fake_end' host = 'fake_host' state = 'fake_state' ctxt = self.ctxt.elevated() self.mox.StubOutWithMock(self.src_db_inst, 'task_log_get_all') self.mox.StubOutWithMock(self.mid_db_inst, 'task_log_get_all') self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all') self.src_db_inst.task_log_get_all(ctxt, task_name, begin, end, host=host, state=state).AndReturn([1, 2]) self.mid_db_inst.task_log_get_all(ctxt, task_name, begin, end, host=host, state=state).AndReturn([3]) self.tgt_db_inst.task_log_get_all(ctxt, task_name, begin, end, host=host, state=state).AndReturn([4, 5]) self.mox.ReplayAll() responses = self.src_msg_runner.task_log_get_all(ctxt, None, task_name, begin, end, host=host, state=state) response_values = [(resp.cell_name, resp.value_or_raise()) for resp in responses] expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]), ('api-cell!child-cell2', [3]), ('api-cell', [1, 2])] self.assertEqual(expected, response_values) def test_compute_node_get_all(self): # Reset this, as this is a broadcast down. self._setup_attrs(up=False) ctxt = self.ctxt.elevated() self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all') # Calls are made from grandchild-cell to api-cell objects.ComputeNodeList.get_all(mox.IgnoreArg()).AndReturn([4, 5]) objects.ComputeNodeList.get_all(mox.IgnoreArg()).AndReturn([3]) objects.ComputeNodeList.get_all(mox.IgnoreArg()).AndReturn([1, 2]) self.mox.ReplayAll() responses = self.src_msg_runner.compute_node_get_all(ctxt) response_values = [(resp.cell_name, resp.value_or_raise()) for resp in responses] expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]), ('api-cell!child-cell2', [3]), ('api-cell', [1, 2])] self.assertEqual(expected, response_values) def test_compute_node_get_all_with_hyp_match(self): # Reset this, as this is a broadcast down. 
self._setup_attrs(up=False) hypervisor_match = 'meow' ctxt = self.ctxt.elevated() self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_by_hypervisor') # Calls are made from grandchild-cell to api-cell objects.ComputeNodeList.get_by_hypervisor( ctxt, hypervisor_match).AndReturn([4, 5]) objects.ComputeNodeList.get_by_hypervisor( ctxt, hypervisor_match).AndReturn([3]) objects.ComputeNodeList.get_by_hypervisor( ctxt, hypervisor_match).AndReturn([1, 2]) self.mox.ReplayAll() responses = self.src_msg_runner.compute_node_get_all(ctxt, hypervisor_match=hypervisor_match) response_values = [(resp.cell_name, resp.value_or_raise()) for resp in responses] expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]), ('api-cell!child-cell2', [3]), ('api-cell', [1, 2])] self.assertEqual(expected, response_values) def test_compute_node_stats(self): # Reset this, as this is a broadcast down. self._setup_attrs(up=False) ctxt = self.ctxt.elevated() self.mox.StubOutWithMock(self.src_db_inst, 'compute_node_statistics') self.mox.StubOutWithMock(self.mid_db_inst, 'compute_node_statistics') self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_statistics') self.src_db_inst.compute_node_statistics(ctxt).AndReturn([1, 2]) self.mid_db_inst.compute_node_statistics(ctxt).AndReturn([3]) self.tgt_db_inst.compute_node_statistics(ctxt).AndReturn([4, 5]) self.mox.ReplayAll() responses = self.src_msg_runner.compute_node_stats(ctxt) response_values = [(resp.cell_name, resp.value_or_raise()) for resp in responses] expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]), ('api-cell!child-cell2', [3]), ('api-cell', [1, 2])] self.assertEqual(expected, response_values) def test_consoleauth_delete_tokens(self): fake_uuid = 'fake-instance-uuid' # To show these should not be called in src/mid-level cell self.mox.StubOutWithMock(self.src_ca_rpcapi, 'delete_tokens_for_instance') self.mox.StubOutWithMock(self.mid_ca_rpcapi, 'delete_tokens_for_instance') self.mox.StubOutWithMock(self.tgt_ca_rpcapi, 
'delete_tokens_for_instance') self.tgt_ca_rpcapi.delete_tokens_for_instance(self.ctxt, fake_uuid) self.mox.ReplayAll() self.src_msg_runner.consoleauth_delete_tokens(self.ctxt, fake_uuid) def test_bdm_update_or_create_with_none_create(self): fake_bdm = {'id': 'fake_id', 'volume_id': 'fake_volume_id'} expected_bdm = fake_bdm.copy() expected_bdm.pop('id') # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'block_device_mapping_update_or_create') self.mox.StubOutWithMock(self.mid_db_inst, 'block_device_mapping_update_or_create') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_update_or_create') self.tgt_db_inst.block_device_mapping_update_or_create( self.ctxt, expected_bdm, legacy=False) self.mox.ReplayAll() self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt, fake_bdm, create=None) def test_bdm_update_or_create_with_true_create(self): fake_bdm = {'id': 'fake_id', 'volume_id': 'fake_volume_id'} expected_bdm = fake_bdm.copy() expected_bdm.pop('id') # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'block_device_mapping_create') self.mox.StubOutWithMock(self.mid_db_inst, 'block_device_mapping_create') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_create') self.tgt_db_inst.block_device_mapping_create( self.ctxt, fake_bdm, legacy=False) self.mox.ReplayAll() self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt, fake_bdm, create=True) def test_bdm_update_or_create_with_false_create_vol_id(self): fake_bdm = {'id': 'fake_id', 'instance_uuid': 'fake_instance_uuid', 'device_name': 'fake_device_name', 'volume_id': 'fake_volume_id'} expected_bdm = fake_bdm.copy() expected_bdm.pop('id') fake_inst_bdms = [{'id': 1, 'volume_id': 'not-a-match', 'device_name': 'not-a-match'}, {'id': 2, 'volume_id': 'fake_volume_id', 'device_name': 'not-a-match'}, {'id': 3, 'volume_id': 'not-a-match', 'device_name': 'not-a-match'}] # Shouldn't be called for these 2 cells 
self.mox.StubOutWithMock(self.src_db_inst, 'block_device_mapping_update') self.mox.StubOutWithMock(self.mid_db_inst, 'block_device_mapping_update') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_get_all_by_instance') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_update') self.tgt_db_inst.block_device_mapping_get_all_by_instance( self.ctxt, 'fake_instance_uuid').AndReturn( fake_inst_bdms) # Should try to update ID 2. self.tgt_db_inst.block_device_mapping_update( self.ctxt, 2, expected_bdm, legacy=False) self.mox.ReplayAll() self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt, fake_bdm, create=False) def test_bdm_update_or_create_with_false_create_dev_name(self): fake_bdm = {'id': 'fake_id', 'instance_uuid': 'fake_instance_uuid', 'device_name': 'fake_device_name', 'volume_id': 'fake_volume_id'} expected_bdm = fake_bdm.copy() expected_bdm.pop('id') fake_inst_bdms = [{'id': 1, 'volume_id': 'not-a-match', 'device_name': 'not-a-match'}, {'id': 2, 'volume_id': 'not-a-match', 'device_name': 'fake_device_name'}, {'id': 3, 'volume_id': 'not-a-match', 'device_name': 'not-a-match'}] # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'block_device_mapping_update') self.mox.StubOutWithMock(self.mid_db_inst, 'block_device_mapping_update') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_get_all_by_instance') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_update') self.tgt_db_inst.block_device_mapping_get_all_by_instance( self.ctxt, 'fake_instance_uuid').AndReturn( fake_inst_bdms) # Should try to update ID 2. 
self.tgt_db_inst.block_device_mapping_update( self.ctxt, 2, expected_bdm, legacy=False) self.mox.ReplayAll() self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt, fake_bdm, create=False) def test_bdm_destroy_by_volume(self): fake_instance_uuid = 'fake-instance-uuid' fake_volume_id = 'fake-volume-name' # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'block_device_mapping_destroy_by_instance_and_volume') self.mox.StubOutWithMock(self.mid_db_inst, 'block_device_mapping_destroy_by_instance_and_volume') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_destroy_by_instance_and_volume') self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_volume( self.ctxt, fake_instance_uuid, fake_volume_id) self.mox.ReplayAll() self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid, volume_id=fake_volume_id) def test_bdm_destroy_by_device(self): fake_instance_uuid = 'fake-instance-uuid' fake_device_name = 'fake-device-name' # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'block_device_mapping_destroy_by_instance_and_device') self.mox.StubOutWithMock(self.mid_db_inst, 'block_device_mapping_destroy_by_instance_and_device') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_destroy_by_instance_and_device') self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_device( self.ctxt, fake_instance_uuid, fake_device_name) self.mox.ReplayAll() self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid, device_name=fake_device_name) def test_get_migrations(self): self._setup_attrs(up=False) filters = {'status': 'confirmed'} migrations_from_cell1 = [{'id': 123}] migrations_from_cell2 = [{'id': 456}] self.mox.StubOutWithMock(self.mid_compute_api, 'get_migrations') self.mid_compute_api.get_migrations(self.ctxt, filters).\ AndReturn(migrations_from_cell1) self.mox.StubOutWithMock(self.tgt_compute_api, 'get_migrations') 
self.tgt_compute_api.get_migrations(self.ctxt, filters).\ AndReturn(migrations_from_cell2) self.mox.ReplayAll() responses = self.src_msg_runner.get_migrations( self.ctxt, None, False, filters) self.assertEqual(2, len(responses)) for response in responses: self.assertIn(response.value_or_raise(), [migrations_from_cell1, migrations_from_cell2]) @mock.patch.object(objects.KeyPair, 'get_by_name', return_value='fake_keypair') def test_get_keypair_at_top(self, fake_get_by_name): user_id = 'fake_user_id' name = 'fake_keypair_name' responses = self.src_msg_runner.get_keypair_at_top(self.ctxt, user_id, name) fake_get_by_name.assert_called_once_with(self.ctxt, user_id, name) for response in responses: if response.value is not None: self.assertEqual('fake_keypair', response.value) @mock.patch.object(objects.KeyPair, 'get_by_name') def test_get_keypair_at_top_with_objects_exception(self, fake_get_by_name): user_id = 'fake_user_id' name = 'fake_keypair_name' keypair_exception = exception.KeypairNotFound(user_id=user_id, name=name) fake_get_by_name.side_effect = keypair_exception responses = self.src_msg_runner.get_keypair_at_top(self.ctxt, user_id, name) fake_get_by_name.assert_called_once_with(self.ctxt, user_id, name) for response in responses: self.assertIsNone(response.value) @mock.patch.object(messaging._BroadcastMessage, 'process') def test_get_keypair_at_top_with_process_response(self, fake_process): user_id = 'fake_user_id' name = 'fake_keypair_name' response = messaging.Response(self.ctxt, 'cell', 'keypair', False) other_response = messaging.Response(self.ctxt, 'cell', 'fake_other_keypair', False) fake_process.return_value = [response, other_response] responses = self.src_msg_runner.get_keypair_at_top(self.ctxt, user_id, name) fake_process.assert_called_once_with() self.assertEqual(fake_process.return_value, responses) class CellsPublicInterfacesTestCase(test.NoDBTestCase): """Test case for the public interfaces into cells messaging.""" def setUp(self): 
super(CellsPublicInterfacesTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self.our_name = 'api-cell' self.msg_runner = fakes.get_message_runner(self.our_name) self.state_manager = self.msg_runner.state_manager @mock.patch.object(messaging, '_TargetedMessage') def test_resize_instance(self, mock_message): instance = objects.Instance(cell_name='api-cell!child-cell') flavor = 'fake' extra_instance_updates = {'fake': 'fake'} clean_shutdown = True self.msg_runner.resize_instance(self.ctxt, instance, flavor, extra_instance_updates, clean_shutdown=clean_shutdown) extra_kwargs = dict(flavor=flavor, extra_instance_updates=extra_instance_updates, clean_shutdown=clean_shutdown) method_kwargs = {'instance': instance} method_kwargs.update(extra_kwargs) mock_message.assert_called_once_with(self.msg_runner, self.ctxt, 'resize_instance', method_kwargs, 'down', instance.cell_name, need_response=False) nova-13.0.0/nova/tests/unit/cells/test_cells_state_manager.py0000664000567000056710000003277112701407773025514 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For CellStateManager """ import datetime import time import mock from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import timeutils import six from nova.cells import state from nova.db.sqlalchemy import models from nova import exception from nova import objects from nova import test from nova import utils FAKE_COMPUTES = [ ('host1', 1024, 100, 0, 0), ('host2', 1024, 100, -1, -1), ('host3', 1024, 100, 1024, 100), ('host4', 1024, 100, 300, 30), ] FAKE_COMPUTES_N_TO_ONE = [ ('host1', 1024, 100, 0, 0), ('host1', 1024, 100, -1, -1), ('host2', 1024, 100, 1024, 100), ('host2', 1024, 100, 300, 30), ] FAKE_SERVICES = [ ('host1', 0), ('host2', 0), ('host3', 0), ('host4', 3600), ] # NOTE(alaski): It's important to have multiple types that end up having the # same memory and disk requirements. So two types need the same first value, # and two need the second and third values to add up to the same thing. FAKE_ITYPES = [ (0, 0, 0), (50, 12, 13), (50, 2, 4), (10, 20, 5), ] def _create_fake_node(host, total_mem, total_disk, free_mem, free_disk): return objects.ComputeNode(host=host, memory_mb=total_mem, local_gb=total_disk, free_ram_mb=free_mem, free_disk_gb=free_disk) @classmethod def _fake_service_get_all_by_binary(cls, context, binary): def _node(host, total_mem, total_disk, free_mem, free_disk): now = timeutils.utcnow() return objects.Service(host=host, disabled=False, forced_down=False, last_seen_up=now) return [_node(*fake) for fake in FAKE_COMPUTES] @classmethod def _fake_service_get_all_by_binary_nodedown(cls, context, binary): def _service(host, noupdate_sec): now = timeutils.utcnow() last_seen = now - datetime.timedelta(seconds=noupdate_sec) return objects.Service(host=host, disabled=False, forced_down=False, last_seen_up=last_seen, binary=binary) return [_service(*fake) for fake in FAKE_SERVICES] @classmethod def _fake_compute_node_get_all(cls, context): return [_create_fake_node(*fake) for fake in FAKE_COMPUTES] @classmethod 
def _fake_compute_node_n_to_one_get_all(cls, context): return [_create_fake_node(*fake) for fake in FAKE_COMPUTES_N_TO_ONE] def _fake_cell_get_all(context): return [] def _fake_instance_type_all(context): def _type(mem, root, eph): return {'root_gb': root, 'ephemeral_gb': eph, 'memory_mb': mem} return [_type(*fake) for fake in FAKE_ITYPES] class TestCellsStateManager(test.NoDBTestCase): def setUp(self): super(TestCellsStateManager, self).setUp() self.stubs.Set(objects.ComputeNodeList, 'get_all', _fake_compute_node_get_all) self.stubs.Set(objects.ServiceList, 'get_by_binary', _fake_service_get_all_by_binary) self.stub_out('nova.db.flavor_get_all', _fake_instance_type_all) self.stub_out('nova.db.cell_get_all', _fake_cell_get_all) def test_cells_config_not_found(self): self.flags(cells_config='no_such_file_exists.conf', group='cells') e = self.assertRaises(cfg.ConfigFilesNotFoundError, state.CellStateManager) self.assertEqual(['no_such_file_exists.conf'], e.config_files) @mock.patch.object(cfg.ConfigOpts, 'find_file') @mock.patch.object(utils, 'read_cached_file') def test_filemanager_returned(self, mock_read_cached_file, mock_find_file): mock_find_file.return_value = "/etc/nova/cells.json" mock_read_cached_file.return_value = (False, six.StringIO('{}')) self.flags(cells_config='cells.json', group='cells') manager = state.CellStateManager() self.assertIsInstance(manager, state.CellStateManagerFile) self.assertRaises(exception.CellsUpdateUnsupported, manager.cell_create, None, None) self.assertRaises(exception.CellsUpdateUnsupported, manager.cell_update, None, None, None) self.assertRaises(exception.CellsUpdateUnsupported, manager.cell_delete, None, None) def test_dbmanager_returned(self): self.assertIsInstance(state.CellStateManager(), state.CellStateManagerDB) def test_capacity_no_reserve(self): # utilize entire cell cap = self._capacity(0.0) cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) 
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES) self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) self.assertEqual(0, cap['ram_free']['units_by_mb']['0']) self.assertEqual(0, cap['disk_free']['units_by_mb']['0']) units = cell_free_ram // 50 self.assertEqual(units, cap['ram_free']['units_by_mb']['50']) sz = 25 * 1024 units = 5 # 4 on host 3, 1 on host4 self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)]) def test_capacity_full_reserve(self): # reserve the entire cell. (utilize zero percent) cap = self._capacity(100.0) cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES) self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) self.assertEqual(0, cap['ram_free']['units_by_mb']['0']) self.assertEqual(0, cap['disk_free']['units_by_mb']['0']) self.assertEqual(0, cap['ram_free']['units_by_mb']['50']) sz = 25 * 1024 self.assertEqual(0, cap['disk_free']['units_by_mb'][str(sz)]) def test_capacity_part_reserve(self): # utilize half the cell's free capacity cap = self._capacity(50.0) cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES) self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) self.assertEqual(0, cap['ram_free']['units_by_mb']['0']) self.assertEqual(0, cap['disk_free']['units_by_mb']['0']) units = 10 # 10 from host 3 self.assertEqual(units, cap['ram_free']['units_by_mb']['50']) sz = 25 * 1024 units = 2 # 2 on host 3 self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)]) def _get_state_manager(self, reserve_percent=0.0): self.flags(reserve_percent=reserve_percent, group='cells') return state.CellStateManager() def _capacity(self, reserve_percent): state_manager = self._get_state_manager(reserve_percent) my_state = 
state_manager.get_my_state() return my_state.capacities class TestCellsStateManagerNToOne(TestCellsStateManager): def setUp(self): super(TestCellsStateManagerNToOne, self).setUp() self.stubs.Set(objects.ComputeNodeList, 'get_all', _fake_compute_node_n_to_one_get_all) def test_capacity_part_reserve(self): # utilize half the cell's free capacity cap = self._capacity(50.0) cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES_N_TO_ONE) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) cell_free_disk = (1024 * sum(compute[4] for compute in FAKE_COMPUTES_N_TO_ONE)) self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) self.assertEqual(0, cap['ram_free']['units_by_mb']['0']) self.assertEqual(0, cap['disk_free']['units_by_mb']['0']) units = 6 # 6 from host 2 self.assertEqual(units, cap['ram_free']['units_by_mb']['50']) sz = 25 * 1024 units = 1 # 1 on host 2 self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)]) class TestCellsStateManagerNodeDown(test.NoDBTestCase): def setUp(self): super(TestCellsStateManagerNodeDown, self).setUp() self.stub_out('nova.objects.ComputeNodeList.get_all', _fake_compute_node_get_all) self.stub_out('nova.objects.ServiceList.get_by_binary', _fake_service_get_all_by_binary_nodedown) self.stub_out('nova.db.flavor_get_all', _fake_instance_type_all) self.stub_out('nova.db.cell_get_all', _fake_cell_get_all) def test_capacity_no_reserve_nodedown(self): cap = self._capacity(0.0) cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES[:-1]) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) free_disk = sum(compute[4] for compute in FAKE_COMPUTES[:-1]) cell_free_disk = 1024 * free_disk self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) def _get_state_manager(self, reserve_percent=0.0): self.flags(reserve_percent=reserve_percent, group='cells') return state.CellStateManager() def _capacity(self, reserve_percent): state_manager = self._get_state_manager(reserve_percent) my_state = 
state_manager.get_my_state() return my_state.capacities class TestCellStateManagerException(test.NoDBTestCase): @mock.patch.object(time, 'sleep') def test_init_db_error(self, mock_sleep): class TestCellStateManagerDB(state.CellStateManagerDB): def __init__(self): self._cell_data_sync = mock.Mock() self._cell_data_sync.side_effect = [db_exc.DBError(), []] super(TestCellStateManagerDB, self).__init__() test = TestCellStateManagerDB() mock_sleep.assert_called_once_with(30) self.assertEqual(2, test._cell_data_sync.call_count) class TestCellsGetCapacity(TestCellsStateManager): def setUp(self): super(TestCellsGetCapacity, self).setUp() self.capacities = {"ram_free": 1234} self.state_manager = self._get_state_manager() cell = models.Cell(name="cell_name") other_cell = models.Cell(name="other_cell_name") cell.capacities = self.capacities other_cell.capacities = self.capacities self.stubs.Set(self.state_manager, 'child_cells', {"cell_name": cell, "other_cell_name": other_cell}) def test_get_cell_capacity_for_all_cells(self): self.stubs.Set(self.state_manager.my_cell_state, 'capacities', self.capacities) capacities = self.state_manager.get_capacities() self.assertEqual({"ram_free": 3702}, capacities) def test_get_cell_capacity_for_the_parent_cell(self): self.stubs.Set(self.state_manager.my_cell_state, 'capacities', self.capacities) capacities = self.state_manager.\ get_capacities(self.state_manager.my_cell_state.name) self.assertEqual({"ram_free": 3702}, capacities) def test_get_cell_capacity_for_a_cell(self): self.assertEqual(self.capacities, self.state_manager.get_capacities(cell_name="cell_name")) def test_get_cell_capacity_for_non_existing_cell(self): self.assertRaises(exception.CellNotFound, self.state_manager.get_capacities, cell_name="invalid_cell_name") class FakeCellStateManager(object): def __init__(self): self.called = [] def _cell_data_sync(self, force=False): self.called.append(('_cell_data_sync', force)) class TestSyncDecorators(test.NoDBTestCase): def 
test_sync_before(self): manager = FakeCellStateManager() def test(inst, *args, **kwargs): self.assertEqual(manager, inst) self.assertEqual((1, 2, 3), args) self.assertEqual(dict(a=4, b=5, c=6), kwargs) return 'result' wrapper = state.sync_before(test) result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6) self.assertEqual('result', result) self.assertEqual([('_cell_data_sync', False)], manager.called) def test_sync_after(self): manager = FakeCellStateManager() def test(inst, *args, **kwargs): self.assertEqual(manager, inst) self.assertEqual((1, 2, 3), args) self.assertEqual(dict(a=4, b=5, c=6), kwargs) return 'result' wrapper = state.sync_after(test) result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6) self.assertEqual('result', result) self.assertEqual([('_cell_data_sync', True)], manager.called) nova-13.0.0/nova/tests/unit/cells/test_cells_utils.py0000664000567000056710000002367512701407773024045 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For Cells Utility methods """ import inspect import mock import random from nova.cells import utils as cells_utils from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_instance class CellsUtilsTestCase(test.NoDBTestCase): """Test case for Cells utility methods.""" def test_get_instances_to_sync(self): fake_context = 'fake_context' call_info = {'get_all': 0, 'shuffle': 0} def random_shuffle(_list): call_info['shuffle'] += 1 @staticmethod def instance_get_all_by_filters(context, filters, sort_key, sort_dir, limit, marker): # Pretend we return a full list the first time otherwise we loop # infinitely if marker is not None: return [] self.assertEqual(fake_context, context) self.assertEqual('deleted', sort_key) self.assertEqual('asc', sort_dir) call_info['got_filters'] = filters call_info['get_all'] += 1 instances = [fake_instance.fake_db_instance() for i in range(3)] return instances self.stubs.Set(objects.InstanceList, 'get_by_filters', instance_get_all_by_filters) self.stubs.Set(random, 'shuffle', random_shuffle) instances = cells_utils.get_instances_to_sync(fake_context) self.assertTrue(inspect.isgenerator(instances)) self.assertEqual(3, len([x for x in instances])) self.assertEqual(1, call_info['get_all']) self.assertEqual({}, call_info['got_filters']) self.assertEqual(0, call_info['shuffle']) instances = cells_utils.get_instances_to_sync(fake_context, shuffle=True) self.assertTrue(inspect.isgenerator(instances)) self.assertEqual(3, len([x for x in instances])) self.assertEqual(2, call_info['get_all']) self.assertEqual({}, call_info['got_filters']) self.assertEqual(1, call_info['shuffle']) instances = cells_utils.get_instances_to_sync(fake_context, updated_since='fake-updated-since') self.assertTrue(inspect.isgenerator(instances)) self.assertEqual(3, len([x for x in instances])) self.assertEqual(3, call_info['get_all']) self.assertEqual({'changes-since': 'fake-updated-since'}, call_info['got_filters']) 
self.assertEqual(1, call_info['shuffle']) instances = cells_utils.get_instances_to_sync(fake_context, project_id='fake-project', updated_since='fake-updated-since', shuffle=True) self.assertTrue(inspect.isgenerator(instances)) self.assertEqual(3, len([x for x in instances])) self.assertEqual(4, call_info['get_all']) self.assertEqual({'changes-since': 'fake-updated-since', 'project_id': 'fake-project'}, call_info['got_filters']) self.assertEqual(2, call_info['shuffle']) @mock.patch.object(objects.InstanceList, 'get_by_filters') @mock.patch.object(random, 'shuffle') def _test_get_instances_pagination(self, mock_shuffle, mock_get_by_filters, shuffle=False, updated_since=None, project_id=None): fake_context = 'fake_context' instances0 = objects.instance._make_instance_list(fake_context, objects.InstanceList(), [fake_instance.fake_db_instance() for i in range(3)], expected_attrs=None) marker0 = instances0[-1]['uuid'] instances1 = objects.instance._make_instance_list(fake_context, objects.InstanceList(), [fake_instance.fake_db_instance() for i in range(3)], expected_attrs=None) marker1 = instances1[-1]['uuid'] mock_get_by_filters.side_effect = [instances0, instances1, []] instances = cells_utils.get_instances_to_sync(fake_context, updated_since, project_id, shuffle=shuffle) self.assertEqual(len([x for x in instances]), 6) filters = {} if updated_since is not None: filters['changes-since'] = updated_since if project_id is not None: filters['project_id'] = project_id limit = 100 expected_calls = [mock.call(fake_context, filters, sort_key='deleted', sort_dir='asc', limit=limit, marker=None), mock.call(fake_context, filters, sort_key='deleted', sort_dir='asc', limit=limit, marker=marker0), mock.call(fake_context, filters, sort_key='deleted', sort_dir='asc', limit=limit, marker=marker1)] mock_get_by_filters.assert_has_calls(expected_calls) self.assertEqual(3, mock_get_by_filters.call_count) def test_get_instances_to_sync_limit(self): self._test_get_instances_pagination() def 
test_get_instances_to_sync_shuffle(self): self._test_get_instances_pagination(shuffle=True) def test_get_instances_to_sync_updated_since(self): self._test_get_instances_pagination(updated_since='fake-updated-since') def test_get_instances_to_sync_multiple_params(self): self._test_get_instances_pagination(project_id='fake-project', updated_since='fake-updated-since', shuffle=True) def test_split_cell_and_item(self): path = 'australia', 'queensland', 'gold_coast' cell = cells_utils.PATH_CELL_SEP.join(path) item = 'host_5' together = cells_utils.cell_with_item(cell, item) self.assertEqual(cells_utils._CELL_ITEM_SEP.join([cell, item]), together) # Test normal usage result_cell, result_item = cells_utils.split_cell_and_item(together) self.assertEqual(cell, result_cell) self.assertEqual(item, result_item) # Test with no cell cell = None together = cells_utils.cell_with_item(cell, item) self.assertEqual(item, together) result_cell, result_item = cells_utils.split_cell_and_item(together) self.assertEqual(cell, result_cell) self.assertEqual(item, result_item) def test_add_cell_to_compute_node(self): fake_compute = objects.ComputeNode(id=1, host='fake') cell_path = 'fake_path' proxy = cells_utils.add_cell_to_compute_node(fake_compute, cell_path) self.assertIsInstance(proxy, cells_utils.ComputeNodeProxy) self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id) self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'), proxy.host) @mock.patch.object(objects.Service, 'obj_load_attr') def test_add_cell_to_service_no_compute_node(self, mock_get_by_id): fake_service = objects.Service(id=1, host='fake') mock_get_by_id.side_effect = exception.ServiceNotFound(service_id=1) cell_path = 'fake_path' proxy = cells_utils.add_cell_to_service(fake_service, cell_path) self.assertIsInstance(proxy, cells_utils.ServiceProxy) self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id) self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'), proxy.host) 
self.assertRaises(AttributeError, getattr, proxy, 'compute_node') def test_add_cell_to_service_with_compute_node(self): fake_service = objects.Service(id=1, host='fake') fake_service.compute_node = objects.ComputeNode(id=1, host='fake') cell_path = 'fake_path' proxy = cells_utils.add_cell_to_service(fake_service, cell_path) self.assertIsInstance(proxy, cells_utils.ServiceProxy) self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id) self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'), proxy.host) self.assertRaises(AttributeError, getattr, proxy, 'compute_node') def test_proxy_object_serializer_to_primitive(self): obj = objects.ComputeNode(id=1, host='fake') obj_proxy = cells_utils.ComputeNodeProxy(obj, 'fake_path') serializer = cells_utils.ProxyObjectSerializer() primitive = serializer.serialize_entity('ctx', obj_proxy) self.assertIsInstance(primitive, dict) class_name = primitive.pop('cell_proxy.class_name') cell_path = primitive.pop('cell_proxy.cell_path') self.assertEqual('ComputeNodeProxy', class_name) self.assertEqual('fake_path', cell_path) self.assertEqual(obj.obj_to_primitive(), primitive) def test_proxy_object_serializer_from_primitive(self): obj = objects.ComputeNode(id=1, host='fake') serializer = cells_utils.ProxyObjectSerializer() # Recreating the primitive by hand to isolate the test for only # the deserializing method primitive = obj.obj_to_primitive() primitive['cell_proxy.class_name'] = 'ComputeNodeProxy' primitive['cell_proxy.cell_path'] = 'fake_path' result = serializer.deserialize_entity('ctx', primitive) self.assertIsInstance(result, cells_utils.ComputeNodeProxy) self.assertEqual(obj.obj_to_primitive(), result._obj.obj_to_primitive()) self.assertEqual('fake_path', result._cell_path) nova-13.0.0/nova/tests/unit/cells/test_cells_filters.py0000664000567000056710000002264612701407773024352 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012-2013 Rackspace Hosting # All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for cells scheduler filters.
"""

from nova.cells import filters
from nova.cells import state
from nova import context
from nova.db.sqlalchemy import models
from nova import test
from nova.tests.unit.cells import fakes


class FiltersTestCase(test.NoDBTestCase):
    """Makes sure the proper filters are in the directory."""

    def test_all_filters(self):
        filter_classes = filters.all_filters()
        class_names = [cls.__name__ for cls in filter_classes]
        self.assertIn("TargetCellFilter", class_names)
        self.assertIn("DifferentCellFilter", class_names)


class _FilterTestClass(test.NoDBTestCase):
    """Base class for testing individual filter plugins."""
    # Subclasses set this to the dotted path of the filter under test.
    filter_cls_name = None

    def setUp(self):
        super(_FilterTestClass, self).setUp()
        fakes.init(self)
        self.msg_runner = fakes.get_message_runner('api-cell')
        self.scheduler = self.msg_runner.scheduler
        self.my_cell_state = self.msg_runner.state_manager.get_my_state()
        self.filter_handler = filters.CellFilterHandler()
        filter_classes = self.filter_handler.get_matching_classes(
            [self.filter_cls_name])
        self.filters = [cls() for cls in filter_classes]
        self.context = context.RequestContext('fake', 'fake',
                                              is_admin=True)

    def _filter_cells(self, cells, filter_properties):
        """Run the loaded filter(s) over `cells` and return the survivors."""
        return self.filter_handler.get_filtered_objects(self.filters,
                                                        cells,
                                                        filter_properties)


class ImagePropertiesFilter(_FilterTestClass):
    filter_cls_name = \
        'nova.cells.filters.image_properties.ImagePropertiesFilter'

    def setUp(self):
        super(ImagePropertiesFilter, self).setUp()
        self.cell1 = models.Cell()
        self.cell2 = models.Cell()
        self.cell3 = models.Cell()
        self.cells = [self.cell1, self.cell2, self.cell3]
        for cell in self.cells:
            cell.capabilities = {}
        self.filter_props = {'context': self.context, 'request_spec': {}}

    def test_missing_image_properties(self):
        # No image properties in the request spec: nothing is filtered.
        self.assertEqual(self.cells,
                         self._filter_cells(self.cells, self.filter_props))

    def test_missing_hypervisor_version_requires(self):
        self.filter_props['request_spec'] = {'image': {'properties': {}}}
        for cell in self.cells:
            cell.capabilities = {"prominent_hypervisor_version":
                    set([u"6.2"])}
        self.assertEqual(self.cells,
                         self._filter_cells(self.cells, self.filter_props))

    def test_missing_hypervisor_version_in_cells(self):
        image = {'properties': {'hypervisor_version_requires': '>6.2.1'}}
        self.filter_props['request_spec'] = {'image': image}
        self.cell1.capabilities = {"prominent_hypervisor_version": set([])}
        self.assertEqual(self.cells,
                         self._filter_cells(self.cells, self.filter_props))

    def test_cells_matching_hypervisor_version(self):
        image = {'properties': {'hypervisor_version_requires':
                '>6.0, <=6.3'}}
        self.filter_props['request_spec'] = {'image': image}

        self.cell1.capabilities = {"prominent_hypervisor_version":
                set([u"6.2"])}
        self.cell2.capabilities = {"prominent_hypervisor_version":
                set([u"6.3"])}
        self.cell3.capabilities = {"prominent_hypervisor_version":
                set([u"6.0"])}

        self.assertEqual([self.cell1, self.cell2],
                         self._filter_cells(self.cells, self.filter_props))

        # assert again to verify filter doesn't mutate state
        # LP bug #1325705
        self.assertEqual([self.cell1, self.cell2],
                         self._filter_cells(self.cells, self.filter_props))


class TestTargetCellFilter(_FilterTestClass):
    filter_cls_name = 'nova.cells.filters.target_cell.TargetCellFilter'

    def test_missing_scheduler_hints(self):
        cells = [1, 2, 3]
        # No filtering
        filter_props = {'context': self.context}
        self.assertEqual(cells, self._filter_cells(cells, filter_props))

    def test_no_target_cell_hint(self):
        cells = [1, 2, 3]
        filter_props = {'scheduler_hints': {},
                        'context': self.context}
        # No filtering
        self.assertEqual(cells, self._filter_cells(cells, filter_props))

    def test_target_cell_specified_me(self):
        cells = [1, 2, 3]
        target_cell = 'fake!cell!path'
        current_cell = 'fake!cell!path'
        filter_props = {'scheduler_hints': {'target_cell': target_cell},
                        'routing_path': current_cell,
                        'scheduler': self.scheduler,
                        'context': self.context}
        # Only myself in the list.
        self.assertEqual([self.my_cell_state],
                         self._filter_cells(cells, filter_props))

    def test_target_cell_specified_me_but_not_admin(self):
        ctxt = context.RequestContext('fake', 'fake')
        cells = [1, 2, 3]
        target_cell = 'fake!cell!path'
        current_cell = 'fake!cell!path'
        filter_props = {'scheduler_hints': {'target_cell': target_cell},
                        'routing_path': current_cell,
                        'scheduler': self.scheduler,
                        'context': ctxt}
        # No filtering, because not an admin.
        self.assertEqual(cells, self._filter_cells(cells, filter_props))

    def test_target_cell_specified_not_me(self):
        info = {}

        def _fake_build_instances(ctxt, cell, sched_kwargs):
            info['ctxt'] = ctxt
            info['cell'] = cell
            info['sched_kwargs'] = sched_kwargs

        self.stubs.Set(self.msg_runner, 'build_instances',
                       _fake_build_instances)
        cells = [1, 2, 3]
        target_cell = 'fake!cell!path'
        current_cell = 'not!the!same'
        filter_props = {'scheduler_hints': {'target_cell': target_cell},
                        'routing_path': current_cell,
                        'scheduler': self.scheduler,
                        'context': self.context,
                        'host_sched_kwargs': 'meow'}
        # None is returned to bypass further scheduling.
        self.assertIsNone(self._filter_cells(cells, filter_props))
        # The filter should have re-scheduled to the child cell itself.
        expected_info = {'ctxt': self.context,
                         'cell': 'fake!cell!path',
                         'sched_kwargs': 'meow'}
        self.assertEqual(expected_info, info)


class TestDifferentCellFilter(_FilterTestClass):
    filter_cls_name = 'nova.cells.filters.different_cell.DifferentCellFilter'

    def setUp(self):
        super(TestDifferentCellFilter, self).setUp()
        # We only load one filter so we know the first one is the one we want
        self.policy.set_rules({'cells_scheduler_filter:DifferentCellFilter':
                               ''})
        self.cells = [state.CellState('1'),
                      state.CellState('2'),
                      state.CellState('3')]

    def test_missing_scheduler_hints(self):
        filter_props = {'context': self.context}
        # No filtering
        self.assertEqual(self.cells,
                         self._filter_cells(self.cells, filter_props))

    def test_no_different_cell_hint(self):
        filter_props = {'scheduler_hints': {},
                        'context': self.context}
        # No filtering
        self.assertEqual(self.cells,
                         self._filter_cells(self.cells, filter_props))

    def test_different_cell(self):
        filter_props = {'scheduler_hints': {'different_cell': 'fake!2'},
                        'routing_path': 'fake',
                        'context': self.context}
        filtered_cells = self._filter_cells(self.cells, filter_props)
        self.assertEqual(2, len(filtered_cells))
        self.assertNotIn(self.cells[1], filtered_cells)

    def test_different_multiple_cells(self):
        filter_props = {'scheduler_hints': {'different_cell':
                                            ['fake!1', 'fake!2']},
                        'routing_path': 'fake',
                        'context': self.context}
        filtered_cells = self._filter_cells(self.cells, filter_props)
        self.assertEqual(1, len(filtered_cells))
        self.assertNotIn(self.cells[0], filtered_cells)
        self.assertNotIn(self.cells[1], filtered_cells)

    def test_different_cell_specified_me_not_authorized(self):
        self.policy.set_rules({'cells_scheduler_filter:DifferentCellFilter':
                               '!'})
        filter_props = {'scheduler_hints': {'different_cell': 'fake!2'},
                        'routing_path': 'fake',
                        'context': self.context}
        # No filtering, because not an admin.
        self.assertEqual(self.cells,
                         self._filter_cells(self.cells, filter_props))
nova-13.0.0/nova/tests/unit/cells/fakes.py0000664000567000056710000001630312701407773021533 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For Cells tests.
"""

from nova.cells import driver
from nova.cells import manager as cells_manager
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
import nova.conf
import nova.db
from nova.db import base
from nova import exception
from nova import objects

CONF = nova.conf.CONF

# Fake Cell Hierarchy
FAKE_TOP_LEVEL_CELL_NAME = 'api-cell'
FAKE_CELL_LAYOUT = [{'child-cell1': []},
                    {'child-cell2': [{'grandchild-cell1': []}]},
                    {'child-cell3': [{'grandchild-cell2': []},
                                     {'grandchild-cell3': []}]},
                    {'child-cell4': []}]

# build_cell_stub_infos() below will take the above layout and create
# a fake view of the DB from the perspective of each of the cells.
# For each cell, a CellStubInfo will be created with this info.
CELL_NAME_TO_STUB_INFO = {}


class FakeDBApi(object):
    """Cells uses a different DB in each cell.  This means in order to
    stub out things differently per cell, I need to create a fake DBApi
    object that is instantiated by each fake cell.
""" def __init__(self, cell_db_entries): self.cell_db_entries = cell_db_entries def __getattr__(self, key): return getattr(nova.db, key) def cell_get_all(self, ctxt): return self.cell_db_entries def instance_get_all_by_filters(self, ctxt, *args, **kwargs): return [] def instance_get_by_uuid(self, ctxt, instance_uuid): raise exception.InstanceNotFound(instance_id=instance_uuid) class FakeCellsDriver(driver.BaseCellsDriver): pass class FakeCellState(cells_state.CellState): def send_message(self, message): message_runner = get_message_runner(self.name) orig_ctxt = message.ctxt json_message = message.to_json() message = message_runner.message_from_json(json_message) # Restore this so we can use mox and verify same context message.ctxt = orig_ctxt message.process() class FakeCellStateManager(cells_state.CellStateManagerDB): def __init__(self, *args, **kwargs): super(FakeCellStateManager, self).__init__(*args, cell_state_cls=FakeCellState, **kwargs) class FakeCellsManager(cells_manager.CellsManager): def __init__(self, *args, **kwargs): super(FakeCellsManager, self).__init__(*args, cell_state_manager=FakeCellStateManager, **kwargs) class CellStubInfo(object): def __init__(self, test_case, cell_name, db_entries): self.test_case = test_case self.cell_name = cell_name self.db_entries = db_entries def fake_base_init(_self, *args, **kwargs): _self.db = FakeDBApi(db_entries) @staticmethod def _fake_compute_node_get_all(context): return [] @staticmethod def _fake_service_get_by_binary(context, binary): return [] test_case.stubs.Set(base.Base, '__init__', fake_base_init) test_case.stubs.Set(objects.ComputeNodeList, 'get_all', _fake_compute_node_get_all) test_case.stubs.Set(objects.ServiceList, 'get_by_binary', _fake_service_get_by_binary) self.cells_manager = FakeCellsManager() # Fix the cell name, as it normally uses CONF.cells.name msg_runner = self.cells_manager.msg_runner msg_runner.our_name = self.cell_name self.cells_manager.state_manager.my_cell_state.name = 
self.cell_name def _build_cell_transport_url(cur_db_id): username = 'username%s' % cur_db_id password = 'password%s' % cur_db_id hostname = 'rpc_host%s' % cur_db_id port = 3090 + cur_db_id virtual_host = 'rpc_vhost%s' % cur_db_id return 'rabbit://%s:%s@%s:%s/%s' % (username, password, hostname, port, virtual_host) def _build_cell_stub_info(test_case, our_name, parent_path, children): cell_db_entries = [] cur_db_id = 1 sep_char = cells_utils.PATH_CELL_SEP if parent_path: cell_db_entries.append( dict(id=cur_db_id, name=parent_path.split(sep_char)[-1], is_parent=True, transport_url=_build_cell_transport_url(cur_db_id))) cur_db_id += 1 our_path = parent_path + sep_char + our_name else: our_path = our_name for child in children: for child_name, grandchildren in child.items(): _build_cell_stub_info(test_case, child_name, our_path, grandchildren) cell_entry = dict(id=cur_db_id, name=child_name, transport_url=_build_cell_transport_url( cur_db_id), is_parent=False) cell_db_entries.append(cell_entry) cur_db_id += 1 stub_info = CellStubInfo(test_case, our_name, cell_db_entries) CELL_NAME_TO_STUB_INFO[our_name] = stub_info def _build_cell_stub_infos(test_case): _build_cell_stub_info(test_case, FAKE_TOP_LEVEL_CELL_NAME, '', FAKE_CELL_LAYOUT) def init(test_case): global CELL_NAME_TO_STUB_INFO test_case.flags(driver='nova.tests.unit.cells.fakes.FakeCellsDriver', group='cells') CELL_NAME_TO_STUB_INFO = {} _build_cell_stub_infos(test_case) def _get_cell_stub_info(cell_name): return CELL_NAME_TO_STUB_INFO[cell_name] def get_state_manager(cell_name): return _get_cell_stub_info(cell_name).cells_manager.state_manager def get_cell_state(cur_cell_name, tgt_cell_name): state_manager = get_state_manager(cur_cell_name) cell = state_manager.child_cells.get(tgt_cell_name) if cell is None: cell = state_manager.parent_cells.get(tgt_cell_name) return cell def get_cells_manager(cell_name): return _get_cell_stub_info(cell_name).cells_manager def get_message_runner(cell_name): return 
_get_cell_stub_info(cell_name).cells_manager.msg_runner def stub_tgt_method(test_case, cell_name, method_name, method): msg_runner = get_message_runner(cell_name) tgt_msg_methods = msg_runner.methods_by_type['targeted'] setattr(tgt_msg_methods, method_name, method) def stub_bcast_method(test_case, cell_name, method_name, method): msg_runner = get_message_runner(cell_name) tgt_msg_methods = msg_runner.methods_by_type['broadcast'] setattr(tgt_msg_methods, method_name, method) def stub_bcast_methods(test_case, method_name, method): for cell_name in CELL_NAME_TO_STUB_INFO.keys(): stub_bcast_method(test_case, cell_name, method_name, method) nova-13.0.0/nova/tests/unit/cells/test_cells_rpcapi.py0000664000567000056710000010574212701407773024157 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For Cells RPCAPI """ import six from nova.cells import rpcapi as cells_rpcapi import nova.conf from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_instance CONF = nova.conf.CONF class CellsAPITestCase(test.NoDBTestCase): """Test case for cells.api interfaces.""" def setUp(self): super(CellsAPITestCase, self).setUp() self.fake_topic = 'fake_topic' self.fake_context = 'fake_context' self.flags(topic=self.fake_topic, enable=True, group='cells') self.cells_rpcapi = cells_rpcapi.CellsAPI() def _stub_rpc_method(self, rpc_method, result): call_info = {} orig_prepare = self.cells_rpcapi.client.prepare def fake_rpc_prepare(**kwargs): if 'version' in kwargs: call_info['version'] = kwargs.pop('version') return self.cells_rpcapi.client def fake_csv(version): return orig_prepare(version).can_send_version() def fake_rpc_method(ctxt, method, **kwargs): call_info['context'] = ctxt call_info['method'] = method call_info['args'] = kwargs return result self.stubs.Set(self.cells_rpcapi.client, 'prepare', fake_rpc_prepare) self.stubs.Set(self.cells_rpcapi.client, 'can_send_version', fake_csv) self.stubs.Set(self.cells_rpcapi.client, rpc_method, fake_rpc_method) return call_info def _check_result(self, call_info, method, args, version=None): self.assertEqual(self.fake_topic, self.cells_rpcapi.client.target.topic) self.assertEqual(self.fake_context, call_info['context']) self.assertEqual(method, call_info['method']) self.assertEqual(args, call_info['args']) if version is not None: self.assertIn('version', call_info) self.assertIsInstance(call_info['version'], six.string_types, msg="Message version %s is not a string" % call_info['version']) self.assertEqual(version, call_info['version']) else: self.assertNotIn('version', call_info) def test_cast_compute_api_method(self): fake_cell_name = 'fake_cell_name' fake_method = 'fake_method' fake_method_args = (1, 2) fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20} 
expected_method_info = {'method': fake_method, 'method_args': fake_method_args, 'method_kwargs': fake_method_kwargs} expected_args = {'method_info': expected_method_info, 'cell_name': fake_cell_name, 'call': False} call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.cast_compute_api_method(self.fake_context, fake_cell_name, fake_method, *fake_method_args, **fake_method_kwargs) self._check_result(call_info, 'run_compute_api_method', expected_args) def test_call_compute_api_method(self): fake_cell_name = 'fake_cell_name' fake_method = 'fake_method' fake_method_args = (1, 2) fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20} fake_response = 'fake_response' expected_method_info = {'method': fake_method, 'method_args': fake_method_args, 'method_kwargs': fake_method_kwargs} expected_args = {'method_info': expected_method_info, 'cell_name': fake_cell_name, 'call': True} call_info = self._stub_rpc_method('call', fake_response) result = self.cells_rpcapi.call_compute_api_method(self.fake_context, fake_cell_name, fake_method, *fake_method_args, **fake_method_kwargs) self._check_result(call_info, 'run_compute_api_method', expected_args) self.assertEqual(fake_response, result) def test_build_instances(self): call_info = self._stub_rpc_method('cast', None) instances = [objects.Instance(id=1), objects.Instance(id=2)] self.cells_rpcapi.build_instances( self.fake_context, instances=instances, image={'fake': 'image'}, arg1=1, arg2=2, arg3=3) expected_args = {'build_inst_kwargs': {'instances': instances, 'image': {'fake': 'image'}, 'arg1': 1, 'arg2': 2, 'arg3': 3}} self._check_result(call_info, 'build_instances', expected_args, version='1.34') def test_get_capacities(self): capacity_info = {"capacity": "info"} call_info = self._stub_rpc_method('call', result=capacity_info) result = self.cells_rpcapi.get_capacities(self.fake_context, cell_name="name") self._check_result(call_info, 'get_capacities', {'cell_name': 'name'}, version='1.9') self.assertEqual(capacity_info, 
result) def test_instance_update_at_top(self): fake_info_cache = objects.InstanceInfoCache(instance_uuid='fake-uuid') fake_sys_metadata = {'key1': 'value1', 'key2': 'value2'} fake_attrs = {'id': 2, 'cell_name': 'fake', 'metadata': {'fake': 'fake'}, 'info_cache': fake_info_cache, 'system_metadata': fake_sys_metadata} fake_instance = objects.Instance(**fake_attrs) call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.instance_update_at_top( self.fake_context, fake_instance) expected_args = {'instance': fake_instance} self._check_result(call_info, 'instance_update_at_top', expected_args, version='1.35') def test_instance_destroy_at_top(self): fake_instance = objects.Instance(uuid='fake-uuid') call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.instance_destroy_at_top( self.fake_context, fake_instance) expected_args = {'instance': fake_instance} self._check_result(call_info, 'instance_destroy_at_top', expected_args, version='1.35') def test_instance_delete_everywhere(self): instance = fake_instance.fake_instance_obj(self.fake_context) call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.instance_delete_everywhere( self.fake_context, instance, 'fake-type') expected_args = {'instance': instance, 'delete_type': 'fake-type'} self._check_result(call_info, 'instance_delete_everywhere', expected_args, version='1.27') def test_instance_fault_create_at_top(self): fake_instance_fault = {'id': 2, 'other': 'meow'} call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.instance_fault_create_at_top( self.fake_context, fake_instance_fault) expected_args = {'instance_fault': fake_instance_fault} self._check_result(call_info, 'instance_fault_create_at_top', expected_args) def test_bw_usage_update_at_top(self): update_args = ('fake_uuid', 'fake_mac', 'fake_start_period', 'fake_bw_in', 'fake_bw_out', 'fake_ctr_in', 'fake_ctr_out') update_kwargs = {'last_refreshed': 'fake_refreshed'} call_info = self._stub_rpc_method('cast', None) 
self.cells_rpcapi.bw_usage_update_at_top( self.fake_context, *update_args, **update_kwargs) bw_update_info = {'uuid': 'fake_uuid', 'mac': 'fake_mac', 'start_period': 'fake_start_period', 'bw_in': 'fake_bw_in', 'bw_out': 'fake_bw_out', 'last_ctr_in': 'fake_ctr_in', 'last_ctr_out': 'fake_ctr_out', 'last_refreshed': 'fake_refreshed'} expected_args = {'bw_update_info': bw_update_info} self._check_result(call_info, 'bw_usage_update_at_top', expected_args) def test_get_cell_info_for_neighbors(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.get_cell_info_for_neighbors( self.fake_context) self._check_result(call_info, 'get_cell_info_for_neighbors', {}, version='1.1') self.assertEqual('fake_response', result) def test_sync_instances(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.sync_instances(self.fake_context, project_id='fake_project', updated_since='fake_time', deleted=True) expected_args = {'project_id': 'fake_project', 'updated_since': 'fake_time', 'deleted': True} self._check_result(call_info, 'sync_instances', expected_args, version='1.1') def test_service_get_all(self): call_info = self._stub_rpc_method('call', 'fake_response') fake_filters = {'key1': 'val1', 'key2': 'val2'} result = self.cells_rpcapi.service_get_all(self.fake_context, filters=fake_filters) expected_args = {'filters': fake_filters} self._check_result(call_info, 'service_get_all', expected_args, version='1.2') self.assertEqual('fake_response', result) def test_service_get_by_compute_host(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.service_get_by_compute_host( self.fake_context, host_name='fake-host-name') expected_args = {'host_name': 'fake-host-name'} self._check_result(call_info, 'service_get_by_compute_host', expected_args, version='1.2') self.assertEqual('fake_response', result) def test_get_host_uptime(self): call_info = self._stub_rpc_method('call', 'fake_response') result = 
self.cells_rpcapi.get_host_uptime( self.fake_context, host_name='fake-host-name') expected_args = {'host_name': 'fake-host-name'} self._check_result(call_info, 'get_host_uptime', expected_args, version='1.17') self.assertEqual('fake_response', result) def test_service_update(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.service_update( self.fake_context, host_name='fake-host-name', binary='nova-api', params_to_update={'disabled': True}) expected_args = { 'host_name': 'fake-host-name', 'binary': 'nova-api', 'params_to_update': {'disabled': True}} self._check_result(call_info, 'service_update', expected_args, version='1.7') self.assertEqual('fake_response', result) def test_service_delete(self): call_info = self._stub_rpc_method('call', None) cell_service_id = 'cell@id' result = self.cells_rpcapi.service_delete( self.fake_context, cell_service_id=cell_service_id) expected_args = {'cell_service_id': cell_service_id} self._check_result(call_info, 'service_delete', expected_args, version='1.26') self.assertIsNone(result) def test_proxy_rpc_to_manager(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.proxy_rpc_to_manager( self.fake_context, rpc_message='fake-msg', topic='fake-topic', call=True, timeout=-1) expected_args = {'rpc_message': 'fake-msg', 'topic': 'fake-topic', 'call': True, 'timeout': -1} self._check_result(call_info, 'proxy_rpc_to_manager', expected_args, version='1.2') self.assertEqual('fake_response', result) def test_task_log_get_all(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.task_log_get_all(self.fake_context, task_name='fake_name', period_beginning='fake_begin', period_ending='fake_end', host='fake_host', state='fake_state') expected_args = {'task_name': 'fake_name', 'period_beginning': 'fake_begin', 'period_ending': 'fake_end', 'host': 'fake_host', 'state': 'fake_state'} self._check_result(call_info, 
'task_log_get_all', expected_args, version='1.3') self.assertEqual('fake_response', result) def test_compute_node_get_all(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.compute_node_get_all(self.fake_context, hypervisor_match='fake-match') expected_args = {'hypervisor_match': 'fake-match'} self._check_result(call_info, 'compute_node_get_all', expected_args, version='1.4') self.assertEqual('fake_response', result) def test_compute_node_stats(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.compute_node_stats(self.fake_context) expected_args = {} self._check_result(call_info, 'compute_node_stats', expected_args, version='1.4') self.assertEqual('fake_response', result) def test_compute_node_get(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.compute_node_get(self.fake_context, 'fake_compute_id') expected_args = {'compute_id': 'fake_compute_id'} self._check_result(call_info, 'compute_node_get', expected_args, version='1.4') self.assertEqual('fake_response', result) def test_actions_get(self): fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'} call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.actions_get(self.fake_context, fake_instance) expected_args = {'cell_name': 'region!child', 'instance_uuid': fake_instance['uuid']} self._check_result(call_info, 'actions_get', expected_args, version='1.5') self.assertEqual('fake_response', result) def test_actions_get_no_cell(self): fake_instance = {'uuid': 'fake-uuid', 'cell_name': None} self.assertRaises(exception.InstanceUnknownCell, self.cells_rpcapi.actions_get, self.fake_context, fake_instance) def test_action_get_by_request_id(self): fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'} call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.action_get_by_request_id(self.fake_context, 
fake_instance, 'req-fake') expected_args = {'cell_name': 'region!child', 'instance_uuid': fake_instance['uuid'], 'request_id': 'req-fake'} self._check_result(call_info, 'action_get_by_request_id', expected_args, version='1.5') self.assertEqual('fake_response', result) def test_action_get_by_request_id_no_cell(self): fake_instance = {'uuid': 'fake-uuid', 'cell_name': None} self.assertRaises(exception.InstanceUnknownCell, self.cells_rpcapi.action_get_by_request_id, self.fake_context, fake_instance, 'req-fake') def test_action_events_get(self): fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'} call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.action_events_get(self.fake_context, fake_instance, 'fake-action') expected_args = {'cell_name': 'region!child', 'action_id': 'fake-action'} self._check_result(call_info, 'action_events_get', expected_args, version='1.5') self.assertEqual('fake_response', result) def test_action_events_get_no_cell(self): fake_instance = {'uuid': 'fake-uuid', 'cell_name': None} self.assertRaises(exception.InstanceUnknownCell, self.cells_rpcapi.action_events_get, self.fake_context, fake_instance, 'fake-action') def test_consoleauth_delete_tokens(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.consoleauth_delete_tokens(self.fake_context, 'fake-uuid') expected_args = {'instance_uuid': 'fake-uuid'} self._check_result(call_info, 'consoleauth_delete_tokens', expected_args, version='1.6') def test_validate_console_port(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.validate_console_port(self.fake_context, 'fake-uuid', 'fake-port', 'fake-type') expected_args = {'instance_uuid': 'fake-uuid', 'console_port': 'fake-port', 'console_type': 'fake-type'} self._check_result(call_info, 'validate_console_port', expected_args, version='1.6') self.assertEqual('fake_response', result) def test_bdm_update_or_create_at_top(self): fake_bdm = {'id': 
2, 'other': 'meow'} call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.bdm_update_or_create_at_top( self.fake_context, fake_bdm, create='fake-create') expected_args = {'bdm': fake_bdm, 'create': 'fake-create'} self._check_result(call_info, 'bdm_update_or_create_at_top', expected_args, version='1.28') def test_bdm_destroy_at_top(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.bdm_destroy_at_top(self.fake_context, 'fake-uuid', device_name='fake-device', volume_id='fake-vol') expected_args = {'instance_uuid': 'fake-uuid', 'device_name': 'fake-device', 'volume_id': 'fake-vol'} self._check_result(call_info, 'bdm_destroy_at_top', expected_args, version='1.10') def test_get_migrations(self): call_info = self._stub_rpc_method('call', None) filters = {'cell_name': 'ChildCell', 'status': 'confirmed'} self.cells_rpcapi.get_migrations(self.fake_context, filters) expected_args = {'filters': filters} self._check_result(call_info, 'get_migrations', expected_args, version="1.11") def test_instance_update_from_api(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.instance_update_from_api( self.fake_context, 'fake-instance', expected_vm_state='exp_vm', expected_task_state='exp_task', admin_state_reset='admin_reset') expected_args = {'instance': 'fake-instance', 'expected_vm_state': 'exp_vm', 'expected_task_state': 'exp_task', 'admin_state_reset': 'admin_reset'} self._check_result(call_info, 'instance_update_from_api', expected_args, version='1.16') def test_start_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.start_instance( self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'start_instance', expected_args, version='1.12') def test_stop_instance_cast(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.stop_instance( self.fake_context, 'fake-instance', do_cast=True, clean_shutdown=True) expected_args = 
{'instance': 'fake-instance', 'do_cast': True, 'clean_shutdown': True} self._check_result(call_info, 'stop_instance', expected_args, version='1.31') def test_stop_instance_call(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.stop_instance( self.fake_context, 'fake-instance', do_cast=False, clean_shutdown=True) expected_args = {'instance': 'fake-instance', 'do_cast': False, 'clean_shutdown': True} self._check_result(call_info, 'stop_instance', expected_args, version='1.31') self.assertEqual('fake_response', result) def test_cell_create(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.cell_create(self.fake_context, 'values') expected_args = {'values': 'values'} self._check_result(call_info, 'cell_create', expected_args, version='1.13') self.assertEqual('fake_response', result) def test_cell_update(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.cell_update(self.fake_context, 'cell_name', 'values') expected_args = {'cell_name': 'cell_name', 'values': 'values'} self._check_result(call_info, 'cell_update', expected_args, version='1.13') self.assertEqual('fake_response', result) def test_cell_delete(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.cell_delete(self.fake_context, 'cell_name') expected_args = {'cell_name': 'cell_name'} self._check_result(call_info, 'cell_delete', expected_args, version='1.13') self.assertEqual('fake_response', result) def test_cell_get(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.cell_get(self.fake_context, 'cell_name') expected_args = {'cell_name': 'cell_name'} self._check_result(call_info, 'cell_get', expected_args, version='1.13') self.assertEqual('fake_response', result) def test_reboot_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.reboot_instance( self.fake_context, 
'fake-instance', block_device_info='ignored', reboot_type='HARD') expected_args = {'instance': 'fake-instance', 'reboot_type': 'HARD'} self._check_result(call_info, 'reboot_instance', expected_args, version='1.14') def test_pause_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.pause_instance( self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'pause_instance', expected_args, version='1.19') def test_unpause_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.unpause_instance( self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'unpause_instance', expected_args, version='1.19') def test_suspend_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.suspend_instance( self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'suspend_instance', expected_args, version='1.15') def test_resume_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.resume_instance( self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'resume_instance', expected_args, version='1.15') def test_terminate_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.terminate_instance(self.fake_context, 'fake-instance', [], delete_type='delete') expected_args = {'instance': 'fake-instance', 'delete_type': 'delete'} self._check_result(call_info, 'terminate_instance', expected_args, version='1.36') def test_soft_delete_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.soft_delete_instance(self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'soft_delete_instance', expected_args, version='1.18') def test_resize_instance(self): call_info = 
self._stub_rpc_method('cast', None) self.cells_rpcapi.resize_instance(self.fake_context, 'fake-instance', dict(cow='moo'), 'fake-hint', 'fake-flavor', 'fake-reservations', clean_shutdown=True) expected_args = {'instance': 'fake-instance', 'flavor': 'fake-flavor', 'extra_instance_updates': dict(cow='moo'), 'clean_shutdown': True} self._check_result(call_info, 'resize_instance', expected_args, version='1.33') def test_live_migrate_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.live_migrate_instance(self.fake_context, 'fake-instance', 'fake-host', 'fake-block', 'fake-commit') expected_args = {'instance': 'fake-instance', 'block_migration': 'fake-block', 'disk_over_commit': 'fake-commit', 'host_name': 'fake-host'} self._check_result(call_info, 'live_migrate_instance', expected_args, version='1.20') def test_live_migrate_instance_not_passing_request_spec(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.live_migrate_instance(self.fake_context, 'fake-instance', 'fake-host', 'fake-block', 'fake-commit', 'fake-spec') expected_args = {'instance': 'fake-instance', 'block_migration': 'fake-block', 'disk_over_commit': 'fake-commit', 'host_name': 'fake-host'} self._check_result(call_info, 'live_migrate_instance', expected_args, version='1.20') def test_rebuild_instance_not_passing_request_spec(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.rebuild_instance(self.fake_context, 'fake-instance', 'fake-pass', 'fake-files', 'fake-image_ref', 'fake-orig_image_ref', 'fake-orig_sys_metadata', 'fake-bdms', recreate=False, on_shared_storage=False, host=None, preserve_ephemeral=False, request_spec='fake-spec', kwargs=None) expected_args = {'instance': 'fake-instance', 'image_href': 'fake-image_ref', 'admin_password': 'fake-pass', 'files_to_inject': 'fake-files', 'preserve_ephemeral': False, 'kwargs': None} self._check_result(call_info, 'rebuild_instance', expected_args, version='1.25') def 
test_revert_resize(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.revert_resize(self.fake_context, 'fake-instance', 'fake-migration', 'fake-dest', 'resvs') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'revert_resize', expected_args, version='1.21') def test_confirm_resize(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.confirm_resize(self.fake_context, 'fake-instance', 'fake-migration', 'fake-source', 'resvs') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'confirm_resize', expected_args, version='1.21') def test_reset_network(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.reset_network(self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'reset_network', expected_args, version='1.22') def test_inject_network_info(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.inject_network_info(self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'inject_network_info', expected_args, version='1.23') def test_snapshot_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.snapshot_instance(self.fake_context, 'fake-instance', 'image-id') expected_args = {'instance': 'fake-instance', 'image_id': 'image-id'} self._check_result(call_info, 'snapshot_instance', expected_args, version='1.24') def test_backup_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.backup_instance(self.fake_context, 'fake-instance', 'image-id', 'backup-type', 'rotation') expected_args = {'instance': 'fake-instance', 'image_id': 'image-id', 'backup_type': 'backup-type', 'rotation': 'rotation'} self._check_result(call_info, 'backup_instance', expected_args, version='1.24') def test_set_admin_password(self): call_info = self._stub_rpc_method('cast', None) 
self.cells_rpcapi.set_admin_password(self.fake_context, 'fake-instance', 'fake-password') expected_args = {'instance': 'fake-instance', 'new_pass': 'fake-password'} self._check_result(call_info, 'set_admin_password', expected_args, version='1.29') def test_get_keypair_at_top(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.get_keypair_at_top(self.fake_context, 'fake_user_id', 'fake_name') expected_args = {'user_id': 'fake_user_id', 'name': 'fake_name'} self._check_result(call_info, 'get_keypair_at_top', expected_args, version='1.37') self.assertEqual(result, 'fake_response') def test_get_keypair_at_top_with_not_found(self): call_info = self._stub_rpc_method('call', None) self.assertRaises(exception.KeypairNotFound, self.cells_rpcapi.get_keypair_at_top, self.fake_context, 'fake_user_id', 'fake_name') expected_args = {'user_id': 'fake_user_id', 'name': 'fake_name'} self._check_result(call_info, 'get_keypair_at_top', expected_args, version='1.37') nova-13.0.0/nova/tests/unit/fake_block_device.py0000664000567000056710000000421012701407773022741 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid

from oslo_utils import timeutils

from nova import block_device
from nova import objects


def fake_bdm_object(context, bdm_dict):
    """Turn a plain bdm dict into a BlockDeviceMapping object.

    :param context: nova request context
    :param bdm_dict: dict of block device mapping info
    :returns: nova.objects.block_device.BlockDeviceMapping
    """
    # Hand FakeDbBlockDeviceDict its own copy, since it pops keys out of
    # the dict it is given.
    db_dict = FakeDbBlockDeviceDict(bdm_dict.copy())
    return objects.BlockDeviceMapping._from_db_object(
        context, objects.BlockDeviceMapping(), db_dict)


class FakeDbBlockDeviceDict(block_device.BlockDeviceDict):
    """BlockDeviceDict that fills in db-only fields - useful for mocking
    database calls.
    """

    def __init__(self, bdm_dict=None, anon=False, **kwargs):
        bdm_dict = bdm_dict or {}
        # Pull the db-managed identifiers out before delegating, so the
        # parent class only sees genuine block-device fields.
        row_id = bdm_dict.pop('id', 1)
        inst_uuid = bdm_dict.pop('instance_uuid', str(uuid.uuid4()))
        super(FakeDbBlockDeviceDict, self).__init__(bdm_dict=bdm_dict,
                                                    **kwargs)
        db_fields = {
            'instance_uuid': inst_uuid,
            'deleted_at': None,
            'deleted': 0,
        }
        if not anon:
            # "anon" rows omit the identifying/timestamp columns.
            db_fields['id'] = row_id
            db_fields['created_at'] = timeutils.utcnow()
            db_fields['updated_at'] = timeutils.utcnow()
        self.update(db_fields)


def AnonFakeDbBlockDeviceDict(bdm_dict, **kwargs):
    """FakeDbBlockDeviceDict without id/created_at/updated_at columns."""
    return FakeDbBlockDeviceDict(bdm_dict=bdm_dict, anon=True, **kwargs)


# === archive boundary: nova-13.0.0/nova/tests/unit/scheduler/test_scheduler_options.py ===
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For PickledScheduler. """ import datetime from oslo_serialization import jsonutils import six from nova.scheduler import scheduler_options from nova import test class FakeSchedulerOptions(scheduler_options.SchedulerOptions): def __init__(self, last_checked, now, file_old, file_now, data, filedata): super(FakeSchedulerOptions, self).__init__() # Change internals ... self.last_modified = file_old self.last_checked = last_checked self.data = data # For overrides ... self._time_now = now self._file_now = file_now self._file_data = filedata self.file_was_loaded = False def _get_file_timestamp(self, filename): return self._file_now def _get_file_handle(self, filename): self.file_was_loaded = True if six.PY3: return six.BytesIO(self._file_data.encode('utf-8')) return six.StringIO(self._file_data) def _get_time_now(self): return self._time_now class SchedulerOptionsTestCase(test.NoDBTestCase): def test_get_configuration_first_time_no_flag(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual({}, fake.get_configuration()) self.assertFalse(fake.file_was_loaded) def test_get_configuration_first_time_empty_file(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) jdata = "" fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) 
self.assertEqual({}, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) def test_get_configuration_first_time_happy_day(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) def test_get_configuration_second_time_no_change(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, data, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertFalse(fake.file_was_loaded) def test_get_configuration_second_time_too_fast(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2011, 1, 1, 1, 1, 2) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) old_data = dict(a=1, b=2, c=3) data = dict(a=11, b=12, c=13) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, old_data, jdata) self.assertEqual(old_data, fake.get_configuration('foo.json')) self.assertFalse(fake.file_was_loaded) def test_get_configuration_second_time_change(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) old_data = dict(a=1, b=2, c=3) data = dict(a=11, b=12, c=13) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, old_data, jdata) self.assertEqual(data, 
fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) nova-13.0.0/nova/tests/unit/scheduler/test_filters.py0000664000567000056710000002544612701407773024045 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler Host Filters. """ import inspect import mock from six.moves import range from nova import filters from nova import loadables from nova import objects from nova import test class Filter1(filters.BaseFilter): """Test Filter class #1.""" pass class Filter2(filters.BaseFilter): """Test Filter class #2.""" pass class FiltersTestCase(test.NoDBTestCase): def setUp(self): super(FiltersTestCase, self).setUp() with mock.patch.object(loadables.BaseLoader, "__init__") as mock_load: mock_load.return_value = None self.filter_handler = filters.BaseFilterHandler(filters.BaseFilter) @mock.patch('nova.filters.BaseFilter._filter_one') def test_filter_all(self, mock_filter_one): mock_filter_one.side_effect = [True, False, True] filter_obj_list = ['obj1', 'obj2', 'obj3'] spec_obj = objects.RequestSpec() base_filter = filters.BaseFilter() result = base_filter.filter_all(filter_obj_list, spec_obj) self.assertTrue(inspect.isgenerator(result)) self.assertEqual(['obj1', 'obj3'], list(result)) @mock.patch('nova.filters.BaseFilter._filter_one') def test_filter_all_recursive_yields(self, mock_filter_one): # Test filter_all() allows generators from previous filter_all()s. 
# filter_all() yields results. We want to make sure that we can # call filter_all() with generators returned from previous calls # to filter_all(). filter_obj_list = ['obj1', 'obj2', 'obj3'] spec_obj = objects.RequestSpec() base_filter = filters.BaseFilter() # The order that _filter_one is going to get called gets # confusing because we will be recursively yielding things.. # We are going to simulate the first call to filter_all() # returning False for 'obj2'. So, 'obj1' will get yielded # 'total_iterations' number of times before the first filter_all() # call gets to processing 'obj2'. We then return 'False' for it. # After that, 'obj3' gets yielded 'total_iterations' number of # times. mock_results = [] total_iterations = 200 for x in range(total_iterations): mock_results.append(True) mock_results.append(False) for x in range(total_iterations): mock_results.append(True) mock_filter_one.side_effect = mock_results objs = iter(filter_obj_list) for x in range(total_iterations): # Pass in generators returned from previous calls. 
objs = base_filter.filter_all(objs, spec_obj) self.assertTrue(inspect.isgenerator(objs)) self.assertEqual(['obj1', 'obj3'], list(objs)) def test_get_filtered_objects(self): filter_objs_initial = ['initial', 'filter1', 'objects1'] filter_objs_second = ['second', 'filter2', 'objects2'] filter_objs_last = ['last', 'filter3', 'objects3'] spec_obj = objects.RequestSpec() def _fake_base_loader_init(*args, **kwargs): pass self.stub_out('nova.loadables.BaseLoader.__init__', _fake_base_loader_init) filt1_mock = mock.Mock(Filter1) filt1_mock.run_filter_for_index.return_value = True filt1_mock.filter_all.return_value = filter_objs_second filt2_mock = mock.Mock(Filter2) filt2_mock.run_filter_for_index.return_value = True filt2_mock.filter_all.return_value = filter_objs_last filter_handler = filters.BaseFilterHandler(filters.BaseFilter) filter_mocks = [filt1_mock, filt2_mock] result = filter_handler.get_filtered_objects(filter_mocks, filter_objs_initial, spec_obj) self.assertEqual(filter_objs_last, result) filt1_mock.filter_all.assert_called_once_with(filter_objs_initial, spec_obj) filt2_mock.filter_all.assert_called_once_with(filter_objs_second, spec_obj) def test_get_filtered_objects_for_index(self): """Test that we don't call a filter when its run_filter_for_index() method returns false """ filter_objs_initial = ['initial', 'filter1', 'objects1'] filter_objs_second = ['second', 'filter2', 'objects2'] spec_obj = objects.RequestSpec() def _fake_base_loader_init(*args, **kwargs): pass self.stub_out('nova.loadables.BaseLoader.__init__', _fake_base_loader_init) filt1_mock = mock.Mock(Filter1) filt1_mock.run_filter_for_index.return_value = True filt1_mock.filter_all.return_value = filter_objs_second filt2_mock = mock.Mock(Filter2) filt2_mock.run_filter_for_index.return_value = False filter_handler = filters.BaseFilterHandler(filters.BaseFilter) filter_mocks = [filt1_mock, filt2_mock] result = filter_handler.get_filtered_objects(filter_mocks, filter_objs_initial, spec_obj) 
self.assertEqual(filter_objs_second, result) filt1_mock.filter_all.assert_called_once_with(filter_objs_initial, spec_obj) filt2_mock.filter_all.assert_not_called() def test_get_filtered_objects_none_response(self): filter_objs_initial = ['initial', 'filter1', 'objects1'] spec_obj = objects.RequestSpec() def _fake_base_loader_init(*args, **kwargs): pass self.stub_out('nova.loadables.BaseLoader.__init__', _fake_base_loader_init) filt1_mock = mock.Mock(Filter1) filt1_mock.run_filter_for_index.return_value = True filt1_mock.filter_all.return_value = None filt2_mock = mock.Mock(Filter2) filter_handler = filters.BaseFilterHandler(filters.BaseFilter) filter_mocks = [filt1_mock, filt2_mock] result = filter_handler.get_filtered_objects(filter_mocks, filter_objs_initial, spec_obj) self.assertIsNone(result) filt1_mock.filter_all.assert_called_once_with(filter_objs_initial, spec_obj) filt2_mock.filter_all.assert_not_called() def test_get_filtered_objects_info_log_none_returned(self): LOG = filters.LOG class FilterA(filters.BaseFilter): def filter_all(self, list_objs, spec_obj): # return all but the first object return list_objs[1:] class FilterB(filters.BaseFilter): def filter_all(self, list_objs, spec_obj): # return an empty list return [] filter_a = FilterA() filter_b = FilterB() all_filters = [filter_a, filter_b] hosts = ["Host0", "Host1", "Host2"] fake_uuid = "uuid" spec_obj = objects.RequestSpec(instance_uuid=fake_uuid) with mock.patch.object(LOG, "info") as mock_log: result = self.filter_handler.get_filtered_objects( all_filters, hosts, spec_obj) self.assertFalse(result) # FilterA should leave Host1 and Host2; FilterB should leave None. 
exp_output = ("['FilterA: (start: 3, end: 2)', " "'FilterB: (start: 2, end: 0)']") cargs = mock_log.call_args[0][0] self.assertIn("with instance ID '%s'" % fake_uuid, cargs) self.assertIn(exp_output, cargs) def test_get_filtered_objects_debug_log_none_returned(self): LOG = filters.LOG class FilterA(filters.BaseFilter): def filter_all(self, list_objs, spec_obj): # return all but the first object return list_objs[1:] class FilterB(filters.BaseFilter): def filter_all(self, list_objs, spec_obj): # return an empty list return [] filter_a = FilterA() filter_b = FilterB() all_filters = [filter_a, filter_b] hosts = ["Host0", "Host1", "Host2"] fake_uuid = "uuid" spec_obj = objects.RequestSpec(instance_uuid=fake_uuid) with mock.patch.object(LOG, "debug") as mock_log: result = self.filter_handler.get_filtered_objects( all_filters, hosts, spec_obj) self.assertFalse(result) # FilterA should leave Host1 and Host2; FilterB should leave None. exp_output = ("[('FilterA', [('Host1', ''), ('Host2', '')]), " + "('FilterB', None)]") cargs = mock_log.call_args[0][0] self.assertIn("with instance ID '%s'" % fake_uuid, cargs) self.assertIn(exp_output, cargs) def test_get_filtered_objects_compatible_with_filt_props_dicts(self): LOG = filters.LOG class FilterA(filters.BaseFilter): def filter_all(self, list_objs, spec_obj): # return all but the first object return list_objs[1:] class FilterB(filters.BaseFilter): def filter_all(self, list_objs, spec_obj): # return an empty list return [] filter_a = FilterA() filter_b = FilterB() all_filters = [filter_a, filter_b] hosts = ["Host0", "Host1", "Host2"] fake_uuid = "uuid" filt_props = {"request_spec": {"instance_properties": { "uuid": fake_uuid}}} with mock.patch.object(LOG, "info") as mock_log: result = self.filter_handler.get_filtered_objects( all_filters, hosts, filt_props) self.assertFalse(result) # FilterA should leave Host1 and Host2; FilterB should leave None. 
exp_output = ("['FilterA: (start: 3, end: 2)', " "'FilterB: (start: 2, end: 0)']") cargs = mock_log.call_args[0][0] self.assertIn("with instance ID '%s'" % fake_uuid, cargs) self.assertIn(exp_output, cargs) nova-13.0.0/nova/tests/unit/scheduler/__init__.py0000664000567000056710000000000012701407773023050 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/scheduler/test_ironic_host_manager.py0000664000567000056710000005307312701410011026357 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation # Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For IronicHostManager """ import mock from nova import exception from nova import objects from nova.objects import base as obj_base from nova.scheduler import filters from nova.scheduler import host_manager from nova.scheduler import ironic_host_manager from nova import test from nova.tests.unit.scheduler import ironic_fakes class FakeFilterClass1(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): pass class FakeFilterClass2(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): pass class IronicHostManagerTestCase(test.NoDBTestCase): """Test case for IronicHostManager class.""" @mock.patch.object(host_manager.HostManager, '_init_instance_info') @mock.patch.object(host_manager.HostManager, '_init_aggregates') def setUp(self, mock_init_agg, mock_init_inst): super(IronicHostManagerTestCase, self).setUp() self.host_manager = ironic_host_manager.IronicHostManager() @mock.patch.object(host_manager.HostManager, '_init_instance_info') @mock.patch.object(host_manager.HostManager, '_init_aggregates') def test_manager_public_api_signatures(self, mock_init_aggs, mock_init_inst): self.assertPublicAPISignatures(host_manager.HostManager(), self.host_manager) def test_state_public_api_signatures(self): self.assertPublicAPISignatures( host_manager.HostState("dummy", "dummy"), ironic_host_manager.IronicNodeState("dummy", "dummy") ) @mock.patch('nova.objects.ServiceList.get_by_binary') @mock.patch('nova.objects.ComputeNodeList.get_all') @mock.patch('nova.objects.InstanceList.get_by_host') def test_get_all_host_states(self, mock_get_by_host, mock_get_all, mock_get_by_binary): mock_get_all.return_value = ironic_fakes.COMPUTE_NODES mock_get_by_binary.return_value = ironic_fakes.SERVICES context = 'fake_context' self.host_manager.get_all_host_states(context) self.assertEqual(0, mock_get_by_host.call_count) host_states_map = self.host_manager.host_state_map self.assertEqual(len(host_states_map), 4) for i in range(4): 
compute_node = ironic_fakes.COMPUTE_NODES[i] host = compute_node.host node = compute_node.hypervisor_hostname state_key = (host, node) self.assertEqual(host_states_map[state_key].service, obj_base.obj_to_primitive( ironic_fakes.get_service_by_host(host))) self.assertEqual(compute_node.stats, host_states_map[state_key].stats) self.assertEqual(compute_node.free_ram_mb, host_states_map[state_key].free_ram_mb) self.assertEqual(compute_node.free_disk_gb * 1024, host_states_map[state_key].free_disk_mb) class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase): """Test case for IronicHostManager class.""" @mock.patch.object(host_manager.HostManager, '_init_instance_info') @mock.patch.object(host_manager.HostManager, '_init_aggregates') def setUp(self, mock_init_agg, mock_init_inst): super(IronicHostManagerChangedNodesTestCase, self).setUp() self.host_manager = ironic_host_manager.IronicHostManager() ironic_driver = "nova.virt.ironic.driver.IronicDriver" supported_instances = [ objects.HVSpec.from_list(["i386", "baremetal", "baremetal"])] self.compute_node = objects.ComputeNode( id=1, local_gb=10, memory_mb=1024, vcpus=1, vcpus_used=0, local_gb_used=0, memory_mb_used=0, updated_at=None, cpu_info='baremetal cpu', stats=dict( ironic_driver=ironic_driver, cpu_arch='i386'), supported_hv_specs=supported_instances, free_disk_gb=10, free_ram_mb=1024, hypervisor_type='ironic', hypervisor_version=1, hypervisor_hostname='fake_host', cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0) @mock.patch.object(ironic_host_manager.IronicNodeState, '__init__') def test_create_ironic_node_state(self, init_mock): init_mock.return_value = None compute = {'hypervisor_type': 'ironic'} host_state = self.host_manager.host_state_cls('fake-host', 'fake-node', compute=compute) self.assertIs(ironic_host_manager.IronicNodeState, type(host_state)) @mock.patch.object(host_manager.HostState, '__init__') def test_create_non_ironic_host_state(self, init_mock): 
init_mock.return_value = None compute = {'cpu_info': 'other cpu'} host_state = self.host_manager.host_state_cls('fake-host', 'fake-node', compute=compute) self.assertIs(host_manager.HostState, type(host_state)) @mock.patch('nova.objects.ServiceList.get_by_binary') @mock.patch('nova.objects.ComputeNodeList.get_all') def test_get_all_host_states_after_delete_one(self, mock_get_all, mock_get_by_binary): running_nodes = [n for n in ironic_fakes.COMPUTE_NODES if n.get('hypervisor_hostname') != 'node4uuid'] mock_get_all.side_effect = [ ironic_fakes.COMPUTE_NODES, running_nodes] mock_get_by_binary.side_effect = [ ironic_fakes.SERVICES, ironic_fakes.SERVICES] context = 'fake_context' # first call: all nodes self.host_manager.get_all_host_states(context) host_states_map = self.host_manager.host_state_map self.assertEqual(4, len(host_states_map)) # second call: just running nodes self.host_manager.get_all_host_states(context) host_states_map = self.host_manager.host_state_map self.assertEqual(3, len(host_states_map)) @mock.patch('nova.objects.ServiceList.get_by_binary') @mock.patch('nova.objects.ComputeNodeList.get_all') def test_get_all_host_states_after_delete_all(self, mock_get_all, mock_get_by_binary): mock_get_all.side_effect = [ ironic_fakes.COMPUTE_NODES, []] mock_get_by_binary.side_effect = [ ironic_fakes.SERVICES, ironic_fakes.SERVICES] context = 'fake_context' # first call: all nodes self.host_manager.get_all_host_states(context) host_states_map = self.host_manager.host_state_map self.assertEqual(len(host_states_map), 4) # second call: no nodes self.host_manager.get_all_host_states(context) host_states_map = self.host_manager.host_state_map self.assertEqual(len(host_states_map), 0) def test_update_from_compute_node(self): host = ironic_host_manager.IronicNodeState("fakehost", "fakenode") host.update(compute=self.compute_node) self.assertEqual(1024, host.free_ram_mb) self.assertEqual(1024, host.total_usable_ram_mb) self.assertEqual(10240, host.free_disk_mb) 
self.assertEqual(1, host.vcpus_total) self.assertEqual(0, host.vcpus_used) self.assertEqual(self.compute_node['stats'], host.stats) self.assertEqual('ironic', host.hypervisor_type) self.assertEqual(1, host.hypervisor_version) self.assertEqual('fake_host', host.hypervisor_hostname) def test_consume_identical_instance_from_compute(self): host = ironic_host_manager.IronicNodeState("fakehost", "fakenode") host.update(compute=self.compute_node) self.assertIsNone(host.updated) spec_obj = objects.RequestSpec( flavor=objects.Flavor(root_gb=10, ephemeral_gb=0, memory_mb=1024, vcpus=1), uuid='fake-uuid') host.consume_from_request(spec_obj) self.assertEqual(1, host.vcpus_used) self.assertEqual(0, host.free_ram_mb) self.assertEqual(0, host.free_disk_mb) self.assertIsNotNone(host.updated) def test_consume_larger_instance_from_compute(self): host = ironic_host_manager.IronicNodeState("fakehost", "fakenode") host.update(compute=self.compute_node) self.assertIsNone(host.updated) spec_obj = objects.RequestSpec( flavor=objects.Flavor(root_gb=20, ephemeral_gb=0, memory_mb=2048, vcpus=2)) host.consume_from_request(spec_obj) self.assertEqual(1, host.vcpus_used) self.assertEqual(0, host.free_ram_mb) self.assertEqual(0, host.free_disk_mb) self.assertIsNotNone(host.updated) def test_consume_smaller_instance_from_compute(self): host = ironic_host_manager.IronicNodeState("fakehost", "fakenode") host.update(compute=self.compute_node) self.assertIsNone(host.updated) spec_obj = objects.RequestSpec( flavor=objects.Flavor(root_gb=5, ephemeral_gb=0, memory_mb=512, vcpus=1)) host.consume_from_request(spec_obj) self.assertEqual(1, host.vcpus_used) self.assertEqual(0, host.free_ram_mb) self.assertEqual(0, host.free_disk_mb) self.assertIsNotNone(host.updated) class IronicHostManagerTestFilters(test.NoDBTestCase): """Test filters work for IronicHostManager.""" @mock.patch.object(host_manager.HostManager, '_init_instance_info') @mock.patch.object(host_manager.HostManager, '_init_aggregates') def 
setUp(self, mock_init_agg, mock_init_inst): super(IronicHostManagerTestFilters, self).setUp() self.flags(scheduler_available_filters=['%s.%s' % (__name__, cls) for cls in ['FakeFilterClass1', 'FakeFilterClass2']]) self.flags(scheduler_default_filters=['FakeFilterClass1']) self.flags(baremetal_scheduler_default_filters=['FakeFilterClass2']) self.host_manager = ironic_host_manager.IronicHostManager() self.fake_hosts = [ironic_host_manager.IronicNodeState( 'fake_host%s' % x, 'fake-node') for x in range(1, 5)] self.fake_hosts += [ironic_host_manager.IronicNodeState( 'fake_multihost', 'fake-node%s' % x) for x in range(1, 5)] def test_default_filters(self): default_filters = self.host_manager.default_filters self.assertEqual(1, len(default_filters)) self.assertIsInstance(default_filters[0], FakeFilterClass1) def test_choose_host_filters_not_found(self): self.assertRaises(exception.SchedulerHostFilterNotFound, self.host_manager._choose_host_filters, 'FakeFilterClass3') def test_choose_host_filters(self): # Test we return 1 correct filter object host_filters = self.host_manager._choose_host_filters( ['FakeFilterClass2']) self.assertEqual(1, len(host_filters)) self.assertIsInstance(host_filters[0], FakeFilterClass2) def test_host_manager_default_filters(self): default_filters = self.host_manager.default_filters self.assertEqual(1, len(default_filters)) self.assertIsInstance(default_filters[0], FakeFilterClass1) @mock.patch.object(host_manager.HostManager, '_init_instance_info') @mock.patch.object(host_manager.HostManager, '_init_aggregates') def test_host_manager_default_filters_uses_baremetal(self, mock_init_agg, mock_init_inst): self.flags(scheduler_use_baremetal_filters=True) host_manager = ironic_host_manager.IronicHostManager() # ensure the defaults come from baremetal_scheduler_default_filters # and not scheduler_default_filters default_filters = host_manager.default_filters self.assertEqual(1, len(default_filters)) self.assertIsInstance(default_filters[0], 
FakeFilterClass2) def test_load_filters(self): # without scheduler_use_baremetal_filters filters = self.host_manager._load_filters() self.assertEqual(['FakeFilterClass1'], filters) def test_load_filters_baremetal(self): # with scheduler_use_baremetal_filters self.flags(scheduler_use_baremetal_filters=True) filters = self.host_manager._load_filters() self.assertEqual(['FakeFilterClass2'], filters) def _mock_get_filtered_hosts(self, info): info['got_objs'] = [] info['got_fprops'] = [] def fake_filter_one(_self, obj, filter_props): info['got_objs'].append(obj) info['got_fprops'].append(filter_props) return True self.stub_out(__name__ + '.FakeFilterClass1._filter_one', fake_filter_one) def _verify_result(self, info, result, filters=True): for x in info['got_fprops']: self.assertEqual(x, info['expected_fprops']) if filters: self.assertEqual(set(info['expected_objs']), set(info['got_objs'])) self.assertEqual(set(info['expected_objs']), set(result)) def test_get_filtered_hosts(self): fake_properties = objects.RequestSpec( instance_uuid='fake-uuid', ignore_hosts=[], force_hosts=[], force_nodes=[]) info = {'expected_objs': self.fake_hosts, 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result) @mock.patch.object(FakeFilterClass2, '_filter_one', return_value=True) def test_get_filtered_hosts_with_specified_filters(self, mock_filter_one): fake_properties = objects.RequestSpec( instance_uuid='fake-uuid', ignore_hosts=[], force_hosts=[], force_nodes=[]) specified_filters = ['FakeFilterClass1', 'FakeFilterClass2'] info = {'expected_objs': self.fake_hosts, 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties, filter_class_names=specified_filters) self._verify_result(info, result) def test_get_filtered_hosts_with_ignore(self): fake_properties = 
objects.RequestSpec( instance_uuid='fake-uuid', ignore_hosts=['fake_host1', 'fake_host3', 'fake_host5', 'fake_multihost'], force_hosts=[], force_nodes=[]) # [1] and [3] are host2 and host4 info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]], 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result) def test_get_filtered_hosts_with_force_hosts(self): fake_properties = objects.RequestSpec( instance_uuid='fake-uuid', ignore_hosts=[], force_hosts=['fake_host1', 'fake_host3', 'fake_host5'], force_nodes=[]) # [0] and [2] are host1 and host3 info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]], 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result, False) def test_get_filtered_hosts_with_no_matching_force_hosts(self): fake_properties = objects.RequestSpec( instance_uuid='fake-uuid', ignore_hosts=[], force_hosts=['fake_host5', 'fake_host6'], force_nodes=[]) info = {'expected_objs': [], 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result, False) def test_get_filtered_hosts_with_ignore_and_force_hosts(self): # Ensure ignore_hosts processed before force_hosts in host filters. fake_properties = objects.RequestSpec( instance_uuid='fake-uuid', ignore_hosts=['fake_host1'], force_hosts=['fake_host3', 'fake_host1'], force_nodes=[]) # only fake_host3 should be left. 
info = {'expected_objs': [self.fake_hosts[2]], 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result, False) def test_get_filtered_hosts_with_force_host_and_many_nodes(self): # Ensure all nodes returned for a host with many nodes fake_properties = objects.RequestSpec( instance_uuid='fake-uuid', ignore_hosts=[], force_hosts=['fake_multihost'], force_nodes=[]) info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5], self.fake_hosts[6], self.fake_hosts[7]], 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result, False) def test_get_filtered_hosts_with_force_nodes(self): fake_properties = objects.RequestSpec( instance_uuid='fake-uuid', ignore_hosts=[], force_hosts=[], force_nodes=['fake-node2', 'fake-node4', 'fake-node9']) # [5] is fake-node2, [7] is fake-node4 info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]], 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result, False) def test_get_filtered_hosts_with_force_hosts_and_nodes(self): # Ensure only overlapping results if both force host and node fake_properties = objects.RequestSpec( instance_uuid='fake-uuid', ignore_hosts=[], force_hosts=['fake_host1', 'fake_multihost'], force_nodes=['fake-node2', 'fake-node9']) # [5] is fake-node2 info = {'expected_objs': [self.fake_hosts[5]], 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result, False) def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self): # Ensure non-overlapping force_node and force_host yield no result 
fake_properties = objects.RequestSpec( instance_uuid='fake-uuid', ignore_hosts=[], force_hosts=['fake_multihost'], force_nodes=['fake-node']) info = {'expected_objs': [], 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result, False) def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self): # Ensure ignore_hosts can coexist with force_nodes fake_properties = objects.RequestSpec( instance_uuid='fake-uuid', ignore_hosts=['fake_host1', 'fake_host2'], force_hosts=[], force_nodes=['fake-node4', 'fake-node2']) info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]], 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result, False) def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self): # Ensure ignore_hosts is processed before force_nodes fake_properties = objects.RequestSpec( instance_uuid='fake-uuid', ignore_hosts=['fake_multihost'], force_hosts=[], force_nodes=['fake_node4', 'fake_node2']) info = {'expected_objs': [], 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result, False) nova-13.0.0/nova/tests/unit/scheduler/test_caching_scheduler.py0000664000567000056710000001666112701407773026026 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import timeutils from six.moves import range from nova import exception from nova import objects from nova.scheduler import caching_scheduler from nova.scheduler import host_manager from nova.tests.unit.scheduler import test_scheduler ENABLE_PROFILER = False class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase): """Test case for Caching Scheduler.""" driver_cls = caching_scheduler.CachingScheduler @mock.patch.object(caching_scheduler.CachingScheduler, "_get_up_hosts") def test_run_periodic_tasks_loads_hosts(self, mock_up_hosts): mock_up_hosts.return_value = [] context = mock.Mock() self.driver.run_periodic_tasks(context) self.assertTrue(mock_up_hosts.called) self.assertEqual([], self.driver.all_host_states) context.elevated.assert_called_with() @mock.patch.object(caching_scheduler.CachingScheduler, "_get_up_hosts") def test_get_all_host_states_returns_cached_value(self, mock_up_hosts): self.driver.all_host_states = [] self.driver._get_all_host_states(self.context) self.assertFalse(mock_up_hosts.called) self.assertEqual([], self.driver.all_host_states) @mock.patch.object(caching_scheduler.CachingScheduler, "_get_up_hosts") def test_get_all_host_states_loads_hosts(self, mock_up_hosts): mock_up_hosts.return_value = ["asdf"] result = self.driver._get_all_host_states(self.context) self.assertTrue(mock_up_hosts.called) self.assertEqual(["asdf"], self.driver.all_host_states) self.assertEqual(["asdf"], result) def test_get_up_hosts(self): with mock.patch.object(self.driver.host_manager, "get_all_host_states") as mock_get_hosts: 
mock_get_hosts.return_value = ["asdf"] result = self.driver._get_up_hosts(self.context) self.assertTrue(mock_get_hosts.called) self.assertEqual(mock_get_hosts.return_value, result) def test_select_destination_raises_with_no_hosts(self): spec_obj = self._get_fake_request_spec() self.driver.all_host_states = [] self.assertRaises(exception.NoValidHost, self.driver.select_destinations, self.context, spec_obj) @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value={'numa_topology': None, 'pci_requests': None}) def test_select_destination_works(self, mock_get_extra): spec_obj = self._get_fake_request_spec() fake_host = self._get_fake_host_state() self.driver.all_host_states = [fake_host] result = self._test_select_destinations(spec_obj) self.assertEqual(1, len(result)) self.assertEqual(result[0]["host"], fake_host.host) def _test_select_destinations(self, spec_obj): return self.driver.select_destinations( self.context, spec_obj) def _get_fake_request_spec(self): # NOTE(sbauza): Prevent to stub the Flavor.get_by_id call just by # directly providing a Flavor object flavor = objects.Flavor( flavorid="small", memory_mb=512, root_gb=1, ephemeral_gb=1, vcpus=1, swap=0, ) instance_properties = { "os_type": "linux", "project_id": "1234", } request_spec = objects.RequestSpec( flavor=flavor, num_instances=1, ignore_hosts=None, force_hosts=None, force_nodes=None, retry=None, availability_zone=None, image=None, instance_group=None, pci_requests=None, numa_topology=None, instance_uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', **instance_properties ) return request_spec def _get_fake_host_state(self, index=0): host_state = host_manager.HostState( 'host_%s' % index, 'node_%s' % index) host_state.free_ram_mb = 50000 host_state.total_usable_ram_mb = 50000 host_state.free_disk_mb = 4096 host_state.service = { "disabled": False, "updated_at": timeutils.utcnow(), "created_at": timeutils.utcnow(), } host_state.cpu_allocation_ratio = 16.0 host_state.ram_allocation_ratio = 1.5 
host_state.disk_allocation_ratio = 1.0 host_state.metrics = objects.MonitorMetricList(objects=[]) return host_state @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value={'numa_topology': None, 'pci_requests': None}) def test_performance_check_select_destination(self, mock_get_extra): hosts = 2 requests = 1 self.flags(service_down_time=240) spec_obj = self._get_fake_request_spec() host_states = [] for x in range(hosts): host_state = self._get_fake_host_state(x) host_states.append(host_state) self.driver.all_host_states = host_states def run_test(): a = timeutils.utcnow() for x in range(requests): self.driver.select_destinations( self.context, spec_obj) b = timeutils.utcnow() c = b - a seconds = (c.days * 24 * 60 * 60 + c.seconds) microseconds = seconds * 1000 + c.microseconds / 1000.0 per_request_ms = microseconds / requests return per_request_ms per_request_ms = None if ENABLE_PROFILER: import pycallgraph from pycallgraph import output config = pycallgraph.Config(max_depth=10) config.trace_filter = pycallgraph.GlobbingFilter(exclude=[ 'pycallgraph.*', 'unittest.*', 'testtools.*', 'nova.tests.unit.*', ]) graphviz = output.GraphvizOutput(output_file='scheduler.png') with pycallgraph.PyCallGraph(output=graphviz): per_request_ms = run_test() else: per_request_ms = run_test() # This has proved to be around 1 ms on a random dev box # But this is here so you can do simply performance testing easily. 
self.assertTrue(per_request_ms < 1000) if __name__ == '__main__': # A handy tool to help profile the schedulers performance ENABLE_PROFILER = True import testtools suite = testtools.ConcurrentTestSuite() test = "test_performance_check_select_destination" test_case = CachingSchedulerTestCase(test) suite.addTest(test_case) runner = testtools.TextTestResult.TextTestRunner() runner.run(suite) nova-13.0.0/nova/tests/unit/scheduler/test_host_manager.py0000664000567000056710000014575712701410011025027 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For HostManager """ import collections import datetime import mock from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import versionutils import six import nova from nova.compute import task_states from nova.compute import vm_states from nova import exception from nova import objects from nova.objects import base as obj_base from nova.pci import stats as pci_stats from nova.scheduler import filters from nova.scheduler import host_manager from nova import test from nova.tests import fixtures from nova.tests.unit import fake_instance from nova.tests.unit import matchers from nova.tests.unit.scheduler import fakes from nova.tests import uuidsentinel as uuids CONF = cfg.CONF CONF.import_opt('scheduler_tracks_instance_changes', 'nova.scheduler.host_manager') class FakeFilterClass1(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): pass class FakeFilterClass2(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): pass class HostManagerTestCase(test.NoDBTestCase): """Test case for HostManager class.""" @mock.patch.object(host_manager.HostManager, '_init_instance_info') @mock.patch.object(host_manager.HostManager, '_init_aggregates') def setUp(self, mock_init_agg, mock_init_inst): super(HostManagerTestCase, self).setUp() self.flags(scheduler_available_filters=['%s.%s' % (__name__, cls) for cls in ['FakeFilterClass1', 'FakeFilterClass2']]) self.flags(scheduler_default_filters=['FakeFilterClass1']) self.host_manager = host_manager.HostManager() self.fake_hosts = [host_manager.HostState('fake_host%s' % x, 'fake-node') for x in range(1, 5)] self.fake_hosts += [host_manager.HostState('fake_multihost', 'fake-node%s' % x) for x in range(1, 5)] self.useFixture(fixtures.SpawnIsSynchronousFixture()) def test_load_filters(self): filters = self.host_manager._load_filters() self.assertEqual(filters, ['FakeFilterClass1']) @mock.patch.object(nova.objects.InstanceList, 'get_by_filters') 
@mock.patch.object(nova.objects.ComputeNodeList, 'get_all') def test_init_instance_info_batches(self, mock_get_all, mock_get_by_filters): cn_list = objects.ComputeNodeList() for num in range(22): host_name = 'host_%s' % num cn_list.objects.append(objects.ComputeNode(host=host_name)) mock_get_all.return_value = cn_list self.host_manager._init_instance_info() self.assertEqual(mock_get_by_filters.call_count, 3) @mock.patch.object(nova.objects.InstanceList, 'get_by_filters') @mock.patch.object(nova.objects.ComputeNodeList, 'get_all') def test_init_instance_info(self, mock_get_all, mock_get_by_filters): cn1 = objects.ComputeNode(host='host1') cn2 = objects.ComputeNode(host='host2') inst1 = objects.Instance(host='host1', uuid='uuid1') inst2 = objects.Instance(host='host1', uuid='uuid2') inst3 = objects.Instance(host='host2', uuid='uuid3') mock_get_all.return_value = objects.ComputeNodeList(objects=[cn1, cn2]) mock_get_by_filters.return_value = objects.InstanceList( objects=[inst1, inst2, inst3]) hm = self.host_manager hm._instance_info = {} hm._init_instance_info() self.assertEqual(len(hm._instance_info), 2) fake_info = hm._instance_info['host1'] self.assertIn('uuid1', fake_info['instances']) self.assertIn('uuid2', fake_info['instances']) self.assertNotIn('uuid3', fake_info['instances']) exp_filters = {'deleted': False, 'host': [u'host1', u'host2']} mock_get_by_filters.assert_called_once_with(mock.ANY, exp_filters) def test_default_filters(self): default_filters = self.host_manager.default_filters self.assertEqual(1, len(default_filters)) self.assertIsInstance(default_filters[0], FakeFilterClass1) @mock.patch.object(host_manager.HostManager, '_init_instance_info') @mock.patch.object(objects.AggregateList, 'get_all') def test_init_aggregates_no_aggs(self, agg_get_all, mock_init_info): agg_get_all.return_value = [] self.host_manager = host_manager.HostManager() self.assertEqual({}, self.host_manager.aggs_by_id) self.assertEqual({}, self.host_manager.host_aggregates_map) 
    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(objects.AggregateList, 'get_all')
    def test_init_aggregates_one_agg_no_hosts(self, agg_get_all,
                                              mock_init_info):
        fake_agg = objects.Aggregate(id=1, hosts=[])
        agg_get_all.return_value = [fake_agg]
        self.host_manager = host_manager.HostManager()
        self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
        self.assertEqual({}, self.host_manager.host_aggregates_map)

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(objects.AggregateList, 'get_all')
    def test_init_aggregates_one_agg_with_hosts(self, agg_get_all,
                                                mock_init_info):
        fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
        agg_get_all.return_value = [fake_agg]
        self.host_manager = host_manager.HostManager()
        self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
        self.assertEqual({'fake-host': set([1])},
                         self.host_manager.host_aggregates_map)

    def test_update_aggregates(self):
        fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
        self.host_manager.update_aggregates([fake_agg])
        self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
        self.assertEqual({'fake-host': set([1])},
                         self.host_manager.host_aggregates_map)

    def test_update_aggregates_remove_hosts(self):
        fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
        self.host_manager.update_aggregates([fake_agg])
        self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
        self.assertEqual({'fake-host': set([1])},
                         self.host_manager.host_aggregates_map)
        # Let's remove the host from the aggregate and update again
        fake_agg.hosts = []
        self.host_manager.update_aggregates([fake_agg])
        self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
        self.assertEqual({'fake-host': set([])},
                         self.host_manager.host_aggregates_map)

    def test_delete_aggregate(self):
        fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
        self.host_manager.host_aggregates_map = collections.defaultdict(
            set, {'fake-host': set([1])})
        self.host_manager.aggs_by_id = {1: fake_agg}
        self.host_manager.delete_aggregate(fake_agg)
        self.assertEqual({}, self.host_manager.aggs_by_id)
        self.assertEqual({'fake-host': set([])},
                         self.host_manager.host_aggregates_map)

    def test_choose_host_filters_not_found(self):
        self.assertRaises(exception.SchedulerHostFilterNotFound,
                          self.host_manager._choose_host_filters,
                          'FakeFilterClass3')

    def test_choose_host_filters(self):
        # Test we return 1 correct filter object
        host_filters = self.host_manager._choose_host_filters(
            ['FakeFilterClass2'])
        self.assertEqual(1, len(host_filters))
        self.assertIsInstance(host_filters[0], FakeFilterClass2)

    def _mock_get_filtered_hosts(self, info):
        # Helper: stub FakeFilterClass1._filter_one to record every host
        # object and filter-properties it is called with, into `info`.
        info['got_objs'] = []
        info['got_fprops'] = []

        def fake_filter_one(_self, obj, filter_props):
            info['got_objs'].append(obj)
            info['got_fprops'].append(filter_props)
            return True

        self.stub_out(__name__ + '.FakeFilterClass1._filter_one',
                      fake_filter_one)

    def _verify_result(self, info, result, filters=True):
        # Helper: assert the recorded filter calls and the returned hosts
        # match the expectations stored in `info`.
        for x in info['got_fprops']:
            self.assertEqual(x, info['expected_fprops'])
        if filters:
            self.assertEqual(set(info['expected_objs']),
                             set(info['got_objs']))
        self.assertEqual(set(info['expected_objs']), set(result))

    def test_get_filtered_hosts(self):
        fake_properties = objects.RequestSpec(ignore_hosts=[],
                                              instance_uuid='fake-uuid1',
                                              force_hosts=[],
                                              force_nodes=[])
        info = {'expected_objs': self.fake_hosts,
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result)

    @mock.patch.object(FakeFilterClass2, '_filter_one', return_value=True)
    def test_get_filtered_hosts_with_specified_filters(self,
                                                       mock_filter_one):
        fake_properties = objects.RequestSpec(ignore_hosts=[],
                                              instance_uuid='fake-uuid1',
                                              force_hosts=[],
                                              force_nodes=[])
        specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
        info = {'expected_objs': self.fake_hosts,
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties, filter_class_names=specified_filters)
        self._verify_result(info, result)

    def test_get_filtered_hosts_with_ignore(self):
        fake_properties = objects.RequestSpec(
            instance_uuid='fake-uuid1',
            ignore_hosts=['fake_host1', 'fake_host3', 'fake_host5',
                          'fake_multihost'],
            force_hosts=[], force_nodes=[])
        # [1] and [3] are host2 and host4
        info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result)

    def test_get_filtered_hosts_with_ignore_case_insensitive(self):
        # ignore_hosts matching must not be case sensitive.
        fake_properties = objects.RequestSpec(
            instance_uuids=uuids.fakehost,
            ignore_hosts=['FAKE_HOST1', 'FaKe_HoSt3', 'Fake_Multihost'],
            force_hosts=[], force_nodes=[])
        # [1] and [3] are host2 and host4
        info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result)

    def test_get_filtered_hosts_with_force_hosts(self):
        fake_properties = objects.RequestSpec(
            instance_uuid='fake-uuid1',
            ignore_hosts=[],
            force_hosts=['fake_host1', 'fake_host3', 'fake_host5'],
            force_nodes=[])
        # [0] and [2] are host1 and host3
        info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result, False)

    def test_get_filtered_hosts_with_force_case_insensitive(self):
        # force_hosts matching must not be case sensitive.
        fake_properties = objects.RequestSpec(
            instance_uuids=uuids.fakehost,
            ignore_hosts=[],
            force_hosts=['FAKE_HOST1', 'FaKe_HoSt3', 'fake_host4',
                         'faKe_host5'],
            force_nodes=[])
        # [1] and [3] are host2 and host4
        info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2],
                                  self.fake_hosts[3]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result, False)

    def test_get_filtered_hosts_with_no_matching_force_hosts(self):
        fake_properties = objects.RequestSpec(
            instance_uuid='fake-uuid1',
            ignore_hosts=[],
            force_hosts=['fake_host5', 'fake_host6'],
            force_nodes=[])
        info = {'expected_objs': [],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        # With no surviving forced hosts, the filter handler must not run.
        with mock.patch.object(self.host_manager.filter_handler,
                'get_filtered_objects') as fake_filter:
            result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                          fake_properties)
            self.assertFalse(fake_filter.called)
        self._verify_result(info, result, False)

    def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
        # Ensure ignore_hosts processed before force_hosts in host filters.
        fake_properties = objects.RequestSpec(
            instance_uuid='fake-uuid1',
            ignore_hosts=['fake_host1'],
            force_hosts=['fake_host3', 'fake_host1'],
            force_nodes=[])
        # only fake_host3 should be left.
        info = {'expected_objs': [self.fake_hosts[2]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result, False)

    def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
        # Ensure all nodes returned for a host with many nodes
        fake_properties = objects.RequestSpec(
            instance_uuid='fake-uuid1',
            ignore_hosts=[],
            force_hosts=['fake_multihost'],
            force_nodes=[])
        info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
                                  self.fake_hosts[6], self.fake_hosts[7]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result, False)

    def test_get_filtered_hosts_with_force_nodes(self):
        fake_properties = objects.RequestSpec(
            instance_uuid='fake-uuid1',
            ignore_hosts=[],
            force_hosts=[],
            force_nodes=['fake-node2', 'fake-node4', 'fake-node9'])
        # [5] is fake-node2, [7] is fake-node4
        info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result, False)

    def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
        # Ensure only overlapping results if both force host and node
        fake_properties = objects.RequestSpec(
            instance_uuid='fake-uuid1',
            ignore_hosts=[],
            force_hosts=['fake-host1', 'fake_multihost'],
            force_nodes=['fake-node2', 'fake-node9'])
        # [5] is fake-node2
        info = {'expected_objs': [self.fake_hosts[5]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result, False)

    def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
        # Ensure non-overlapping force_node and force_host yield no result
        fake_properties = objects.RequestSpec(
            instance_uuid='fake-uuid1',
            ignore_hosts=[],
            force_hosts=['fake_multihost'],
            force_nodes=['fake-node'])
        info = {'expected_objs': [],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result, False)

    def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
        # Ensure ignore_hosts can coexist with force_nodes
        fake_properties = objects.RequestSpec(
            instance_uuid='fake-uuid1',
            ignore_hosts=['fake_host1', 'fake_host2'],
            force_hosts=[],
            force_nodes=['fake-node4', 'fake-node2'])
        info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result, False)

    def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
        # Ensure ignore_hosts is processed before force_nodes
        fake_properties = objects.RequestSpec(
            instance_uuid='fake-uuid1',
            ignore_hosts=['fake_multihost'],
            force_hosts=[],
            force_nodes=['fake_node4', 'fake_node2'])
        info = {'expected_objs': [],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result, False)

    @mock.patch('nova.scheduler.host_manager.LOG')
    @mock.patch('nova.objects.ServiceList.get_by_binary')
    @mock.patch('nova.objects.ComputeNodeList.get_all')
    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_get_all_host_states(self, mock_get_by_host, mock_get_all,
                                 mock_get_by_binary, mock_log):
        mock_get_by_host.return_value = objects.InstanceList()
        mock_get_all.return_value = fakes.COMPUTE_NODES
        mock_get_by_binary.return_value = fakes.SERVICES
        context = 'fake_context'

        self.host_manager.get_all_host_states(context)
        host_states_map = self.host_manager.host_state_map
        self.assertEqual(len(host_states_map), 4)
        # Two warnings expected: disk mismatch on node3 and a missing
        # compute service record for host 'fake'.
        calls = [
            mock.call(
                "Host %(hostname)s has more disk space than database "
                "expected (%(physical)s GB > %(database)s GB)",
                {'physical': 3333, 'database': 3072, 'hostname': 'node3'}
            ),
            mock.call(
                "No compute service record found for host %(host)s",
                {'host': 'fake'}
            )
        ]
        self.assertEqual(calls, mock_log.warning.call_args_list)

        # Check that .service is set properly
        for i in range(4):
            compute_node = fakes.COMPUTE_NODES[i]
            host = compute_node['host']
            node = compute_node['hypervisor_hostname']
            state_key = (host, node)
            self.assertEqual(host_states_map[state_key].service,
                             obj_base.obj_to_primitive(
                                 fakes.get_service_by_host(host)))
        self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
                         512)
        # 511GB
        self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
                         524288)
        self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
                         1024)
        # 1023GB
        self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
                         1048576)
        self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
                         3072)
        # 3071GB
        self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
                         3145728)
        self.assertThat(
            objects.NUMATopology.obj_from_db_obj(
                host_states_map[('host3', 'node3')].numa_topology
            )._to_dict(),
            matchers.DictMatches(fakes.NUMA_TOPOLOGY._to_dict()))
        self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
                         8192)
        # 8191GB
        self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
                         8388608)

    @mock.patch.object(nova.objects.InstanceList, 'get_by_host')
    @mock.patch.object(host_manager.HostState, '_update_from_compute_node')
    @mock.patch.object(objects.ComputeNodeList, 'get_all')
    @mock.patch.object(objects.ServiceList, 'get_by_binary')
    def test_get_all_host_states_with_no_aggs(self, svc_get_by_binary,
                                              cn_get_all, update_from_cn,
                                              mock_get_by_host):
        svc_get_by_binary.return_value = [objects.Service(host='fake')]
        cn_get_all.return_value = [
            objects.ComputeNode(host='fake', hypervisor_hostname='fake')]
        mock_get_by_host.return_value = objects.InstanceList()
        self.host_manager.host_aggregates_map = collections.defaultdict(set)

        self.host_manager.get_all_host_states('fake-context')
        host_state = self.host_manager.host_state_map[('fake', 'fake')]
        self.assertEqual([], host_state.aggregates)

    @mock.patch.object(nova.objects.InstanceList, 'get_by_host')
    @mock.patch.object(host_manager.HostState, '_update_from_compute_node')
    @mock.patch.object(objects.ComputeNodeList, 'get_all')
    @mock.patch.object(objects.ServiceList, 'get_by_binary')
    def test_get_all_host_states_with_matching_aggs(self, svc_get_by_binary,
                                                    cn_get_all,
                                                    update_from_cn,
                                                    mock_get_by_host):
        svc_get_by_binary.return_value = [objects.Service(host='fake')]
        cn_get_all.return_value = [
            objects.ComputeNode(host='fake', hypervisor_hostname='fake')]
        mock_get_by_host.return_value = objects.InstanceList()
        fake_agg = objects.Aggregate(id=1)
        self.host_manager.host_aggregates_map = collections.defaultdict(
            set, {'fake': set([1])})
        self.host_manager.aggs_by_id = {1: fake_agg}

        self.host_manager.get_all_host_states('fake-context')
        host_state = self.host_manager.host_state_map[('fake', 'fake')]
        self.assertEqual([fake_agg], host_state.aggregates)

    @mock.patch.object(nova.objects.InstanceList, 'get_by_host')
    @mock.patch.object(host_manager.HostState, '_update_from_compute_node')
    @mock.patch.object(objects.ComputeNodeList, 'get_all')
    @mock.patch.object(objects.ServiceList, 'get_by_binary')
    def test_get_all_host_states_with_not_matching_aggs(self,
                                                        svc_get_by_binary,
                                                        cn_get_all,
                                                        update_from_cn,
                                                        mock_get_by_host):
        # Aggregate is mapped to 'other', so 'fake' gets no aggregates.
        svc_get_by_binary.return_value = [objects.Service(host='fake'),
                                          objects.Service(host='other')]
        cn_get_all.return_value = [
            objects.ComputeNode(host='fake', hypervisor_hostname='fake'),
            objects.ComputeNode(host='other', hypervisor_hostname='other')]
        mock_get_by_host.return_value = objects.InstanceList()
        fake_agg = objects.Aggregate(id=1)
        self.host_manager.host_aggregates_map = collections.defaultdict(
            set, {'other': set([1])})
        self.host_manager.aggs_by_id = {1: fake_agg}

        self.host_manager.get_all_host_states('fake-context')
        host_state = self.host_manager.host_state_map[('fake', 'fake')]
        self.assertEqual([], host_state.aggregates)

    @mock.patch('nova.objects.ServiceList.get_by_binary')
    @mock.patch('nova.objects.ComputeNodeList.get_all')
    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_get_all_host_states_updated(self, mock_get_by_host,
                                         mock_get_all_comp,
                                         mock_get_svc_by_binary):
        mock_get_all_comp.return_value = fakes.COMPUTE_NODES
        mock_get_svc_by_binary.return_value = fakes.SERVICES
        context = 'fake_context'
        hm = self.host_manager
        inst1 = objects.Instance(uuid='uuid1')
        cn1 = objects.ComputeNode(host='host1')
        # 'updated': True means the cached info is current, so no DB call
        # should be made to fetch instances.
        hm._instance_info = {'host1': {'instances': {'uuid1': inst1},
                                       'updated': True}}
        host_state = host_manager.HostState('host1', cn1)
        self.assertFalse(host_state.instances)
        mock_get_by_host.return_value = None
        host_state.update(
                inst_dict=hm._get_instance_info(context, cn1))
        self.assertFalse(mock_get_by_host.called)
        self.assertTrue(host_state.instances)
        self.assertEqual(host_state.instances['uuid1'], inst1)

    @mock.patch('nova.objects.ServiceList.get_by_binary')
    @mock.patch('nova.objects.ComputeNodeList.get_all')
    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_get_all_host_states_not_updated(self, mock_get_by_host,
                                             mock_get_all_comp,
                                             mock_get_svc_by_binary):
        mock_get_all_comp.return_value = fakes.COMPUTE_NODES
        mock_get_svc_by_binary.return_value = fakes.SERVICES
        context = 'fake_context'
        hm = self.host_manager
        inst1 = objects.Instance(uuid='uuid1')
        cn1 = objects.ComputeNode(host='host1')
        # 'updated': False means the cache is stale and the instances must
        # be re-fetched from the database.
        hm._instance_info = {'host1': {'instances': {'uuid1': inst1},
                                       'updated': False}}
        host_state = host_manager.HostState('host1', cn1)
        self.assertFalse(host_state.instances)
        mock_get_by_host.return_value = objects.InstanceList(objects=[inst1])
        host_state.update(
                inst_dict=hm._get_instance_info(context, cn1))
        mock_get_by_host.assert_called_once_with(context, cn1.host)
        self.assertTrue(host_state.instances)
        self.assertEqual(host_state.instances['uuid1'], inst1)

    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_recreate_instance_info(self, mock_get_by_host):
        host_name = 'fake_host'
        inst1 = fake_instance.fake_instance_obj('fake_context', uuid='aaa',
                                                host=host_name)
        inst2 = fake_instance.fake_instance_obj('fake_context', uuid='bbb',
                                                host=host_name)
        orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
        new_inst_list = objects.InstanceList(objects=[inst1, inst2])
        mock_get_by_host.return_value = new_inst_list
        self.host_manager._instance_info = {
                host_name: {
                    'instances': orig_inst_dict,
                    'updated': True,
                }}
        self.host_manager._recreate_instance_info('fake_context', host_name)
        new_info = self.host_manager._instance_info[host_name]
        self.assertEqual(len(new_info['instances']), len(new_inst_list))
        # A fresh DB read resets the 'updated' flag.
        self.assertFalse(new_info['updated'])

    def test_update_instance_info(self):
        host_name = 'fake_host'
        inst1 = fake_instance.fake_instance_obj('fake_context', uuid='aaa',
                                                host=host_name)
        inst2 = fake_instance.fake_instance_obj('fake_context', uuid='bbb',
                                                host=host_name)
        orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
        self.host_manager._instance_info = {
                host_name: {
                    'instances': orig_inst_dict,
                    'updated': False,
                }}
        inst3 = fake_instance.fake_instance_obj('fake_context', uuid='ccc',
                                                host=host_name)
        inst4 = fake_instance.fake_instance_obj('fake_context', uuid='ddd',
                                                host=host_name)
        update = objects.InstanceList(objects=[inst3, inst4])
        self.host_manager.update_instance_info('fake_context', host_name,
                                               update)
        new_info = self.host_manager._instance_info[host_name]
        self.assertEqual(len(new_info['instances']), 4)
        self.assertTrue(new_info['updated'])

    def test_update_instance_info_unknown_host(self):
        self.host_manager._recreate_instance_info = mock.MagicMock()
        host_name = 'fake_host'
        inst1 = fake_instance.fake_instance_obj('fake_context', uuid='aaa',
                                                host=host_name)
        inst2 = fake_instance.fake_instance_obj('fake_context', uuid='bbb',
                                                host=host_name)
        orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
        self.host_manager._instance_info = {
                host_name: {
                    'instances': orig_inst_dict,
                    'updated': False,
                }}
        bad_host = 'bad_host'
        inst3 = fake_instance.fake_instance_obj('fake_context', uuid='ccc',
                                                host=bad_host)
        inst_list3 = objects.InstanceList(objects=[inst3])
        self.host_manager.update_instance_info('fake_context', bad_host,
                                               inst_list3)
        new_info = self.host_manager._instance_info[host_name]
        # Unknown host triggers a full re-read instead of an in-place update.
        self.host_manager._recreate_instance_info.assert_called_once_with(
                'fake_context', bad_host)
        self.assertEqual(len(new_info['instances']), len(orig_inst_dict))
        self.assertFalse(new_info['updated'])

    def test_delete_instance_info(self):
        host_name = 'fake_host'
        inst1 = fake_instance.fake_instance_obj('fake_context', uuid='aaa',
                                                host=host_name)
        inst2 = fake_instance.fake_instance_obj('fake_context', uuid='bbb',
                                                host=host_name)
        orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
        self.host_manager._instance_info = {
                host_name: {
                    'instances': orig_inst_dict,
                    'updated': False,
                }}
        self.host_manager.delete_instance_info('fake_context', host_name,
                                               inst1.uuid)
        new_info = self.host_manager._instance_info[host_name]
        self.assertEqual(len(new_info['instances']), 1)
        self.assertTrue(new_info['updated'])

    def test_delete_instance_info_unknown_host(self):
        self.host_manager._recreate_instance_info = mock.MagicMock()
        host_name = 'fake_host'
        inst1 = fake_instance.fake_instance_obj('fake_context', uuid='aaa',
                                                host=host_name)
        inst2 = fake_instance.fake_instance_obj('fake_context', uuid='bbb',
                                                host=host_name)
        orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
        self.host_manager._instance_info = {
                host_name: {
                    'instances': orig_inst_dict,
                    'updated': False,
                }}
        bad_host = 'bad_host'
        self.host_manager.delete_instance_info('fake_context', bad_host,
                                               'aaa')
        new_info = self.host_manager._instance_info[host_name]
        self.host_manager._recreate_instance_info.assert_called_once_with(
                'fake_context', bad_host)
        self.assertEqual(len(new_info['instances']), len(orig_inst_dict))
        self.assertFalse(new_info['updated'])

    def test_sync_instance_info(self):
        self.host_manager._recreate_instance_info = mock.MagicMock()
        host_name = 'fake_host'
        inst1 = fake_instance.fake_instance_obj('fake_context', uuid='aaa',
                                                host=host_name)
        inst2 = fake_instance.fake_instance_obj('fake_context', uuid='bbb',
                                                host=host_name)
        orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
        self.host_manager._instance_info = {
                host_name: {
                    'instances': orig_inst_dict,
                    'updated': False,
                }}
        # UUID list matches the cache, so no re-read is needed.
        self.host_manager.sync_instance_info('fake_context', host_name,
                                             ['bbb', 'aaa'])
        new_info = self.host_manager._instance_info[host_name]
        self.assertFalse(self.host_manager._recreate_instance_info.called)
        self.assertTrue(new_info['updated'])

    def test_sync_instance_info_fail(self):
        self.host_manager._recreate_instance_info = mock.MagicMock()
        host_name = 'fake_host'
        inst1 = fake_instance.fake_instance_obj('fake_context', uuid='aaa',
                                                host=host_name)
        inst2 = fake_instance.fake_instance_obj('fake_context', uuid='bbb',
                                                host=host_name)
        orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
        self.host_manager._instance_info = {
                host_name: {
                    'instances': orig_inst_dict,
                    'updated': False,
                }}
        # 'new' is not in the cache, so the info must be recreated from DB.
        self.host_manager.sync_instance_info('fake_context', host_name,
                                             ['bbb', 'aaa', 'new'])
        new_info = self.host_manager._instance_info[host_name]
        self.host_manager._recreate_instance_info.assert_called_once_with(
                'fake_context', host_name)
        self.assertFalse(new_info['updated'])


class HostManagerChangedNodesTestCase(test.NoDBTestCase):
    """Test case for HostManager class."""

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def setUp(self, mock_init_agg, mock_init_inst):
        super(HostManagerChangedNodesTestCase, self).setUp()
        self.host_manager = host_manager.HostManager()
        self.fake_hosts = [
              host_manager.HostState('host1', 'node1'),
              host_manager.HostState('host2', 'node2'),
              host_manager.HostState('host3', 'node3'),
              host_manager.HostState('host4', 'node4')
            ]

    @mock.patch('nova.objects.ServiceList.get_by_binary')
    @mock.patch('nova.objects.ComputeNodeList.get_all')
    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_get_all_host_states(self, mock_get_by_host, mock_get_all,
                                 mock_get_by_binary):
        mock_get_by_host.return_value = objects.InstanceList()
        mock_get_all.return_value = fakes.COMPUTE_NODES
        mock_get_by_binary.return_value = fakes.SERVICES
        context = 'fake_context'

        self.host_manager.get_all_host_states(context)
        host_states_map = self.host_manager.host_state_map
        self.assertEqual(len(host_states_map), 4)

    @mock.patch('nova.objects.ServiceList.get_by_binary')
    @mock.patch('nova.objects.ComputeNodeList.get_all')
    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_get_all_host_states_after_delete_one(self, mock_get_by_host,
                                                  mock_get_all,
                                                  mock_get_by_binary):
        # A node that disappears between calls must be dropped from the map.
        running_nodes = [n for n in fakes.COMPUTE_NODES
                         if n.get('hypervisor_hostname') != 'node4']

        mock_get_by_host.return_value = objects.InstanceList()
        mock_get_all.side_effect = [fakes.COMPUTE_NODES, running_nodes]
        mock_get_by_binary.side_effect = [fakes.SERVICES, fakes.SERVICES]
        context = 'fake_context'

        # first call: all nodes
        self.host_manager.get_all_host_states(context)
        host_states_map = self.host_manager.host_state_map
        self.assertEqual(len(host_states_map), 4)

        # second call: just running nodes
        self.host_manager.get_all_host_states(context)
        host_states_map = self.host_manager.host_state_map
        self.assertEqual(len(host_states_map), 3)

    @mock.patch('nova.objects.ServiceList.get_by_binary')
    @mock.patch('nova.objects.ComputeNodeList.get_all')
    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_get_all_host_states_after_delete_all(self, mock_get_by_host,
                                                  mock_get_all,
                                                  mock_get_by_binary):
        mock_get_by_host.return_value = objects.InstanceList()
        mock_get_all.side_effect = [fakes.COMPUTE_NODES, []]
        mock_get_by_binary.side_effect = [fakes.SERVICES, fakes.SERVICES]
        context = 'fake_context'

        # first call: all nodes
        self.host_manager.get_all_host_states(context)
        host_states_map = self.host_manager.host_state_map
        self.assertEqual(len(host_states_map), 4)

        # second call: no nodes
        self.host_manager.get_all_host_states(context)
        host_states_map = self.host_manager.host_state_map
        self.assertEqual(len(host_states_map), 0)


class HostStateTestCase(test.NoDBTestCase):
    """Test case for HostState class."""

    # update_from_compute_node() and consume_from_request() are tested
    # in HostManagerTestCase.test_get_all_host_states()

    @mock.patch('nova.utils.synchronized',
                side_effect=lambda a: lambda f: lambda *args: f(*args))
    def test_stat_consumption_from_compute_node(self, sync_mock):
        stats = {
            'num_instances': '5',
            'num_proj_12345': '3',
            'num_proj_23456': '1',
            'num_vm_%s' % vm_states.BUILDING: '2',
            'num_vm_%s' % vm_states.SUSPENDED: '1',
            'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
            'num_task_%s' % task_states.MIGRATING: '2',
            'num_os_type_linux': '4',
            'num_os_type_windoze': '1',
            'io_workload': '42',
        }

        hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
        compute = objects.ComputeNode(
            stats=stats, memory_mb=1, free_disk_gb=0, local_gb=0,
            local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
            disk_available_least=None,
            updated_at=None, host_ip='127.0.0.1',
            hypervisor_type='htype',
            hypervisor_hostname='hostname', cpu_info='cpu_info',
            supported_hv_specs=[],
            hypervisor_version=hyper_ver_int, numa_topology=None,
            pci_device_pools=None, metrics=None,
            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
            disk_allocation_ratio=1.0)

        host = host_manager.HostState("fakehost", "fakenode")
        host.update(compute=compute)

        # update() must serialize on the (host, node) key.
        sync_mock.assert_called_once_with(("fakehost", "fakenode"))
        self.assertEqual(5, host.num_instances)
        self.assertEqual(42, host.num_io_ops)
        self.assertEqual(10, len(host.stats))

        self.assertEqual('127.0.0.1', str(host.host_ip))
        self.assertEqual('htype', host.hypervisor_type)
        self.assertEqual('hostname', host.hypervisor_hostname)
        self.assertEqual('cpu_info', host.cpu_info)
        self.assertEqual([], host.supported_instances)
        self.assertEqual(hyper_ver_int, host.hypervisor_version)

    def test_stat_consumption_from_compute_node_non_pci(self):
        stats = {
            'num_instances': '5',
            'num_proj_12345': '3',
            'num_proj_23456': '1',
            'num_vm_%s' % vm_states.BUILDING: '2',
            'num_vm_%s' % vm_states.SUSPENDED: '1',
            'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
            'num_task_%s' % task_states.MIGRATING: '2',
            'num_os_type_linux': '4',
            'num_os_type_windoze': '1',
            'io_workload': '42',
        }

        hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
        compute = objects.ComputeNode(
            stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
            local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
            disk_available_least=None,
            updated_at=None, host_ip='127.0.0.1',
            hypervisor_type='htype',
            hypervisor_hostname='hostname', cpu_info='cpu_info',
            supported_hv_specs=[],
            hypervisor_version=hyper_ver_int, numa_topology=None,
            pci_device_pools=None, metrics=None,
            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
            disk_allocation_ratio=1.0)

        host = host_manager.HostState("fakehost", "fakenode")
        host.update(compute=compute)
        # With no PCI pools on the compute node, pci_stats stays empty.
        self.assertEqual([], host.pci_stats.pools)
        self.assertEqual(hyper_ver_int, host.hypervisor_version)

    def test_stat_consumption_from_compute_node_rescue_unshelving(self):
        stats = {
            'num_instances': '5',
            'num_proj_12345': '3',
            'num_proj_23456': '1',
            'num_vm_%s' % vm_states.BUILDING: '2',
            'num_vm_%s' % vm_states.SUSPENDED: '1',
            'num_task_%s' % task_states.UNSHELVING: '1',
            'num_task_%s' % task_states.RESCUING: '2',
            'num_os_type_linux': '4',
            'num_os_type_windoze': '1',
            'io_workload': '42',
        }

        hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
        compute = objects.ComputeNode(
            stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
            local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
            disk_available_least=None,
            updated_at=None, host_ip='127.0.0.1',
            hypervisor_type='htype',
            hypervisor_hostname='hostname', cpu_info='cpu_info',
            supported_hv_specs=[],
            hypervisor_version=hyper_ver_int, numa_topology=None,
            pci_device_pools=None, metrics=None,
            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
            disk_allocation_ratio=1.0)

        host = host_manager.HostState("fakehost", "fakenode")
        host.update(compute=compute)

        self.assertEqual(5, host.num_instances)
        self.assertEqual(42, host.num_io_ops)
        self.assertEqual(10, len(host.stats))

        self.assertEqual([], host.pci_stats.pools)
        self.assertEqual(hyper_ver_int, host.hypervisor_version)

    @mock.patch('nova.utils.synchronized',
                side_effect=lambda a: lambda f: lambda *args: f(*args))
    @mock.patch('nova.virt.hardware.get_host_numa_usage_from_instance')
    @mock.patch('nova.objects.Instance')
    @mock.patch('nova.virt.hardware.numa_fit_instance_to_host')
    @mock.patch('nova.virt.hardware.host_topology_and_format_from_host')
    def test_stat_consumption_from_instance(self, host_topo_mock,
                                            numa_fit_mock,
                                            instance_init_mock,
                                            numa_usage_mock,
                                            sync_mock):
        fake_numa_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell()])
        fake_host_numa_topology = mock.Mock()
        fake_instance = objects.Instance(numa_topology=fake_numa_topology)
        host_topo_mock.return_value = (fake_host_numa_topology, True)
        numa_usage_mock.return_value = fake_host_numa_topology
        numa_fit_mock.return_value = fake_numa_topology
        instance_init_mock.return_value = fake_instance
        spec_obj = objects.RequestSpec(
            instance_uuid='fake-uuid',
            flavor=objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=0,
                                  vcpus=0),
            numa_topology=fake_numa_topology,
            pci_requests=objects.InstancePCIRequests(requests=[]))
        host = host_manager.HostState("fakehost", "fakenode")
        self.assertIsNone(host.updated)
        host.consume_from_request(spec_obj)
        numa_fit_mock.assert_called_once_with(fake_host_numa_topology,
                                              fake_numa_topology,
                                              limits=None, pci_requests=None,
                                              pci_stats=None)
        numa_usage_mock.assert_called_once_with(host, fake_instance)
        sync_mock.assert_called_once_with(("fakehost", "fakenode"))
        self.assertEqual(fake_host_numa_topology, host.numa_topology)
        self.assertIsNotNone(host.updated)

        # Second consume must accumulate counters and replace the topology.
        second_numa_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell()])
        spec_obj = objects.RequestSpec(
            instance_uuid='fake-uuid',
            flavor=objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=0,
                                  vcpus=0),
            numa_topology=second_numa_topology,
            pci_requests=objects.InstancePCIRequests(requests=[]))
        second_host_numa_topology = mock.Mock()
        numa_usage_mock.return_value = second_host_numa_topology
        numa_fit_mock.return_value = second_numa_topology

        host.consume_from_request(spec_obj)
        self.assertEqual(2, host.num_instances)
        self.assertEqual(2, host.num_io_ops)
        self.assertEqual(2, numa_usage_mock.call_count)
        self.assertEqual(((host, fake_instance),), numa_usage_mock.call_args)
        self.assertEqual(second_host_numa_topology, host.numa_topology)
        self.assertIsNotNone(host.updated)

    def test_stat_consumption_from_instance_pci(self):

        inst_topology = objects.InstanceNUMATopology(
                            cells = [objects.InstanceNUMACell(
                                                      cpuset=set([0]),
                                                      memory=512, id=0)])

        fake_requests = [{'request_id': 'fake_request1', 'count': 1,
                          'spec': [{'vendor_id': '8086'}]}]
        fake_requests_obj = objects.InstancePCIRequests(
                                requests=[objects.InstancePCIRequest(**r)
                                          for r in fake_requests],
                                instance_uuid='fake-uuid')
        req_spec = objects.RequestSpec(
            instance_uuid='fake-uuid',
            project_id='12345',
            numa_topology=inst_topology,
            pci_requests=fake_requests_obj,
            flavor=objects.Flavor(root_gb=0,
                                  ephemeral_gb=0,
                                  memory_mb=512,
                                  vcpus=1))
        host = host_manager.HostState("fakehost", "fakenode")
        self.assertIsNone(host.updated)
        # One matching PCI device available on NUMA node 1.
        host.pci_stats = pci_stats.PciDeviceStats(
                                      [objects.PciDevicePool(vendor_id='8086',
                                                             product_id='15ed',
                                                             numa_node=1,
                                                             count=1)])
        host.numa_topology = fakes.NUMA_TOPOLOGY
        host.consume_from_request(req_spec)
        self.assertIsInstance(req_spec.numa_topology,
                              objects.InstanceNUMATopology)

        # Consuming the request must land usage on cell 1 (where the PCI
        # device lives) and exhaust the single-device pool.
        self.assertEqual(512, host.numa_topology.cells[1].memory_usage)
        self.assertEqual(1, host.numa_topology.cells[1].cpu_usage)
        self.assertEqual(0, len(host.pci_stats.pools))
self.assertIsNotNone(host.updated) def test_stat_consumption_from_instance_with_pci_exception(self): fake_requests = [{'request_id': 'fake_request1', 'count': 3, 'spec': [{'vendor_id': '8086'}]}] fake_requests_obj = objects.InstancePCIRequests( requests=[objects.InstancePCIRequest(**r) for r in fake_requests], instance_uuid='fake-uuid') req_spec = objects.RequestSpec( instance_uuid='fake-uuid', project_id='12345', numa_topology=None, pci_requests=fake_requests_obj, flavor=objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=1024, vcpus=1)) host = host_manager.HostState("fakehost", "fakenode") self.assertIsNone(host.updated) fake_updated = mock.sentinel.fake_updated host.updated = fake_updated host.pci_stats = pci_stats.PciDeviceStats() with mock.patch.object(host.pci_stats, 'apply_requests', side_effect=exception.PciDeviceRequestFailed): host.consume_from_request(req_spec) self.assertEqual(fake_updated, host.updated) def test_resources_consumption_from_compute_node(self): _ts_now = datetime.datetime(2015, 11, 11, 11, 0, 0) metrics = [ dict(name='cpu.frequency', value=1.0, source='source1', timestamp=_ts_now), dict(name='numa.membw.current', numa_membw_values={"0": 10, "1": 43}, source='source2', timestamp=_ts_now), ] hyper_ver_int = versionutils.convert_version_to_int('6.0.0') compute = objects.ComputeNode( metrics=jsonutils.dumps(metrics), memory_mb=0, free_disk_gb=0, local_gb=0, local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0, disk_available_least=None, updated_at=None, host_ip='127.0.0.1', hypervisor_type='htype', hypervisor_hostname='hostname', cpu_info='cpu_info', supported_hv_specs=[], hypervisor_version=hyper_ver_int, numa_topology=fakes.NUMA_TOPOLOGY._to_json(), stats=None, pci_device_pools=None, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0) host = host_manager.HostState("fakehost", "fakenode") host.update(compute=compute) self.assertEqual(len(host.metrics), 2) self.assertEqual(1.0, host.metrics.to_list()[0]['value']) 
self.assertEqual('source1', host.metrics[0].source) self.assertEqual('cpu.frequency', host.metrics[0].name) self.assertEqual('numa.membw.current', host.metrics[1].name) self.assertEqual('source2', host.metrics.to_list()[1]['source']) self.assertEqual({'0': 10, '1': 43}, host.metrics[1].numa_membw_values) self.assertIsInstance(host.numa_topology, six.string_types) nova-13.0.0/nova/tests/unit/scheduler/test_scheduler_utils.py0000664000567000056710000004103212701407773025560 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For Scheduler Utils """ import uuid import mock import six from nova.compute import flavors from nova.compute import utils as compute_utils from nova import exception from nova import objects from nova import rpc from nova.scheduler import utils as scheduler_utils from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_flavor class SchedulerUtilsTestCase(test.NoDBTestCase): """Test case for scheduler utils methods.""" def setUp(self): super(SchedulerUtilsTestCase, self).setUp() self.context = 'fake-context' def test_build_request_spec_without_image(self): instance = {'uuid': 'fake-uuid'} instance_type = objects.Flavor(**test_flavor.fake_flavor) with mock.patch.object(flavors, 'extract_flavor') as mock_extract: mock_extract.return_value = instance_type request_spec = scheduler_utils.build_request_spec(self.context, None, [instance]) mock_extract.assert_called_once_with({'uuid': 'fake-uuid'}) self.assertEqual({}, request_spec['image']) def test_build_request_spec_with_object(self): instance_type = objects.Flavor() instance = fake_instance.fake_instance_obj(self.context) with mock.patch.object(instance, 'get_flavor') as mock_get: mock_get.return_value = instance_type request_spec = scheduler_utils.build_request_spec(self.context, None, [instance]) mock_get.assert_called_once_with() self.assertIsInstance(request_spec['instance_properties'], dict) @mock.patch.object(rpc, 'get_notifier', return_value=mock.Mock()) @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(objects.Instance, 'save') def test_set_vm_state_and_notify(self, mock_save, mock_add, mock_get): expected_uuid = 'fake-uuid' request_spec = dict(instance_properties=dict(uuid='other-uuid')) updates = dict(vm_state='fake-vm-state') service = 'fake-service' method = 'fake-method' exc_info = 'exc_info' payload = dict(request_spec=request_spec, instance_properties=request_spec.get( 'instance_properties', {}), 
instance_id=expected_uuid, state='fake-vm-state', method=method, reason=exc_info) event_type = '%s.%s' % (service, method) scheduler_utils.set_vm_state_and_notify(self.context, expected_uuid, service, method, updates, exc_info, request_spec) mock_save.assert_called_once_with() mock_add.assert_called_once_with(self.context, mock.ANY, exc_info, mock.ANY) self.assertIsInstance(mock_add.call_args[0][1], objects.Instance) self.assertIsInstance(mock_add.call_args[0][3], tuple) mock_get.return_value.error.assert_called_once_with(self.context, event_type, payload) def test_build_filter_properties(self): sched_hints = {'hint': ['over-there']} forced_host = 'forced-host1' forced_node = 'forced-node1' instance_type = objects.Flavor() filt_props = scheduler_utils.build_filter_properties(sched_hints, forced_host, forced_node, instance_type) self.assertEqual(sched_hints, filt_props['scheduler_hints']) self.assertEqual([forced_host], filt_props['force_hosts']) self.assertEqual([forced_node], filt_props['force_nodes']) self.assertEqual(instance_type, filt_props['instance_type']) def test_build_filter_properties_no_forced_host_no_force_node(self): sched_hints = {'hint': ['over-there']} forced_host = None forced_node = None instance_type = objects.Flavor() filt_props = scheduler_utils.build_filter_properties(sched_hints, forced_host, forced_node, instance_type) self.assertEqual(sched_hints, filt_props['scheduler_hints']) self.assertEqual(instance_type, filt_props['instance_type']) self.assertNotIn('forced_host', filt_props) self.assertNotIn('forced_node', filt_props) def _test_populate_filter_props(self, host_state_obj=True, with_retry=True, force_hosts=None, force_nodes=None): if force_hosts is None: force_hosts = [] if force_nodes is None: force_nodes = [] if with_retry: if ((len(force_hosts) == 1 and len(force_nodes) <= 1) or (len(force_nodes) == 1 and len(force_hosts) <= 1)): filter_properties = dict(force_hosts=force_hosts, force_nodes=force_nodes) elif len(force_hosts) > 1 or 
len(force_nodes) > 1: filter_properties = dict(retry=dict(hosts=[]), force_hosts=force_hosts, force_nodes=force_nodes) else: filter_properties = dict(retry=dict(hosts=[])) else: filter_properties = dict() if host_state_obj: class host_state(object): host = 'fake-host' nodename = 'fake-node' limits = 'fake-limits' else: host_state = dict(host='fake-host', nodename='fake-node', limits='fake-limits') scheduler_utils.populate_filter_properties(filter_properties, host_state) enable_retry_force_hosts = not force_hosts or len(force_hosts) > 1 enable_retry_force_nodes = not force_nodes or len(force_nodes) > 1 if with_retry or enable_retry_force_hosts or enable_retry_force_nodes: # So we can check for 2 hosts scheduler_utils.populate_filter_properties(filter_properties, host_state) if force_hosts: expected_limits = None else: expected_limits = 'fake-limits' self.assertEqual(expected_limits, filter_properties.get('limits')) if (with_retry and enable_retry_force_hosts and enable_retry_force_nodes): self.assertEqual([['fake-host', 'fake-node'], ['fake-host', 'fake-node']], filter_properties['retry']['hosts']) else: self.assertNotIn('retry', filter_properties) def test_populate_filter_props(self): self._test_populate_filter_props() def test_populate_filter_props_host_dict(self): self._test_populate_filter_props(host_state_obj=False) def test_populate_filter_props_no_retry(self): self._test_populate_filter_props(with_retry=False) def test_populate_filter_props_force_hosts_no_retry(self): self._test_populate_filter_props(force_hosts=['force-host']) def test_populate_filter_props_force_nodes_no_retry(self): self._test_populate_filter_props(force_nodes=['force-node']) def test_populate_filter_props_multi_force_hosts_with_retry(self): self._test_populate_filter_props(force_hosts=['force-host1', 'force-host2']) def test_populate_filter_props_multi_force_nodes_with_retry(self): self._test_populate_filter_props(force_nodes=['force-node1', 'force-node2']) def 
test_populate_retry_exception_at_max_attempts(self): self.flags(scheduler_max_attempts=2) msg = 'The exception text was preserved!' filter_properties = dict(retry=dict(num_attempts=2, hosts=[], exc_reason=[msg])) nvh = self.assertRaises(exception.MaxRetriesExceeded, scheduler_utils.populate_retry, filter_properties, 'fake-uuid') # make sure 'msg' is a substring of the complete exception text self.assertIn(msg, six.text_type(nvh)) def _check_parse_options(self, opts, sep, converter, expected): good = scheduler_utils.parse_options(opts, sep=sep, converter=converter) for item in expected: self.assertIn(item, good) def test_parse_options(self): # check normal self._check_parse_options(['foo=1', 'bar=-2.1'], '=', float, [('foo', 1.0), ('bar', -2.1)]) # check convert error self._check_parse_options(['foo=a1', 'bar=-2.1'], '=', float, [('bar', -2.1)]) # check separator missing self._check_parse_options(['foo', 'bar=-2.1'], '=', float, [('bar', -2.1)]) # check key missing self._check_parse_options(['=5', 'bar=-2.1'], '=', float, [('bar', -2.1)]) def test_validate_filters_configured(self): self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2') self.assertTrue(scheduler_utils.validate_filter('FakeFilter1')) self.assertTrue(scheduler_utils.validate_filter('FakeFilter2')) self.assertFalse(scheduler_utils.validate_filter('FakeFilter3')) def test_validate_weighers_configured(self): self.flags(scheduler_weight_classes= ['ServerGroupSoftAntiAffinityWeigher', 'FakeFilter1']) self.assertTrue(scheduler_utils.validate_weigher( 'ServerGroupSoftAntiAffinityWeigher')) self.assertTrue(scheduler_utils.validate_weigher('FakeFilter1')) self.assertFalse(scheduler_utils.validate_weigher( 'ServerGroupSoftAffinityWeigher')) def test_validate_weighers_configured_all_weighers(self): self.assertTrue(scheduler_utils.validate_weigher( 'ServerGroupSoftAffinityWeigher')) self.assertTrue(scheduler_utils.validate_weigher( 'ServerGroupSoftAntiAffinityWeigher')) def _create_server_group(self, 
policy='anti-affinity'): instance = fake_instance.fake_instance_obj(self.context, params={'host': 'hostA'}) group = objects.InstanceGroup() group.name = 'pele' group.uuid = str(uuid.uuid4()) group.members = [instance.uuid] group.policies = [policy] return group def _get_group_details(self, group, policy=None): group_hosts = ['hostB'] with test.nested( mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid', return_value=group), mock.patch.object(objects.InstanceGroup, 'get_hosts', return_value=['hostA']), ) as (get_group, get_hosts): scheduler_utils._SUPPORTS_ANTI_AFFINITY = None scheduler_utils._SUPPORTS_AFFINITY = None group_info = scheduler_utils._get_group_details( self.context, 'fake_uuid', group_hosts) self.assertEqual( (set(['hostA', 'hostB']), [policy], group.members), group_info) def test_get_group_details(self): for policy in ['affinity', 'anti-affinity', 'soft-affinity', 'soft-anti-affinity']: group = self._create_server_group(policy) self._get_group_details(group, policy=policy) def test_get_group_details_with_no_instance_uuid(self): group_info = scheduler_utils._get_group_details(self.context, None) self.assertIsNone(group_info) def _get_group_details_with_filter_not_configured(self, policy): self.flags(scheduler_default_filters=['fake']) self.flags(scheduler_weight_classes=['fake']) instance = fake_instance.fake_instance_obj(self.context, params={'host': 'hostA'}) group = objects.InstanceGroup() group.uuid = str(uuid.uuid4()) group.members = [instance.uuid] group.policies = [policy] with test.nested( mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid', return_value=group), ) as (get_group,): scheduler_utils._SUPPORTS_ANTI_AFFINITY = None scheduler_utils._SUPPORTS_AFFINITY = None scheduler_utils._SUPPORTS_SOFT_AFFINITY = None scheduler_utils._SUPPORTS_SOFT_ANTI_AFFINITY = None self.assertRaises(exception.UnsupportedPolicyException, scheduler_utils._get_group_details, self.context, 'fake-uuid') def 
test_get_group_details_with_filter_not_configured(self): policies = ['anti-affinity', 'affinity', 'soft-affinity', 'soft-anti-affinity'] for policy in policies: self._get_group_details_with_filter_not_configured(policy) @mock.patch.object(scheduler_utils, '_get_group_details') def test_setup_instance_group_in_filter_properties(self, mock_ggd): mock_ggd.return_value = scheduler_utils.GroupDetails( hosts=set(['hostA', 'hostB']), policies=['policy'], members=['instance1']) spec = {'instance_properties': {'uuid': 'fake-uuid'}} filter_props = {'group_hosts': ['hostC']} scheduler_utils.setup_instance_group(self.context, spec, filter_props) mock_ggd.assert_called_once_with(self.context, 'fake-uuid', ['hostC']) expected_filter_props = {'group_updated': True, 'group_hosts': set(['hostA', 'hostB']), 'group_policies': ['policy'], 'group_members': ['instance1']} self.assertEqual(expected_filter_props, filter_props) @mock.patch.object(scheduler_utils, '_get_group_details') def test_setup_instance_group_with_no_group(self, mock_ggd): mock_ggd.return_value = None spec = {'instance_properties': {'uuid': 'fake-uuid'}} filter_props = {'group_hosts': ['hostC']} scheduler_utils.setup_instance_group(self.context, spec, filter_props) mock_ggd.assert_called_once_with(self.context, 'fake-uuid', ['hostC']) self.assertNotIn('group_updated', filter_props) self.assertNotIn('group_policies', filter_props) self.assertEqual(['hostC'], filter_props['group_hosts']) @mock.patch.object(scheduler_utils, '_get_group_details') def test_setup_instance_group_with_filter_not_configured(self, mock_ggd): mock_ggd.side_effect = exception.NoValidHost(reason='whatever') spec = {'instance_properties': {'uuid': 'fake-uuid'}} filter_props = {'group_hosts': ['hostC']} self.assertRaises(exception.NoValidHost, scheduler_utils.setup_instance_group, self.context, spec, filter_props) nova-13.0.0/nova/tests/unit/scheduler/test_host_filters.py0000664000567000056710000000276112701407773025075 0ustar 
jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler Host Filters. """ from nova.scheduler import filters from nova.scheduler.filters import all_hosts_filter from nova.scheduler.filters import compute_filter from nova import test from nova.tests.unit.scheduler import fakes class HostFiltersTestCase(test.NoDBTestCase): def test_filter_handler(self): # Double check at least a couple of known filters exist filter_handler = filters.HostFilterHandler() classes = filter_handler.get_matching_classes( ['nova.scheduler.filters.all_filters']) self.assertIn(all_hosts_filter.AllHostsFilter, classes) self.assertIn(compute_filter.ComputeFilter, classes) def test_all_host_filter(self): filt_cls = all_hosts_filter.AllHostsFilter() host = fakes.FakeHostState('host1', 'node1', {}) self.assertTrue(filt_cls.host_passes(host, {})) nova-13.0.0/nova/tests/unit/scheduler/test_scheduler.py0000664000567000056710000003021612701407773024342 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""

import mock

from nova import context
from nova import objects
from nova.scheduler import caching_scheduler
from nova.scheduler import chance
from nova.scheduler import driver
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.scheduler import ironic_host_manager
from nova.scheduler import manager
from nova import servicegroup
from nova import test
from nova.tests.unit import fake_server_actions
from nova.tests.unit.scheduler import fakes


class SchedulerManagerInitTestCase(test.NoDBTestCase):
    """Test case for scheduler manager initiation."""
    manager_cls = manager.SchedulerManager

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def test_init_using_default_schedulerdriver(self,
                                                mock_init_agg,
                                                mock_init_inst):
        # No config override -> FilterScheduler is the default driver.
        driver = self.manager_cls().driver
        self.assertIsInstance(driver, filter_scheduler.FilterScheduler)

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def test_init_using_chance_schedulerdriver(self,
                                               mock_init_agg,
                                               mock_init_inst):
        self.flags(scheduler_driver='chance_scheduler')
        driver = self.manager_cls().driver
        self.assertIsInstance(driver, chance.ChanceScheduler)

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def test_init_using_caching_schedulerdriver(self,
                                                mock_init_agg,
                                                mock_init_inst):
        self.flags(scheduler_driver='caching_scheduler')
        driver = self.manager_cls().driver
        self.assertIsInstance(driver, caching_scheduler.CachingScheduler)

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def test_init_nonexist_schedulerdriver(self,
                                           mock_init_agg,
                                           mock_init_inst):
        # An unknown driver alias must fail loudly at startup.
        self.flags(scheduler_driver='nonexist_scheduler')
        self.assertRaises(RuntimeError, self.manager_cls)

    # NOTE(Yingxin): Loading full class path is deprecated and should be
    # removed in the N release.
    @mock.patch.object(manager.LOG, 'warning')
    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def test_init_using_classpath_to_schedulerdriver(self,
                                                     mock_init_agg,
                                                     mock_init_inst,
                                                     mock_warning):
        self.flags(
            scheduler_driver='nova.scheduler.chance.ChanceScheduler')
        driver = self.manager_cls().driver
        self.assertIsInstance(driver, chance.ChanceScheduler)
        # The deprecation warning must have been emitted.
        warn_args, kwargs = mock_warning.call_args
        self.assertIn("DEPRECATED", warn_args[0])


class SchedulerManagerTestCase(test.NoDBTestCase):
    """Test case for scheduler manager."""
    manager_cls = manager.SchedulerManager
    driver_cls = fakes.FakeScheduler
    driver_plugin_name = 'fake_scheduler'

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def setUp(self, mock_init_agg, mock_init_inst):
        super(SchedulerManagerTestCase, self).setUp()
        self.flags(scheduler_driver=self.driver_plugin_name)
        with mock.patch.object(host_manager.HostManager, '_init_aggregates'):
            self.manager = self.manager_cls()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.topic = 'fake_topic'
        self.fake_args = (1, 2, 3)
        self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
        fake_server_actions.stub_out_action_events(self.stubs)

    def test_1_correct_init(self):
        # Correct scheduler driver
        manager = self.manager
        self.assertIsInstance(manager.driver, self.driver_cls)

    def test_select_destination(self):
        """select_destinations is delegated straight to the driver."""
        fake_spec = objects.RequestSpec()
        with mock.patch.object(self.manager.driver, 'select_destinations'
                ) as select_destinations:
            self.manager.select_destinations(None, spec_obj=fake_spec)
            select_destinations.assert_called_once_with(None, fake_spec)

    # TODO(sbauza): Remove that test once the API v4 is removed
    @mock.patch.object(objects.RequestSpec, 'from_primitives')
    def test_select_destination_with_old_client(self, from_primitives):
        """Legacy dict args are converted into a RequestSpec first."""
        fake_spec = objects.RequestSpec()
        from_primitives.return_value = fake_spec
        with mock.patch.object(self.manager.driver, 'select_destinations'
                ) as select_destinations:
            self.manager.select_destinations(None, request_spec='fake_spec',
                    filter_properties='fake_props')
            select_destinations.assert_called_once_with(None, fake_spec)

    def test_update_aggregates(self):
        with mock.patch.object(self.manager.driver.host_manager,
                               'update_aggregates'
                               ) as update_aggregates:
            self.manager.update_aggregates(None, aggregates='agg')
            update_aggregates.assert_called_once_with('agg')

    def test_delete_aggregate(self):
        with mock.patch.object(self.manager.driver.host_manager,
                               'delete_aggregate'
                               ) as delete_aggregate:
            self.manager.delete_aggregate(None, aggregate='agg')
            delete_aggregate.assert_called_once_with('agg')

    def test_update_instance_info(self):
        with mock.patch.object(self.manager.driver.host_manager,
                               'update_instance_info') as mock_update:
            self.manager.update_instance_info(mock.sentinel.context,
                                              mock.sentinel.host_name,
                                              mock.sentinel.instance_info)
            mock_update.assert_called_once_with(mock.sentinel.context,
                                                mock.sentinel.host_name,
                                                mock.sentinel.instance_info)

    def test_delete_instance_info(self):
        with mock.patch.object(self.manager.driver.host_manager,
                               'delete_instance_info') as mock_delete:
            self.manager.delete_instance_info(mock.sentinel.context,
                                              mock.sentinel.host_name,
                                              mock.sentinel.instance_uuid)
            mock_delete.assert_called_once_with(mock.sentinel.context,
                                                mock.sentinel.host_name,
                                                mock.sentinel.instance_uuid)

    def test_sync_instance_info(self):
        with mock.patch.object(self.manager.driver.host_manager,
                               'sync_instance_info') as mock_sync:
            self.manager.sync_instance_info(mock.sentinel.context,
                                            mock.sentinel.host_name,
                                            mock.sentinel.instance_uuids)
            mock_sync.assert_called_once_with(mock.sentinel.context,
                                              mock.sentinel.host_name,
                                              mock.sentinel.instance_uuids)


class SchedulerInitTestCase(test.NoDBTestCase):
    """Test case for base scheduler driver initiation."""
    driver_cls = fakes.FakeScheduler

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def test_init_using_default_hostmanager(self,
                                            mock_init_agg,
                                            mock_init_inst):
        manager = self.driver_cls().host_manager
        self.assertIsInstance(manager, host_manager.HostManager)

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def test_init_using_ironic_hostmanager(self,
                                           mock_init_agg,
                                           mock_init_inst):
        self.flags(scheduler_host_manager='ironic_host_manager')
        manager = self.driver_cls().host_manager
        self.assertIsInstance(manager, ironic_host_manager.IronicHostManager)

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def test_init_nonexist_hostmanager(self,
                                       mock_init_agg,
                                       mock_init_inst):
        self.flags(scheduler_host_manager='nonexist_host_manager')
        self.assertRaises(RuntimeError, self.driver_cls)

    # NOTE(Yingxin): Loading full class path is deprecated and should be
    # removed in the N release.
    @mock.patch.object(driver.LOG, 'warning')
    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def test_init_using_classpath_to_hostmanager(self,
                                                 mock_init_agg,
                                                 mock_init_inst,
                                                 mock_warning):
        self.flags(
            scheduler_host_manager=
                'nova.scheduler.ironic_host_manager.IronicHostManager')
        manager = self.driver_cls().host_manager
        self.assertIsInstance(manager, ironic_host_manager.IronicHostManager)
        # The deprecation warning must have been emitted.
        warn_args, kwargs = mock_warning.call_args
        self.assertIn("DEPRECATED", warn_args[0])


class SchedulerTestCase(test.NoDBTestCase):
    """Test case for base scheduler driver class."""

    # So we can subclass this test and re-use tests if we need.
    driver_cls = fakes.FakeScheduler

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def setUp(self, mock_init_agg, mock_init_inst):
        super(SchedulerTestCase, self).setUp()
        self.driver = self.driver_cls()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.topic = 'fake_topic'
        self.servicegroup_api = servicegroup.API()

    @mock.patch('nova.objects.ServiceList.get_by_topic')
    @mock.patch('nova.servicegroup.API.service_is_up')
    def test_hosts_up(self, mock_service_is_up, mock_get_by_topic):
        """hosts_up only reports hosts whose service is alive."""
        service1 = objects.Service(host='host1')
        service2 = objects.Service(host='host2')
        services = objects.ServiceList(objects=[service1, service2])

        mock_get_by_topic.return_value = services
        # host1 down, host2 up.
        mock_service_is_up.side_effect = [False, True]

        result = self.driver.hosts_up(self.context, self.topic)
        self.assertEqual(result, ['host2'])

        mock_get_by_topic.assert_called_once_with(self.context, self.topic)
        calls = [mock.call(service1), mock.call(service2)]
        self.assertEqual(calls, mock_service_is_up.call_args_list)
nova-13.0.0/nova/tests/unit/scheduler/test_client.py0000664000567000056710000001470512701407773023637 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import oslo_messaging as messaging

from nova import context
from nova import objects
from nova.objects import pci_device_pool
from nova.scheduler import client as scheduler_client
from nova.scheduler.client import query as scheduler_query_client
from nova.scheduler.client import report as scheduler_report_client
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test

"""Tests for Scheduler Client."""


class SchedulerReportClientTestCase(test.NoDBTestCase):
    """Tests for the report-side scheduler client (resource stats)."""

    def setUp(self):
        super(SchedulerReportClientTestCase, self).setUp()

        self.context = context.get_admin_context()
        self.flags(use_local=True, group='conductor')
        self.client = scheduler_report_client.SchedulerReportClient()

    @mock.patch.object(objects.ComputeNode, 'save')
    def test_update_resource_stats_saves(self, mock_save):
        # update_resource_stats() must persist the compute node via save().
        cn = objects.ComputeNode()
        cn.host = 'fakehost'
        cn.hypervisor_hostname = 'fakenode'
        cn.pci_device_pools = pci_device_pool.from_pci_stats(
            [{"vendor_id": "foo", "product_id": "foo", "count": 1,
              "a": "b"}])
        self.client.update_resource_stats(cn)
        mock_save.assert_called_once_with()


class SchedulerQueryClientTestCase(test.NoDBTestCase):
    """Tests for the query-side scheduler client (RPC pass-through)."""

    def setUp(self):
        super(SchedulerQueryClientTestCase, self).setUp()

        self.context = context.get_admin_context()
        self.client = scheduler_query_client.SchedulerQueryClient()

    def test_constructor(self):
        self.assertIsNotNone(self.client.scheduler_rpcapi)

    @mock.patch.object(scheduler_rpcapi.SchedulerAPI,
                       'select_destinations')
    def test_select_destinations(self, mock_select_destinations):
        # The query client forwards select_destinations straight to the
        # scheduler RPC API with the same context and spec object.
        fake_spec = objects.RequestSpec()
        self.client.select_destinations(
            context=self.context,
            spec_obj=fake_spec
        )
        mock_select_destinations.assert_called_once_with(
            self.context, fake_spec)

    @mock.patch.object(scheduler_rpcapi.SchedulerAPI, 'update_aggregates')
    def test_update_aggregates(self, mock_update_aggs):
        aggregates = [objects.Aggregate(id=1)]
        self.client.update_aggregates(
            context=self.context,
            aggregates=aggregates)
        mock_update_aggs.assert_called_once_with(
            self.context, aggregates)

    @mock.patch.object(scheduler_rpcapi.SchedulerAPI, 'delete_aggregate')
    def test_delete_aggregate(self, mock_delete_agg):
        aggregate = objects.Aggregate(id=1)
        self.client.delete_aggregate(
            context=self.context,
            aggregate=aggregate)
        mock_delete_agg.assert_called_once_with(
            self.context, aggregate)


class SchedulerClientTestCase(test.NoDBTestCase):
    """Tests for the facade that lazily wraps the query/report clients."""

    def setUp(self):
        super(SchedulerClientTestCase, self).setUp()
        self.client = scheduler_client.SchedulerClient()

    def test_constructor(self):
        self.assertIsNotNone(self.client.queryclient)
        self.assertIsNotNone(self.client.reportclient)

    @mock.patch.object(scheduler_query_client.SchedulerQueryClient,
                       'select_destinations')
    def test_select_destinations(self, mock_select_destinations):
        fake_spec = objects.RequestSpec()
        # The wrapped query client is only instantiated on first use.
        self.assertIsNone(self.client.queryclient.instance)
        self.client.select_destinations('ctxt', fake_spec)
        self.assertIsNotNone(self.client.queryclient.instance)
        mock_select_destinations.assert_called_once_with('ctxt', fake_spec)

    @mock.patch.object(scheduler_query_client.SchedulerQueryClient,
                       'select_destinations',
                       side_effect=messaging.MessagingTimeout())
    def test_select_destinations_timeout(self, mock_select_destinations):
        # check if the scheduler service times out properly
        fake_spec = objects.RequestSpec()
        fake_args = ['ctxt', fake_spec]
        self.assertRaises(messaging.MessagingTimeout,
                          self.client.select_destinations, *fake_args)
        # Two calls expected: the client retries once before giving up.
        mock_select_destinations.assert_has_calls(
            [mock.call(*fake_args)] * 2)

    @mock.patch.object(scheduler_query_client.SchedulerQueryClient,
                       'select_destinations', side_effect=[
                           messaging.MessagingTimeout(), mock.DEFAULT])
    def test_select_destinations_timeout_once(self,
                                              mock_select_destinations):
        # scenario: the scheduler service times out & recovers after failure
        fake_spec = objects.RequestSpec()
        fake_args = ['ctxt', fake_spec]
        self.client.select_destinations(*fake_args)
        mock_select_destinations.assert_has_calls(
            [mock.call(*fake_args)] * 2)

    @mock.patch.object(scheduler_query_client.SchedulerQueryClient,
                       'update_aggregates')
    def test_update_aggregates(self, mock_update_aggs):
        aggregates = [objects.Aggregate(id=1)]
        self.client.update_aggregates(
            context='context',
            aggregates=aggregates)
        mock_update_aggs.assert_called_once_with(
            'context', aggregates)

    @mock.patch.object(scheduler_query_client.SchedulerQueryClient,
                       'delete_aggregate')
    def test_delete_aggregate(self, mock_delete_agg):
        aggregate = objects.Aggregate(id=1)
        self.client.delete_aggregate(
            context='context',
            aggregate=aggregate)
        mock_delete_agg.assert_called_once_with(
            'context', aggregate)

    @mock.patch.object(scheduler_report_client.SchedulerReportClient,
                       'update_resource_stats')
    def test_update_resource_stats(self, mock_update_resource_stats):
        # The wrapped report client is only instantiated on first use.
        self.assertIsNone(self.client.reportclient.instance)
        self.client.update_resource_stats(mock.sentinel.cn)
        self.assertIsNotNone(self.client.reportclient.instance)
        mock_update_resource_stats.assert_called_once_with(mock.sentinel.cn)
nova-13.0.0/nova/tests/unit/scheduler/test_rpcapi.py0000664000567000056710000001133612701407773023634 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Unit Tests for nova.scheduler.rpcapi
"""

import mock
from oslo_config import cfg

from nova import context
from nova import objects
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test

CONF = cfg.CONF


class SchedulerRpcAPITestCase(test.NoDBTestCase):
    """Exercises SchedulerAPI methods against a mocked messaging client."""

    def _test_scheduler_api(self, method, rpc_method, expected_args=None,
                            **kwargs):
        # Helper: call ``method`` on a real SchedulerAPI whose underlying
        # oslo.messaging client is mocked, then verify prepare()/call()/
        # cast() received the expected version, fanout flag and arguments.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.assertIsNotNone(rpcapi.client)
        self.assertEqual(rpcapi.client.target.topic, CONF.scheduler_topic)

        # 'call' methods return a value; 'cast' methods return None.
        expected_retval = 'foo' if rpc_method == 'call' else None
        expected_version = kwargs.pop('version', None)
        expected_fanout = kwargs.pop('fanout', None)
        expected_kwargs = kwargs.copy()
        if expected_args:
            expected_kwargs = expected_args

        prepare_kwargs = {}
        if expected_fanout:
            prepare_kwargs['fanout'] = True
        if expected_version:
            prepare_kwargs['version'] = expected_version

        # NOTE(sbauza): We need to persist the method before mocking it
        orig_prepare = rpcapi.client.prepare

        def fake_can_send_version(version=None):
            # Delegate version negotiation to the real (pre-mock) client.
            return orig_prepare(version=version).can_send_version()

        @mock.patch.object(rpcapi.client, rpc_method,
                           return_value=expected_retval)
        @mock.patch.object(rpcapi.client, 'prepare',
                           return_value=rpcapi.client)
        @mock.patch.object(rpcapi.client, 'can_send_version',
                           side_effect=fake_can_send_version)
        def do_test(mock_csv, mock_prepare, mock_rpc_method):
            retval = getattr(rpcapi, method)(ctxt, **kwargs)
            self.assertEqual(retval, expected_retval)
            mock_prepare.assert_called_once_with(**prepare_kwargs)
            mock_rpc_method.assert_called_once_with(ctxt, method,
                                                    **expected_kwargs)

        do_test()

    def test_select_destinations(self):
        fake_spec = objects.RequestSpec()
        self._test_scheduler_api('select_destinations', rpc_method='call',
                spec_obj=fake_spec,
                version='4.3')

    @mock.patch.object(objects.RequestSpec,
                       'to_legacy_filter_properties_dict')
    @mock.patch.object(objects.RequestSpec, 'to_legacy_request_spec_dict')
    def test_select_destinations_with_old_manager(self, to_spec, to_props):
        # When pinned to RPC 4.0, the RequestSpec object is downgraded to
        # the legacy request_spec/filter_properties dicts before sending.
        self.flags(scheduler='4.0', group='upgrade_levels')
        to_spec.return_value = 'fake_request_spec'
        to_props.return_value = 'fake_prop'
        fake_spec = objects.RequestSpec()
        self._test_scheduler_api('select_destinations', rpc_method='call',
                expected_args={'request_spec': 'fake_request_spec',
                               'filter_properties': 'fake_prop'},
                spec_obj=fake_spec,
                version='4.0')

    def test_update_aggregates(self):
        # Aggregate updates are fanned out to every scheduler instance.
        self._test_scheduler_api('update_aggregates', rpc_method='cast',
                aggregates='aggregates',
                version='4.1',
                fanout=True)

    def test_delete_aggregate(self):
        self._test_scheduler_api('delete_aggregate', rpc_method='cast',
                aggregate='aggregate',
                version='4.1',
                fanout=True)

    def test_update_instance_info(self):
        self._test_scheduler_api('update_instance_info', rpc_method='cast',
                host_name='fake_host',
                instance_info='fake_instance',
                fanout=True,
                version='4.2')

    def test_delete_instance_info(self):
        self._test_scheduler_api('delete_instance_info', rpc_method='cast',
                host_name='fake_host',
                instance_uuid='fake_uuid',
                fanout=True,
                version='4.2')

    def test_sync_instance_info(self):
        self._test_scheduler_api('sync_instance_info', rpc_method='cast',
                host_name='fake_host',
                instance_uuids=['fake1', 'fake2'],
                fanout=True,
                version='4.2')
nova-13.0.0/nova/tests/unit/scheduler/test_chance_scheduler.py0000664000567000056710000000647412701407773025644 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Chance Scheduler.
"""

import mock

from nova import exception
from nova import objects
from nova.scheduler import chance
from nova.tests.unit.scheduler import test_scheduler


class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
    """Test case for Chance Scheduler."""

    driver_cls = chance.ChanceScheduler

    def test_filter_hosts_avoid(self):
        """Test to make sure _filter_hosts() filters original hosts if
        avoid_original_host is True.
        """
        hosts = ['host1', 'host2', 'host3']
        spec_obj = objects.RequestSpec(ignore_hosts=['host2'])
        filtered = self.driver._filter_hosts(hosts, spec_obj=spec_obj)
        # 'host2' was in ignore_hosts, so it must be dropped.
        self.assertEqual(filtered, ['host1', 'host3'])

    def test_filter_hosts_no_avoid(self):
        """Test to make sure _filter_hosts() does not filter original
        hosts if avoid_original_host is False.
        """
        hosts = ['host1', 'host2', 'host3']
        spec_obj = objects.RequestSpec(ignore_hosts=[])
        filtered = self.driver._filter_hosts(hosts, spec_obj=spec_obj)
        self.assertEqual(filtered, hosts)

    @mock.patch('random.choice')
    def test_select_destinations(self, mock_random_choice):
        # The chance scheduler picks one random host per requested
        # instance; with random.choice mocked the picks are deterministic.
        all_hosts = ['host1', 'host2', 'host3', 'host4']

        def _return_hosts(*args, **kwargs):
            return all_hosts

        mock_random_choice.side_effect = ['host3', 'host2']
        self.stub_out('nova.scheduler.chance.ChanceScheduler.hosts_up',
                      _return_hosts)

        spec_obj = objects.RequestSpec(num_instances=2, ignore_hosts=None)
        dests = self.driver.select_destinations(self.context, spec_obj)

        self.assertEqual(2, len(dests))
        (host, node) = (dests[0]['host'], dests[0]['nodename'])
        self.assertEqual('host3', host)
        # The chance scheduler never selects a specific nodename.
        self.assertIsNone(node)
        (host, node) = (dests[1]['host'], dests[1]['nodename'])
        self.assertEqual('host2', host)
        self.assertIsNone(node)

        # random.choice must have been offered the full host list twice.
        calls = [mock.call(all_hosts), mock.call(all_hosts)]
        self.assertEqual(calls, mock_random_choice.call_args_list)

    def test_select_destinations_no_valid_host(self):
        # If filtering leaves no hosts, NoValidHost must be raised.

        def _return_hosts(*args, **kwargs):
            return ['host1', 'host2']

        def _return_no_host(*args, **kwargs):
            return []

        self.stub_out('nova.scheduler.chance.ChanceScheduler.hosts_up',
                      _return_hosts)
        self.stub_out('nova.scheduler.chance.ChanceScheduler._filter_hosts',
                      _return_no_host)

        spec_obj = objects.RequestSpec(num_instances=1)
        self.assertRaises(exception.NoValidHost,
                          self.driver.select_destinations, self.context,
                          spec_obj)
nova-13.0.0/nova/tests/unit/scheduler/ironic_fakes.py0000664000567000056710000001103612701407773023760 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fake nodes for Ironic host manager tests. """ from nova import objects COMPUTE_NODES = [ objects.ComputeNode( id=1, local_gb=10, memory_mb=1024, vcpus=1, vcpus_used=0, local_gb_used=0, memory_mb_used=0, updated_at=None, cpu_info='baremetal cpu', host='host1', hypervisor_hostname='node1uuid', host_ip='127.0.0.1', hypervisor_version=1, hypervisor_type='ironic', stats=dict(ironic_driver= "nova.virt.ironic.driver.IronicDriver", cpu_arch='i386'), supported_hv_specs=[objects.HVSpec.from_list( ["i386", "baremetal", "baremetal"])], free_disk_gb=10, free_ram_mb=1024, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0), objects.ComputeNode( id=2, local_gb=20, memory_mb=2048, vcpus=1, vcpus_used=0, local_gb_used=0, memory_mb_used=0, updated_at=None, cpu_info='baremetal cpu', host='host2', hypervisor_hostname='node2uuid', host_ip='127.0.0.1', hypervisor_version=1, hypervisor_type='ironic', stats=dict(ironic_driver= "nova.virt.ironic.driver.IronicDriver", cpu_arch='i386'), supported_hv_specs=[objects.HVSpec.from_list( ["i386", "baremetal", "baremetal"])], free_disk_gb=20, free_ram_mb=2048, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0), objects.ComputeNode( id=3, local_gb=30, memory_mb=3072, vcpus=1, vcpus_used=0, local_gb_used=0, memory_mb_used=0, updated_at=None, cpu_info='baremetal cpu', host='host3', hypervisor_hostname='node3uuid', host_ip='127.0.0.1', hypervisor_version=1, hypervisor_type='ironic', stats=dict(ironic_driver= "nova.virt.ironic.driver.IronicDriver", cpu_arch='i386'), 
supported_hv_specs=[objects.HVSpec.from_list( ["i386", "baremetal", "baremetal"])], free_disk_gb=30, free_ram_mb=3072, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0), objects.ComputeNode( id=4, local_gb=40, memory_mb=4096, vcpus=1, vcpus_used=0, local_gb_used=0, memory_mb_used=0, updated_at=None, cpu_info='baremetal cpu', host='host4', hypervisor_hostname='node4uuid', host_ip='127.0.0.1', hypervisor_version=1, hypervisor_type='ironic', stats=dict(ironic_driver= "nova.virt.ironic.driver.IronicDriver", cpu_arch='i386'), supported_hv_specs=[objects.HVSpec.from_list( ["i386", "baremetal", "baremetal"])], free_disk_gb=40, free_ram_mb=4096, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0), # Broken entry objects.ComputeNode( id=5, local_gb=50, memory_mb=5120, vcpus=1, host='fake', cpu_info='baremetal cpu', stats=dict(ironic_driver= "nova.virt.ironic.driver.IronicDriver", cpu_arch='i386'), supported_hv_specs=[objects.HVSpec.from_list( ["i386", "baremetal", "baremetal"])], free_disk_gb=50, free_ram_mb=5120, hypervisor_hostname='fake-hyp'), ] SERVICES = [ objects.Service(host='host1', disabled=False), objects.Service(host='host2', disabled=True), objects.Service(host='host3', disabled=False), objects.Service(host='host4', disabled=False), ] def get_service_by_host(host): services = [service for service in SERVICES if service.host == host] return services[0] nova-13.0.0/nova/tests/unit/scheduler/test_filter_scheduler.py0000664000567000056710000003265612701407773025721 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Filter Scheduler. """ import mock from nova import exception from nova import objects from nova.scheduler import filter_scheduler from nova.scheduler import host_manager from nova.scheduler import utils as scheduler_utils from nova.scheduler import weights from nova import test # noqa from nova.tests.unit.scheduler import fakes from nova.tests.unit.scheduler import test_scheduler def fake_get_filtered_hosts(hosts, filter_properties, index): return list(hosts) class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): """Test case for Filter Scheduler.""" driver_cls = filter_scheduler.FilterScheduler @mock.patch('nova.objects.ServiceList.get_by_binary', return_value=fakes.SERVICES) @mock.patch('nova.objects.InstanceList.get_by_host') @mock.patch('nova.objects.ComputeNodeList.get_all', return_value=fakes.COMPUTE_NODES) @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value={'numa_topology': None, 'pci_requests': None}) def test_schedule_happy_day(self, mock_get_extra, mock_get_all, mock_by_host, mock_get_by_binary): """Make sure there's nothing glaringly wrong with _schedule() by doing a happy day pass through. """ self.next_weight = 1.0 def _fake_weigh_objects(_self, functions, hosts, options): self.next_weight += 2.0 host_state = hosts[0] return [weights.WeighedHost(host_state, self.next_weight)] self.stub_out('nova.scheduler.weights.HostWeightHandler.' 
'get_weighed_objects', _fake_weigh_objects) spec_obj = objects.RequestSpec( num_instances=10, flavor=objects.Flavor(memory_mb=512, root_gb=512, ephemeral_gb=0, vcpus=1), project_id=1, os_type='Linux', uuid='fake-uuid', pci_requests=None, numa_topology=None, instance_group=None) with mock.patch.object(self.driver.host_manager, 'get_filtered_hosts') as mock_get_hosts: mock_get_hosts.side_effect = fake_get_filtered_hosts weighed_hosts = self.driver._schedule(self.context, spec_obj) self.assertEqual(len(weighed_hosts), 10) for weighed_host in weighed_hosts: self.assertIsNotNone(weighed_host.obj) def test_add_retry_host(self): retry = dict(num_attempts=1, hosts=[]) filter_properties = dict(retry=retry) host = "fakehost" node = "fakenode" scheduler_utils._add_retry_host(filter_properties, host, node) hosts = filter_properties['retry']['hosts'] self.assertEqual(1, len(hosts)) self.assertEqual([host, node], hosts[0]) def test_post_select_populate(self): # Test addition of certain filter props after a node is selected. 
retry = {'hosts': [], 'num_attempts': 1} filter_properties = {'retry': retry} host_state = host_manager.HostState('host', 'node') host_state.limits['vcpu'] = 5 scheduler_utils.populate_filter_properties(filter_properties, host_state) self.assertEqual(['host', 'node'], filter_properties['retry']['hosts'][0]) self.assertEqual({'vcpu': 5}, host_state.limits) @mock.patch('nova.objects.ServiceList.get_by_binary', return_value=fakes.SERVICES) @mock.patch('nova.objects.InstanceList.get_by_host') @mock.patch('nova.objects.ComputeNodeList.get_all', return_value=fakes.COMPUTE_NODES) @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value={'numa_topology': None, 'pci_requests': None}) def test_schedule_host_pool(self, mock_get_extra, mock_get_all, mock_by_host, mock_get_by_binary): """Make sure the scheduler_host_subset_size property works properly.""" self.flags(scheduler_host_subset_size=2) spec_obj = objects.RequestSpec( num_instances=1, project_id=1, os_type='Linux', uuid='fake-uuid', flavor=objects.Flavor(root_gb=512, memory_mb=512, ephemeral_gb=0, vcpus=1), pci_requests=None, numa_topology=None, instance_group=None) with mock.patch.object(self.driver.host_manager, 'get_filtered_hosts') as mock_get_hosts: mock_get_hosts.side_effect = fake_get_filtered_hosts hosts = self.driver._schedule(self.context, spec_obj) # one host should be chosen self.assertEqual(len(hosts), 1) @mock.patch('nova.objects.ServiceList.get_by_binary', return_value=fakes.SERVICES) @mock.patch('nova.objects.InstanceList.get_by_host') @mock.patch('nova.objects.ComputeNodeList.get_all', return_value=fakes.COMPUTE_NODES) @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value={'numa_topology': None, 'pci_requests': None}) def test_schedule_large_host_pool(self, mock_get_extra, mock_get_all, mock_by_host, mock_get_by_binary): """Hosts should still be chosen if pool size is larger than number of filtered hosts. 
""" self.flags(scheduler_host_subset_size=20) spec_obj = objects.RequestSpec( num_instances=1, project_id=1, os_type='Linux', uuid='fake-uuid', flavor=objects.Flavor(root_gb=512, memory_mb=512, ephemeral_gb=0, vcpus=1), pci_requests=None, numa_topology=None, instance_group=None) with mock.patch.object(self.driver.host_manager, 'get_filtered_hosts') as mock_get_hosts: mock_get_hosts.side_effect = fake_get_filtered_hosts hosts = self.driver._schedule(self.context, spec_obj) # one host should be chosen self.assertEqual(len(hosts), 1) @mock.patch('nova.scheduler.host_manager.HostManager._get_instance_info') @mock.patch('nova.objects.ServiceList.get_by_binary', return_value=fakes.SERVICES) @mock.patch('nova.objects.ComputeNodeList.get_all', return_value=fakes.COMPUTE_NODES) @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value={'numa_topology': None, 'pci_requests': None}) def test_schedule_chooses_best_host(self, mock_get_extra, mock_cn_get_all, mock_get_by_binary, mock_get_inst_info): """If scheduler_host_subset_size is 1, the largest host with greatest weight should be returned. """ self.flags(scheduler_host_subset_size=1) self.next_weight = 50 def _fake_weigh_objects(_self, functions, hosts, options): this_weight = self.next_weight self.next_weight = 0 host_state = hosts[0] return [weights.WeighedHost(host_state, this_weight)] self.stub_out('nova.scheduler.weights.HostWeightHandler.' 
'get_weighed_objects', _fake_weigh_objects) spec_obj = objects.RequestSpec( num_instances=1, project_id=1, os_type='Linux', uuid='fake-uuid', flavor=objects.Flavor(root_gb=512, memory_mb=512, ephemeral_gb=0, vcpus=1), pci_requests=None, numa_topology=None, instance_group=None) with mock.patch.object(self.driver.host_manager, 'get_filtered_hosts') as mock_get_hosts: mock_get_hosts.side_effect = fake_get_filtered_hosts hosts = self.driver._schedule(self.context, spec_obj) # one host should be chosen self.assertEqual(1, len(hosts)) self.assertEqual(50, hosts[0].weight) @mock.patch('nova.objects.ServiceList.get_by_binary', return_value=fakes.SERVICES) @mock.patch('nova.objects.InstanceList.get_by_host') @mock.patch('nova.objects.ComputeNodeList.get_all', return_value=fakes.COMPUTE_NODES) @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value={'numa_topology': None, 'pci_requests': None}) def test_select_destinations(self, mock_get_extra, mock_get_all, mock_by_host, mock_get_by_binary): """select_destinations is basically a wrapper around _schedule(). Similar to the _schedule tests, this just does a happy path test to ensure there is nothing glaringly wrong. """ self.next_weight = 1.0 selected_hosts = [] selected_nodes = [] def _fake_weigh_objects(_self, functions, hosts, options): self.next_weight += 2.0 host_state = hosts[0] selected_hosts.append(host_state.host) selected_nodes.append(host_state.nodename) return [weights.WeighedHost(host_state, self.next_weight)] self.stub_out('nova.scheduler.weights.HostWeightHandler.' 
'get_weighed_objects', _fake_weigh_objects) spec_obj = objects.RequestSpec( flavor=objects.Flavor(memory_mb=512, root_gb=512, ephemeral_gb=0, vcpus=1), project_id=1, os_type='Linux', instance_uuid='fake-uuid', num_instances=1, pci_requests=None, numa_topology=None, instance_group=None) with mock.patch.object(self.driver.host_manager, 'get_filtered_hosts') as mock_get_hosts: mock_get_hosts.side_effect = fake_get_filtered_hosts dests = self.driver.select_destinations(self.context, spec_obj) (host, node) = (dests[0]['host'], dests[0]['nodename']) self.assertEqual(host, selected_hosts[0]) self.assertEqual(node, selected_nodes[0]) @mock.patch.object(filter_scheduler.FilterScheduler, '_schedule') def test_select_destinations_notifications(self, mock_schedule): mock_schedule.return_value = [mock.Mock()] with mock.patch.object(self.driver.notifier, 'info') as mock_info: expected = {'num_instances': 1, 'instance_properties': {'uuid': 'uuid1'}, 'instance_type': {}, 'image': {}} spec_obj = objects.RequestSpec(num_instances=1, instance_uuid='uuid1') self.driver.select_destinations(self.context, spec_obj) expected = [ mock.call(self.context, 'scheduler.select_destinations.start', dict(request_spec=expected)), mock.call(self.context, 'scheduler.select_destinations.end', dict(request_spec=expected))] self.assertEqual(expected, mock_info.call_args_list) @mock.patch.object(filter_scheduler.FilterScheduler, '_schedule') def test_select_destinations_no_valid_host(self, mock_schedule): mock_schedule.return_value = [] self.assertRaises(exception.NoValidHost, self.driver.select_destinations, self.context, objects.RequestSpec(num_instances=1)) def test_select_destinations_no_valid_host_not_enough(self): # Tests that we have fewer hosts available than number of instances # requested to build. 
consumed_hosts = [mock.MagicMock(), mock.MagicMock()] with mock.patch.object(self.driver, '_schedule', return_value=consumed_hosts): try: self.driver.select_destinations( self.context, objects.RequestSpec(num_instances=3)) self.fail('Expected NoValidHost to be raised.') except exception.NoValidHost as e: # Make sure that we provided a reason why NoValidHost. self.assertIn('reason', e.kwargs) self.assertTrue(len(e.kwargs['reason']) > 0) # Make sure that the consumed hosts have chance to be reverted. for host in consumed_hosts: self.assertIsNone(host.obj.updated) nova-13.0.0/nova/tests/unit/scheduler/filters/0000775000567000056710000000000012701410205022401 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/scheduler/filters/test_num_instances_filters.py0000664000567000056710000000556612701407773030444 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock

from nova import objects
from nova.scheduler.filters import num_instances_filter
from nova import test
from nova.tests.unit.scheduler import fakes


class TestNumInstancesFilter(test.NoDBTestCase):
    """Tests for the (Aggregate)NumInstancesFilter scheduler filters."""

    def test_filter_num_instances_passes(self):
        # Host with fewer instances than max_instances_per_host passes.
        self.flags(max_instances_per_host=5)
        self.filt_cls = num_instances_filter.NumInstancesFilter()
        host = fakes.FakeHostState('host1', 'node1',
                                   {'num_instances': 4})
        spec_obj = objects.RequestSpec()
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_filter_num_instances_fails(self):
        # Host already at the limit is rejected.
        self.flags(max_instances_per_host=5)
        self.filt_cls = num_instances_filter.NumInstancesFilter()
        host = fakes.FakeHostState('host1', 'node1',
                                   {'num_instances': 5})
        spec_obj = objects.RequestSpec()
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_filter_aggregate_num_instances_value(self, agg_mock):
        # An aggregate-level max_instances_per_host overrides the config
        # option when the host belongs to such an aggregate.
        self.flags(max_instances_per_host=4)
        self.filt_cls = num_instances_filter.AggregateNumInstancesFilter()
        host = fakes.FakeHostState('host1', 'node1',
                                   {'num_instances': 5})
        spec_obj = objects.RequestSpec(context=mock.sentinel.ctx)
        agg_mock.return_value = set([])
        # No aggregate defined for that host.
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
        agg_mock.assert_called_once_with(host, 'max_instances_per_host')
        agg_mock.return_value = set(['6'])
        # Aggregate defined for that host.
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_filter_aggregate_num_instances_value_error(self, agg_mock):
        # A non-numeric aggregate value is ignored and the host passes.
        self.flags(max_instances_per_host=6)
        self.filt_cls = num_instances_filter.AggregateNumInstancesFilter()
        host = fakes.FakeHostState('host1', 'node1', {})
        spec_obj = objects.RequestSpec(context=mock.sentinel.ctx)
        agg_mock.return_value = set(['XXX'])
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        agg_mock.assert_called_once_with(host, 'max_instances_per_host')
nova-13.0.0/nova/tests/unit/scheduler/filters/test_compute_filters.py0000664000567000056710000000446412701407773027246 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock from nova import objects from nova.scheduler.filters import compute_filter from nova import test from nova.tests.unit.scheduler import fakes @mock.patch('nova.servicegroup.API.service_is_up') class TestComputeFilter(test.NoDBTestCase): def test_compute_filter_manual_disable(self, service_up_mock): filt_cls = compute_filter.ComputeFilter() spec_obj = objects.RequestSpec( flavor=objects.Flavor(memory_mb=1024)) service = {'disabled': True} host = fakes.FakeHostState('host1', 'node1', {'free_ram_mb': 1024, 'service': service}) self.assertFalse(filt_cls.host_passes(host, spec_obj)) self.assertFalse(service_up_mock.called) def test_compute_filter_sgapi_passes(self, service_up_mock): filt_cls = compute_filter.ComputeFilter() spec_obj = objects.RequestSpec( flavor=objects.Flavor(memory_mb=1024)) service = {'disabled': False} host = fakes.FakeHostState('host1', 'node1', {'free_ram_mb': 1024, 'service': service}) service_up_mock.return_value = True self.assertTrue(filt_cls.host_passes(host, spec_obj)) service_up_mock.assert_called_once_with(service) def test_compute_filter_sgapi_fails(self, service_up_mock): filt_cls = compute_filter.ComputeFilter() spec_obj = objects.RequestSpec( flavor=objects.Flavor(memory_mb=1024)) service = {'disabled': False, 'updated_at': 'now'} host = fakes.FakeHostState('host1', 'node1', {'free_ram_mb': 1024, 'service': service}) service_up_mock.return_value = False self.assertFalse(filt_cls.host_passes(host, spec_obj)) service_up_mock.assert_called_once_with(service) nova-13.0.0/nova/tests/unit/scheduler/filters/test_core_filters.py0000664000567000056710000001060712701407773026516 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
import mock

from nova import objects
from nova.scheduler.filters import core_filter
from nova import test
from nova.tests.unit.scheduler import fakes


class TestCoreFilter(test.NoDBTestCase):
    """Tests for CoreFilter and AggregateCoreFilter (vCPU capacity)."""

    @staticmethod
    def _host(**host_state):
        """Build a FakeHostState from vcpu-related keyword values."""
        return fakes.FakeHostState('host1', 'node1', host_state)

    def test_core_filter_passes(self):
        self.filt_cls = core_filter.CoreFilter()
        spec_obj = objects.RequestSpec(flavor=objects.Flavor(vcpus=1))
        # 4 * 2 = 8 overcommitted vcpus, 7 used -> one more fits.
        host = self._host(vcpus_total=4, vcpus_used=7,
                          cpu_allocation_ratio=2)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_core_filter_fails_safe(self):
        self.filt_cls = core_filter.CoreFilter()
        spec_obj = objects.RequestSpec(flavor=objects.Flavor(vcpus=1))
        # A host that does not report vcpu data passes (fail-safe).
        host = self._host()
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_core_filter_fails(self):
        self.filt_cls = core_filter.CoreFilter()
        spec_obj = objects.RequestSpec(flavor=objects.Flavor(vcpus=1))
        # All 8 overcommitted vcpus are already in use.
        host = self._host(vcpus_total=4, vcpus_used=8,
                          cpu_allocation_ratio=2)
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_core_filter_single_instance_overcommit_fails(self):
        self.filt_cls = core_filter.CoreFilter()
        spec_obj = objects.RequestSpec(flavor=objects.Flavor(vcpus=2))
        # A single instance may not request more vcpus than the host
        # physically has, regardless of the allocation ratio.
        host = self._host(vcpus_total=1, vcpus_used=0,
                          cpu_allocation_ratio=2)
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_core_filter_value_error(self, agg_mock):
        self.filt_cls = core_filter.AggregateCoreFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx, flavor=objects.Flavor(vcpus=1))
        host = self._host(vcpus_total=4, vcpus_used=7,
                          cpu_allocation_ratio=2)
        # Unparseable aggregate value: fall back to the host's ratio.
        agg_mock.return_value = set(['XXX'])
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        agg_mock.assert_called_once_with(host, 'cpu_allocation_ratio')
        self.assertEqual(4 * 2, host.limits['vcpu'])

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_core_filter_default_value(self, agg_mock):
        self.filt_cls = core_filter.AggregateCoreFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx, flavor=objects.Flavor(vcpus=1))
        host = self._host(vcpus_total=4, vcpus_used=8,
                          cpu_allocation_ratio=2)
        # False: no aggregate metadata, so the default flag applies and
        # the host is full.
        agg_mock.return_value = set([])
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
        agg_mock.assert_called_once_with(host, 'cpu_allocation_ratio')
        # True: the aggregate supplies a larger ratio.
        agg_mock.return_value = set(['3'])
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        self.assertEqual(4 * 3, host.limits['vcpu'])

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_core_filter_conflict_values(self, agg_mock):
        self.filt_cls = core_filter.AggregateCoreFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx, flavor=objects.Flavor(vcpus=1))
        host = self._host(vcpus_total=4, vcpus_used=8,
                          cpu_allocation_ratio=1)
        # Conflicting aggregate values: the minimum ratio wins.
        agg_mock.return_value = set(['2', '3'])
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
        self.assertEqual(4 * 2, host.limits['vcpu'])
import mock

from nova import objects
from nova.scheduler.filters import type_filter
from nova import test
from nova.tests.unit.scheduler import fakes


class TestTypeFilter(test.NoDBTestCase):
    """Tests for TypeAffinityFilter and AggregateTypeAffinityFilter."""

    def test_type_filter(self):
        # TypeAffinityFilter only allows a host that is empty or runs
        # exclusively instances of the requested flavor.
        self.filt_cls = type_filter.TypeAffinityFilter()
        host = fakes.FakeHostState('fake_host', 'fake_node', {})
        host.instances = {}
        target_id = 1
        spec_obj = objects.RequestSpec(
            context=mock.MagicMock(),
            flavor=objects.Flavor(id=target_id))
        # True since no instances on host
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        # Add an instance with the same instance_type_id
        inst1 = objects.Instance(uuid='aa', instance_type_id=target_id)
        host.instances = {inst1.uuid: inst1}
        # True since only same instance_type_id on host
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        # Add an instance with a different instance_type_id
        diff_type = target_id + 1
        inst2 = objects.Instance(uuid='bb', instance_type_id=diff_type)
        host.instances.update({inst2.uuid: inst2})
        # False since host now has an instance of a different type
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_type_filter_no_metadata(self, agg_mock):
        self.filt_cls = type_filter.AggregateTypeAffinityFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake1'))
        host = fakes.FakeHostState('fake_host', 'fake_node', {})
        # tests when no instance_type is defined for aggregate
        agg_mock.return_value = set([])
        # True as no instance_type set for aggregate
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        agg_mock.assert_called_once_with(host, 'instance_type')

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_type_filter_single_instance_type(self, agg_mock):
        self.filt_cls = type_filter.AggregateTypeAffinityFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake1'))
        spec_obj2 = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake2'))
        host = fakes.FakeHostState('fake_host', 'fake_node', {})
        # tests when a single instance_type is defined for an aggregate
        # using legacy single value syntax
        agg_mock.return_value = set(['fake1'])
        # True as instance_type is allowed for aggregate
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        # False as instance_type is not allowed for aggregate
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj2))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_type_filter_multi_aggregate(self, agg_mock):
        self.filt_cls = type_filter.AggregateTypeAffinityFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake1'))
        spec_obj2 = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake2'))
        spec_obj3 = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake3'))
        host = fakes.FakeHostState('fake_host', 'fake_node', {})
        # tests when a single instance_type is defined for multiple aggregates
        # using legacy single value syntax
        agg_mock.return_value = set(['fake1', 'fake2'])
        # True as instance_type is allowed for first aggregate
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        # True as instance_type is allowed for second aggregate
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj2))
        # False as instance_type is not allowed for aggregates
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj3))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_type_filter_multi_instance_type(self, agg_mock):
        self.filt_cls = type_filter.AggregateTypeAffinityFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake1'))
        spec_obj2 = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake2'))
        spec_obj3 = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake3'))
        host = fakes.FakeHostState('fake_host', 'fake_node', {})
        # tests when multiple instance_types are defined for aggregate
        # (comma-separated inside a single metadata value)
        agg_mock.return_value = set(['fake1,fake2'])
        # True as instance_type is allowed for aggregate
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        # True as instance_type is allowed for aggregate
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj2))
        # False as instance_type is not allowed for aggregate
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj3))
from nova import objects from nova.scheduler.filters import retry_filter from nova import test from nova.tests.unit.scheduler import fakes class TestRetryFilter(test.NoDBTestCase): def setUp(self): super(TestRetryFilter, self).setUp() self.filt_cls = retry_filter.RetryFilter() def test_retry_filter_disabled(self): # Test case where retry/re-scheduling is disabled. host = fakes.FakeHostState('host1', 'node1', {}) spec_obj = objects.RequestSpec(retry=None) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) def test_retry_filter_pass(self): # Node not previously tried. host = fakes.FakeHostState('host1', 'nodeX', {}) retry = objects.SchedulerRetries( num_attempts=2, hosts=objects.ComputeNodeList(objects=[ # same host, different node objects.ComputeNode(host='host1', hypervisor_hostname='node1'), # different host and node objects.ComputeNode(host='host2', hypervisor_hostname='node2'), ])) spec_obj = objects.RequestSpec(retry=retry) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) def test_retry_filter_fail(self): # Node was already tried. host = fakes.FakeHostState('host1', 'node1', {}) retry = objects.SchedulerRetries( num_attempts=1, hosts=objects.ComputeNodeList(objects=[ objects.ComputeNode(host='host1', hypervisor_hostname='node1') ])) spec_obj = objects.RequestSpec(retry=retry) self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) nova-13.0.0/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py0000664000567000056710000001010712701407773033773 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
import mock

from nova import objects
from nova.scheduler.filters import aggregate_instance_extra_specs as agg_specs
from nova import test
from nova.tests.unit.scheduler import fakes


@mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host')
class TestAggregateInstanceExtraSpecsFilter(test.NoDBTestCase):
    """Tests for AggregateInstanceExtraSpecsFilter, which matches a
    flavor's extra specs against the host's aggregate metadata.
    """

    def setUp(self):
        super(TestAggregateInstanceExtraSpecsFilter, self).setUp()
        self.filt_cls = agg_specs.AggregateInstanceExtraSpecsFilter()

    @staticmethod
    def _spec_obj(**flavor_kwargs):
        """Build a RequestSpec with a 1024 MB flavor plus any extra
        flavor fields (e.g. extra_specs).
        """
        return objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024, **flavor_kwargs))

    def test_aggregate_filter_passes_no_extra_specs(self, agg_mock):
        # Without extra specs there is nothing to match; the filter
        # passes without touching aggregate metadata at all.
        host = fakes.FakeHostState('host1', 'node1', {'opt1': 1, 'opt2': 2})
        self.assertTrue(self.filt_cls.host_passes(host, self._spec_obj()))
        self.assertFalse(agg_mock.called)

    def test_aggregate_filter_passes_empty_extra_specs(self, agg_mock):
        # An empty extra_specs dict behaves the same as no extra specs.
        host = fakes.FakeHostState('host1', 'node1', {'opt1': 1, 'opt2': 2})
        spec_obj = self._spec_obj(extra_specs={})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        self.assertFalse(agg_mock.called)

    def _do_test_aggregate_filter_extra_specs(self, especs, passes):
        """Run the filter with the given extra specs and assert the
        expected pass/fail outcome.
        """
        spec_obj = self._spec_obj(extra_specs=especs)
        host = fakes.FakeHostState('host1', 'node1', {'free_ram_mb': 1024})
        assertion = self.assertTrue if passes else self.assertFalse
        assertion(self.filt_cls.host_passes(host, spec_obj))

    def test_aggregate_filter_passes_extra_specs_simple(self, agg_mock):
        agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
        especs = {
            # Un-scoped extra spec
            'opt1': '1',
            # Scoped extra spec that applies to this filter
            'aggregate_instance_extra_specs:opt2': '2',
            # Scoped extra spec that does not apply to this filter
            'trust:trusted_host': 'true',
        }
        self._do_test_aggregate_filter_extra_specs(especs, passes=True)

    def test_aggregate_filter_passes_extra_specs_simple_comma(self, agg_mock):
        # Comma-separated metadata values act as alternatives.
        agg_mock.return_value = {'opt1': '1,3', 'opt2': '2'}
        especs = {
            # Un-scoped extra spec
            'opt1': '1',
            # Scoped extra spec that applies to this filter
            'aggregate_instance_extra_specs:opt1': '3',
            # Scoped extra spec that does not apply to this filter
            'trust:trusted_host': 'true',
        }
        self._do_test_aggregate_filter_extra_specs(especs, passes=True)

    def test_aggregate_filter_passes_with_key_same_as_scope(self, agg_mock):
        agg_mock.return_value = {'aggregate_instance_extra_specs': '1'}
        especs = {
            # Un-scoped extra spec, make sure we don't blow up if it
            # happens to match our scope.
            'aggregate_instance_extra_specs': '1',
        }
        self._do_test_aggregate_filter_extra_specs(especs, passes=True)

    def test_aggregate_filter_fails_extra_specs_simple(self, agg_mock):
        # 'opt2' requests '222' but the aggregate only offers '2'.
        agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
        especs = {
            'opt1': '1',
            'opt2': '222',
            'trust:trusted_host': 'true'
        }
        self._do_test_aggregate_filter_extra_specs(especs, passes=False)
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils
import requests

from nova import objects
from nova.scheduler.filters import trusted_filter
from nova import test
from nova.tests.unit.scheduler import fakes
from nova import utils

CONF = cfg.CONF


class AttestationServiceTestCase(test.NoDBTestCase):
    """Tests for trusted_filter.AttestationService._do_request()."""

    def setUp(self):
        super(AttestationServiceTestCase, self).setUp()
        self.api_url = '/OpenAttestationWebServices/V1.0'
        self.host = 'localhost'
        self.port = '8443'
        # The four HTTP status codes _do_request() treats as success.
        self.statuses = (requests.codes.OK, requests.codes.CREATED,
                         requests.codes.ACCEPTED, requests.codes.NO_CONTENT)

    @mock.patch.object(requests, 'request')
    def test_do_request_possible_statuses(self, request_mock):
        """This test case checks if '_do_request()' method returns
        appropriate status_code (200) and result (text converted to json),
        while status_code returned by request is in one of fourth eligible
        statuses
        """
        for status_code in self.statuses:
            request_mock.return_value.status_code = status_code
            request_mock.return_value.text = '{"test": "test"}'
            attestation_service = trusted_filter.AttestationService()
            status, result = attestation_service._do_request(
                'POST', 'PollHosts', {}, {})
            # Any "success" status is normalized to 200 with parsed JSON.
            self.assertEqual(requests.codes.OK, status)
            self.assertEqual(jsonutils.loads(request_mock.return_value.text),
                             result)

    @mock.patch.object(requests, 'request')
    def test_do_request_other_status(self, request_mock):
        """This test case checks if '_do_request()' method returns
        appropriate status (this returned by request method) and result
        (None), while status_code returned by request is not in one of
        fourth eligible statuses
        """
        request_mock.return_value.status_code = requests.codes.NOT_FOUND
        request_mock.return_value.text = '{"test": "test"}'
        attestation_service = trusted_filter.AttestationService()
        status, result = attestation_service._do_request(
            'POST', 'PollHosts', {}, {})
        self.assertEqual(requests.codes.NOT_FOUND, status)
        self.assertIsNone(result)

    @mock.patch.object(requests, 'request')
    def test_do_request_unconvertible_text(self, request_mock):
        # Responses that cannot be JSON-decoded are returned verbatim.
        for status_code in self.statuses:
            # this unconvertible_texts leads to TypeError and ValueError
            # in jsonutils.loads(res.text) in _do_request() method
            for unconvertible_text in ({"test": "test"}, '{}{}'):
                request_mock.return_value.status_code = status_code
                request_mock.return_value.text = unconvertible_text
                attestation_service = trusted_filter.AttestationService()
                status, result = attestation_service._do_request(
                    'POST', 'PollHosts', {}, {})
                self.assertEqual(requests.codes.OK, status)
                self.assertEqual(unconvertible_text, result)


@mock.patch.object(trusted_filter.AttestationService, '_request')
class TestTrustedFilter(test.NoDBTestCase):
    """Tests for TrustedFilter, which consults an attestation service
    (with a local cache) to match hosts to 'trust:trusted_host' specs.
    """

    def setUp(self):
        super(TestTrustedFilter, self).setUp()
        # TrustedFilter's constructor creates the attestation cache, which
        # calls to get a list of all the compute nodes.
        fake_compute_nodes = [
            objects.ComputeNode(hypervisor_hostname='node1'),
        ]
        with mock.patch('nova.objects.ComputeNodeList.get_all') as mocked:
            mocked.return_value = fake_compute_nodes
            self.filt_cls = trusted_filter.TrustedFilter()

    def test_trusted_filter_default_passes(self, req_mock):
        # No trust extra spec requested: pass without any attestation
        # service call.
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024))
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        self.assertFalse(req_mock.called)

    def test_trusted_filter_trusted_and_trusted_passes(self, req_mock):
        # Requested trusted, attested trusted: pass.
        oat_data = {"hosts": [{"host_name": "node1",
                               "trust_lvl": "trusted",
                               "vtime": utils.isotime()}]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'trusted'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024,
                                  extra_specs=extra_specs))
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        req_mock.assert_called_once_with("POST", "PollHosts", ["node1"])

    def test_trusted_filter_trusted_and_untrusted_fails(self, req_mock):
        # Requested trusted, attested untrusted: fail.
        oat_data = {"hosts": [{"host_name": "node1",
                               "trust_lvl": "untrusted",
                               "vtime": utils.isotime()}]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'trusted'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024,
                                  extra_specs=extra_specs))
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_trusted_filter_untrusted_and_trusted_fails(self, req_mock):
        # Requested untrusted; attestation data is for a different node
        # ("node" vs "node1"), so the host does not match.
        oat_data = {"hosts": [{"host_name": "node",
                               "trust_lvl": "trusted",
                               "vtime": utils.isotime()}]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'untrusted'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024,
                                  extra_specs=extra_specs))
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_trusted_filter_untrusted_and_untrusted_passes(self, req_mock):
        # Requested untrusted, attested untrusted: pass.
        oat_data = {"hosts": [{"host_name": "node1",
                               "trust_lvl": "untrusted",
                               "vtime": utils.isotime()}]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'untrusted'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024,
                                  extra_specs=extra_specs))
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_trusted_filter_update_cache(self, req_mock):
        # A second call within the auth timeout is served from cache;
        # advancing time past the timeout triggers a new attestation.
        oat_data = {"hosts": [{"host_name": "node1",
                               "trust_lvl": "untrusted",
                               "vtime": utils.isotime()}]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'untrusted'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024,
                                  extra_specs=extra_specs))
        host = fakes.FakeHostState('host1', 'node1', {})
        self.filt_cls.host_passes(host, spec_obj)  # Fill the caches
        req_mock.reset_mock()
        self.filt_cls.host_passes(host, spec_obj)
        self.assertFalse(req_mock.called)
        req_mock.reset_mock()
        time_fixture = self.useFixture(utils_fixture.TimeFixture())
        time_fixture.advance_time_seconds(
            CONF.trusted_computing.attestation_auth_timeout + 80)
        self.filt_cls.host_passes(host, spec_obj)
        self.assertTrue(req_mock.called)

    def test_trusted_filter_update_cache_timezone(self, req_mock):
        # The cached vtime carries a non-UTC offset; cache validity must
        # be computed in normalized (UTC) time.
        oat_data = {"hosts": [{"host_name": "node1",
                               "trust_lvl": "untrusted",
                               "vtime": "2012-09-09T05:10:40-04:00"}]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'untrusted'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024,
                                  extra_specs=extra_specs))
        host = fakes.FakeHostState('host1', 'node1', {})
        time_fixture = self.useFixture(utils_fixture.TimeFixture(
            timeutils.normalize_time(
                timeutils.parse_isotime("2012-09-09T09:10:40Z"))))
        self.filt_cls.host_passes(host, spec_obj)  # Fill the caches
        req_mock.reset_mock()
        self.filt_cls.host_passes(host, spec_obj)
        self.assertFalse(req_mock.called)
        req_mock.reset_mock()
        # Still within the auth timeout: cache remains valid.
        time_fixture.advance_time_seconds(
            CONF.trusted_computing.attestation_auth_timeout - 10)
        self.filt_cls.host_passes(host, spec_obj)
        self.assertFalse(req_mock.called)

    def test_trusted_filter_combine_hosts(self, req_mock):
        # With two known compute nodes, the initial poll batches both
        # node names in one attestation request.
        fake_compute_nodes = [
            objects.ComputeNode(hypervisor_hostname='node1'),
            objects.ComputeNode(hypervisor_hostname='node2')
        ]
        with mock.patch('nova.objects.ComputeNodeList.get_all') as mocked:
            mocked.return_value = fake_compute_nodes
            self.filt_cls = trusted_filter.TrustedFilter()
        oat_data = {"hosts": [{"host_name": "node1",
                               "trust_lvl": "untrusted",
                               "vtime": "2012-09-09T05:10:40-04:00"}]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'trusted'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024,
                                  extra_specs=extra_specs))
        host = fakes.FakeHostState('host1', 'node1', {})
        self.filt_cls.host_passes(host, spec_obj)  # Fill the caches
        self.assertTrue(req_mock.called)
        self.assertEqual(1, req_mock.call_count)
        call_args = list(req_mock.call_args[0])
        expected_call_args = ['POST', 'PollHosts', ['node2', 'node1']]
        # JSON comparison so the node-list ordering check is stable.
        self.assertJsonEqual(call_args, expected_call_args)

    def test_trusted_filter_trusted_and_locale_formated_vtime_passes(self,
            req_mock):
        oat_data = {"hosts": [{"host_name": "host1",
                               "trust_lvl": "trusted",
                               "vtime": timeutils.utcnow().strftime(
                                   "%c")},
                              {"host_name": "host2",
                               "trust_lvl": "trusted",
                               "vtime": timeutils.utcnow().strftime(
                                   "%D")},
                              # This is just a broken date to ensure that
                              # we're not just arbitrarily accepting any
                              # date format.
                              ]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'trusted'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024,
                                  extra_specs=extra_specs))
        host = fakes.FakeHostState('host1', 'host1', {})
        bad_host = fakes.FakeHostState('host2', 'host2', {})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        self.assertFalse(self.filt_cls.host_passes(bad_host, spec_obj))
import mock from nova import objects from nova.scheduler.filters import availability_zone_filter from nova import test from nova.tests.unit.scheduler import fakes @mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host') class TestAvailabilityZoneFilter(test.NoDBTestCase): def setUp(self): super(TestAvailabilityZoneFilter, self).setUp() self.filt_cls = availability_zone_filter.AvailabilityZoneFilter() @staticmethod def _make_zone_request(zone): return objects.RequestSpec( context=mock.sentinel.ctx, availability_zone=zone) def test_availability_zone_filter_same(self, agg_mock): agg_mock.return_value = {'availability_zone': 'nova'} request = self._make_zone_request('nova') host = fakes.FakeHostState('host1', 'node1', {}) self.assertTrue(self.filt_cls.host_passes(host, request)) def test_availability_zone_filter_same_comma(self, agg_mock): agg_mock.return_value = {'availability_zone': 'nova,nova2'} request = self._make_zone_request('nova') host = fakes.FakeHostState('host1', 'node1', {}) self.assertTrue(self.filt_cls.host_passes(host, request)) def test_availability_zone_filter_different(self, agg_mock): agg_mock.return_value = {'availability_zone': 'nova'} request = self._make_zone_request('bad') host = fakes.FakeHostState('host1', 'node1', {}) self.assertFalse(self.filt_cls.host_passes(host, request)) nova-13.0.0/nova/tests/unit/scheduler/filters/test_disk_filters.py0000664000567000056710000001060612701407773026517 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
import mock

from nova import objects
from nova.scheduler.filters import disk_filter
from nova import test
from nova.tests.unit.scheduler import fakes


class TestDiskFilter(test.NoDBTestCase):
    """Tests for DiskFilter and AggregateDiskFilter (disk capacity,
    honoring disk_allocation_ratio oversubscription).
    """

    def setUp(self):
        super(TestDiskFilter, self).setUp()

    def test_disk_filter_passes(self):
        # Requested 1 GB root + 1 GB ephemeral + 0.5 GB swap fits in
        # the 11 GB reported free.
        filt_cls = disk_filter.DiskFilter()
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(root_gb=1, ephemeral_gb=1, swap=512))
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_disk_mb': 11 * 1024,
                                    'total_usable_disk_gb': 13,
                                    'disk_allocation_ratio': 1.0})
        self.assertTrue(filt_cls.host_passes(host, spec_obj))

    def test_disk_filter_fails(self):
        # 10 + 1 + 1 GB requested exceeds the 11 GB free.
        filt_cls = disk_filter.DiskFilter()
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(
                root_gb=10, ephemeral_gb=1, swap=1024))
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_disk_mb': 11 * 1024,
                                    'total_usable_disk_gb': 13,
                                    'disk_allocation_ratio': 1.0})
        self.assertFalse(filt_cls.host_passes(host, spec_obj))

    def test_disk_filter_oversubscribe(self):
        filt_cls = disk_filter.DiskFilter()
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(
                root_gb=100, ephemeral_gb=18, swap=1024))
        # 1GB used... so 119GB allowed...
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_disk_mb': 11 * 1024,
                                    'total_usable_disk_gb': 12,
                                    'disk_allocation_ratio': 10.0})
        self.assertTrue(filt_cls.host_passes(host, spec_obj))
        # The filter records the oversubscribed limit on the host.
        self.assertEqual(12 * 10.0, host.limits['disk_gb'])

    def test_disk_filter_oversubscribe_fail(self):
        # One GB more (ephemeral_gb=19) than the oversubscribed limit.
        filt_cls = disk_filter.DiskFilter()
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(
                root_gb=100, ephemeral_gb=19, swap=1024))
        # 1GB used... so 119GB allowed...
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_disk_mb': 11 * 1024,
                                    'total_usable_disk_gb': 12,
                                    'disk_allocation_ratio': 10.0})
        self.assertFalse(filt_cls.host_passes(host, spec_obj))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_disk_filter_value_error(self, agg_mock):
        # An unparseable aggregate ratio falls back to the host's ratio.
        filt_cls = disk_filter.AggregateDiskFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(
                root_gb=1, ephemeral_gb=1, swap=1024))
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_disk_mb': 3 * 1024,
                                    'total_usable_disk_gb': 1,
                                    'disk_allocation_ratio': 1.0})
        agg_mock.return_value = set(['XXX'])
        self.assertTrue(filt_cls.host_passes(host, spec_obj))
        agg_mock.assert_called_once_with(host, 'disk_allocation_ratio')

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_disk_filter_default_value(self, agg_mock):
        filt_cls = disk_filter.AggregateDiskFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(
                root_gb=2, ephemeral_gb=1, swap=1024))
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_disk_mb': 3 * 1024,
                                    'total_usable_disk_gb': 1,
                                    'disk_allocation_ratio': 1.0})
        # Uses global conf.
        agg_mock.return_value = set([])
        self.assertFalse(filt_cls.host_passes(host, spec_obj))
        agg_mock.assert_called_once_with(host, 'disk_allocation_ratio')
        # Aggregate supplies a larger ratio, so the host now passes.
        agg_mock.return_value = set(['2'])
        self.assertTrue(filt_cls.host_passes(host, spec_obj))
import mock

from nova import objects
from nova.scheduler.filters import aggregate_multitenancy_isolation as ami
from nova import test
from nova.tests.unit.scheduler import fakes


@mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host')
class TestAggregateMultitenancyIsolationFilter(test.NoDBTestCase):
    """Tests for AggregateMultiTenancyIsolation, which restricts hosts
    carrying 'filter_tenant_id' metadata to the listed tenants.
    """

    def setUp(self):
        super(TestAggregateMultitenancyIsolationFilter, self).setUp()
        self.filt_cls = ami.AggregateMultiTenancyIsolation()

    def _host_passes(self, agg_mock, metadata):
        """Run the filter for project 'my_tenantid' against a host whose
        aggregate metadata is *metadata*.
        """
        agg_mock.return_value = metadata
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx, project_id='my_tenantid')
        host = fakes.FakeHostState('host1', 'compute', {})
        return self.filt_cls.host_passes(host, spec_obj)

    def test_aggregate_multi_tenancy_isolation_with_meta_passes(self,
                                                                agg_mock):
        # The requesting tenant is the isolated tenant.
        self.assertTrue(self._host_passes(
            agg_mock, {'filter_tenant_id': set(['my_tenantid'])}))

    def test_aggregate_multi_tenancy_isolation_with_meta_passes_comma(
            self, agg_mock):
        # The requesting tenant appears among several isolated tenants.
        self.assertTrue(self._host_passes(
            agg_mock,
            {'filter_tenant_id': set(['my_tenantid', 'mytenantid2'])}))

    def test_aggregate_multi_tenancy_isolation_fails(self, agg_mock):
        # The host is reserved for a different tenant.
        self.assertFalse(self._host_passes(
            agg_mock, {'filter_tenant_id': set(['other_tenantid'])}))

    def test_aggregate_multi_tenancy_isolation_fails_comma(self, agg_mock):
        # Reserved for several tenants, none of them ours.
        self.assertFalse(self._host_passes(
            agg_mock,
            {'filter_tenant_id': set(['other_tenantid',
                                      'other_tenantid2'])}))

    def test_aggregate_multi_tenancy_isolation_no_meta_passes(self,
                                                              agg_mock):
        # Without filter_tenant_id metadata the host is not isolated.
        self.assertTrue(self._host_passes(agg_mock, {}))
class TestIsolatedHostsFilter(test.NoDBTestCase):
    """Tests for the IsolatedHostsFilter scheduler filter."""

    def setUp(self):
        super(TestIsolatedHostsFilter, self).setUp()
        self.filter_obj = isolated_hosts_filter.IsolatedHostsFilter()

    def _do_test_isolated_hosts(self, host_in_list, image_in_list,
                                set_flags=True,
                                restrict_isolated_hosts_to_isolated_images=
                                True):
        """Run the filter for one host/image combination.

        :param host_in_list: use the host named in isolated_hosts
        :param image_in_list: use the image named in isolated_images
        :param set_flags: when True, configure both isolation options
        :param restrict_isolated_hosts_to_isolated_images: value for the
            matching config option (only applied when set_flags is True)
        :returns: the filter's verdict
        """
        if set_flags:
            self.flags(isolated_images=['isolated_image'],
                       isolated_hosts=['isolated_host'],
                       restrict_isolated_hosts_to_isolated_images=
                       restrict_isolated_hosts_to_isolated_images)
        if host_in_list:
            host_name = 'isolated_host'
        else:
            host_name = 'free_host'
        if image_in_list:
            image_ref = 'isolated_image'
        else:
            image_ref = 'free_image'
        request = objects.RequestSpec(
            image=objects.ImageMeta(id=image_ref))
        host_state = fakes.FakeHostState(host_name, 'node', {})
        return self.filter_obj.host_passes(host_state, request)

    def test_isolated_hosts_fails_isolated_on_non_isolated(self):
        self.assertFalse(self._do_test_isolated_hosts(False, True))

    def test_isolated_hosts_fails_non_isolated_on_isolated(self):
        self.assertFalse(self._do_test_isolated_hosts(True, False))

    def test_isolated_hosts_passes_isolated_on_isolated(self):
        self.assertTrue(self._do_test_isolated_hosts(True, True))

    def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
        self.assertTrue(self._do_test_isolated_hosts(False, False))

    def test_isolated_hosts_no_config(self):
        # With neither isolated hosts nor isolated images configured the
        # filter never rejects anything; this is the default config.
        for host_in, image_in in ((False, True), (True, False),
                                  (True, True), (False, False)):
            self.assertTrue(
                self._do_test_isolated_hosts(host_in, image_in, False))

    def test_isolated_hosts_no_hosts_config(self):
        self.flags(isolated_images=['isolated_image'])
        # Without isolated hosts, only the listed images are filtered out.
        self.assertFalse(self._do_test_isolated_hosts(False, True, False))
        self.assertTrue(self._do_test_isolated_hosts(True, False, False))
        self.assertFalse(self._do_test_isolated_hosts(True, True, False))
        self.assertTrue(self._do_test_isolated_hosts(False, False, False))

    def test_isolated_hosts_no_images_config(self):
        self.flags(isolated_hosts=['isolated_host'])
        # Without isolated images, only the isolated hosts are filtered
        # out.
        self.assertTrue(self._do_test_isolated_hosts(False, True, False))
        self.assertFalse(self._do_test_isolated_hosts(True, False, False))
        self.assertFalse(self._do_test_isolated_hosts(True, True, False))
        self.assertTrue(self._do_test_isolated_hosts(False, False, False))

    def test_isolated_hosts_less_restrictive(self):
        # With restrict_isolated_hosts_to_isolated_images disabled, an
        # isolated host may run any image...
        self.assertTrue(self._do_test_isolated_hosts(True, False, True,
                                                     False))
        self.assertTrue(self._do_test_isolated_hosts(True, True, True,
                                                     False))
        self.assertTrue(self._do_test_isolated_hosts(False, False, True,
                                                     False))
        # ...but an isolated image must still land on an isolated host.
        self.assertFalse(self._do_test_isolated_hosts(False, True, True,
                                                      False))
class TestComputeCapabilitiesFilter(test.NoDBTestCase):
    """Tests for the ComputeCapabilitiesFilter scheduler filter."""

    def setUp(self):
        super(TestComputeCapabilitiesFilter, self).setUp()
        self.filt_cls = compute_capabilities_filter.ComputeCapabilitiesFilter()

    def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes):
        """Check the filter verdict for one capabilities/extra-specs pair.

        :param ecaps: host capability values merged into the host state
        :param especs: flavor extra_specs the filter must satisfy
        :param passes: expected verdict (True passes, False fails)
        """
        # In a real OpenStack runtime environment, compute capability
        # values may be numbers, so use numbers in the unit tests too.
        host_state = {'free_ram_mb': 1024}
        host_state.update(ecaps)
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=1024, extra_specs=especs))
        host = fakes.FakeHostState('host1', 'node1', host_state)
        assertion = self.assertTrue if passes else self.assertFalse
        assertion(self.filt_cls.host_passes(host, spec_obj))

    def test_compute_filter_passes_without_extra_specs(self):
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=1024))
        host_state = {'free_ram_mb': 1024}
        host = fakes.FakeHostState('host1', 'node1', host_state)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_compute_filter_fails_without_host_state(self):
        especs = {'capabilities': '1'}
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=1024, extra_specs=especs))
        self.assertFalse(self.filt_cls.host_passes(None, spec_obj))

    def test_compute_filter_fails_without_capabilities(self):
        # NOTE: fixed typo in the method name ("capabilites").
        # An empty JSON document has no 'vendor' key to match on.
        cpu_info = """ { } """

        cpu_info = six.text_type(cpu_info)

        self._do_test_compute_filter_extra_specs(
            ecaps={'cpu_info': cpu_info},
            especs={'capabilities:cpu_info:vendor': 'Intel'},
            passes=False)

    def test_compute_filter_pass_cpu_info_as_text_type(self):
        cpu_info = """ { "vendor": "Intel", "model": "core2duo",
        "arch": "i686","features": ["lahf_lm", "rdtscp"], "topology":
        {"cores": 1, "threads":1, "sockets": 1}} """

        cpu_info = six.text_type(cpu_info)

        self._do_test_compute_filter_extra_specs(
            ecaps={'cpu_info': cpu_info},
            especs={'capabilities:cpu_info:vendor': 'Intel'},
            passes=True)

    def test_compute_filter_fail_cpu_info_as_text_type_not_valid(self):
        # The string is not valid JSON, so the capability lookup fails.
        cpu_info = "cpu_info"

        cpu_info = six.text_type(cpu_info)

        self._do_test_compute_filter_extra_specs(
            ecaps={'cpu_info': cpu_info},
            especs={'capabilities:cpu_info:vendor': 'Intel'},
            passes=False)

    def test_compute_filter_passes_extra_specs_simple(self):
        self._do_test_compute_filter_extra_specs(
            ecaps={'stats': {'opt1': 1, 'opt2': 2}},
            especs={'opt1': '1', 'opt2': '2', 'trust:trusted_host': 'true'},
            passes=True)

    def test_compute_filter_fails_extra_specs_simple(self):
        self._do_test_compute_filter_extra_specs(
            ecaps={'stats': {'opt1': 1, 'opt2': 2}},
            especs={'opt1': '1', 'opt2': '222',
                    'trust:trusted_host': 'true'},
            passes=False)

    def test_compute_filter_pass_extra_specs_simple_with_scope(self):
        self._do_test_compute_filter_extra_specs(
            ecaps={'stats': {'opt1': 1, 'opt2': 2}},
            especs={'capabilities:opt1': '1',
                    'trust:trusted_host': 'true'},
            passes=True)

    def test_compute_filter_pass_extra_specs_same_as_scope(self):
        # Make sure this still works even if the key is the same as the
        # scope.
        self._do_test_compute_filter_extra_specs(
            ecaps={'capabilities': 1},
            especs={'capabilities': '1'},
            passes=True)

    def test_compute_filter_extra_specs_simple_with_wrong_scope(self):
        # Keys under an unrelated scope are ignored by this filter.
        self._do_test_compute_filter_extra_specs(
            ecaps={'opt1': 1, 'opt2': 2},
            especs={'wrong_scope:opt1': '1',
                    'trust:trusted_host': 'true'},
            passes=True)

    def test_compute_filter_extra_specs_pass_multi_level_with_scope(self):
        self._do_test_compute_filter_extra_specs(
            ecaps={'stats': {'opt1': {'a': 1, 'b': {'aa': 2}}, 'opt2': 2}},
            especs={'opt1:a': '1', 'capabilities:opt1:b:aa': '2',
                    'trust:trusted_host': 'true'},
            passes=True)
# Three aggregates sharing one host, with overlapping metadata keys;
# used to exercise value merging across aggregates.
_AGGREGATE_FIXTURES = [
    objects.Aggregate(
        id=1,
        name='foo',
        hosts=['fake-host'],
        metadata={'k1': '1', 'k2': '2'},
    ),
    objects.Aggregate(
        id=2,
        name='bar',
        hosts=['fake-host'],
        metadata={'k1': '3', 'k2': '4'},
    ),
    objects.Aggregate(
        id=3,
        name='bar',
        hosts=['fake-host'],
        metadata={'k1': '6,7', 'k2': '8, 9'},
    ),
]


class TestUtils(test.NoDBTestCase):
    """Tests for the scheduler filter utility helpers."""

    # NOTE: the redundant setUp() that only delegated to super() has been
    # removed; the inherited implementation is identical.

    def test_aggregate_values_from_key(self):
        # Values for the key are collected verbatim, one per aggregate.
        host_state = fakes.FakeHostState(
            'fake', 'node', {'aggregates': _AGGREGATE_FIXTURES})
        values = utils.aggregate_values_from_key(host_state, key_name='k1')
        self.assertEqual(set(['1', '3', '6,7']), values)

    def test_aggregate_values_from_key_with_wrong_key(self):
        host_state = fakes.FakeHostState(
            'fake', 'node', {'aggregates': _AGGREGATE_FIXTURES})
        values = utils.aggregate_values_from_key(host_state, key_name='k3')
        self.assertEqual(set(), values)

    def test_aggregate_metadata_get_by_host_no_key(self):
        # Without a key filter, all keys are returned with their values
        # split on commas and merged across aggregates.
        host_state = fakes.FakeHostState(
            'fake', 'node', {'aggregates': _AGGREGATE_FIXTURES})
        metadata = utils.aggregate_metadata_get_by_host(host_state)
        self.assertIn('k1', metadata)
        self.assertEqual(set(['1', '3', '7', '6']), metadata['k1'])
        self.assertIn('k2', metadata)
        self.assertEqual(set(['9', '8', '2', '4']), metadata['k2'])

    def test_aggregate_metadata_get_by_host_with_key(self):
        host_state = fakes.FakeHostState(
            'fake', 'node', {'aggregates': _AGGREGATE_FIXTURES})
        metadata = utils.aggregate_metadata_get_by_host(host_state, 'k1')
        self.assertIn('k1', metadata)
        self.assertEqual(set(['1', '3', '7', '6']), metadata['k1'])

    def test_aggregate_metadata_get_by_host_empty_result(self):
        host_state = fakes.FakeHostState(
            'fake', 'node', {'aggregates': []})
        metadata = utils.aggregate_metadata_get_by_host(host_state, 'k3')
        self.assertEqual({}, metadata)

    def test_validate_num_values(self):
        f = utils.validate_num_values
        self.assertEqual("x", f(set(), default="x"))
        self.assertEqual(1, f(set(["1"]), cast_to=int))
        self.assertEqual(1.0, f(set(["1"]), cast_to=float))
        self.assertEqual(1, f(set([1, 2]), based_on=min))
        self.assertEqual(2, f(set([1, 2]), based_on=max))
        # Values are cast before being compared, so '9' < '10'.
        self.assertEqual(9, f(set(['10', '9']), based_on=min))

    def test_instance_uuids_overlap(self):
        inst1 = objects.Instance(uuid='aa')
        inst2 = objects.Instance(uuid='bb')
        instances = [inst1, inst2]
        host_state = fakes.FakeHostState('host1', 'node1', {})
        host_state.instances = {instance.uuid: instance
                                for instance in instances}
        self.assertTrue(utils.instance_uuids_overlap(host_state, ['aa']))
        self.assertFalse(utils.instance_uuids_overlap(host_state, ['zz']))

    def test_other_types_on_host(self):
        inst1 = objects.Instance(uuid='aa', instance_type_id=1)
        host_state = fakes.FakeHostState('host1', 'node1', {})
        host_state.instances = {inst1.uuid: inst1}
        self.assertFalse(utils.other_types_on_host(host_state, 1))
        self.assertTrue(utils.other_types_on_host(host_state, 2))
class TestRamFilter(test.NoDBTestCase):
    """Tests for the RamFilter scheduler filter."""

    def setUp(self):
        super(TestRamFilter, self).setUp()
        self.filt_cls = ram_filter.RamFilter()

    def _make_host(self, free_ram_mb, total_usable_ram_mb, ratio):
        # Helper: host state with the given RAM accounting values.
        return fakes.FakeHostState('host1', 'node1',
                                   {'free_ram_mb': free_ram_mb,
                                    'total_usable_ram_mb':
                                        total_usable_ram_mb,
                                    'ram_allocation_ratio': ratio})

    @staticmethod
    def _make_spec(memory_mb=1024):
        # Helper: request spec for a flavor of the given size.
        return objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=memory_mb))

    def test_ram_filter_fails_on_memory(self):
        # 1023 MB free cannot hold a 1024 MB instance at ratio 1.0.
        host = self._make_host(1023, 1024, 1.0)
        self.assertFalse(self.filt_cls.host_passes(host, self._make_spec()))

    def test_ram_filter_passes(self):
        host = self._make_host(1024, 1024, 1.0)
        self.assertTrue(self.filt_cls.host_passes(host, self._make_spec()))

    def test_ram_filter_oversubscribe(self):
        # Negative free RAM still passes at ratio 2.0, and the filter
        # records the oversubscription limit on the host state.
        host = self._make_host(-1024, 2048, 2.0)
        self.assertTrue(self.filt_cls.host_passes(host, self._make_spec()))
        self.assertEqual(2048 * 2.0, host.limits['memory_mb'])

    def test_ram_filter_oversubscribe_single_instance_fails(self):
        # NOTE: fixed typo in the method name ("singe" -> "single").
        # Even at ratio 2.0 a single 1024 MB instance must not overcommit
        # against itself on a host with only 512 MB of real RAM.
        host = self._make_host(512, 512, 2.0)
        self.assertFalse(self.filt_cls.host_passes(host, self._make_spec()))
@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
class TestAggregateRamFilter(test.NoDBTestCase):
    """Tests for AggregateRamFilter, which takes the RAM allocation
    ratio from host-aggregate metadata when one is available.
    """

    def setUp(self):
        super(TestAggregateRamFilter, self).setUp()
        self.filter_obj = ram_filter.AggregateRamFilter()

    def _fixture(self, free_ram_mb):
        """Build a (host_state, request) pair for a 1024 MB flavor."""
        request = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024))
        host_state = fakes.FakeHostState('host1', 'node1',
                                         {'free_ram_mb': free_ram_mb,
                                          'total_usable_ram_mb': 1024,
                                          'ram_allocation_ratio': 1.0})
        return host_state, request

    def test_aggregate_ram_filter_value_error(self, agg_mock):
        # A non-numeric aggregate value is ignored; ratio 1.0 applies.
        host_state, request = self._fixture(1024)
        agg_mock.return_value = set(['XXX'])
        self.assertTrue(self.filter_obj.host_passes(host_state, request))
        self.assertEqual(1024 * 1.0, host_state.limits['memory_mb'])

    def test_aggregate_ram_filter_default_value(self, agg_mock):
        host_state, request = self._fixture(1023)
        # No aggregate values: fall back to the flag, so 1023 MB fails.
        agg_mock.return_value = set()
        self.assertFalse(self.filter_obj.host_passes(host_state, request))
        # With a 2.0 ratio coming from the aggregate, the host passes.
        agg_mock.return_value = set(['2.0'])
        self.assertTrue(self.filter_obj.host_passes(host_state, request))
        self.assertEqual(1024 * 2.0, host_state.limits['memory_mb'])

    def test_aggregate_ram_filter_conflict_values(self, agg_mock):
        host_state, request = self._fixture(1023)
        agg_mock.return_value = set(['1.5', '2.0'])
        # Conflicting aggregate values: the minimum ratio wins.
        self.assertTrue(self.filter_obj.host_passes(host_state, request))
        self.assertEqual(1024 * 1.5, host_state.limits['memory_mb'])
class TestJsonFilter(test.NoDBTestCase):
    """Tests for the JsonFilter scheduler filter."""

    def setUp(self):
        super(TestJsonFilter, self).setUp()
        self.filter_obj = json_filter.JsonFilter()
        # Default query: at least 1G of RAM and 200G of disk free.
        self.json_query = jsonutils.dumps(
            ['and',
             ['>=', '$free_ram_mb', 1024],
             ['>=', '$free_disk_mb', 200 * 1024]])

    @staticmethod
    def _spec_for_query(raw):
        """Encode a raw query the way the API delivers it as a hint."""
        return objects.RequestSpec(
            scheduler_hints=dict(query=[jsonutils.dumps(raw)]))

    def _default_spec(self):
        """RequestSpec carrying the pre-built default query."""
        return objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=1024,
                                  root_gb=200,
                                  ephemeral_gb=0),
            scheduler_hints=dict(query=[self.json_query]))

    def test_json_filter_passes(self):
        host_state = fakes.FakeHostState('host1', 'node1',
                                         {'free_ram_mb': 1024,
                                          'free_disk_mb': 200 * 1024})
        self.assertTrue(
            self.filter_obj.host_passes(host_state, self._default_spec()))

    def test_json_filter_passes_with_no_query(self):
        # Without a query hint the filter always passes, even when the
        # host has no free resources at all.
        request = objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=1024,
                                  root_gb=200,
                                  ephemeral_gb=0),
            scheduler_hints=None)
        host_state = fakes.FakeHostState('host1', 'node1',
                                         {'free_ram_mb': 0,
                                          'free_disk_mb': 0})
        self.assertTrue(self.filter_obj.host_passes(host_state, request))

    def test_json_filter_fails_on_memory(self):
        host_state = fakes.FakeHostState('host1', 'node1',
                                         {'free_ram_mb': 1023,
                                          'free_disk_mb': 200 * 1024})
        self.assertFalse(
            self.filter_obj.host_passes(host_state, self._default_spec()))

    def test_json_filter_fails_on_disk(self):
        host_state = fakes.FakeHostState('host1', 'node1',
                                         {'free_ram_mb': 1024,
                                          'free_disk_mb': (200 * 1024) - 1})
        self.assertFalse(
            self.filter_obj.host_passes(host_state, self._default_spec()))

    def test_json_filter_fails_on_service_disabled(self):
        # Extend the default query to also require an enabled service.
        json_query = jsonutils.dumps(
            ['and',
             ['>=', '$free_ram_mb', 1024],
             ['>=', '$free_disk_mb', 200 * 1024],
             ['not', '$service.disabled']])
        request = objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=1024, local_gb=200),
            scheduler_hints=dict(query=[json_query]))
        host_state = fakes.FakeHostState('host1', 'node1',
                                         {'free_ram_mb': 1024,
                                          'free_disk_mb': 200 * 1024})
        self.assertFalse(self.filter_obj.host_passes(host_state, request))

    def test_json_filter_happy_day(self):
        # Test the json filter more thoroughly with a compound query:
        # capabilities enabled, opt1 matching, and RAM/disk either both
        # below 30/300 or both above.
        raw = ['and',
               '$capabilities.enabled',
               ['=', '$capabilities.opt1', 'match'],
               ['or',
                ['and',
                 ['<', '$free_ram_mb', 30],
                 ['<', '$free_disk_mb', 300]],
                ['and',
                 ['>', '$free_ram_mb', 30],
                 ['>', '$free_disk_mb', 300]]]]
        request = self._spec_for_query(raw)

        def host_with(ram, disk, caps, svc):
            return fakes.FakeHostState('host1', 'node1',
                                       {'free_ram_mb': ram,
                                        'free_disk_mb': disk,
                                        'capabilities': caps,
                                        'service': svc})

        # Both resources below the thresholds: passes.
        host_state = host_with(10, 200,
                               {'opt1': 'match'}, {'disabled': False})
        self.assertTrue(self.filter_obj.host_passes(host_state, request))

        # Both resources above the thresholds: passes.
        host_state = host_with(40, 400,
                               {'opt1': 'match'}, {'disabled': False})
        self.assertTrue(self.filter_obj.host_passes(host_state, request))

        # Fails because capabilities are disabled.
        host_state = host_with(40, 400,
                               {'enabled': False, 'opt1': 'match'},
                               {'disabled': False})
        self.assertFalse(self.filter_obj.host_passes(host_state, request))

        # Fails on the exact memory/disk boundary values.
        host_state = host_with(30, 300,
                               {'enabled': True, 'opt1': 'match'},
                               {'disabled': False})
        self.assertFalse(self.filter_obj.host_passes(host_state, request))

        # Fails with memory below but disk above the threshold.
        host_state = host_with(20, 400,
                               {'enabled': True, 'opt1': 'match'},
                               {'disabled': False})
        self.assertFalse(self.filter_obj.host_passes(host_state, request))

        # Fails because capabilities 'opt1' does not match.
        host_state = host_with(20, 400,
                               {'enabled': True, 'opt1': 'no-match'},
                               {'enabled': True})
        self.assertFalse(self.filter_obj.host_passes(host_state, request))

    def test_json_filter_basic_operators(self):
        host_state = fakes.FakeHostState('host1', 'node1', {})
        # Each entry: (operator, arguments, expected result).
        cases = [
            ['=', [1, 1], True],
            ['=', [1, 2], False],
            ['<', [1, 2], True],
            ['<', [1, 1], False],
            ['<', [2, 1], False],
            ['>', [2, 1], True],
            ['>', [2, 2], False],
            ['>', [2, 3], False],
            ['<=', [1, 2], True],
            ['<=', [1, 1], True],
            ['<=', [2, 1], False],
            ['>=', [2, 1], True],
            ['>=', [2, 2], True],
            ['>=', [2, 3], False],
            ['in', [1, 1], True],
            ['in', [1, 1, 2, 3], True],
            ['in', [4, 1, 2, 3], False],
            ['not', [True], False],
            ['not', [False], True],
            ['or', [True, False], True],
            ['or', [False, False], False],
            ['and', [True, True], True],
            ['and', [False, False], False],
            ['and', [True, False], False],
            # Nested ((True or False) and (2 > 1)) == Passes
            ['and', [['or', True, False], ['>', 2, 1]], True]]
        for op, args, expected in cases:
            request = self._spec_for_query([op] + args)
            self.assertEqual(expected,
                             self.filter_obj.host_passes(host_state,
                                                         request))

        # 'not' maps over its arguments, yielding
        # [False, True, False, True]; any True means a pass.
        request = self._spec_for_query(['not', True, False, True, False])
        self.assertTrue(self.filter_obj.host_passes(host_state, request))

        # Here 'not' yields [False, False, False], so the host fails.
        request = self._spec_for_query(['not', True, True, True])
        self.assertFalse(self.filter_obj.host_passes(host_state, request))

    def test_json_filter_unknown_operator_raises(self):
        request = self._spec_for_query(['!=', 1, 2])
        host_state = fakes.FakeHostState('host1', 'node1', {})
        self.assertRaises(KeyError,
                          self.filter_obj.host_passes, host_state, request)

    def test_json_filter_empty_filters_pass(self):
        # Both an empty list and an empty dict query pass everything.
        host_state = fakes.FakeHostState('host1', 'node1', {})
        for empty in ([], {}):
            request = self._spec_for_query(empty)
            self.assertTrue(
                self.filter_obj.host_passes(host_state, request))

    def test_json_filter_invalid_num_arguments_fails(self):
        host_state = fakes.FakeHostState('host1', 'node1', {})
        # Deeply nested operators, each missing its arguments.
        raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
        self.assertFalse(
            self.filter_obj.host_passes(host_state,
                                        self._spec_for_query(raw)))
        # A comparison with only one operand.
        self.assertFalse(
            self.filter_obj.host_passes(host_state,
                                        self._spec_for_query(['>', 1])))

    def test_json_filter_unknown_variable_ignored(self):
        # Unknown '$' variables do not cause a rejection.
        host_state = fakes.FakeHostState('host1', 'node1', {})
        for raw in (['=', '$........', 1, 1], ['=', '$foo', 2, 2]):
            self.assertTrue(
                self.filter_obj.host_passes(host_state,
                                            self._spec_for_query(raw)))
class TestNumInstancesFilter(test.NoDBTestCase):
    """Tests for IoOpsFilter and AggregateIoOpsFilter."""

    def test_filter_num_iops_passes(self):
        self.flags(max_io_ops_per_host=8)
        filter_obj = io_ops_filter.IoOpsFilter()
        host_state = fakes.FakeHostState('host1', 'node1',
                                         {'num_io_ops': 7})
        request = objects.RequestSpec()
        self.assertTrue(filter_obj.host_passes(host_state, request))

    def test_filter_num_iops_fails(self):
        # A host already at the limit is rejected.
        self.flags(max_io_ops_per_host=8)
        filter_obj = io_ops_filter.IoOpsFilter()
        host_state = fakes.FakeHostState('host1', 'node1',
                                         {'num_io_ops': 8})
        request = objects.RequestSpec()
        self.assertFalse(filter_obj.host_passes(host_state, request))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_filter_num_iops_value(self, agg_mock):
        self.flags(max_io_ops_per_host=7)
        filter_obj = io_ops_filter.AggregateIoOpsFilter()
        host_state = fakes.FakeHostState('host1', 'node1',
                                         {'num_io_ops': 7})
        request = objects.RequestSpec(context=mock.sentinel.ctx)
        # No aggregate override: fall back to the flag (7), so 7 ops fail.
        agg_mock.return_value = set([])
        self.assertFalse(filter_obj.host_passes(host_state, request))
        agg_mock.assert_called_once_with(host_state, 'max_io_ops_per_host')
        # An aggregate raising the limit to 8 lets the host pass.
        agg_mock.return_value = set(['8'])
        self.assertTrue(filter_obj.host_passes(host_state, request))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_filter_num_iops_value_error(self, agg_mock):
        self.flags(max_io_ops_per_host=8)
        filter_obj = io_ops_filter.AggregateIoOpsFilter()
        host_state = fakes.FakeHostState('host1', 'node1',
                                         {'num_io_ops': 7})
        # A non-numeric aggregate value falls back to the flag value.
        agg_mock.return_value = set(['XXX'])
        request = objects.RequestSpec(context=mock.sentinel.ctx)
        self.assertTrue(filter_obj.host_passes(host_state, request))
        agg_mock.assert_called_once_with(host_state, 'max_io_ops_per_host')
import mock

from nova import objects
from nova.scheduler.filters import aggregate_image_properties_isolation as aipi
from nova import test
from nova.tests.unit.scheduler import fakes


@mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host')
class TestAggImagePropsIsolationFilter(test.NoDBTestCase):
    """Tests for AggregateImagePropertiesIsolation.

    The filter compares the request's image properties against the
    metadata of the host's aggregates (mocked here through
    aggregate_metadata_get_by_host) and rejects hosts whose aggregate
    metadata conflicts with the image.
    """

    def setUp(self):
        super(TestAggImagePropsIsolationFilter, self).setUp()
        self.filt_cls = aipi.AggregateImagePropertiesIsolation()

    def test_aggregate_image_properties_isolation_passes(self, agg_mock):
        # Aggregate metadata matches the image property exactly.
        agg_mock.return_value = {'hw_vm_mode': 'hvm'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            image=objects.ImageMeta(properties=objects.ImageMetaProps(
                hw_vm_mode='hvm')))
        host = fakes.FakeHostState('host1', 'compute', {})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_aggregate_image_properties_isolation_passes_comma(self,
                                                               agg_mock):
        # Aggregate values are comma-separated alternatives; a match on
        # any one of them ('hvm' in 'hvm,xen') is enough.
        agg_mock.return_value = {'hw_vm_mode': 'hvm,xen'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            image=objects.ImageMeta(properties=objects.ImageMetaProps(
                hw_vm_mode='hvm')))
        host = fakes.FakeHostState('host1', 'compute', {})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_aggregate_image_properties_isolation_multi_props_passes(self,
            agg_mock):
        # Every aggregate key must match; here both do.
        agg_mock.return_value = {'hw_vm_mode': 'hvm', 'hw_cpu_cores': '2'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            image=objects.ImageMeta(properties=objects.ImageMetaProps(
                hw_vm_mode='hvm', hw_cpu_cores=2)))
        host = fakes.FakeHostState('host1', 'compute', {})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_aggregate_image_properties_isolation_props_with_meta_passes(self,
            agg_mock):
        # An image with no properties set does not conflict with
        # aggregate metadata.
        agg_mock.return_value = {'hw_vm_mode': 'hvm'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            image=objects.ImageMeta(properties=objects.ImageMetaProps()))
        host = fakes.FakeHostState('host1', 'compute', {})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_aggregate_image_properties_isolation_props_imgprops_passes(self,
            agg_mock):
        # A host with no aggregate metadata cannot conflict either.
        agg_mock.return_value = {}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            image=objects.ImageMeta(properties=objects.ImageMetaProps(
                hw_vm_mode='hvm')))
        host = fakes.FakeHostState('host1', 'compute', {})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_aggregate_image_properties_isolation_props_not_match_fails(self,
            agg_mock):
        # Image requests 'xen' but the aggregate requires 'hvm'.
        agg_mock.return_value = {'hw_vm_mode': 'hvm'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            image=objects.ImageMeta(properties=objects.ImageMetaProps(
                hw_vm_mode='xen')))
        host = fakes.FakeHostState('host1', 'compute', {})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_aggregate_image_properties_isolation_props_not_match2_fails(self,
            agg_mock):
        # One matching key is not enough; hw_cpu_cores mismatches.
        agg_mock.return_value = {'hw_vm_mode': 'hvm', 'hw_cpu_cores': '1'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            image=objects.ImageMeta(properties=objects.ImageMetaProps(
                hw_vm_mode='hvm', hw_cpu_cores=2)))
        host = fakes.FakeHostState('host1', 'compute', {})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_aggregate_image_properties_isolation_props_namespace(self,
            agg_mock):
        # Only metadata keys under the configured 'hw' namespace are
        # compared; 'img_owner_id' is outside it and so is ignored even
        # though its value differs.
        self.flags(aggregate_image_properties_isolation_namespace="hw")
        self.flags(aggregate_image_properties_isolation_separator="_")
        agg_mock.return_value = {'hw_vm_mode': 'hvm', 'img_owner_id': 'foo'}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            image=objects.ImageMeta(properties=objects.ImageMetaProps(
                hw_vm_mode='hvm', img_owner_id='wrong')))
        host = fakes.FakeHostState('host1', 'compute', {})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import objects from nova.scheduler.filters import exact_core_filter from nova import test from nova.tests.unit.scheduler import fakes class TestExactCoreFilter(test.NoDBTestCase): def setUp(self): super(TestExactCoreFilter, self).setUp() self.filt_cls = exact_core_filter.ExactCoreFilter() def test_exact_core_filter_passes(self): spec_obj = objects.RequestSpec( flavor=objects.Flavor(vcpus=1)) vcpus = 3 host = self._get_host({'vcpus_total': vcpus, 'vcpus_used': vcpus - 1}) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) self.assertEqual(host.limits.get('vcpu'), vcpus) def test_exact_core_filter_fails(self): spec_obj = objects.RequestSpec( flavor=objects.Flavor(vcpus=2)) host = self._get_host({'vcpus_total': 3, 'vcpus_used': 2}) self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) self.assertNotIn('vcpu', host.limits) def test_exact_core_filter_fails_host_vcpus_not_set(self): spec_obj = objects.RequestSpec( flavor=objects.Flavor(vcpus=1)) host = self._get_host({}) self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) self.assertNotIn('vcpu', host.limits) def _get_host(self, host_attributes): return fakes.FakeHostState('host1', 'node1', host_attributes) nova-13.0.0/nova/tests/unit/scheduler/filters/test_exact_disk_filter.py0000664000567000056710000000336212701407773027521 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import objects from nova.scheduler.filters import exact_disk_filter from nova import test from nova.tests.unit.scheduler import fakes class TestExactDiskFilter(test.NoDBTestCase): def setUp(self): super(TestExactDiskFilter, self).setUp() self.filt_cls = exact_disk_filter.ExactDiskFilter() def test_exact_disk_filter_passes(self): spec_obj = objects.RequestSpec( flavor=objects.Flavor(root_gb=1, ephemeral_gb=1, swap=1024)) disk_gb = 3 host = self._get_host({'free_disk_mb': disk_gb * 1024, 'total_usable_disk_gb': disk_gb}) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) self.assertEqual(host.limits.get('disk_gb'), disk_gb) def test_exact_disk_filter_fails(self): spec_obj = objects.RequestSpec( flavor=objects.Flavor(root_gb=1, ephemeral_gb=1, swap=1024)) host = self._get_host({'free_disk_mb': 2 * 1024}) self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) self.assertNotIn('disk_gb', host.limits) def _get_host(self, host_attributes): return fakes.FakeHostState('host1', 'node1', host_attributes) nova-13.0.0/nova/tests/unit/scheduler/filters/test_metrics_filters.py0000664000567000056710000000473112701407773027235 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from nova import objects from nova.scheduler.filters import metrics_filter from nova import test from nova.tests.unit.scheduler import fakes class TestMetricsFilter(test.NoDBTestCase): def test_metrics_filter_pass(self): _ts_now = datetime.datetime(2015, 11, 11, 11, 0, 0) obj1 = objects.MonitorMetric(name='cpu.frequency', value=1000, timestamp=_ts_now, source='nova.virt.libvirt.driver') obj2 = objects.MonitorMetric(name='numa.membw.current', numa_membw_values={"0": 10, "1": 43}, timestamp=_ts_now, source='nova.virt.libvirt.driver') metrics_list = objects.MonitorMetricList(objects=[obj1, obj2]) self.flags(weight_setting=[ 'cpu.frequency=1', 'numa.membw.current=2'], group='metrics') filt_cls = metrics_filter.MetricsFilter() host = fakes.FakeHostState('host1', 'node1', attribute_dict={'metrics': metrics_list}) self.assertTrue(filt_cls.host_passes(host, None)) def test_metrics_filter_missing_metrics(self): _ts_now = datetime.datetime(2015, 11, 11, 11, 0, 0) obj1 = objects.MonitorMetric(name='cpu.frequency', value=1000, timestamp=_ts_now, source='nova.virt.libvirt.driver') metrics_list = objects.MonitorMetricList(objects=[obj1]) self.flags(weight_setting=['foo=1', 'bar=2'], group='metrics') filt_cls = metrics_filter.MetricsFilter() host = fakes.FakeHostState('host1', 'node1', attribute_dict={'metrics': metrics_list}) self.assertFalse(filt_cls.host_passes(host, None)) nova-13.0.0/nova/tests/unit/scheduler/filters/test_affinity_filters.py0000664000567000056710000001774112701407773027405 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, 
import mock
from oslo_config import cfg

from nova import objects
from nova.scheduler.filters import affinity_filter
from nova import test
from nova.tests.unit.scheduler import fakes

CONF = cfg.CONF

CONF.import_opt('my_ip', 'nova.netconf')


class TestDifferentHostFilter(test.NoDBTestCase):
    """Tests for DifferentHostFilter: reject hosts already running any
    instance named in the 'different_host' scheduler hint.
    """

    def setUp(self):
        super(TestDifferentHostFilter, self).setUp()
        self.filt_cls = affinity_filter.DifferentHostFilter()

    def test_affinity_different_filter_passes(self):
        # Host runs an unrelated instance -> acceptable.
        host = fakes.FakeHostState('host1', 'node1', {})
        inst1 = objects.Instance(uuid='different')
        host.instances = {inst1.uuid: inst1}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            scheduler_hints=dict(different_host=['same']))
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_affinity_different_filter_fails(self):
        # Host runs the hinted instance -> rejected.
        inst1 = objects.Instance(uuid='same')
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {inst1.uuid: inst1}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            scheduler_hints=dict(different_host=['same']))
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_affinity_different_filter_handles_none(self):
        # No scheduler hints at all -> filter is a no-op.
        inst1 = objects.Instance(uuid='same')
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {inst1.uuid: inst1}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            scheduler_hints=None)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))


class TestSameHostFilter(test.NoDBTestCase):
    """Tests for SameHostFilter: only accept hosts already running an
    instance named in the 'same_host' scheduler hint.
    """

    def setUp(self):
        super(TestSameHostFilter, self).setUp()
        self.filt_cls = affinity_filter.SameHostFilter()

    def test_affinity_same_filter_passes(self):
        inst1 = objects.Instance(uuid='same')
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {inst1.uuid: inst1}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            scheduler_hints=dict(same_host=['same']))
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_affinity_same_filter_no_list_passes(self):
        # NOTE: despite the method name, a host with no instances cannot
        # satisfy a same_host hint, so the filter rejects it.
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            scheduler_hints=dict(same_host=['same']))
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_affinity_same_filter_fails(self):
        inst1 = objects.Instance(uuid='different')
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {inst1.uuid: inst1}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            scheduler_hints=dict(same_host=['same']))
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_affinity_same_filter_handles_none(self):
        # No scheduler hints at all -> filter is a no-op.
        inst1 = objects.Instance(uuid='different')
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {inst1.uuid: inst1}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            scheduler_hints=None)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))


class TestSimpleCIDRAffinityFilter(test.NoDBTestCase):
    """Tests for SimpleCIDRAffinityFilter: accept hosts whose IP falls
    inside the network given by the 'build_near_host_ip'/'cidr' hints.
    """

    def setUp(self):
        super(TestSimpleCIDRAffinityFilter, self).setUp()
        self.filt_cls = affinity_filter.SimpleCIDRAffinityFilter()

    def test_affinity_simple_cidr_filter_passes(self):
        # 10.8.1.1 is inside 10.8.1.100/24.
        host = fakes.FakeHostState('host1', 'node1', {})
        host.host_ip = '10.8.1.1'
        affinity_ip = "10.8.1.100"
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            scheduler_hints=dict(
                cidr=['/24'],
                build_near_host_ip=[affinity_ip]))
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_affinity_simple_cidr_filter_fails(self):
        # A /32 netmask matches only the exact affinity IP.
        host = fakes.FakeHostState('host1', 'node1', {})
        host.host_ip = '10.8.1.1'
        affinity_ip = "10.8.1.100"
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            scheduler_hints=dict(
                cidr=['/32'],
                build_near_host_ip=[affinity_ip]))
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_affinity_simple_cidr_filter_handles_none(self):
        # No scheduler hints at all -> filter is a no-op.
        host = fakes.FakeHostState('host1', 'node1', {})
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            scheduler_hints=None)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))


class TestGroupAffinityFilter(test.NoDBTestCase):
    """Tests for ServerGroupAffinityFilter and
    ServerGroupAntiAffinityFilter.
    """

    def _test_group_anti_affinity_filter_passes(self, filt_cls, policy):
        host = fakes.FakeHostState('host1', 'node1', {})
        # No group at all -> pass.
        spec_obj = objects.RequestSpec(instance_group=None)
        self.assertTrue(filt_cls.host_passes(host, spec_obj))
        # Group with the opposite policy -> filter does not apply.
        spec_obj = objects.RequestSpec(instance_group=objects.InstanceGroup(
            policies=['affinity']))
        self.assertTrue(filt_cls.host_passes(host, spec_obj))
        # Group with this policy but no members on this host -> pass.
        spec_obj = objects.RequestSpec(instance_group=objects.InstanceGroup(
            policies=[policy]))
        spec_obj.instance_group.hosts = []
        self.assertTrue(filt_cls.host_passes(host, spec_obj))
        spec_obj.instance_group.hosts = ['host2']
        self.assertTrue(filt_cls.host_passes(host, spec_obj))

    def test_group_anti_affinity_filter_passes(self):
        self._test_group_anti_affinity_filter_passes(
            affinity_filter.ServerGroupAntiAffinityFilter(),
            'anti-affinity')

    def _test_group_anti_affinity_filter_fails(self, filt_cls, policy):
        # Group member already on this host -> anti-affinity rejects it.
        host = fakes.FakeHostState('host1', 'node1', {})
        spec_obj = objects.RequestSpec(instance_group=objects.InstanceGroup(
            policies=[policy],
            hosts=['host1']))
        self.assertFalse(filt_cls.host_passes(host, spec_obj))

    def test_group_anti_affinity_filter_fails(self):
        self._test_group_anti_affinity_filter_fails(
            affinity_filter.ServerGroupAntiAffinityFilter(),
            'anti-affinity')

    def _test_group_affinity_filter_passes(self, filt_cls, policy):
        host = fakes.FakeHostState('host1', 'node1', {})
        # No group at all -> pass.
        spec_obj = objects.RequestSpec(instance_group=None)
        self.assertTrue(filt_cls.host_passes(host, spec_obj))
        # Group with the opposite policy -> filter does not apply.
        spec_obj = objects.RequestSpec(instance_group=objects.InstanceGroup(
            policies=['anti-affinity']))
        self.assertTrue(filt_cls.host_passes(host, spec_obj))
        # Group member already on this host -> affinity accepts it.
        # (Fixed: use the 'policy' parameter rather than a hardcoded
        # 'affinity', mirroring _test_group_anti_affinity_filter_passes.)
        spec_obj = objects.RequestSpec(instance_group=objects.InstanceGroup(
            policies=[policy],
            hosts=['host1']))
        self.assertTrue(filt_cls.host_passes(host, spec_obj))

    def test_group_affinity_filter_passes(self):
        self._test_group_affinity_filter_passes(
            affinity_filter.ServerGroupAffinityFilter(), 'affinity')

    def _test_group_affinity_filter_fails(self, filt_cls, policy):
        # Group members only on another host -> affinity rejects host1.
        host = fakes.FakeHostState('host1', 'node1', {})
        spec_obj = objects.RequestSpec(instance_group=objects.InstanceGroup(
            policies=[policy],
            hosts=['host2']))
        self.assertFalse(filt_cls.host_passes(host, spec_obj))

    def test_group_affinity_filter_fails(self):
        self._test_group_affinity_filter_fails(
            affinity_filter.ServerGroupAffinityFilter(), 'affinity')
from nova import objects from nova.scheduler.filters import exact_ram_filter from nova import test from nova.tests.unit.scheduler import fakes class TestRamFilter(test.NoDBTestCase): def setUp(self): super(TestRamFilter, self).setUp() self.filt_cls = exact_ram_filter.ExactRamFilter() def test_exact_ram_filter_passes(self): spec_obj = objects.RequestSpec( flavor=objects.Flavor(memory_mb=1024)) ram_mb = 1024 host = self._get_host({'free_ram_mb': ram_mb, 'total_usable_ram_mb': ram_mb}) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) self.assertEqual(host.limits.get('memory_mb'), ram_mb) def test_exact_ram_filter_fails(self): spec_obj = objects.RequestSpec( flavor=objects.Flavor(memory_mb=512)) host = self._get_host({'free_ram_mb': 1024}) self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) self.assertNotIn('memory_mb', host.limits) def _get_host(self, host_attributes): return fakes.FakeHostState('host1', 'node1', host_attributes) nova-13.0.0/nova/tests/unit/scheduler/filters/test_extra_specs_ops.py0000664000567000056710000001566412701407773027247 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.scheduler.filters import extra_specs_ops
from nova import test


class ExtraSpecsOpsTestCase(test.NoDBTestCase):
    """Tests for extra_specs_ops.match().

    Requirement strings may start with an operator: numeric (=, <=, >=),
    string (s==, s!=, s<, s<=, s>, s>=), substring ('<in> x'),
    alternation ('<or> a <or> b') or subset ('<all-in> a b').
    NOTE: the '<in>'/'<or>'/'<all-in>' operator tokens had been stripped
    from the requirement literals (HTML-style extraction loss) and are
    restored here.
    """

    def _do_extra_specs_ops_test(self, value, req, matches):
        # Assert that match(value, req) agrees with 'matches'.
        assertion = self.assertTrue if matches else self.assertFalse
        assertion(extra_specs_ops.match(value, req))

    def test_extra_specs_matches_simple(self):
        self._do_extra_specs_ops_test(
            value='1',
            req='1',
            matches=True)

    def test_extra_specs_fails_simple(self):
        self._do_extra_specs_ops_test(
            value='',
            req='1',
            matches=False)

    def test_extra_specs_fails_simple2(self):
        self._do_extra_specs_ops_test(
            value='3',
            req='1',
            matches=False)

    def test_extra_specs_fails_simple3(self):
        self._do_extra_specs_ops_test(
            value='222',
            req='2',
            matches=False)

    def test_extra_specs_fails_with_bogus_ops(self):
        # '>' alone is not a recognized operator.
        self._do_extra_specs_ops_test(
            value='4',
            req='> 2',
            matches=False)

    def test_extra_specs_matches_with_op_eq(self):
        self._do_extra_specs_ops_test(
            value='123',
            req='= 123',
            matches=True)

    def test_extra_specs_matches_with_op_eq2(self):
        # '=' is a numeric >= comparison: 124 >= 123.
        self._do_extra_specs_ops_test(
            value='124',
            req='= 123',
            matches=True)

    def test_extra_specs_fails_with_op_eq(self):
        self._do_extra_specs_ops_test(
            value='34',
            req='= 234',
            matches=False)

    def test_extra_specs_fails_with_op_eq3(self):
        # '=' with no operand cannot match.
        self._do_extra_specs_ops_test(
            value='34',
            req='=',
            matches=False)

    def test_extra_specs_matches_with_op_seq(self):
        self._do_extra_specs_ops_test(
            value='123',
            req='s== 123',
            matches=True)

    def test_extra_specs_fails_with_op_seq(self):
        self._do_extra_specs_ops_test(
            value='1234',
            req='s== 123',
            matches=False)

    def test_extra_specs_matches_with_op_sneq(self):
        self._do_extra_specs_ops_test(
            value='1234',
            req='s!= 123',
            matches=True)

    def test_extra_specs_fails_with_op_sneq(self):
        self._do_extra_specs_ops_test(
            value='123',
            req='s!= 123',
            matches=False)

    def test_extra_specs_fails_with_op_sge(self):
        # String comparison: '1000' < '234' lexicographically.
        self._do_extra_specs_ops_test(
            value='1000',
            req='s>= 234',
            matches=False)

    def test_extra_specs_fails_with_op_sle(self):
        self._do_extra_specs_ops_test(
            value='1234',
            req='s<= 1000',
            matches=False)

    def test_extra_specs_fails_with_op_sl(self):
        self._do_extra_specs_ops_test(
            value='2',
            req='s< 12',
            matches=False)

    def test_extra_specs_fails_with_op_sg(self):
        self._do_extra_specs_ops_test(
            value='12',
            req='s> 2',
            matches=False)

    def test_extra_specs_matches_with_op_in(self):
        self._do_extra_specs_ops_test(
            value='12311321',
            req='<in> 11',
            matches=True)

    def test_extra_specs_matches_with_op_in2(self):
        self._do_extra_specs_ops_test(
            value='12311321',
            req='<in> 12311321',
            matches=True)

    def test_extra_specs_matches_with_op_in3(self):
        self._do_extra_specs_ops_test(
            value='12311321',
            req='<in> 12311321 <in>',
            matches=True)

    def test_extra_specs_fails_with_op_in(self):
        self._do_extra_specs_ops_test(
            value='12310321',
            req='<in> 11',
            matches=False)

    def test_extra_specs_fails_with_op_in2(self):
        self._do_extra_specs_ops_test(
            value='12310321',
            req='<in> 11 <in>',
            matches=False)

    def test_extra_specs_matches_with_op_or(self):
        self._do_extra_specs_ops_test(
            value='12',
            req='<or> 11 <or> 12',
            matches=True)

    def test_extra_specs_matches_with_op_or2(self):
        self._do_extra_specs_ops_test(
            value='12',
            req='<or> 11 <or> 12 <or>',
            matches=True)

    def test_extra_specs_fails_with_op_or(self):
        self._do_extra_specs_ops_test(
            value='13',
            req='<or> 11 <or> 12',
            matches=False)

    def test_extra_specs_fails_with_op_or2(self):
        self._do_extra_specs_ops_test(
            value='13',
            req='<or> 11 <or> 12 <or>',
            matches=False)

    def test_extra_specs_matches_with_op_le(self):
        self._do_extra_specs_ops_test(
            value='2',
            req='<= 10',
            matches=True)

    def test_extra_specs_fails_with_op_le(self):
        self._do_extra_specs_ops_test(
            value='3',
            req='<= 2',
            matches=False)

    def test_extra_specs_matches_with_op_ge(self):
        self._do_extra_specs_ops_test(
            value='3',
            req='>= 1',
            matches=True)

    def test_extra_specs_fails_with_op_ge(self):
        self._do_extra_specs_ops_test(
            value='2',
            req='>= 3',
            matches=False)

    def test_extra_specs_matches_all_with_op_allin(self):
        values = ['aes', 'mmx', 'aux']
        self._do_extra_specs_ops_test(
            value=str(values),
            req='<all-in> aes mmx',
            matches=True)

    def test_extra_specs_matches_one_with_op_allin(self):
        values = ['aes', 'mmx', 'aux']
        self._do_extra_specs_ops_test(
            value=str(values),
            req='<all-in> mmx',
            matches=True)

    def test_extra_specs_fails_with_op_allin(self):
        values = ['aes', 'mmx', 'aux']
        self._do_extra_specs_ops_test(
            value=str(values),
            req='<all-in> txt',
            matches=False)

    def test_extra_specs_fails_all_with_op_allin(self):
        values = ['aes', 'mmx', 'aux']
        self._do_extra_specs_ops_test(
            value=str(values),
            req='<all-in> txt 3dnow',
            matches=False)

    def test_extra_specs_fails_match_one_with_op_allin(self):
        # <all-in> requires every listed token; 'txt' is missing.
        values = ['aes', 'mmx', 'aux']
        self._do_extra_specs_ops_test(
            value=str(values),
            req='<all-in> txt aes',
            matches=False)
import itertools
import uuid

from nova import objects
from nova.objects import fields
from nova.scheduler.filters import numa_topology_filter
from nova import test
from nova.tests.unit.scheduler import fakes


class TestNUMATopologyFilter(test.NoDBTestCase):
    """Tests for NUMATopologyFilter: a host passes when the requested
    instance NUMA topology (cells, memory, CPUs, page sizes, CPU/thread
    policies) can be fitted onto the host's reported NUMA topology
    within the configured allocation ratios.
    """

    def setUp(self):
        super(TestNUMATopologyFilter, self).setUp()
        self.filt_cls = numa_topology_filter.NUMATopologyFilter()

    def _get_spec_obj(self, numa_topology):
        """Build a minimal RequestSpec carrying the given NUMA topology."""
        image_meta = objects.ImageMeta(properties=objects.ImageMetaProps())
        spec_obj = objects.RequestSpec(numa_topology=numa_topology,
                                       pci_requests=None,
                                       instance_uuid=str(uuid.uuid4()),
                                       flavor=objects.Flavor(extra_specs={}),
                                       image=image_meta)
        return spec_obj

    def test_numa_topology_filter_pass(self):
        # Two small cells fit the fake host topology.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
                                            memory=512),
                   objects.InstanceNUMACell(id=1, cpuset=set([3]),
                                            memory=512)
                   ])
        spec_obj = self._get_spec_obj(numa_topology=instance_topology)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY,
                                    'pci_stats': None,
                                    'cpu_allocation_ratio': 16.0,
                                    'ram_allocation_ratio': 1.5})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
        # Instance requests NUMA but the host reports no topology.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
                                            memory=512),
                   objects.InstanceNUMACell(id=1, cpuset=set([3]),
                                            memory=512)
                   ])
        spec_obj = self._get_spec_obj(numa_topology=instance_topology)
        host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
        # Instance requests no NUMA -> any host is fine.
        spec_obj = self._get_spec_obj(numa_topology=None)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_numa_topology_filter_fail_fit(self):
        # Three instance cells cannot fit the two-cell host topology.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
                                            memory=512),
                   objects.InstanceNUMACell(id=1, cpuset=set([2]),
                                            memory=512),
                   objects.InstanceNUMACell(id=2, cpuset=set([3]),
                                            memory=512)
                   ])
        spec_obj = self._get_spec_obj(numa_topology=instance_topology)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY,
                                    'pci_stats': None,
                                    'cpu_allocation_ratio': 16.0,
                                    'ram_allocation_ratio': 1.5})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_numa_topology_filter_fail_memory(self):
        # 1024M in one cell exceeds the per-cell memory at ratio 1.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
                                            memory=1024),
                   objects.InstanceNUMACell(id=1, cpuset=set([3]),
                                            memory=512)
                   ])
        spec_obj = self._get_spec_obj(numa_topology=instance_topology)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY,
                                    'pci_stats': None,
                                    'cpu_allocation_ratio': 16.0,
                                    'ram_allocation_ratio': 1})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_numa_topology_filter_fail_cpu(self):
        # Three CPUs in one cell exceed the per-cell CPUs at ratio 1.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
                                            memory=512),
                   objects.InstanceNUMACell(id=1, cpuset=set([3, 4, 5]),
                                            memory=512)])
        spec_obj = self._get_spec_obj(numa_topology=instance_topology)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY,
                                    'pci_stats': None,
                                    'cpu_allocation_ratio': 1,
                                    'ram_allocation_ratio': 1.5})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_numa_topology_filter_pass_set_limit(self):
        # A passing host records the ratios used as claim limits.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
                                            memory=512),
                   objects.InstanceNUMACell(id=1, cpuset=set([3]),
                                            memory=512)
                   ])
        spec_obj = self._get_spec_obj(numa_topology=instance_topology)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY,
                                    'pci_stats': None,
                                    'cpu_allocation_ratio': 21,
                                    'ram_allocation_ratio': 1.3})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        limits = host.limits['numa_topology']
        self.assertEqual(limits.cpu_allocation_ratio, 21)
        self.assertEqual(limits.ram_allocation_ratio, 1.3)

    def _do_test_numa_topology_filter_cpu_policy(
            self, numa_topology, cpu_policy, cpu_thread_policy, passes):
        """Run the filter for every flavor/image combination carrying the
        given CPU (thread) policy and assert the expected outcome.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
                                            memory=512),
                   objects.InstanceNUMACell(id=1, cpuset=set([3]),
                                            memory=512)
                   ])
        spec_obj = objects.RequestSpec(numa_topology=instance_topology,
                                       pci_requests=None,
                                       instance_uuid=str(uuid.uuid4()))
        extra_specs = [
            {},
            {
                'hw:cpu_policy': cpu_policy,
                'hw:cpu_thread_policy': cpu_thread_policy,
            }
        ]
        image_props = [
            {},
            {
                'hw_cpu_policy': cpu_policy,
                'hw_cpu_thread_policy': cpu_thread_policy,
            }
        ]
        host = fakes.FakeHostState('host1', 'node1', {
            'numa_topology': numa_topology,
            'pci_stats': None,
            'cpu_allocation_ratio': 1,
            'ram_allocation_ratio': 1.5})
        assertion = self.assertTrue if passes else self.assertFalse

        # test combinations of image properties and extra specs
        for specs, props in itertools.product(extra_specs, image_props):
            # ...except for the one where no policy is specified
            if specs == props == {}:
                continue

            fake_flavor = objects.Flavor(memory_mb=1024, extra_specs=specs)
            fake_image_props = objects.ImageMetaProps(**props)
            fake_image = objects.ImageMeta(properties=fake_image_props)

            spec_obj.image = fake_image
            spec_obj.flavor = fake_flavor

            assertion(self.filt_cls.host_passes(host, spec_obj))

    def test_numa_topology_filter_fail_cpu_thread_policy_require(self):
        # 'require' needs hyperthreads; the plain topology has none.
        cpu_policy = fields.CPUAllocationPolicy.DEDICATED
        cpu_thread_policy = fields.CPUThreadAllocationPolicy.REQUIRE
        numa_topology = fakes.NUMA_TOPOLOGY

        self._do_test_numa_topology_filter_cpu_policy(
            numa_topology, cpu_policy, cpu_thread_policy, False)

    def test_numa_topology_filter_pass_cpu_thread_policy_require(self):
        # 'require' passes on the hyperthreaded topology.
        cpu_policy = fields.CPUAllocationPolicy.DEDICATED
        cpu_thread_policy = fields.CPUThreadAllocationPolicy.REQUIRE
        numa_topology = fakes.NUMA_TOPOLOGY_W_HT

        self._do_test_numa_topology_filter_cpu_policy(
            numa_topology, cpu_policy, cpu_thread_policy, True)

    def test_numa_topology_filter_pass_cpu_thread_policy_others(self):
        # 'prefer' and 'isolate' both pass on a non-HT topology.
        # (Fixed: removed a dead pre-loop assignment of cpu_thread_policy
        # that the for loop immediately overwrote.)
        cpu_policy = fields.CPUAllocationPolicy.DEDICATED
        numa_topology = fakes.NUMA_TOPOLOGY

        for cpu_thread_policy in [
                fields.CPUThreadAllocationPolicy.PREFER,
                fields.CPUThreadAllocationPolicy.ISOLATE]:
            self._do_test_numa_topology_filter_cpu_policy(
                numa_topology, cpu_policy, cpu_thread_policy, True)

    def test_numa_topology_filter_pass_mempages(self):
        # Requested page sizes (4K, 16K) are available on the host.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([3]),
                                            memory=128, pagesize=4),
                   objects.InstanceNUMACell(id=1, cpuset=set([1]),
                                            memory=128, pagesize=16)
                   ])
        spec_obj = self._get_spec_obj(numa_topology=instance_topology)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY,
                                    'pci_stats': None,
                                    'cpu_allocation_ratio': 16.0,
                                    'ram_allocation_ratio': 1.5})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_numa_topology_filter_fail_mempages(self):
        # An 8K page size is not offered by the fake host topology.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([3]),
                                            memory=128, pagesize=8),
                   objects.InstanceNUMACell(id=1, cpuset=set([1]),
                                            memory=128, pagesize=16)
                   ])
        spec_obj = self._get_spec_obj(numa_topology=instance_topology)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY,
                                    'pci_stats': None,
                                    'cpu_allocation_ratio': 16.0,
                                    'ram_allocation_ratio': 1.5})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import versionutils from nova.compute import arch from nova.compute import hv_type from nova.compute import vm_mode from nova import objects from nova.scheduler.filters import image_props_filter from nova import test from nova.tests.unit.scheduler import fakes class TestImagePropsFilter(test.NoDBTestCase): def setUp(self): super(TestImagePropsFilter, self).setUp() self.filt_cls = image_props_filter.ImagePropertiesFilter() def test_image_properties_filter_passes_same_inst_props_and_version(self): img_props = objects.ImageMeta( properties=objects.ImageMetaProps( hw_architecture=arch.X86_64, img_hv_type=hv_type.KVM, hw_vm_mode=vm_mode.HVM, img_hv_requested_version='>=6.0,<6.2')) spec_obj = objects.RequestSpec(image=img_props) hypervisor_version = versionutils.convert_version_to_int('6.0.0') capabilities = {'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_fails_different_inst_props(self): img_props = objects.ImageMeta( properties=objects.ImageMetaProps( hw_architecture=arch.ARMV7, img_hv_type=hv_type.QEMU, hw_vm_mode=vm_mode.HVM)) hypervisor_version = versionutils.convert_version_to_int('6.0.0') spec_obj = objects.RequestSpec(image=img_props) capabilities = {'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) 
self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_fails_different_hyper_version(self): img_props = objects.ImageMeta( properties=objects.ImageMetaProps( hw_architecture=arch.X86_64, img_hv_type=hv_type.KVM, hw_vm_mode=vm_mode.HVM, img_hv_requested_version='>=6.2')) hypervisor_version = versionutils.convert_version_to_int('6.0.0') spec_obj = objects.RequestSpec(image=img_props) capabilities = {'enabled': True, 'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_passes_partial_inst_props(self): img_props = objects.ImageMeta( properties=objects.ImageMetaProps( hw_architecture=arch.X86_64, hw_vm_mode=vm_mode.HVM)) hypervisor_version = versionutils.convert_version_to_int('6.0.0') spec_obj = objects.RequestSpec(image=img_props) capabilities = {'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_fails_partial_inst_props(self): img_props = objects.ImageMeta( properties=objects.ImageMetaProps( hw_architecture=arch.X86_64, hw_vm_mode=vm_mode.HVM)) hypervisor_version = versionutils.convert_version_to_int('6.0.0') spec_obj = objects.RequestSpec(image=img_props) capabilities = {'supported_instances': [(arch.X86_64, hv_type.XEN, vm_mode.XEN)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_passes_without_inst_props(self): spec_obj = objects.RequestSpec(image=None) hypervisor_version = versionutils.convert_version_to_int('6.0.0') capabilities = {'supported_instances': [(arch.X86_64, 
hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_fails_without_host_props(self): img_props = objects.ImageMeta( properties=objects.ImageMetaProps( hw_architecture=arch.X86_64, img_hv_type=hv_type.KVM, hw_vm_mode=vm_mode.HVM)) hypervisor_version = versionutils.convert_version_to_int('6.0.0') spec_obj = objects.RequestSpec(image=img_props) capabilities = {'enabled': True, 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_passes_without_hyper_version(self): img_props = objects.ImageMeta( properties=objects.ImageMetaProps( hw_architecture=arch.X86_64, img_hv_type=hv_type.KVM, hw_vm_mode=vm_mode.HVM, img_hv_requested_version='>=6.0')) spec_obj = objects.RequestSpec(image=img_props) capabilities = {'enabled': True, 'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)]} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_fails_with_unsupported_hyper_ver(self): img_props = objects.ImageMeta( properties=objects.ImageMetaProps( hw_architecture=arch.X86_64, img_hv_type=hv_type.KVM, hw_vm_mode=vm_mode.HVM, img_hv_requested_version='>=6.0')) spec_obj = objects.RequestSpec(image=img_props) capabilities = {'enabled': True, 'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': 5000} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_pv_mode_compat(self): # if an old image has 'pv' for a vm_mode it should be treated as xen img_props = objects.ImageMeta( properties=objects.ImageMetaProps( hw_vm_mode='pv')) hypervisor_version = 
versionutils.convert_version_to_int('6.0.0') spec_obj = objects.RequestSpec(image=img_props) capabilities = {'supported_instances': [(arch.X86_64, hv_type.XEN, vm_mode.XEN)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_hvm_mode_compat(self): # if an old image has 'hv' for a vm_mode it should be treated as xen img_props = objects.ImageMeta( properties=objects.ImageMetaProps( hw_vm_mode='hv')) hypervisor_version = versionutils.convert_version_to_int('6.0.0') spec_obj = objects.RequestSpec(image=img_props) capabilities = {'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_xen_arch_compat(self): # if an old image has 'x86_32' for arch it should be treated as i686 img_props = objects.ImageMeta( properties=objects.ImageMetaProps( hw_architecture='x86_32')) hypervisor_version = versionutils.convert_version_to_int('6.0.0') spec_obj = objects.RequestSpec(image=img_props) capabilities = {'supported_instances': [(arch.I686, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_xen_hv_type_compat(self): # if an old image has 'xapi' for hv_type it should be treated as xen img_props = objects.ImageMeta( properties=objects.ImageMetaProps( img_hv_type='xapi')) hypervisor_version = versionutils.convert_version_to_int('6.0.0') spec_obj = objects.RequestSpec(image=img_props) capabilities = {'supported_instances': [(arch.I686, hv_type.XEN, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) 
self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) def test_image_properties_filter_baremetal_vmmode_compat(self): # if an old image has 'baremetal' for vmmode it should be # treated as hvm img_props = objects.ImageMeta( properties=objects.ImageMetaProps( hw_vm_mode='baremetal')) hypervisor_version = versionutils.convert_version_to_int('6.0.0') spec_obj = objects.RequestSpec(image=img_props) capabilities = {'supported_instances': [(arch.I686, hv_type.BAREMETAL, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) nova-13.0.0/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py0000664000567000056710000000663212701407773030773 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import objects from nova.pci import stats from nova.scheduler.filters import pci_passthrough_filter from nova import test from nova.tests.unit.scheduler import fakes class TestPCIPassthroughFilter(test.NoDBTestCase): def setUp(self): super(TestPCIPassthroughFilter, self).setUp() self.filt_cls = pci_passthrough_filter.PciPassthroughFilter() def test_pci_passthrough_pass(self): pci_stats_mock = mock.MagicMock() pci_stats_mock.support_requests.return_value = True request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': '8086'}]) requests = objects.InstancePCIRequests(requests=[request]) spec_obj = objects.RequestSpec(pci_requests=requests) host = fakes.FakeHostState( 'host1', 'node1', attribute_dict={'pci_stats': pci_stats_mock}) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) pci_stats_mock.support_requests.assert_called_once_with( requests.requests) def test_pci_passthrough_fail(self): pci_stats_mock = mock.MagicMock() pci_stats_mock.support_requests.return_value = False request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': '8086'}]) requests = objects.InstancePCIRequests(requests=[request]) spec_obj = objects.RequestSpec(pci_requests=requests) host = fakes.FakeHostState( 'host1', 'node1', attribute_dict={'pci_stats': pci_stats_mock}) self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) pci_stats_mock.support_requests.assert_called_once_with( requests.requests) def test_pci_passthrough_no_pci_request(self): spec_obj = objects.RequestSpec(pci_requests=None) host = fakes.FakeHostState('h1', 'n1', {}) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) def test_pci_passthrough_compute_stats(self): request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': '8086'}]) requests = objects.InstancePCIRequests(requests=[request]) spec_obj = objects.RequestSpec(pci_requests=requests) host = fakes.FakeHostState( 'host1', 'node1', attribute_dict={}) self.assertRaises(AttributeError, 
self.filt_cls.host_passes, host, spec_obj) def test_pci_passthrough_no_pci_stats(self): request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': '8086'}]) requests = objects.InstancePCIRequests(requests=[request]) spec_obj = objects.RequestSpec(pci_requests=requests) host = fakes.FakeHostState('host1', 'node1', attribute_dict={'pci_stats': stats.PciDeviceStats()}) self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) nova-13.0.0/nova/tests/unit/scheduler/fakes.py0000664000567000056710000001356512701407773022426 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For Scheduler tests. 
""" import six from nova import objects from nova.scheduler import driver from nova.scheduler import host_manager NUMA_TOPOLOGY = objects.NUMATopology( cells=[ objects.NUMACell( id=0, cpuset=set([1, 2]), memory=512, cpu_usage=0, memory_usage=0, mempages=[ objects.NUMAPagesTopology(size_kb=16, total=387184, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)], siblings=[], pinned_cpus=set([])), objects.NUMACell( id=1, cpuset=set([3, 4]), memory=512, cpu_usage=0, memory_usage=0, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=1548736, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)], siblings=[], pinned_cpus=set([]))]) NUMA_TOPOLOGY_W_HT = objects.NUMATopology(cells=[ objects.NUMACell( id=0, cpuset=set([1, 2, 5, 6]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], siblings=[set([1, 5]), set([2, 6])], pinned_cpus=set([])), objects.NUMACell( id=1, cpuset=set([3, 4, 7, 8]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], siblings=[set([3, 4]), set([7, 8])], pinned_cpus=set([])) ]) COMPUTE_NODES = [ objects.ComputeNode( id=1, local_gb=1024, memory_mb=1024, vcpus=1, disk_available_least=None, free_ram_mb=512, vcpus_used=1, free_disk_gb=512, local_gb_used=0, updated_at=None, host='host1', hypervisor_hostname='node1', host_ip='127.0.0.1', hypervisor_version=0, numa_topology=None, hypervisor_type='foo', supported_hv_specs=[], pci_device_pools=None, cpu_info=None, stats=None, metrics=None, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0), objects.ComputeNode( id=2, local_gb=2048, memory_mb=2048, vcpus=2, disk_available_least=1024, free_ram_mb=1024, vcpus_used=2, free_disk_gb=1024, local_gb_used=0, updated_at=None, host='host2', hypervisor_hostname='node2', host_ip='127.0.0.1', hypervisor_version=0, numa_topology=None, hypervisor_type='foo', supported_hv_specs=[], pci_device_pools=None, cpu_info=None, stats=None, metrics=None, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, 
disk_allocation_ratio=1.0), objects.ComputeNode( id=3, local_gb=4096, memory_mb=4096, vcpus=4, disk_available_least=3333, free_ram_mb=3072, vcpus_used=1, free_disk_gb=3072, local_gb_used=0, updated_at=None, host='host3', hypervisor_hostname='node3', host_ip='127.0.0.1', hypervisor_version=0, numa_topology=NUMA_TOPOLOGY._to_json(), hypervisor_type='foo', supported_hv_specs=[], pci_device_pools=None, cpu_info=None, stats=None, metrics=None, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0), objects.ComputeNode( id=4, local_gb=8192, memory_mb=8192, vcpus=8, disk_available_least=8192, free_ram_mb=8192, vcpus_used=0, free_disk_gb=8888, local_gb_used=0, updated_at=None, host='host4', hypervisor_hostname='node4', host_ip='127.0.0.1', hypervisor_version=0, numa_topology=None, hypervisor_type='foo', supported_hv_specs=[], pci_device_pools=None, cpu_info=None, stats=None, metrics=None, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0), # Broken entry objects.ComputeNode( id=5, local_gb=1024, memory_mb=1024, vcpus=1, host='fake', hypervisor_hostname='fake-hyp'), ] SERVICES = [ objects.Service(host='host1', disabled=False), objects.Service(host='host2', disabled=True), objects.Service(host='host3', disabled=False), objects.Service(host='host4', disabled=False), ] def get_service_by_host(host): services = [service for service in SERVICES if service.host == host] return services[0] class FakeHostState(host_manager.HostState): def __init__(self, host, node, attribute_dict, instances=None): super(FakeHostState, self).__init__(host, node) if instances: self.instances = {inst.uuid: inst for inst in instances} else: self.instances = {} for (key, val) in six.iteritems(attribute_dict): setattr(self, key, val) class FakeScheduler(driver.Scheduler): def select_destinations(self, context, request_spec, filter_properties): return [] nova-13.0.0/nova/tests/unit/scheduler/weights/0000775000567000056710000000000012701410205022403 
5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/scheduler/weights/test_weights_ioopsweight.py0000664000567000056710000000534112701407773030132 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler IoOpsWeigher weights """ from nova.scheduler import weights from nova.scheduler.weights import io_ops from nova import test from nova.tests.unit.scheduler import fakes class IoOpsWeigherTestCase(test.NoDBTestCase): def setUp(self): super(IoOpsWeigherTestCase, self).setUp() self.weight_handler = weights.HostWeightHandler() self.weighers = [io_ops.IoOpsWeigher()] def _get_weighed_host(self, hosts, io_ops_weight_multiplier): if io_ops_weight_multiplier is not None: self.flags(io_ops_weight_multiplier=io_ops_weight_multiplier) return self.weight_handler.get_weighed_objects(self.weighers, hosts, {})[0] def _get_all_hosts(self): host_values = [ ('host1', 'node1', {'num_io_ops': 1}), ('host2', 'node2', {'num_io_ops': 2}), ('host3', 'node3', {'num_io_ops': 0}), ('host4', 'node4', {'num_io_ops': 4}) ] return [fakes.FakeHostState(host, node, values) for host, node, values in host_values] def _do_test(self, io_ops_weight_multiplier, expected_weight, expected_host): hostinfo_list = self._get_all_hosts() weighed_host = self._get_weighed_host(hostinfo_list, io_ops_weight_multiplier) self.assertEqual(weighed_host.weight, expected_weight) if expected_host: self.assertEqual(weighed_host.obj.host, expected_host) def 
test_io_ops_weight_multiplier_by_default(self): self._do_test(io_ops_weight_multiplier=None, expected_weight=0.0, expected_host='host3') def test_io_ops_weight_multiplier_zero_value(self): # We do not know the host, all have same weight. self._do_test(io_ops_weight_multiplier=0.0, expected_weight=0.0, expected_host=None) def test_io_ops_weight_multiplier_positive_value(self): self._do_test(io_ops_weight_multiplier=2.0, expected_weight=2.0, expected_host='host4') nova-13.0.0/nova/tests/unit/scheduler/weights/test_weights_affinity.py0000664000567000056710000001400012701407773027372 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Ericsson AB # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import objects from nova.scheduler import weights from nova.scheduler.weights import affinity from nova import test from nova.tests.unit.scheduler import fakes class SoftWeigherTestBase(test.NoDBTestCase): def setUp(self): super(SoftWeigherTestBase, self).setUp() self.weight_handler = weights.HostWeightHandler() self.weighers = [] def _get_weighed_host(self, hosts, policy): request_spec = objects.RequestSpec( instance_group=objects.InstanceGroup( policies=[policy], members=['member1', 'member2', 'member3', 'member4', 'member5', 'member6', 'member7'])) return self.weight_handler.get_weighed_objects(self.weighers, hosts, request_spec)[0] def _get_all_hosts(self): host_values = [ ('host1', 'node1', {'instances': { 'member1': mock.sentinel, 'instance13': mock.sentinel }}), ('host2', 'node2', {'instances': { 'member2': mock.sentinel, 'member3': mock.sentinel, 'member4': mock.sentinel, 'member5': mock.sentinel, 'instance14': mock.sentinel }}), ('host3', 'node3', {'instances': { 'instance15': mock.sentinel }}), ('host4', 'node4', {'instances': { 'member6': mock.sentinel, 'member7': mock.sentinel, 'instance16': mock.sentinel }})] return [fakes.FakeHostState(host, node, values) for host, node, values in host_values] def _do_test(self, policy, expected_weight, expected_host): hostinfo_list = self._get_all_hosts() weighed_host = self._get_weighed_host(hostinfo_list, policy) self.assertEqual(expected_weight, weighed_host.weight) if expected_host: self.assertEqual(expected_host, weighed_host.obj.host) class SoftAffinityWeigherTestCase(SoftWeigherTestBase): def setUp(self): super(SoftAffinityWeigherTestCase, self).setUp() self.weighers = [affinity.ServerGroupSoftAffinityWeigher()] def test_soft_affinity_weight_multiplier_by_default(self): self._do_test(policy='soft-affinity', expected_weight=1.0, expected_host='host2') def test_soft_affinity_weight_multiplier_zero_value(self): # We do not know the host, all have same weight. 
self.flags(soft_affinity_weight_multiplier=0.0) self._do_test(policy='soft-affinity', expected_weight=0.0, expected_host=None) def test_soft_affinity_weight_multiplier_positive_value(self): self.flags(soft_affinity_weight_multiplier=2.0) self._do_test(policy='soft-affinity', expected_weight=2.0, expected_host='host2') @mock.patch.object(affinity, 'LOG') def test_soft_affinity_weight_multiplier_negative_value(self, mock_log): self.flags(soft_affinity_weight_multiplier=-1.0) self._do_test(policy='soft-affinity', expected_weight=0.0, expected_host='host3') # call twice and assert that only one warning is emitted self._do_test(policy='soft-affinity', expected_weight=0.0, expected_host='host3') self.assertEqual(1, mock_log.warn.call_count) class SoftAntiAffinityWeigherTestCase(SoftWeigherTestBase): def setUp(self): super(SoftAntiAffinityWeigherTestCase, self).setUp() self.weighers = [affinity.ServerGroupSoftAntiAffinityWeigher()] def test_soft_anti_affinity_weight_multiplier_by_default(self): self._do_test(policy='soft-anti-affinity', expected_weight=1.0, expected_host='host3') def test_soft_anti_affinity_weight_multiplier_zero_value(self): # We do not know the host, all have same weight. 
self.flags(soft_anti_affinity_weight_multiplier=0.0) self._do_test(policy='soft-anti-affinity', expected_weight=0.0, expected_host=None) def test_soft_anti_affinity_weight_multiplier_positive_value(self): self.flags(soft_anti_affinity_weight_multiplier=2.0) self._do_test(policy='soft-anti-affinity', expected_weight=2.0, expected_host='host3') @mock.patch.object(affinity, 'LOG') def test_soft_anti_affinity_weight_multiplier_negative_value(self, mock_log): self.flags(soft_anti_affinity_weight_multiplier=-1.0) self._do_test(policy='soft-anti-affinity', expected_weight=0.0, expected_host='host2') # call twice and assert that only one warning is emitted self._do_test(policy='soft-anti-affinity', expected_weight=0.0, expected_host='host2') self.assertEqual(1, mock_log.warn.call_count) nova-13.0.0/nova/tests/unit/scheduler/weights/test_weights_disk.py0000664000567000056710000001002512701407773026516 0ustar jenkinsjenkins00000000000000# Copyright 2011-2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler disk weights. 
""" from nova.scheduler import weights from nova.scheduler.weights import disk from nova import test from nova.tests.unit.scheduler import fakes class DiskWeigherTestCase(test.NoDBTestCase): def setUp(self): super(DiskWeigherTestCase, self).setUp() self.weight_handler = weights.HostWeightHandler() self.weighers = [disk.DiskWeigher()] def _get_weighed_host(self, hosts, weight_properties=None): if weight_properties is None: weight_properties = {} return self.weight_handler.get_weighed_objects(self.weighers, hosts, weight_properties)[0] def _get_all_hosts(self): host_values = [ ('host1', 'node1', {'free_disk_mb': 5120}), ('host2', 'node2', {'free_disk_mb': 10240}), ('host3', 'node3', {'free_disk_mb': 30720}), ('host4', 'node4', {'free_disk_mb': 81920}) ] return [fakes.FakeHostState(host, node, values) for host, node, values in host_values] def test_default_of_spreading_first(self): hostinfo_list = self._get_all_hosts() # host1: free_disk_mb=5120 # host2: free_disk_mb=10240 # host3: free_disk_mb=30720 # host4: free_disk_mb=81920 # so, host4 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(1.0, weighed_host.weight) self.assertEqual('host4', weighed_host.obj.host) def test_disk_filter_multiplier1(self): self.flags(disk_weight_multiplier=0.0) hostinfo_list = self._get_all_hosts() # host1: free_disk_mb=5120 # host2: free_disk_mb=10240 # host3: free_disk_mb=30720 # host4: free_disk_mb=81920 # We do not know the host, all have same weight. 
weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(0.0, weighed_host.weight) def test_disk_filter_multiplier2(self): self.flags(disk_weight_multiplier=2.0) hostinfo_list = self._get_all_hosts() # host1: free_disk_mb=5120 # host2: free_disk_mb=10240 # host3: free_disk_mb=30720 # host4: free_disk_mb=81920 # so, host4 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(1.0 * 2, weighed_host.weight) self.assertEqual('host4', weighed_host.obj.host) def test_disk_filter_negative(self): self.flags(disk_weight_multiplier=1.0) hostinfo_list = self._get_all_hosts() host_attr = {'id': 100, 'disk_mb': 81920, 'free_disk_mb': -5120} host_state = fakes.FakeHostState('negative', 'negative', host_attr) hostinfo_list = list(hostinfo_list) + [host_state] # host1: free_disk_mb=5120 # host2: free_disk_mb=10240 # host3: free_disk_mb=30720 # host4: free_disk_mb=81920 # negativehost: free_disk_mb=-5120 # so, host4 should win weights = self.weight_handler.get_weighed_objects(self.weighers, hostinfo_list, {}) weighed_host = weights[0] self.assertEqual(1, weighed_host.weight) self.assertEqual('host4', weighed_host.obj.host) # and negativehost should lose weighed_host = weights[-1] self.assertEqual(0, weighed_host.weight) self.assertEqual('negative', weighed_host.obj.host) nova-13.0.0/nova/tests/unit/scheduler/weights/__init__.py0000664000567000056710000000000012701407773024522 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/scheduler/weights/test_weights_metrics.py0000664000567000056710000001656412701407773027250 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler metrics weights. """ from nova import exception from nova.objects import fields from nova.objects import monitor_metric from nova.scheduler import weights from nova.scheduler.weights import metrics from nova import test from nova.tests.unit.scheduler import fakes idle = fields.MonitorMetricType.CPU_IDLE_TIME kernel = fields.MonitorMetricType.CPU_KERNEL_TIME user = fields.MonitorMetricType.CPU_USER_TIME class MetricsWeigherTestCase(test.NoDBTestCase): def setUp(self): super(MetricsWeigherTestCase, self).setUp() self.weight_handler = weights.HostWeightHandler() self.weighers = [metrics.MetricsWeigher()] def _get_weighed_host(self, hosts, setting, weight_properties=None): if not weight_properties: weight_properties = {} self.flags(weight_setting=setting, group='metrics') self.weighers[0]._parse_setting() return self.weight_handler.get_weighed_objects(self.weighers, hosts, weight_properties)[0] def _get_all_hosts(self): def fake_metric(name, value): return monitor_metric.MonitorMetric(name=name, value=value) def fake_list(objs): m_list = [fake_metric(name, val) for name, val in objs] return monitor_metric.MonitorMetricList(objects=m_list) host_values = [ ('host1', 'node1', {'metrics': fake_list([(idle, 512), (kernel, 1)])}), ('host2', 'node2', {'metrics': fake_list([(idle, 1024), (kernel, 2)])}), ('host3', 'node3', {'metrics': fake_list([(idle, 3072), (kernel, 1)])}), ('host4', 'node4', {'metrics': fake_list([(idle, 8192), (kernel, 0)])}), ('host5', 'node5', {'metrics': fake_list([(idle, 768), (kernel, 0), (user, 1)])}), ('host6', 'node6', 
{'metrics': fake_list([(idle, 2048), (kernel, 0), (user, 2)])}), ] return [fakes.FakeHostState(host, node, values) for host, node, values in host_values] def _do_test(self, settings, expected_weight, expected_host): hostinfo_list = self._get_all_hosts() weighed_host = self._get_weighed_host(hostinfo_list, settings) self.assertEqual(expected_weight, weighed_host.weight) self.assertEqual(expected_host, weighed_host.obj.host) def test_single_resource_no_metrics(self): setting = [idle + '=1'] hostinfo_list = [fakes.FakeHostState('host1', 'node1', {'metrics': None}), fakes.FakeHostState('host2', 'node2', {'metrics': None})] self.assertRaises(exception.ComputeHostMetricNotFound, self._get_weighed_host, hostinfo_list, setting) def test_single_resource(self): # host1: idle=512 # host2: idle=1024 # host3: idle=3072 # host4: idle=8192 # so, host4 should win: setting = [idle + '=1'] self._do_test(setting, 1.0, 'host4') def test_multiple_resource(self): # host1: idle=512, kernel=1 # host2: idle=1024, kernel=2 # host3: idle=3072, kernel=1 # host4: idle=8192, kernel=0 # so, host2 should win: setting = [idle + '=0.0001', kernel + '=1'] self._do_test(setting, 1.0, 'host2') def test_single_resource_duplicate_setting(self): # host1: idle=512 # host2: idle=1024 # host3: idle=3072 # host4: idle=8192 # so, host1 should win (sum of settings is negative): setting = [idle + '=-2', idle + '=1'] self._do_test(setting, 1.0, 'host1') def test_single_resourcenegtive_ratio(self): # host1: idle=512 # host2: idle=1024 # host3: idle=3072 # host4: idle=8192 # so, host1 should win: setting = [idle + '=-1'] self._do_test(setting, 1.0, 'host1') def test_multiple_resource_missing_ratio(self): # host1: idle=512, kernel=1 # host2: idle=1024, kernel=2 # host3: idle=3072, kernel=1 # host4: idle=8192, kernel=0 # so, host4 should win: setting = [idle + '=0.0001', kernel] self._do_test(setting, 1.0, 'host4') def test_multiple_resource_wrong_ratio(self): # host1: idle=512, kernel=1 # host2: idle=1024, kernel=2 
# host3: idle=3072, kernel=1 # host4: idle=8192, kernel=0 # so, host4 should win: setting = [idle + '=0.0001', kernel + ' = 2.0t'] self._do_test(setting, 1.0, 'host4') def _check_parsing_result(self, weigher, setting, results): self.flags(weight_setting=setting, group='metrics') weigher._parse_setting() self.assertEqual(len(weigher.setting), len(results)) for item in results: self.assertIn(item, weigher.setting) def test_parse_setting(self): weigher = self.weighers[0] self._check_parsing_result(weigher, [idle + '=1'], [(idle, 1.0)]) self._check_parsing_result(weigher, [idle + '=1', kernel + '=-2.1'], [(idle, 1.0), (kernel, -2.1)]) self._check_parsing_result(weigher, [idle + '=a1', kernel + '=-2.1'], [(kernel, -2.1)]) self._check_parsing_result(weigher, [idle, kernel + '=-2.1'], [(kernel, -2.1)]) self._check_parsing_result(weigher, ['=5', kernel + '=-2.1'], [(kernel, -2.1)]) def test_metric_not_found_required(self): setting = [idle + '=1', user + '=2'] self.assertRaises(exception.ComputeHostMetricNotFound, self._do_test, setting, 8192, 'host4') def test_metric_not_found_non_required(self): # host1: idle=512, kernel=1 # host2: idle=1024, kernel=2 # host3: idle=3072, kernel=1 # host4: idle=8192, kernel=0 # host5: idle=768, kernel=0, user=1 # host6: idle=2048, kernel=0, user=2 # so, host5 should win: self.flags(required=False, group='metrics') setting = [idle + '=0.0001', user + '=-1'] self._do_test(setting, 1.0, 'host5') nova-13.0.0/nova/tests/unit/scheduler/weights/test_weights_ram.py0000664000567000056710000000773612701407773026362 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
class RamWeigherTestCase(test.NoDBTestCase):
    """Tests for the RAM weigher.

    With the default (positive) multiplier the host with the most free
    RAM must always be weighed highest; a zero multiplier flattens all
    weights and a host with negative free RAM must come last.
    """

    def setUp(self):
        super(RamWeigherTestCase, self).setUp()
        self.weight_handler = weights.HostWeightHandler()
        self.weighers = [ram.RAMWeigher()]

    def _get_weighed_host(self, hosts, weight_properties=None):
        # Return only the top-weighed host for the given candidates.
        props = {} if weight_properties is None else weight_properties
        weighed = self.weight_handler.get_weighed_objects(
            self.weighers, hosts, props)
        return weighed[0]

    def _get_all_hosts(self):
        # Fixture hosts ordered by increasing free RAM.
        fixtures = (
            ('host1', 'node1', 512),
            ('host2', 'node2', 1024),
            ('host3', 'node3', 3072),
            ('host4', 'node4', 8192),
        )
        return [fakes.FakeHostState(name, node, {'free_ram_mb': free})
                for name, node, free in fixtures]

    def test_default_of_spreading_first(self):
        # host4 has the most free RAM (8192) and must win with weight 1.0.
        winner = self._get_weighed_host(self._get_all_hosts())
        self.assertEqual(1.0, winner.weight)
        self.assertEqual('host4', winner.obj.host)

    def test_ram_filter_multiplier1(self):
        # A zero multiplier flattens every weight to 0.0, so no host is
        # distinguishable from another.
        self.flags(ram_weight_multiplier=0.0)
        winner = self._get_weighed_host(self._get_all_hosts())
        self.assertEqual(0.0, winner.weight)

    def test_ram_filter_multiplier2(self):
        # The multiplier scales the normalized weight linearly.
        self.flags(ram_weight_multiplier=2.0)
        winner = self._get_weighed_host(self._get_all_hosts())
        self.assertEqual(1.0 * 2, winner.weight)
        self.assertEqual('host4', winner.obj.host)

    def test_ram_filter_negative(self):
        # A host reporting negative free RAM must sort last with weight 0,
        # while host4 (most free RAM) still wins with weight 1.
        self.flags(ram_weight_multiplier=1.0)
        candidates = list(self._get_all_hosts())
        candidates.append(fakes.FakeHostState(
            'negative', 'negative',
            {'id': 100, 'memory_mb': 8192, 'free_ram_mb': -512}))
        weighed = self.weight_handler.get_weighed_objects(
            self.weighers, candidates, {})
        self.assertEqual(1, weighed[0].weight)
        self.assertEqual('host4', weighed[0].obj.host)
        self.assertEqual(0, weighed[-1].weight)
        self.assertEqual('negative', weighed[-1].obj.host)
See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler weights. """ from nova.scheduler import weights from nova.scheduler.weights import affinity from nova.scheduler.weights import io_ops from nova.scheduler.weights import metrics from nova.scheduler.weights import ram from nova import test from nova.tests.unit import matchers from nova.tests.unit.scheduler import fakes class TestWeighedHost(test.NoDBTestCase): def test_dict_conversion(self): host_state = fakes.FakeHostState('somehost', None, {}) host = weights.WeighedHost(host_state, 'someweight') expected = {'weight': 'someweight', 'host': 'somehost'} self.assertThat(host.to_dict(), matchers.DictMatches(expected)) def test_all_weighers(self): classes = weights.all_weighers() self.assertIn(ram.RAMWeigher, classes) self.assertIn(metrics.MetricsWeigher, classes) self.assertIn(io_ops.IoOpsWeigher, classes) self.assertIn(affinity.ServerGroupSoftAffinityWeigher, classes) self.assertIn(affinity.ServerGroupSoftAntiAffinityWeigher, classes) nova-13.0.0/nova/tests/unit/image/0000775000567000056710000000000012701410205020035 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/image/abs.tar.gz0000664000567000056710000000023112701407773021745 0ustar jenkinsjenkins00000000000000‹‹ªØNíÑA Â0…á¬=En™šLÎSîJ¤FðøšE¥¸hA"þßf30^¨Ó%œK ®yÊ9µ©9Éz.œª™éÔ'*míSÇL/·kgï]­÷Í»½ý Kÿ§qîõ£l7úoýk<ªóÒ+ÐÚŸ÷?•røvŸyQä‡ (nova-13.0.0/nova/tests/unit/image/rel.tar.gz0000664000567000056710000000024512701407773021767 0ustar jenkinsjenkins00000000000000‹,«ØNíÒA Â0…á¬=ENÐdj2=O]tW"1¢ÞÞv!H±$ âÿ˜ÅòàeH¹ÜÊ%¹¦™OnHÉ™šü¤ëâ<¥‹þy>U•6жÆË¾•`l¬šbÅùTúl­)åºyïÝþG ¯û?ô¹ÞsÁªa£ÿ°ì_§ïb}½ëþ¼ÿ1¥Ý·3øÌõB;<(nova-13.0.0/nova/tests/unit/image/__init__.py0000664000567000056710000000000012701407773022154 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/image/test_glance.py0000664000567000056710000016471612701410011022711 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # 
class TestConversions(test.NoDBTestCase):
    """Tests for the glance <-> nova image-metadata conversion helpers."""

    def test_convert_timestamps_to_datetimes(self):
        # Every *_at field serialized in glance's string format must come
        # back as the equivalent timezone-aware datetime.
        fixture = {'name': None,
                   'properties': {},
                   'status': None,
                   'is_public': None,
                   'created_at': NOW_GLANCE_FORMAT,
                   'updated_at': NOW_GLANCE_FORMAT,
                   'deleted_at': NOW_GLANCE_FORMAT}
        converted = glance._convert_timestamps_to_datetimes(fixture)
        for field in ('created_at', 'updated_at', 'deleted_at'):
            self.assertEqual(NOW_DATETIME, converted[field])

    def _test_extracting_missing_attributes(self, include_locations):
        # Verify behavior from glance objects that are missing attributes.
        # TODO(jaypipes): Find a better way of testing this crappy
        # glanceclient magic object stuff.
        class MyFakeGlanceImage(object):
            # Mimics glanceclient's attribute-backed image model: all
            # access goes through a 'raw' dict stashed in __dict__.
            def __init__(self, metadata):
                IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at',
                                    'updated_at', 'status', 'min_disk',
                                    'min_ram', 'is_public']
                raw = dict.fromkeys(IMAGE_ATTRIBUTES)
                raw.update(metadata)
                self.__dict__['raw'] = raw

            def __getattr__(self, key):
                try:
                    return self.__dict__['raw'][key]
                except KeyError:
                    raise AttributeError(key)

            def __setattr__(self, key, value):
                try:
                    self.__dict__['raw'][key] = value
                except KeyError:
                    raise AttributeError(key)

        image = MyFakeGlanceImage({'id': 1,
                                   'created_at': NOW_DATETIME,
                                   'updated_at': NOW_DATETIME})
        observed = glance._extract_attributes(
            image, include_locations=include_locations)
        # Missing attributes are filled with None (size defaults to 0,
        # properties to an empty dict).
        expected = {
            'id': 1,
            'name': None,
            'is_public': None,
            'size': 0,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': NOW_DATETIME,
            'updated_at': NOW_DATETIME,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {},
            'owner': None,
        }
        if include_locations:
            expected['locations'] = None
            expected['direct_url'] = None
        self.assertEqual(expected, observed)

    def test_extracting_missing_attributes_include_locations(self):
        self._test_extracting_missing_attributes(include_locations=True)

    def test_extracting_missing_attributes_exclude_locations(self):
        self._test_extracting_missing_attributes(include_locations=False)
glance._translate_image_exception('123', in_exc) self.assertIsInstance(out_exc, exception.ImageNotFound) def test_client_httpnotfound_converts_to_imagenotfound(self): in_exc = glanceclient.exc.HTTPNotFound('123') out_exc = glance._translate_image_exception('123', in_exc) self.assertIsInstance(out_exc, exception.ImageNotFound) class TestGlanceSerializer(test.NoDBTestCase): def test_serialize(self): metadata = {'name': 'image1', 'is_public': True, 'foo': 'bar', 'properties': { 'prop1': 'propvalue1', 'mappings': [ {'virtual': 'aaa', 'device': 'bbb'}, {'virtual': 'xxx', 'device': 'yyy'}], 'block_device_mapping': [ {'virtual_device': 'fake', 'device_name': '/dev/fake'}, {'virtual_device': 'ephemeral0', 'device_name': '/dev/fake0'}]}} # NOTE(tdurakov): Assertion of serialized objects won't work # during using of random PYTHONHASHSEED. Assertion of # serialized/deserialized object and initial one is enough converted = glance._convert_to_string(metadata) self.assertEqual(glance._convert_from_string(converted), metadata) class TestGetImageService(test.NoDBTestCase): @mock.patch.object(glance.GlanceClientWrapper, '__init__', return_value=None) def test_get_remote_service_from_id(self, gcwi_mocked): id_or_uri = '123' _ignored, image_id = glance.get_remote_image_service( mock.sentinel.ctx, id_or_uri) self.assertEqual(id_or_uri, image_id) gcwi_mocked.assert_called_once_with() @mock.patch.object(glance.GlanceClientWrapper, '__init__', return_value=None) def test_get_remote_service_from_href(self, gcwi_mocked): id_or_uri = 'http://127.0.0.1/v1/images/123' _ignored, image_id = glance.get_remote_image_service( mock.sentinel.ctx, id_or_uri) self.assertEqual('123', image_id) gcwi_mocked.assert_called_once_with(context=mock.sentinel.ctx, endpoint='http://127.0.0.1') class TestCreateGlanceClient(test.NoDBTestCase): @mock.patch('oslo_utils.netutils.is_valid_ipv6') @mock.patch('glanceclient.Client') def test_headers_passed_glanceclient(self, init_mock, ipv6_mock): 
self.flags(auth_strategy='keystone') ipv6_mock.return_value = False auth_token = 'token' ctx = context.RequestContext('fake', 'fake', auth_token=auth_token) expected_endpoint = 'http://host4:9295' expected_params = { 'identity_headers': { 'X-Auth-Token': 'token', 'X-User-Id': 'fake', 'X-Roles': '', 'X-Tenant-Id': 'fake', 'X-Identity-Status': 'Confirmed' } } glance._glanceclient_from_endpoint(ctx, expected_endpoint) init_mock.assert_called_once_with('1', expected_endpoint, **expected_params) # Test the version is properly passed to glanceclient. ipv6_mock.reset_mock() init_mock.reset_mock() expected_endpoint = 'http://host4:9295' expected_params = { 'identity_headers': { 'X-Auth-Token': 'token', 'X-User-Id': 'fake', 'X-Roles': '', 'X-Tenant-Id': 'fake', 'X-Identity-Status': 'Confirmed' } } glance._glanceclient_from_endpoint(ctx, expected_endpoint, version=2) init_mock.assert_called_once_with('2', expected_endpoint, **expected_params) # Test that the IPv6 bracketization adapts the endpoint properly. 
ipv6_mock.reset_mock() init_mock.reset_mock() ipv6_mock.return_value = True expected_endpoint = 'http://[host4]:9295' glance._glanceclient_from_endpoint(ctx, expected_endpoint) init_mock.assert_called_once_with('1', expected_endpoint, **expected_params) class TestGlanceClientWrapper(test.NoDBTestCase): @mock.patch('time.sleep') @mock.patch('nova.image.glance._glanceclient_from_endpoint') def test_static_client_without_retries(self, create_client_mock, sleep_mock): client_mock = mock.MagicMock() images_mock = mock.MagicMock() images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable type(client_mock).images = mock.PropertyMock(return_value=images_mock) create_client_mock.return_value = client_mock self.flags(num_retries=0, group='glance') ctx = context.RequestContext('fake', 'fake') host = 'host4' port = 9295 endpoint = 'http://%s:%s' % (host, port) client = glance.GlanceClientWrapper(context=ctx, endpoint=endpoint) create_client_mock.assert_called_once_with(ctx, mock.ANY, 1) self.assertRaises(exception.GlanceConnectionFailed, client.call, ctx, 1, 'get', 'meow') self.assertFalse(sleep_mock.called) @mock.patch('nova.image.glance.LOG') @mock.patch('time.sleep') @mock.patch('nova.image.glance._glanceclient_from_endpoint') def test_static_client_with_retries_negative(self, create_client_mock, sleep_mock, mock_log): client_mock = mock.Mock(spec=glanceclient.Client) images_mock = mock.Mock() images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable client_mock.images = images_mock create_client_mock.return_value = client_mock self.flags(num_retries=-1, group='glance') ctx = context.RequestContext('fake', 'fake') host = 'host4' port = 9295 endpoint = 'http://%s:%s' % (host, port) client = glance.GlanceClientWrapper(context=ctx, endpoint=endpoint) create_client_mock.assert_called_once_with(ctx, mock.ANY, 1) self.assertRaises(exception.GlanceConnectionFailed, client.call, ctx, 1, 'get', 'meow') self.assertTrue(mock_log.warning.called) msg = 
mock_log.warning.call_args_list[0] self.assertIn('Treating negative config value', msg[0][0]) self.assertFalse(sleep_mock.called) @mock.patch('time.sleep') @mock.patch('nova.image.glance._glanceclient_from_endpoint') def test_static_client_with_retries(self, create_client_mock, sleep_mock): self.flags(num_retries=1, group='glance') client_mock = mock.MagicMock() images_mock = mock.MagicMock() images_mock.get.side_effect = [ glanceclient.exc.ServiceUnavailable, None ] type(client_mock).images = mock.PropertyMock(return_value=images_mock) create_client_mock.return_value = client_mock ctx = context.RequestContext('fake', 'fake') host = 'host4' port = 9295 endpoint = 'http://%s:%s' % (host, port) client = glance.GlanceClientWrapper(context=ctx, endpoint=endpoint) client.call(ctx, 1, 'get', 'meow') sleep_mock.assert_called_once_with(1) @mock.patch('random.shuffle') @mock.patch('time.sleep') @mock.patch('nova.image.glance._glanceclient_from_endpoint') def test_default_client_without_retries(self, create_client_mock, sleep_mock, shuffle_mock): api_servers = [ 'host1:9292', 'https://host2:9293', 'http://host3:9294' ] client_mock = mock.MagicMock() images_mock = mock.MagicMock() images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable type(client_mock).images = mock.PropertyMock(return_value=images_mock) create_client_mock.return_value = client_mock shuffle_mock.return_value = api_servers self.flags(num_retries=0, group='glance') self.flags(api_servers=api_servers, group='glance') # Here we are testing the behaviour that calling client.call() twice # when there are no retries will cycle through the api_servers and not # sleep (which would be an indication of a retry) ctx = context.RequestContext('fake', 'fake') client = glance.GlanceClientWrapper() self.assertRaises(exception.GlanceConnectionFailed, client.call, ctx, 1, 'get', 'meow') self.assertEqual(str(client.api_server), "http://host1:9292") self.assertFalse(sleep_mock.called) 
self.assertRaises(exception.GlanceConnectionFailed, client.call, ctx, 1, 'get', 'meow') self.assertEqual(str(client.api_server), "https://host2:9293") self.assertFalse(sleep_mock.called) @mock.patch('random.shuffle') @mock.patch('time.sleep') @mock.patch('nova.image.glance._glanceclient_from_endpoint') def test_default_client_with_retries(self, create_client_mock, sleep_mock, shuffle_mock): api_servers = [ 'host1:9292', 'https://host2:9293', 'http://host3:9294' ] client_mock = mock.MagicMock() images_mock = mock.MagicMock() images_mock.get.side_effect = [ glanceclient.exc.ServiceUnavailable, None ] type(client_mock).images = mock.PropertyMock(return_value=images_mock) create_client_mock.return_value = client_mock self.flags(num_retries=1, group='glance') self.flags(api_servers=api_servers, group='glance') ctx = context.RequestContext('fake', 'fake') # And here we're testing that if num_retries is not 0, then we attempt # to retry the same connection action against the next client. client = glance.GlanceClientWrapper() client.call(ctx, 1, 'get', 'meow') self.assertEqual(str(client.api_server), "https://host2:9293") sleep_mock.assert_called_once_with(1) @mock.patch('oslo_service.sslutils.is_enabled') @mock.patch('glanceclient.Client') def test_create_glance_client_with_ssl(self, client_mock, ssl_enable_mock): sslutils.register_opts(CONF) self.flags(ca_file='foo.cert', cert_file='bar.cert', key_file='wut.key', group='ssl') ctxt = mock.sentinel.ctx glance._glanceclient_from_endpoint(ctxt, 'https://host4:9295') client_mock.assert_called_once_with( '1', 'https://host4:9295', insecure=False, ssl_compression=False, cert_file='bar.cert', key_file='wut.key', cacert='foo.cert', identity_headers=mock.ANY) @mock.patch.object(glanceclient.common.http.HTTPClient, 'get') def test_determine_curr_major_version(self, http_client_mock): result = ("http://host1:9292/v2/", {'versions': [ {'status': 'CURRENT', 'id': 'v2.3'}, {'status': 'SUPPORTED', 'id': 'v1.0'}]}) 
http_client_mock.return_value = result maj_ver = glance._determine_curr_major_version('http://host1:9292') self.assertEqual(2, maj_ver) @mock.patch.object(glanceclient.common.http.HTTPClient, 'get') def test_determine_curr_major_version_invalid(self, http_client_mock): result = ("http://host1:9292/v2/", "Invalid String") http_client_mock.return_value = result curr_major_version = glance._determine_curr_major_version('abc') self.assertIsNone(curr_major_version) @mock.patch.object(glanceclient.common.http.HTTPClient, 'get') def test_determine_curr_major_version_unsupported(self, http_client_mock): result = ("http://host1:9292/v2/", {'versions': [ {'status': 'CURRENT', 'id': 'v666.0'}, {'status': 'SUPPORTED', 'id': 'v1.0'}]}) http_client_mock.return_value = result maj_ver = glance._determine_curr_major_version('http://host1:9292') self.assertIsNone(maj_ver) class TestDownloadNoDirectUri(test.NoDBTestCase): """Tests the download method of the GlanceImageService when the default of not allowing direct URI transfers is set. 
""" @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_no_data_no_dest_path(self, show_mock, open_mock): client = mock.MagicMock() client.call.return_value = mock.sentinel.image_chunks ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id) self.assertFalse(show_mock.called) self.assertFalse(open_mock.called) client.call.assert_called_once_with(ctx, 1, 'data', mock.sentinel.image_id) self.assertEqual(mock.sentinel.image_chunks, res) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_data_no_dest_path(self, show_mock, open_mock): client = mock.MagicMock() client.call.return_value = [1, 2, 3] ctx = mock.sentinel.ctx data = mock.MagicMock() service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id, data=data) self.assertFalse(show_mock.called) self.assertFalse(open_mock.called) client.call.assert_called_once_with(ctx, 1, 'data', mock.sentinel.image_id) self.assertIsNone(res) data.write.assert_has_calls( [ mock.call(1), mock.call(2), mock.call(3) ] ) self.assertFalse(data.close.called) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_no_data_dest_path(self, show_mock, open_mock): client = mock.MagicMock() client.call.return_value = [1, 2, 3] ctx = mock.sentinel.ctx writer = mock.MagicMock() open_mock.return_value = writer service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id, dst_path=mock.sentinel.dst_path) self.assertFalse(show_mock.called) client.call.assert_called_once_with(ctx, 1, 'data', mock.sentinel.image_id) open_mock.assert_called_once_with(mock.sentinel.dst_path, 'wb') self.assertIsNone(res) writer.write.assert_has_calls( [ mock.call(1), mock.call(2), mock.call(3) ] ) writer.close.assert_called_once_with() 
@mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_data_dest_path(self, show_mock, open_mock): # NOTE(jaypipes): This really shouldn't be allowed, but because of the # horrible design of the download() method in GlanceImageService, no # error is raised, and the dst_path is ignored... # #TODO(jaypipes): Fix the aforementioned horrible design of # the download() method. client = mock.MagicMock() client.call.return_value = [1, 2, 3] ctx = mock.sentinel.ctx data = mock.MagicMock() service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id, data=data) self.assertFalse(show_mock.called) self.assertFalse(open_mock.called) client.call.assert_called_once_with(ctx, 1, 'data', mock.sentinel.image_id) self.assertIsNone(res) data.write.assert_has_calls( [ mock.call(1), mock.call(2), mock.call(3) ] ) self.assertFalse(data.close.called) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_data_dest_path_write_fails(self, show_mock, open_mock): client = mock.MagicMock() client.call.return_value = [1, 2, 3] ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) # NOTE(mikal): data is a file like object, which in our case always # raises an exception when we attempt to write to the file. 
class FakeDiskException(Exception): pass class Exceptionator(StringIO): def write(self, _): raise FakeDiskException('Disk full!') self.assertRaises(FakeDiskException, service.download, ctx, mock.sentinel.image_id, data=Exceptionator()) @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_direct_file_uri(self, show_mock, get_tran_mock): self.flags(allowed_direct_url_schemes=['file'], group='glance') show_mock.return_value = { 'locations': [ { 'url': 'file:///files/image', 'metadata': mock.sentinel.loc_meta } ] } tran_mod = mock.MagicMock() get_tran_mock.return_value = tran_mod client = mock.MagicMock() ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id, dst_path=mock.sentinel.dst_path) self.assertIsNone(res) self.assertFalse(client.call.called) show_mock.assert_called_once_with(ctx, mock.sentinel.image_id, include_locations=True) get_tran_mock.assert_called_once_with('file') tran_mod.download.assert_called_once_with(ctx, mock.ANY, mock.sentinel.dst_path, mock.sentinel.loc_meta) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_direct_exception_fallback(self, show_mock, get_tran_mock, open_mock): # Test that we fall back to downloading to the dst_path # if the download method of the transfer module raised # an exception. 
self.flags(allowed_direct_url_schemes=['file'], group='glance') show_mock.return_value = { 'locations': [ { 'url': 'file:///files/image', 'metadata': mock.sentinel.loc_meta } ] } tran_mod = mock.MagicMock() tran_mod.download.side_effect = Exception get_tran_mock.return_value = tran_mod client = mock.MagicMock() client.call.return_value = [1, 2, 3] ctx = mock.sentinel.ctx writer = mock.MagicMock() open_mock.return_value = writer service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id, dst_path=mock.sentinel.dst_path) self.assertIsNone(res) show_mock.assert_called_once_with(ctx, mock.sentinel.image_id, include_locations=True) get_tran_mock.assert_called_once_with('file') tran_mod.download.assert_called_once_with(ctx, mock.ANY, mock.sentinel.dst_path, mock.sentinel.loc_meta) client.call.assert_called_once_with(ctx, 1, 'data', mock.sentinel.image_id) # NOTE(jaypipes): log messages call open() in part of the # download path, so here, we just check that the last open() # call was done for the dst_path file descriptor. open_mock.assert_called_with(mock.sentinel.dst_path, 'wb') self.assertIsNone(res) writer.write.assert_has_calls( [ mock.call(1), mock.call(2), mock.call(3) ] ) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_direct_no_mod_fallback(self, show_mock, get_tran_mock, open_mock): # Test that we fall back to downloading to the dst_path # if no appropriate transfer module is found... # an exception. 
self.flags(allowed_direct_url_schemes=['funky'], group='glance') show_mock.return_value = { 'locations': [ { 'url': 'file:///files/image', 'metadata': mock.sentinel.loc_meta } ] } get_tran_mock.return_value = None client = mock.MagicMock() client.call.return_value = [1, 2, 3] ctx = mock.sentinel.ctx writer = mock.MagicMock() open_mock.return_value = writer service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id, dst_path=mock.sentinel.dst_path) self.assertIsNone(res) show_mock.assert_called_once_with(ctx, mock.sentinel.image_id, include_locations=True) get_tran_mock.assert_called_once_with('file') client.call.assert_called_once_with(ctx, 1, 'data', mock.sentinel.image_id) # NOTE(jaypipes): log messages call open() in part of the # download path, so here, we just check that the last open() # call was done for the dst_path file descriptor. open_mock.assert_called_with(mock.sentinel.dst_path, 'wb') self.assertIsNone(res) writer.write.assert_has_calls( [ mock.call(1), mock.call(2), mock.call(3) ] ) writer.close.assert_called_once_with() class TestDownloadSignatureVerification(test.NoDBTestCase): class MockVerifier(object): def update(self, data): return def verify(self): return True class BadVerifier(object): def update(self, data): return def verify(self): raise cryptography.exceptions.InvalidSignature( 'Invalid signature.' 
) def setUp(self): super(TestDownloadSignatureVerification, self).setUp() self.flags(verify_glance_signatures=True, group='glance') self.fake_img_props = { 'properties': { 'img_signature': 'signature', 'img_signature_hash_method': 'SHA-224', 'img_signature_certificate_uuid': 'uuid', 'img_signature_key_type': 'RSA-PSS', } } self.fake_img_data = ['A' * 256, 'B' * 256] client = mock.MagicMock() client.call.return_value = self.fake_img_data self.service = glance.GlanceImageService(client) @mock.patch('nova.image.glance.LOG') @mock.patch('nova.image.glance.GlanceImageService.show') @mock.patch('nova.signature_utils.get_verifier') def test_download_with_signature_verification(self, mock_get_verifier, mock_show, mock_log): mock_get_verifier.return_value = self.MockVerifier() mock_show.return_value = self.fake_img_props res = self.service.download(context=None, image_id=None, data=None, dst_path=None) self.assertEqual(self.fake_img_data, res) mock_get_verifier.assert_called_once_with(None, 'uuid', 'SHA-224', 'signature', 'RSA-PSS') mock_log.info.assert_called_once_with(mock.ANY, mock.ANY) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.LOG') @mock.patch('nova.image.glance.GlanceImageService.show') @mock.patch('nova.signature_utils.get_verifier') def test_download_dst_path_signature_verification(self, mock_get_verifier, mock_show, mock_log, mock_open): mock_get_verifier.return_value = self.MockVerifier() mock_show.return_value = self.fake_img_props mock_dest = mock.MagicMock() fake_path = 'FAKE_PATH' mock_open.return_value = mock_dest self.service.download(context=None, image_id=None, data=None, dst_path=fake_path) mock_get_verifier.assert_called_once_with(None, 'uuid', 'SHA-224', 'signature', 'RSA-PSS') mock_log.info.assert_called_once_with(mock.ANY, mock.ANY) self.assertEqual(len(self.fake_img_data), mock_dest.write.call_count) self.assertTrue(mock_dest.close.called) @mock.patch('nova.image.glance.LOG') 
@mock.patch('nova.image.glance.GlanceImageService.show') @mock.patch('nova.signature_utils.get_verifier') def test_download_with_get_verifier_failure(self, mock_get_verifier, mock_show, mock_log): mock_get_verifier.side_effect = exception.SignatureVerificationError( reason='Signature verification ' 'failed.' ) mock_show.return_value = self.fake_img_props self.assertRaises(exception.SignatureVerificationError, self.service.download, context=None, image_id=None, data=None, dst_path=None) mock_log.error.assert_called_once_with(mock.ANY, mock.ANY) @mock.patch('nova.image.glance.LOG') @mock.patch('nova.image.glance.GlanceImageService.show') @mock.patch('nova.signature_utils.get_verifier') def test_download_with_invalid_signature(self, mock_get_verifier, mock_show, mock_log): mock_get_verifier.return_value = self.BadVerifier() mock_show.return_value = self.fake_img_props self.assertRaises(cryptography.exceptions.InvalidSignature, self.service.download, context=None, image_id=None, data=None, dst_path=None) mock_log.error.assert_called_once_with(mock.ANY, mock.ANY) @mock.patch('nova.image.glance.LOG') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_missing_signature_metadata(self, mock_show, mock_log): mock_show.return_value = {'properties': {}} self.assertRaisesRegex(exception.SignatureVerificationError, 'Required image properties for signature ' 'verification do not exist. Cannot verify ' 'signature. 
Missing property: .*', self.service.download, context=None, image_id=None, data=None, dst_path=None) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.signature_utils.get_verifier') @mock.patch('nova.image.glance.LOG') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_dst_path_signature_fail(self, mock_show, mock_log, mock_get_verifier, mock_open): mock_get_verifier.return_value = self.BadVerifier() mock_dest = mock.MagicMock() fake_path = 'FAKE_PATH' mock_open.return_value = mock_dest mock_show.return_value = self.fake_img_props self.assertRaises(cryptography.exceptions.InvalidSignature, self.service.download, context=None, image_id=None, data=None, dst_path=fake_path) mock_log.error.assert_called_once_with(mock.ANY, mock.ANY) mock_open.assert_called_once_with(fake_path, 'wb') mock_dest.truncate.assert_called_once_with(0) self.assertTrue(mock_dest.close.called) class TestIsImageAvailable(test.NoDBTestCase): """Tests the internal _is_image_available function.""" class ImageSpecV2(object): visibility = None properties = None class ImageSpecV1(object): is_public = None properties = None def test_auth_token_override(self): ctx = mock.MagicMock(auth_token=True) img = mock.MagicMock() res = glance._is_image_available(ctx, img) self.assertTrue(res) self.assertFalse(img.called) def test_admin_override(self): ctx = mock.MagicMock(auth_token=False, is_admin=True) img = mock.MagicMock() res = glance._is_image_available(ctx, img) self.assertTrue(res) self.assertFalse(img.called) def test_v2_visibility(self): ctx = mock.MagicMock(auth_token=False, is_admin=False) # We emulate warlock validation that throws an AttributeError # if you try to call is_public on an image model returned by # a call to V2 image.get(). Here, the ImageSpecV2 does not have # an is_public attribute and MagicMock will throw an AttributeError. 
img = mock.MagicMock(visibility='PUBLIC', spec=TestIsImageAvailable.ImageSpecV2) res = glance._is_image_available(ctx, img) self.assertTrue(res) def test_v1_is_public(self): ctx = mock.MagicMock(auth_token=False, is_admin=False) img = mock.MagicMock(is_public=True, spec=TestIsImageAvailable.ImageSpecV1) res = glance._is_image_available(ctx, img) self.assertTrue(res) def test_project_is_owner(self): ctx = mock.MagicMock(auth_token=False, is_admin=False, project_id='123') props = { 'owner_id': '123' } img = mock.MagicMock(visibility='private', properties=props, spec=TestIsImageAvailable.ImageSpecV2) res = glance._is_image_available(ctx, img) self.assertTrue(res) ctx.reset_mock() img = mock.MagicMock(is_public=False, properties=props, spec=TestIsImageAvailable.ImageSpecV1) res = glance._is_image_available(ctx, img) self.assertTrue(res) def test_project_context_matches_project_prop(self): ctx = mock.MagicMock(auth_token=False, is_admin=False, project_id='123') props = { 'project_id': '123' } img = mock.MagicMock(visibility='private', properties=props, spec=TestIsImageAvailable.ImageSpecV2) res = glance._is_image_available(ctx, img) self.assertTrue(res) ctx.reset_mock() img = mock.MagicMock(is_public=False, properties=props, spec=TestIsImageAvailable.ImageSpecV1) res = glance._is_image_available(ctx, img) self.assertTrue(res) def test_no_user_in_props(self): ctx = mock.MagicMock(auth_token=False, is_admin=False, project_id='123') props = { } img = mock.MagicMock(visibility='private', properties=props, spec=TestIsImageAvailable.ImageSpecV2) res = glance._is_image_available(ctx, img) self.assertFalse(res) ctx.reset_mock() img = mock.MagicMock(is_public=False, properties=props, spec=TestIsImageAvailable.ImageSpecV1) res = glance._is_image_available(ctx, img) self.assertFalse(res) def test_user_matches_context(self): ctx = mock.MagicMock(auth_token=False, is_admin=False, user_id='123') props = { 'user_id': '123' } img = mock.MagicMock(visibility='private', properties=props, 
spec=TestIsImageAvailable.ImageSpecV2) res = glance._is_image_available(ctx, img) self.assertTrue(res) ctx.reset_mock() img = mock.MagicMock(is_public=False, properties=props, spec=TestIsImageAvailable.ImageSpecV1) res = glance._is_image_available(ctx, img) self.assertTrue(res) class TestShow(test.NoDBTestCase): """Tests the show method of the GlanceImageService.""" @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_show_success(self, is_avail_mock, trans_from_mock): is_avail_mock.return_value = True trans_from_mock.return_value = {'mock': mock.sentinel.trans_from} client = mock.MagicMock() client.call.return_value = {} ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) info = service.show(ctx, mock.sentinel.image_id) client.call.assert_called_once_with(ctx, 1, 'get', mock.sentinel.image_id) is_avail_mock.assert_called_once_with(ctx, {}) trans_from_mock.assert_called_once_with({}, include_locations=False) self.assertIn('mock', info) self.assertEqual(mock.sentinel.trans_from, info['mock']) @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_show_not_available(self, is_avail_mock, trans_from_mock): is_avail_mock.return_value = False client = mock.MagicMock() client.call.return_value = mock.sentinel.images_0 ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) with testtools.ExpectedException(exception.ImageNotFound): service.show(ctx, mock.sentinel.image_id) client.call.assert_called_once_with(ctx, 1, 'get', mock.sentinel.image_id) is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0) self.assertFalse(trans_from_mock.called) @mock.patch('nova.image.glance._reraise_translated_image_exception') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_show_client_failure(self, is_avail_mock, trans_from_mock, reraise_mock): raised = 
exception.ImageNotAuthorized(image_id=123) client = mock.MagicMock() client.call.side_effect = glanceclient.exc.Forbidden ctx = mock.sentinel.ctx reraise_mock.side_effect = raised service = glance.GlanceImageService(client) with testtools.ExpectedException(exception.ImageNotAuthorized): service.show(ctx, mock.sentinel.image_id) client.call.assert_called_once_with(ctx, 1, 'get', mock.sentinel.image_id) self.assertFalse(is_avail_mock.called) self.assertFalse(trans_from_mock.called) reraise_mock.assert_called_once_with(mock.sentinel.image_id) @mock.patch('nova.image.glance._is_image_available') def test_show_queued_image_without_some_attrs(self, is_avail_mock): is_avail_mock.return_value = True client = mock.MagicMock() # fake image cls without disk_format, container_format, name attributes class fake_image_cls(dict): id = 'b31aa5dd-f07a-4748-8f15-398346887584' deleted = False protected = False min_disk = 0 created_at = '2014-05-20T08:16:48' size = 0 status = 'queued' is_public = False min_ram = 0 owner = '980ec4870033453ead65c0470a78b8a8' updated_at = '2014-05-20T08:16:48' glance_image = fake_image_cls() client.call.return_value = glance_image ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) image_info = service.show(ctx, glance_image.id) client.call.assert_called_once_with(ctx, 1, 'get', glance_image.id) NOVA_IMAGE_ATTRIBUTES = set(['size', 'disk_format', 'owner', 'container_format', 'status', 'id', 'name', 'created_at', 'updated_at', 'deleted', 'deleted_at', 'checksum', 'min_disk', 'min_ram', 'is_public', 'properties']) self.assertEqual(NOVA_IMAGE_ATTRIBUTES, set(image_info.keys())) @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_include_locations_success(self, avail_mock, trans_from_mock): locations = [mock.sentinel.loc1] avail_mock.return_value = True trans_from_mock.return_value = {'locations': locations} client = mock.Mock() client.call.return_value = mock.sentinel.image 
service = glance.GlanceImageService(client) ctx = mock.sentinel.ctx image_id = mock.sentinel.image_id info = service.show(ctx, image_id, include_locations=True) client.call.assert_called_once_with(ctx, 2, 'get', image_id) avail_mock.assert_called_once_with(ctx, mock.sentinel.image) trans_from_mock.assert_called_once_with(mock.sentinel.image, include_locations=True) self.assertIn('locations', info) self.assertEqual(locations, info['locations']) @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_include_direct_uri_success(self, avail_mock, trans_from_mock): locations = [mock.sentinel.loc1] avail_mock.return_value = True trans_from_mock.return_value = {'locations': locations, 'direct_uri': mock.sentinel.duri} client = mock.Mock() client.call.return_value = mock.sentinel.image service = glance.GlanceImageService(client) ctx = mock.sentinel.ctx image_id = mock.sentinel.image_id info = service.show(ctx, image_id, include_locations=True) client.call.assert_called_once_with(ctx, 2, 'get', image_id) expected = locations expected.append({'url': mock.sentinel.duri, 'metadata': {}}) self.assertIn('locations', info) self.assertEqual(expected, info['locations']) @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_do_not_show_deleted_images(self, is_avail_mock, trans_from_mock): class fake_image_cls(dict): id = 'b31aa5dd-f07a-4748-8f15-398346887584' deleted = True glance_image = fake_image_cls() client = mock.MagicMock() client.call.return_value = glance_image ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) with testtools.ExpectedException(exception.ImageNotFound): service.show(ctx, glance_image.id, show_deleted=False) client.call.assert_called_once_with(ctx, 1, 'get', glance_image.id) self.assertFalse(is_avail_mock.called) self.assertFalse(trans_from_mock.called) class TestDetail(test.NoDBTestCase): """Tests the detail method of 
the GlanceImageService.""" @mock.patch('nova.image.glance._extract_query_params') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_detail_success_available(self, is_avail_mock, trans_from_mock, ext_query_mock): params = {} is_avail_mock.return_value = True ext_query_mock.return_value = params trans_from_mock.return_value = mock.sentinel.trans_from client = mock.MagicMock() client.call.return_value = [mock.sentinel.images_0] ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) images = service.detail(ctx, **params) client.call.assert_called_once_with(ctx, 1, 'list') is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0) trans_from_mock.assert_called_once_with(mock.sentinel.images_0) self.assertEqual([mock.sentinel.trans_from], images) @mock.patch('nova.image.glance._extract_query_params') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_detail_success_unavailable(self, is_avail_mock, trans_from_mock, ext_query_mock): params = {} is_avail_mock.return_value = False ext_query_mock.return_value = params trans_from_mock.return_value = mock.sentinel.trans_from client = mock.MagicMock() client.call.return_value = [mock.sentinel.images_0] ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) images = service.detail(ctx, **params) client.call.assert_called_once_with(ctx, 1, 'list') is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0) self.assertFalse(trans_from_mock.called) self.assertEqual([], images) @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_detail_params_passed(self, is_avail_mock, _trans_from_mock): client = mock.MagicMock() client.call.return_value = [mock.sentinel.images_0] ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) service.detail(ctx, page_size=5, limit=10) expected_filters = { 
'is_public': 'none' } client.call.assert_called_once_with(ctx, 1, 'list', filters=expected_filters, page_size=5, limit=10) @mock.patch('nova.image.glance._reraise_translated_exception') @mock.patch('nova.image.glance._extract_query_params') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_detail_client_failure(self, is_avail_mock, trans_from_mock, ext_query_mock, reraise_mock): params = {} ext_query_mock.return_value = params raised = exception.Forbidden() client = mock.MagicMock() client.call.side_effect = glanceclient.exc.Forbidden ctx = mock.sentinel.ctx reraise_mock.side_effect = raised service = glance.GlanceImageService(client) with testtools.ExpectedException(exception.Forbidden): service.detail(ctx, **params) client.call.assert_called_once_with(ctx, 1, 'list') self.assertFalse(is_avail_mock.called) self.assertFalse(trans_from_mock.called) reraise_mock.assert_called_once_with() class TestCreate(test.NoDBTestCase): """Tests the create method of the GlanceImageService.""" @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._translate_to_glance') def test_create_success(self, trans_to_mock, trans_from_mock): translated = { 'image_id': mock.sentinel.image_id } trans_to_mock.return_value = translated trans_from_mock.return_value = mock.sentinel.trans_from image_mock = mock.MagicMock(spec=dict) client = mock.MagicMock() client.call.return_value = mock.sentinel.image_meta ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) image_meta = service.create(ctx, image_mock) trans_to_mock.assert_called_once_with(image_mock) client.call.assert_called_once_with(ctx, 1, 'create', image_id=mock.sentinel.image_id) trans_from_mock.assert_called_once_with(mock.sentinel.image_meta) self.assertEqual(mock.sentinel.trans_from, image_meta) # Now verify that if we supply image data to the call, # that the client is also called with the data kwarg client.reset_mock() 
service.create(ctx, image_mock, data=mock.sentinel.data) client.call.assert_called_once_with(ctx, 1, 'create', image_id=mock.sentinel.image_id, data=mock.sentinel.data) @mock.patch('nova.image.glance._reraise_translated_exception') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._translate_to_glance') def test_create_client_failure(self, trans_to_mock, trans_from_mock, reraise_mock): translated = {} trans_to_mock.return_value = translated image_mock = mock.MagicMock(spec=dict) raised = exception.Invalid() client = mock.MagicMock() client.call.side_effect = glanceclient.exc.BadRequest ctx = mock.sentinel.ctx reraise_mock.side_effect = raised service = glance.GlanceImageService(client) self.assertRaises(exception.Invalid, service.create, ctx, image_mock) trans_to_mock.assert_called_once_with(image_mock) self.assertFalse(trans_from_mock.called) class TestUpdate(test.NoDBTestCase): """Tests the update method of the GlanceImageService.""" @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._translate_to_glance') def test_update_success(self, trans_to_mock, trans_from_mock): translated = { 'id': mock.sentinel.image_id, 'name': mock.sentinel.name } trans_to_mock.return_value = translated trans_from_mock.return_value = mock.sentinel.trans_from image_mock = mock.MagicMock(spec=dict) client = mock.MagicMock() client.call.return_value = mock.sentinel.image_meta ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) image_meta = service.update(ctx, mock.sentinel.image_id, image_mock) trans_to_mock.assert_called_once_with(image_mock) # Verify that the 'id' element has been removed as a kwarg to # the call to glanceclient's update (since the image ID is # supplied as a positional arg), and that the # purge_props default is True. 
client.call.assert_called_once_with(ctx, 1, 'update', mock.sentinel.image_id, name=mock.sentinel.name, purge_props=True) trans_from_mock.assert_called_once_with(mock.sentinel.image_meta) self.assertEqual(mock.sentinel.trans_from, image_meta) # Now verify that if we supply image data to the call, # that the client is also called with the data kwarg client.reset_mock() service.update(ctx, mock.sentinel.image_id, image_mock, data=mock.sentinel.data) client.call.assert_called_once_with(ctx, 1, 'update', mock.sentinel.image_id, name=mock.sentinel.name, purge_props=True, data=mock.sentinel.data) @mock.patch('nova.image.glance._reraise_translated_image_exception') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._translate_to_glance') def test_update_client_failure(self, trans_to_mock, trans_from_mock, reraise_mock): translated = { 'name': mock.sentinel.name } trans_to_mock.return_value = translated trans_from_mock.return_value = mock.sentinel.trans_from image_mock = mock.MagicMock(spec=dict) raised = exception.ImageNotAuthorized(image_id=123) client = mock.MagicMock() client.call.side_effect = glanceclient.exc.Forbidden ctx = mock.sentinel.ctx reraise_mock.side_effect = raised service = glance.GlanceImageService(client) self.assertRaises(exception.ImageNotAuthorized, service.update, ctx, mock.sentinel.image_id, image_mock) client.call.assert_called_once_with(ctx, 1, 'update', mock.sentinel.image_id, purge_props=True, name=mock.sentinel.name) self.assertFalse(trans_from_mock.called) reraise_mock.assert_called_once_with(mock.sentinel.image_id) class TestDelete(test.NoDBTestCase): """Tests the delete method of the GlanceImageService.""" def test_delete_success(self): client = mock.MagicMock() client.call.return_value = True ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) service.delete(ctx, mock.sentinel.image_id) client.call.assert_called_once_with(ctx, 1, 'delete', mock.sentinel.image_id) def 
test_delete_client_failure(self): client = mock.MagicMock() client.call.side_effect = glanceclient.exc.NotFound ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) self.assertRaises(exception.ImageNotFound, service.delete, ctx, mock.sentinel.image_id) class TestGlanceUrl(test.NoDBTestCase): def test_generate_glance_http_url(self): generated_url = glance.generate_glance_url() glance_host = CONF.glance.host # ipv6 address, need to wrap it with '[]' if netutils.is_valid_ipv6(glance_host): glance_host = '[%s]' % glance_host http_url = "http://%s:%d" % (glance_host, CONF.glance.port) self.assertEqual(generated_url, http_url) def test_generate_glance_https_url(self): self.flags(protocol="https", group='glance') generated_url = glance.generate_glance_url() glance_host = CONF.glance.host # ipv6 address, need to wrap it with '[]' if netutils.is_valid_ipv6(glance_host): glance_host = '[%s]' % glance_host https_url = "https://%s:%d" % (glance_host, CONF.glance.port) self.assertEqual(generated_url, https_url) class TestGlanceApiServers(test.NoDBTestCase): def test_get_api_servers(self): glance_servers = ['10.0.1.1:9292', 'https://10.0.0.1:9293', 'http://10.0.2.2:9294'] expected_servers = ['http://10.0.1.1:9292', 'https://10.0.0.1:9293', 'http://10.0.2.2:9294'] self.flags(api_servers=glance_servers, group='glance') api_servers = glance.get_api_servers() i = 0 for server in api_servers: i += 1 self.assertIn(server, expected_servers) if i > 2: break class TestGlanceNoApiServers(test.NoDBTestCase): def test_get_api_server_no_server(self): self.flags(group='glance', host="10.0.0.1", port=9292) api_servers = glance.get_api_servers() self.assertEqual("http://10.0.0.1:9292", next(api_servers)) self.flags(group='glance', host="10.0.0.1", protocol="https", port=9292) api_servers = glance.get_api_servers() self.assertEqual("https://10.0.0.1:9292", next(api_servers)) self.flags(group='glance', host="f000::c0de", protocol="https", port=9292) api_servers = 
glance.get_api_servers() self.assertEqual("https://[f000::c0de]:9292", next(api_servers)) class TestUpdateGlanceImage(test.NoDBTestCase): @mock.patch('nova.image.glance.GlanceImageService') def test_start(self, mock_glance_image_service): consumer = glance.UpdateGlanceImage( 'context', 'id', 'metadata', 'stream') with mock.patch.object(glance, 'get_remote_image_service') as a_mock: a_mock.return_value = (mock_glance_image_service, 'image_id') consumer.start() mock_glance_image_service.update.assert_called_with( 'context', 'image_id', 'metadata', 'stream', purge_props=False) nova-13.0.0/nova/tests/unit/image/test_fake.py0000664000567000056710000001106712701407773022401 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from six.moves import StringIO from nova import context from nova import exception from nova import test import nova.tests.unit.image.fake class FakeImageServiceTestCase(test.NoDBTestCase): def setUp(self): super(FakeImageServiceTestCase, self).setUp() self.image_service = nova.tests.unit.image.fake.FakeImageService() self.context = context.get_admin_context() def tearDown(self): super(FakeImageServiceTestCase, self).tearDown() nova.tests.unit.image.fake.FakeImageService_reset() def test_detail(self): res = self.image_service.detail(self.context) for image in res: keys = set(image.keys()) self.assertEqual(keys, set(['id', 'name', 'created_at', 'updated_at', 'deleted_at', 'deleted', 'status', 'is_public', 'properties', 'disk_format', 'container_format', 'size'])) self.assertIsInstance(image['created_at'], datetime.datetime) self.assertIsInstance(image['updated_at'], datetime.datetime) if not (isinstance(image['deleted_at'], datetime.datetime) or image['deleted_at'] is None): self.fail('image\'s "deleted_at" attribute was neither a ' 'datetime object nor None') def check_is_bool(image, key): val = image.get('deleted') if not isinstance(val, bool): self.fail('image\'s "%s" attribute wasn\'t ' 'a bool: %r' % (key, val)) check_is_bool(image, 'deleted') check_is_bool(image, 'is_public') def test_show_raises_imagenotfound_for_invalid_id(self): self.assertRaises(exception.ImageNotFound, self.image_service.show, self.context, 'this image does not exist') def test_create_adds_id(self): index = self.image_service.detail(self.context) image_count = len(index) self.image_service.create(self.context, {}) index = self.image_service.detail(self.context) self.assertEqual(len(index), image_count + 1) self.assertTrue(index[0]['id']) def test_create_keeps_id(self): self.image_service.create(self.context, {'id': '34'}) self.image_service.show(self.context, '34') def test_create_rejects_duplicate_ids(self): self.image_service.create(self.context, {'id': '34'}) 
self.assertRaises(exception.CouldNotUploadImage, self.image_service.create, self.context, {'id': '34'}) # Make sure there's still one left self.image_service.show(self.context, '34') def test_update(self): self.image_service.create(self.context, {'id': '34', 'foo': 'bar'}) self.image_service.update(self.context, '34', {'id': '34', 'foo': 'baz'}) img = self.image_service.show(self.context, '34') self.assertEqual(img['foo'], 'baz') def test_delete(self): self.image_service.create(self.context, {'id': '34', 'foo': 'bar'}) self.image_service.delete(self.context, '34') self.assertRaises(exception.NotFound, self.image_service.show, self.context, '34') def test_create_then_get(self): blob = 'some data' s1 = StringIO(blob) self.image_service.create(self.context, {'id': '32', 'foo': 'bar'}, data=s1) s2 = StringIO() self.image_service.download(self.context, '32', data=s2) self.assertEqual(s2.getvalue(), blob, 'Did not get blob back intact') nova-13.0.0/nova/tests/unit/image/test_transfer_modules.py0000664000567000056710000000723212701407773025046 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import six.moves.urllib.parse as urlparse from nova import exception from nova.image.download import file as tm_file from nova import test class TestFileTransferModule(test.NoDBTestCase): @mock.patch('nova.virt.libvirt.utils.copy_image') def test_filesystem_success(self, copy_mock): self.flags(allowed_direct_url_schemes=['file'], group='glance') self.flags(group='image_file_url', filesystems=['gluster']) mountpoint = '/gluster' url = 'file:///gluster/my/image/path' url_parts = urlparse.urlparse(url) fs_id = 'someid' loc_meta = { 'id': fs_id, 'mountpoint': mountpoint } dst_file = mock.MagicMock() tm = tm_file.FileTransfer() # NOTE(Jbresnah) The following options must be added after the module # has added the specific groups. self.flags(group='image_file_url:gluster', id=fs_id) self.flags(group='image_file_url:gluster', mountpoint=mountpoint) tm.download(mock.sentinel.ctx, url_parts, dst_file, loc_meta) copy_mock.assert_called_once_with('/gluster/my/image/path', dst_file) @mock.patch('nova.virt.libvirt.utils.copy_image') def test_filesystem_mismatched_mountpoint(self, copy_mock): self.flags(allowed_direct_url_schemes=['file'], group='glance') self.flags(group='image_file_url', filesystems=['gluster']) mountpoint = '/gluster' # Should include the mountpoint before my/image/path url = 'file:///my/image/path' url_parts = urlparse.urlparse(url) fs_id = 'someid' loc_meta = { 'id': fs_id, 'mountpoint': mountpoint } dst_file = mock.MagicMock() tm = tm_file.FileTransfer() self.flags(group='image_file_url:gluster', id=fs_id) self.flags(group='image_file_url:gluster', mountpoint=mountpoint) self.assertRaises(exception.ImageDownloadModuleMetaDataError, tm.download, mock.sentinel.ctx, url_parts, dst_file, loc_meta) self.assertFalse(copy_mock.called) @mock.patch('nova.virt.libvirt.utils.copy_image') def test_filesystem_mismatched_filesystem(self, copy_mock): self.flags(allowed_direct_url_schemes=['file'], group='glance') self.flags(group='image_file_url', 
filesystems=['gluster']) mountpoint = '/gluster' # Should include the mountpoint before my/image/path url = 'file:///my/image/path' url_parts = urlparse.urlparse(url) fs_id = 'someid' loc_meta = { 'id': 'funky', 'mountpoint': mountpoint } dst_file = mock.MagicMock() tm = tm_file.FileTransfer() self.flags(group='image_file_url:gluster', id=fs_id) self.flags(group='image_file_url:gluster', mountpoint=mountpoint) self.assertRaises(exception.ImageDownloadModuleError, tm.download, mock.sentinel.ctx, url_parts, dst_file, loc_meta) self.assertFalse(copy_mock.called) nova-13.0.0/nova/tests/unit/image/fake.py0000664000567000056710000002261012701410011021311 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a fake image service.""" import copy import datetime import uuid from oslo_config import cfg from oslo_log import log as logging from nova.compute import arch from nova import exception CONF = cfg.CONF CONF.import_opt('null_kernel', 'nova.compute.api') LOG = logging.getLogger(__name__) AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID = '70a599e0-31e7-49b7-b260-868f441e862b' class _FakeImageService(object): """Mock (fake) image service for unit testing.""" def __init__(self): self.images = {} # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. 
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3) image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'raw', 'disk_format': 'raw', 'size': '25165824', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel, 'architecture': arch.X86_64}} image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'size': '58145823', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel}} image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'bare', 'disk_format': 'raw', 'size': '83594576', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel}} image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'size': '84035174', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel}} image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'size': '26360814', 'properties': {'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'ramdisk_id': None}} image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379', 'name': 'fakeimage6', 'created_at': timestamp, 'updated_at': 
timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'ova', 'disk_format': 'vhd', 'size': '49163826', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel, 'architecture': arch.X86_64, 'auto_disk_config': 'False'}} image7 = {'id': AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID, 'name': 'fakeimage7', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'ova', 'disk_format': 'vhd', 'size': '74185822', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel, 'architecture': arch.X86_64, 'auto_disk_config': 'True'}} self.create(None, image1) self.create(None, image2) self.create(None, image3) self.create(None, image4) self.create(None, image5) self.create(None, image6) self.create(None, image7) self._imagedata = {} super(_FakeImageService, self).__init__() # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir def detail(self, context, **kwargs): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) def download(self, context, image_id, dst_path=None, data=None): self.show(context, image_id) if data: data.write(self._imagedata.get(image_id, '')) elif dst_path: with open(dst_path, 'wb') as data: data.write(self._imagedata.get(image_id, '')) def show(self, context, image_id, include_locations=False, show_deleted=True): """Get data about specified image. Returns a dict containing image data for the given opaque image id. """ image = self.images.get(str(image_id)) if image: return copy.deepcopy(image) LOG.warning('Unable to find image id %s. Have images: %s', image_id, self.images) raise exception.ImageNotFound(image_id=image_id) def create(self, context, metadata, data=None): """Store the image data and return the new image id. :raises: Duplicate if the image already exist. 
""" image_id = str(metadata.get('id', uuid.uuid4())) metadata['id'] = image_id if image_id in self.images: raise exception.CouldNotUploadImage(image_id=image_id) self.images[image_id] = copy.deepcopy(metadata) if data: self._imagedata[image_id] = data.read() return self.images[image_id] def update(self, context, image_id, metadata, data=None, purge_props=False): """Replace the contents of the given image with the new data. :raises: ImageNotFound if the image does not exist. """ if not self.images.get(image_id): raise exception.ImageNotFound(image_id=image_id) if purge_props: self.images[image_id] = copy.deepcopy(metadata) else: image = self.images[image_id] try: image['properties'].update(metadata.pop('properties')) except KeyError: pass image.update(metadata) return self.images[image_id] def delete(self, context, image_id): """Delete the given image. :raises: ImageNotFound if the image does not exist. """ removed = self.images.pop(image_id, None) if not removed: raise exception.ImageNotFound(image_id=image_id) def get_location(self, context, image_id): if image_id in self.images: return 'fake_location' return None _fakeImageService = _FakeImageService() def FakeImageService(): return _fakeImageService def FakeImageService_reset(): global _fakeImageService _fakeImageService = _FakeImageService() def get_valid_image_id(): return AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID def stub_out_image_service(test): """Stubs out the image service for the test with the FakeImageService :param test: instance of nova.test.TestCase :returns: The stubbed out FakeImageService object """ image_service = FakeImageService() test.stub_out('nova.image.glance.get_remote_image_service', lambda x, y: (image_service, y)) test.stub_out('nova.image.glance.get_default_image_service', lambda: image_service) return image_service nova-13.0.0/nova/tests/unit/test_loadables.py0000664000567000056710000001246112701407773022336 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All 
Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Loadable class handling. """ from nova import exception from nova import test from nova.tests.unit import fake_loadables class LoadablesTestCase(test.NoDBTestCase): def setUp(self): super(LoadablesTestCase, self).setUp() self.fake_loader = fake_loadables.FakeLoader() # The name that we imported above for testing self.test_package = 'nova.tests.unit.fake_loadables' def test_loader_init(self): self.assertEqual(self.fake_loader.package, self.test_package) # Test the path of the module ending_path = '/' + self.test_package.replace('.', '/') self.assertTrue(self.fake_loader.path.endswith(ending_path)) self.assertEqual(self.fake_loader.loadable_cls_type, fake_loadables.FakeLoadable) def _compare_classes(self, classes, expected): class_names = [cls.__name__ for cls in classes] self.assertEqual(set(class_names), set(expected)) def test_get_all_classes(self): classes = self.fake_loader.get_all_classes() expected_class_names = ['FakeLoadableSubClass1', 'FakeLoadableSubClass2', 'FakeLoadableSubClass5', 'FakeLoadableSubClass6'] self._compare_classes(classes, expected_class_names) def test_get_matching_classes(self): prefix = self.test_package test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1', prefix + '.fake_loadable2.FakeLoadableSubClass5'] classes = self.fake_loader.get_matching_classes(test_classes) expected_class_names = ['FakeLoadableSubClass1', 'FakeLoadableSubClass5'] 
self._compare_classes(classes, expected_class_names) def test_get_matching_classes_with_underscore(self): prefix = self.test_package test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1', prefix + '.fake_loadable2._FakeLoadableSubClass7'] self.assertRaises(exception.ClassNotFound, self.fake_loader.get_matching_classes, test_classes) def test_get_matching_classes_with_wrong_type1(self): prefix = self.test_package test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass4', prefix + '.fake_loadable2.FakeLoadableSubClass5'] self.assertRaises(exception.ClassNotFound, self.fake_loader.get_matching_classes, test_classes) def test_get_matching_classes_with_wrong_type2(self): prefix = self.test_package test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1', prefix + '.fake_loadable2.FakeLoadableSubClass8'] self.assertRaises(exception.ClassNotFound, self.fake_loader.get_matching_classes, test_classes) def test_get_matching_classes_with_one_function(self): prefix = self.test_package test_classes = [prefix + '.fake_loadable1.return_valid_classes', prefix + '.fake_loadable2.FakeLoadableSubClass5'] classes = self.fake_loader.get_matching_classes(test_classes) expected_class_names = ['FakeLoadableSubClass1', 'FakeLoadableSubClass2', 'FakeLoadableSubClass5'] self._compare_classes(classes, expected_class_names) def test_get_matching_classes_with_two_functions(self): prefix = self.test_package test_classes = [prefix + '.fake_loadable1.return_valid_classes', prefix + '.fake_loadable2.return_valid_class'] classes = self.fake_loader.get_matching_classes(test_classes) expected_class_names = ['FakeLoadableSubClass1', 'FakeLoadableSubClass2', 'FakeLoadableSubClass6'] self._compare_classes(classes, expected_class_names) def test_get_matching_classes_with_function_including_invalids(self): # When using a method, no checking is done on valid classes. 
prefix = self.test_package test_classes = [prefix + '.fake_loadable1.return_invalid_classes', prefix + '.fake_loadable2.return_valid_class'] classes = self.fake_loader.get_matching_classes(test_classes) expected_class_names = ['FakeLoadableSubClass1', '_FakeLoadableSubClass3', 'FakeLoadableSubClass4', 'FakeLoadableSubClass6'] self._compare_classes(classes, expected_class_names) nova-13.0.0/nova/tests/unit/test_notifications.py0000664000567000056710000005315612701410011023242 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for common notifications.""" import copy import mock from oslo_context import context as o_context from oslo_context import fixture as o_fixture from nova.compute import flavors from nova.compute import task_states from nova.compute import vm_states from nova import context from nova import exception from nova import notifications from nova import objects from nova.objects import base as obj_base from nova import test from nova.tests.unit import fake_network from nova.tests.unit import fake_notifier class NotificationsTestCase(test.TestCase): def setUp(self): super(NotificationsTestCase, self).setUp() self.fixture = self.useFixture(o_fixture.ClearRequestContext()) self.net_info = fake_network.fake_get_instance_nw_info(self, 1, 1) def fake_get_nw_info(cls, ctxt, instance): self.assertTrue(ctxt.is_admin) return self.net_info self.stub_out('nova.network.api.API.get_instance_nw_info', fake_get_nw_info) fake_network.set_stub_network_methods(self) fake_notifier.stub_notifier(self.stubs) self.addCleanup(fake_notifier.reset) self.flags(compute_driver='nova.virt.fake.FakeDriver', network_manager='nova.network.manager.FlatManager', notify_on_state_change="vm_and_task_state", host='testhost') self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) self.instance = self._wrapped_create() self.decorated_function_called = False def _wrapped_create(self, params=None): instance_type = flavors.get_flavor_by_name('m1.tiny') inst = objects.Instance(image_ref=1, user_id=self.user_id, project_id=self.project_id, instance_type_id=instance_type['id'], root_gb=0, ephemeral_gb=0, access_ip_v4='1.2.3.4', access_ip_v6='feed::5eed', display_name='test_instance', hostname='test_instance_hostname', node='test_instance_node', system_metadata={}) inst._context = self.context if params: inst.update(params) inst.flavor = instance_type inst.create() return inst def test_send_api_fault_disabled(self): 
self.flags(notify_api_faults=False) notifications.send_api_fault("http://example.com/foo", 500, None) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) def test_send_api_fault(self): self.flags(notify_api_faults=True) exception = None try: # Get a real exception with a call stack. raise test.TestingException("junk") except test.TestingException as e: exception = e notifications.send_api_fault("http://example.com/foo", 500, exception) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) n = fake_notifier.NOTIFICATIONS[0] self.assertEqual(n.priority, 'ERROR') self.assertEqual(n.event_type, 'api.fault') self.assertEqual(n.payload['url'], 'http://example.com/foo') self.assertEqual(n.payload['status'], 500) self.assertIsNotNone(n.payload['exception']) def test_send_api_fault_fresh_context(self): self.flags(notify_api_faults=True) exception = None try: # Get a real exception with a call stack. raise test.TestingException("junk") except test.TestingException as e: exception = e ctxt = context.RequestContext(overwrite=True) notifications.send_api_fault("http://example.com/foo", 500, exception) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) n = fake_notifier.NOTIFICATIONS[0] self.assertEqual(n.priority, 'ERROR') self.assertEqual(n.event_type, 'api.fault') self.assertEqual(n.payload['url'], 'http://example.com/foo') self.assertEqual(n.payload['status'], 500) self.assertIsNotNone(n.payload['exception']) self.assertEqual(ctxt, n.context) def test_send_api_fault_fake_context(self): self.flags(notify_api_faults=True) exception = None try: # Get a real exception with a call stack. 
raise test.TestingException("junk") except test.TestingException as e: exception = e ctxt = o_context.get_current() self.assertIsNotNone(ctxt) notifications.send_api_fault("http://example.com/foo", 500, exception) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) n = fake_notifier.NOTIFICATIONS[0] self.assertEqual(n.priority, 'ERROR') self.assertEqual(n.event_type, 'api.fault') self.assertEqual(n.payload['url'], 'http://example.com/foo') self.assertEqual(n.payload['status'], 500) self.assertIsNotNone(n.payload['exception']) self.assertIsNotNone(n.context) self.assertEqual(ctxt, n.context) def test_send_api_fault_admin_context(self): self.flags(notify_api_faults=True) exception = None try: # Get a real exception with a call stack. raise test.TestingException("junk") except test.TestingException as e: exception = e self.fixture._remove_cached_context() self.assertIsNone(o_context.get_current()) notifications.send_api_fault("http://example.com/foo", 500, exception) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) n = fake_notifier.NOTIFICATIONS[0] self.assertEqual(n.priority, 'ERROR') self.assertEqual(n.event_type, 'api.fault') self.assertEqual(n.payload['url'], 'http://example.com/foo') self.assertEqual(n.payload['status'], 500) self.assertIsNotNone(n.payload['exception']) self.assertIsNotNone(n.context) self.assertTrue(n.context.is_admin) def test_notif_disabled(self): # test config disable of the notifications self.flags(notify_on_state_change=None) old = copy.copy(self.instance) self.instance.vm_state = vm_states.ACTIVE old_vm_state = old['vm_state'] new_vm_state = self.instance.vm_state old_task_state = old['task_state'] new_task_state = self.instance.task_state notifications.send_update_with_states(self.context, self.instance, old_vm_state, new_vm_state, old_task_state, new_task_state, verify_states=True) notifications.send_update(self.context, old, self.instance) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) def test_task_notif(self): # test 
config disable of just the task state notifications self.flags(notify_on_state_change="vm_state") # we should not get a notification on task stgate chagne now old = copy.copy(self.instance) self.instance.task_state = task_states.SPAWNING old_vm_state = old['vm_state'] new_vm_state = self.instance.vm_state old_task_state = old['task_state'] new_task_state = self.instance.task_state notifications.send_update_with_states(self.context, self.instance, old_vm_state, new_vm_state, old_task_state, new_task_state, verify_states=True) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) # ok now enable task state notifications and re-try self.flags(notify_on_state_change="vm_and_task_state") notifications.send_update(self.context, old, self.instance) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) def test_send_no_notif(self): # test notification on send no initial vm state: old_vm_state = self.instance.vm_state new_vm_state = self.instance.vm_state old_task_state = self.instance.task_state new_task_state = self.instance.task_state notifications.send_update_with_states(self.context, self.instance, old_vm_state, new_vm_state, old_task_state, new_task_state, service="compute", host=None, verify_states=True) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) def test_send_on_vm_change(self): old = obj_base.obj_to_primitive(self.instance) old['vm_state'] = None # pretend we just transitioned to ACTIVE: self.instance.vm_state = vm_states.ACTIVE notifications.send_update(self.context, old, self.instance) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) # service name should default to 'compute' notif = fake_notifier.NOTIFICATIONS[0] self.assertEqual('compute.testhost', notif.publisher_id) def test_send_on_task_change(self): old = obj_base.obj_to_primitive(self.instance) old['task_state'] = None # pretend we just transitioned to task SPAWNING: self.instance.task_state = task_states.SPAWNING notifications.send_update(self.context, old, self.instance) 
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) def test_no_update_with_states(self): notifications.send_update_with_states(self.context, self.instance, vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING, task_states.SPAWNING, verify_states=True) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) def test_vm_update_with_states(self): notifications.send_update_with_states(self.context, self.instance, vm_states.BUILDING, vm_states.ACTIVE, task_states.SPAWNING, task_states.SPAWNING, verify_states=True) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) notif = fake_notifier.NOTIFICATIONS[0] payload = notif.payload access_ip_v4 = str(self.instance.access_ip_v4) access_ip_v6 = str(self.instance.access_ip_v6) display_name = self.instance.display_name hostname = self.instance.hostname node = self.instance.node self.assertEqual(vm_states.BUILDING, payload["old_state"]) self.assertEqual(vm_states.ACTIVE, payload["state"]) self.assertEqual(task_states.SPAWNING, payload["old_task_state"]) self.assertEqual(task_states.SPAWNING, payload["new_task_state"]) self.assertEqual(payload["access_ip_v4"], access_ip_v4) self.assertEqual(payload["access_ip_v6"], access_ip_v6) self.assertEqual(payload["display_name"], display_name) self.assertEqual(payload["hostname"], hostname) self.assertEqual(payload["node"], node) def test_task_update_with_states(self): self.flags(notify_on_state_change="vm_and_task_state") notifications.send_update_with_states(self.context, self.instance, vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING, None, verify_states=True) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) notif = fake_notifier.NOTIFICATIONS[0] payload = notif.payload access_ip_v4 = str(self.instance.access_ip_v4) access_ip_v6 = str(self.instance.access_ip_v6) display_name = self.instance.display_name hostname = self.instance.hostname self.assertEqual(vm_states.BUILDING, payload["old_state"]) self.assertEqual(vm_states.BUILDING, payload["state"]) 
self.assertEqual(task_states.SPAWNING, payload["old_task_state"]) self.assertIsNone(payload["new_task_state"]) self.assertEqual(payload["access_ip_v4"], access_ip_v4) self.assertEqual(payload["access_ip_v6"], access_ip_v6) self.assertEqual(payload["display_name"], display_name) self.assertEqual(payload["hostname"], hostname) def test_update_no_service_name(self): notifications.send_update_with_states(self.context, self.instance, vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING, None) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) # service name should default to 'compute' notif = fake_notifier.NOTIFICATIONS[0] self.assertEqual('compute.testhost', notif.publisher_id) def test_update_with_service_name(self): notifications.send_update_with_states(self.context, self.instance, vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING, None, service="testservice") self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) # service name should default to 'compute' notif = fake_notifier.NOTIFICATIONS[0] self.assertEqual('testservice.testhost', notif.publisher_id) def test_update_with_host_name(self): notifications.send_update_with_states(self.context, self.instance, vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING, None, host="someotherhost") self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) # service name should default to 'compute' notif = fake_notifier.NOTIFICATIONS[0] self.assertEqual('compute.someotherhost', notif.publisher_id) def test_payload_has_fixed_ip_labels(self): info = notifications.info_from_instance(self.context, self.instance, self.net_info, None) self.assertIn("fixed_ips", info) self.assertEqual(info["fixed_ips"][0]["label"], "test1") def test_payload_has_vif_mac_address(self): info = notifications.info_from_instance(self.context, self.instance, self.net_info, None) self.assertIn("fixed_ips", info) self.assertEqual(self.net_info[0]['address'], info["fixed_ips"][0]["vif_mac"]) def test_payload_has_cell_name_empty(self): 
info = notifications.info_from_instance(self.context, self.instance, self.net_info, None) self.assertIn("cell_name", info) self.assertIsNone(self.instance.cell_name) self.assertEqual("", info["cell_name"]) def test_payload_has_cell_name(self): self.instance.cell_name = "cell1" info = notifications.info_from_instance(self.context, self.instance, self.net_info, None) self.assertIn("cell_name", info) self.assertEqual("cell1", info["cell_name"]) def test_payload_has_progress_empty(self): info = notifications.info_from_instance(self.context, self.instance, self.net_info, None) self.assertIn("progress", info) self.assertIsNone(self.instance.progress) self.assertEqual("", info["progress"]) def test_payload_has_progress(self): self.instance.progress = 50 info = notifications.info_from_instance(self.context, self.instance, self.net_info, None) self.assertIn("progress", info) self.assertEqual(50, info["progress"]) def test_send_access_ip_update(self): notifications.send_update(self.context, self.instance, self.instance) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) notif = fake_notifier.NOTIFICATIONS[0] payload = notif.payload access_ip_v4 = str(self.instance.access_ip_v4) access_ip_v6 = str(self.instance.access_ip_v6) self.assertEqual(payload["access_ip_v4"], access_ip_v4) self.assertEqual(payload["access_ip_v6"], access_ip_v6) def test_send_name_update(self): param = {"display_name": "new_display_name"} new_name_inst = self._wrapped_create(params=param) notifications.send_update(self.context, self.instance, new_name_inst) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) notif = fake_notifier.NOTIFICATIONS[0] payload = notif.payload old_display_name = self.instance.display_name new_display_name = new_name_inst.display_name self.assertEqual(payload["old_display_name"], old_display_name) self.assertEqual(payload["display_name"], new_display_name) def test_send_no_state_change(self): called = [False] def sending_no_state_change(context, instance, **kwargs): 
called[0] = True self.stub_out('nova.notifications._send_instance_update_notification', sending_no_state_change) notifications.send_update(self.context, self.instance, self.instance) self.assertTrue(called[0]) def test_fail_sending_update(self): def fail_sending(context, instance, **kwargs): raise Exception('failed to notify') self.stub_out('nova.notifications._send_instance_update_notification', fail_sending) notifications.send_update(self.context, self.instance, self.instance) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) @mock.patch.object(notifications.LOG, 'exception') def test_fail_sending_update_instance_not_found(self, mock_log_exception): # Tests that InstanceNotFound is handled as an expected exception and # not logged as an error. notfound = exception.InstanceNotFound(instance_id=self.instance.uuid) with mock.patch.object(notifications, '_send_instance_update_notification', side_effect=notfound): notifications.send_update( self.context, self.instance, self.instance) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) self.assertEqual(0, mock_log_exception.call_count) @mock.patch.object(notifications.LOG, 'exception') def test_fail_send_update_with_states_inst_not_found(self, mock_log_exception): # Tests that InstanceNotFound is handled as an expected exception and # not logged as an error. 
notfound = exception.InstanceNotFound(instance_id=self.instance.uuid) with mock.patch.object(notifications, '_send_instance_update_notification', side_effect=notfound): notifications.send_update_with_states( self.context, self.instance, vm_states.BUILDING, vm_states.ERROR, task_states.NETWORKING, new_task_state=None) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) self.assertEqual(0, mock_log_exception.call_count) def _decorated_function(self, arg1, arg2): self.decorated_function_called = True def test_notify_decorator(self): func_name = self._decorated_function.__name__ # Decorated with notify_decorator like monkey_patch self._decorated_function = notifications.notify_decorator( func_name, self._decorated_function) ctxt = o_context.RequestContext() self._decorated_function(1, ctxt) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) n = fake_notifier.NOTIFICATIONS[0] self.assertEqual(n.priority, 'INFO') self.assertEqual(n.event_type, func_name) self.assertEqual(n.context, ctxt) self.assertTrue(self.decorated_function_called) class NotificationsFormatTestCase(test.NoDBTestCase): def test_state_computation(self): instance = {'vm_state': mock.sentinel.vm_state, 'task_state': mock.sentinel.task_state} states = notifications._compute_states_payload(instance) self.assertEqual(mock.sentinel.vm_state, states['state']) self.assertEqual(mock.sentinel.vm_state, states['old_state']) self.assertEqual(mock.sentinel.task_state, states['old_task_state']) self.assertEqual(mock.sentinel.task_state, states['new_task_state']) states = notifications._compute_states_payload( instance, old_vm_state=mock.sentinel.old_vm_state, ) self.assertEqual(mock.sentinel.vm_state, states['state']) self.assertEqual(mock.sentinel.old_vm_state, states['old_state']) self.assertEqual(mock.sentinel.task_state, states['old_task_state']) self.assertEqual(mock.sentinel.task_state, states['new_task_state']) states = notifications._compute_states_payload( instance, 
old_vm_state=mock.sentinel.old_vm_state, old_task_state=mock.sentinel.old_task_state, new_vm_state=mock.sentinel.new_vm_state, new_task_state=mock.sentinel.new_task_state, ) self.assertEqual(mock.sentinel.new_vm_state, states['state']) self.assertEqual(mock.sentinel.old_vm_state, states['old_state']) self.assertEqual(mock.sentinel.old_task_state, states['old_task_state']) self.assertEqual(mock.sentinel.new_task_state, states['new_task_state']) nova-13.0.0/nova/tests/unit/test_ipv6.py0000664000567000056710000000657412701407773021304 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Test suite for IPv6.""" from nova import ipv6 from nova import test class IPv6RFC2462TestCase(test.NoDBTestCase): """Unit tests for IPv6 rfc2462 backend operations.""" def setUp(self): super(IPv6RFC2462TestCase, self).setUp() self.flags(ipv6_backend='rfc2462') ipv6.reset_backend() def test_to_global(self): addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test') self.assertEqual(addr, '2001:db8::16:3eff:fe33:4455') def test_to_mac(self): mac = ipv6.to_mac('2001:db8::216:3eff:fe33:4455') self.assertEqual(mac, '00:16:3e:33:44:55') def test_to_global_with_bad_mac(self): bad_mac = '02:16:3e:33:44:5Z' self.assertRaises(TypeError, ipv6.to_global, '2001:db8::', bad_mac, 'test') def test_to_global_with_bad_prefix(self): bad_prefix = '82' self.assertRaises(TypeError, ipv6.to_global, bad_prefix, '2001:db8::216:3eff:fe33:4455', 'test') def test_to_global_with_bad_project(self): bad_project = 'non-existent-project-name' self.assertRaises(TypeError, ipv6.to_global, '2001:db8::', '2001:db8::a94a:8fe5:ff33:4455', bad_project) class IPv6AccountIdentiferTestCase(test.NoDBTestCase): """Unit tests for IPv6 account_identifier backend operations.""" def setUp(self): super(IPv6AccountIdentiferTestCase, self).setUp() self.flags(ipv6_backend='account_identifier') ipv6.reset_backend() def test_to_global(self): addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test') self.assertEqual(addr, '2001:db8::a94a:8fe5:ff33:4455') def test_to_mac(self): mac = ipv6.to_mac('2001:db8::a94a:8fe5:ff33:4455') self.assertEqual(mac, '02:16:3e:33:44:55') def test_to_global_with_bad_mac(self): bad_mac = '02:16:3e:33:44:5X' self.assertRaises(TypeError, ipv6.to_global, '2001:db8::', bad_mac, 'test') def test_to_global_with_bad_prefix(self): bad_prefix = '78' self.assertRaises(TypeError, ipv6.to_global, bad_prefix, '2001:db8::a94a:8fe5:ff33:4455', 'test') def test_to_global_with_bad_project(self): bad_project = 'non-existent-project-name' self.assertRaises(TypeError, ipv6.to_global, 
'2001:db8::', '2001:db8::a94a:8fe5:ff33:4455', bad_project) nova-13.0.0/nova/tests/unit/test_fixtures.py0000664000567000056710000003635512701410011022244 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy import sys import fixtures as fx import mock from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils import testtools from nova.db.sqlalchemy import api as session from nova import exception from nova.objects import base as obj_base from nova.tests import fixtures from nova.tests.unit import conf_fixture from nova import utils CONF = cfg.CONF class TestConfFixture(testtools.TestCase): """Test the Conf fixtures in Nova. This is a basic test that this fixture works like we expect. Expectations: 1. before using the fixture, a default value (api_paste_config) comes through untouched. 2. before using the fixture, a known default value that we override is correct. 3. after using the fixture a known value that we override is the new value. 4. after using the fixture we can set a default value to something random, and it will be reset once we are done. 
There are 2 copies of this test so that you can verify they do the right thing with: tox -e py27 test_fixtures -- --concurrency=1 As regardless of run order, their initial asserts would be impacted if the reset behavior isn't working correctly. """ def _test_override(self): self.assertEqual('api-paste.ini', CONF.api_paste_config) self.assertFalse(CONF.fake_network) self.useFixture(conf_fixture.ConfFixture()) CONF.set_default('api_paste_config', 'foo') self.assertTrue(CONF.fake_network) def test_override1(self): self._test_override() def test_override2(self): self._test_override() class TestOutputStream(testtools.TestCase): """Ensure Output Stream capture works as expected. This has the added benefit of providing a code example of how you can manipulate the output stream in your own tests. """ def test_output(self): self.useFixture(fx.EnvironmentVariable('OS_STDOUT_CAPTURE', '1')) self.useFixture(fx.EnvironmentVariable('OS_STDERR_CAPTURE', '1')) out = self.useFixture(fixtures.OutputStreamCapture()) sys.stdout.write("foo") sys.stderr.write("bar") self.assertEqual("foo", out.stdout) self.assertEqual("bar", out.stderr) # TODO(sdague): nuke the out and err buffers so it doesn't # make it to testr class TestLogging(testtools.TestCase): def test_default_logging(self): stdlog = self.useFixture(fixtures.StandardLogging()) root = logging.getLogger() # there should be a null handler as well at DEBUG self.assertEqual(2, len(root.handlers), root.handlers) log = logging.getLogger(__name__) log.info("at info") log.debug("at debug") self.assertIn("at info", stdlog.logger.output) self.assertNotIn("at debug", stdlog.logger.output) # broken debug messages should still explode, even though we # aren't logging them in the regular handler self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo") # and, ensure that one of the terrible log messages isn't # output at info warn_log = logging.getLogger('migrate.versioning.api') warn_log.info("warn_log at info, should be 
skipped") warn_log.error("warn_log at error") self.assertIn("warn_log at error", stdlog.logger.output) self.assertNotIn("warn_log at info", stdlog.logger.output) def test_debug_logging(self): self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1')) stdlog = self.useFixture(fixtures.StandardLogging()) root = logging.getLogger() # there should no longer be a null handler self.assertEqual(1, len(root.handlers), root.handlers) log = logging.getLogger(__name__) log.info("at info") log.debug("at debug") self.assertIn("at info", stdlog.logger.output) self.assertIn("at debug", stdlog.logger.output) class TestTimeout(testtools.TestCase): """Tests for our timeout fixture. Testing the actual timeout mechanism is beyond the scope of this test, because it's a pretty clear pass through to fixtures' timeout fixture, which tested in their tree. """ def test_scaling(self): # a bad scaling factor self.assertRaises(ValueError, fixtures.Timeout, 1, 0.5) # various things that should work. timeout = fixtures.Timeout(10) self.assertEqual(10, timeout.test_timeout) timeout = fixtures.Timeout("10") self.assertEqual(10, timeout.test_timeout) timeout = fixtures.Timeout("10", 2) self.assertEqual(20, timeout.test_timeout) class TestOSAPIFixture(testtools.TestCase): @mock.patch('nova.objects.Service.get_by_host_and_binary') @mock.patch('nova.objects.Service.create') def test_responds_to_version(self, mock_service_create, mock_get): """Ensure the OSAPI server responds to calls sensibly.""" self.useFixture(fixtures.OutputStreamCapture()) self.useFixture(fixtures.StandardLogging()) self.useFixture(conf_fixture.ConfFixture()) self.useFixture(fixtures.RPCFixture('nova.test')) api = self.useFixture(fixtures.OSAPIFixture()).api # request the API root, which provides us the versions of the API resp = api.api_request('/', strip_version=True) self.assertEqual(200, resp.status_code, resp.content) # request a bad root url, should be a 404 # # NOTE(sdague): this currently fails, as it falls into the 300 # 
dispatcher instead. This is a bug. The test case is left in # here, commented out until we can address it. # # resp = api.api_request('/foo', strip_version=True) # self.assertEqual(resp.status_code, 400, resp.content) # request a known bad url, and we should get a 404 resp = api.api_request('/foo') self.assertEqual(404, resp.status_code, resp.content) class TestDatabaseFixture(testtools.TestCase): def test_fixture_reset(self): # because this sets up reasonable db connection strings self.useFixture(conf_fixture.ConfFixture()) self.useFixture(fixtures.Database()) engine = session.get_engine() conn = engine.connect() result = conn.execute("select * from instance_types") rows = result.fetchall() self.assertEqual(5, len(rows), "Rows %s" % rows) # insert a 6th instance type, column 5 below is an int id # which has a constraint on it, so if new standard instance # types are added you have to bump it. conn.execute("insert into instance_types VALUES " "(NULL, NULL, NULL, 't1.test', 6, 4096, 2, 0, NULL, '87'" ", 1.0, 40, 0, 0, 1, 0)") result = conn.execute("select * from instance_types") rows = result.fetchall() self.assertEqual(6, len(rows), "Rows %s" % rows) # reset by invoking the fixture again # # NOTE(sdague): it's important to reestablish the db # connection because otherwise we have a reference to the old # in mem db. 
self.useFixture(fixtures.Database()) conn = engine.connect() result = conn.execute("select * from instance_types") rows = result.fetchall() self.assertEqual(5, len(rows), "Rows %s" % rows) def test_api_fixture_reset(self): # This sets up reasonable db connection strings self.useFixture(conf_fixture.ConfFixture()) self.useFixture(fixtures.Database(database='api')) engine = session.get_api_engine() conn = engine.connect() result = conn.execute("select * from cell_mappings") rows = result.fetchall() self.assertEqual(0, len(rows), "Rows %s" % rows) uuid = uuidutils.generate_uuid() conn.execute("insert into cell_mappings (uuid, name) VALUES " "('%s', 'fake-cell')" % (uuid,)) result = conn.execute("select * from cell_mappings") rows = result.fetchall() self.assertEqual(1, len(rows), "Rows %s" % rows) # reset by invoking the fixture again # # NOTE(sdague): it's important to reestablish the db # connection because otherwise we have a reference to the old # in mem db. self.useFixture(fixtures.Database(database='api')) conn = engine.connect() result = conn.execute("select * from cell_mappings") rows = result.fetchall() self.assertEqual(0, len(rows), "Rows %s" % rows) def test_fixture_cleanup(self): # because this sets up reasonable db connection strings self.useFixture(conf_fixture.ConfFixture()) fix = fixtures.Database() self.useFixture(fix) # manually do the cleanup that addCleanup will do fix.cleanup() # ensure the db contains nothing engine = session.get_engine() conn = engine.connect() schema = "".join(line for line in conn.connection.iterdump()) self.assertEqual(schema, "BEGIN TRANSACTION;COMMIT;") def test_api_fixture_cleanup(self): # This sets up reasonable db connection strings self.useFixture(conf_fixture.ConfFixture()) fix = fixtures.Database(database='api') self.useFixture(fix) # No data inserted by migrations so we need to add a row engine = session.get_api_engine() conn = engine.connect() uuid = uuidutils.generate_uuid() conn.execute("insert into cell_mappings 
(uuid, name) VALUES " "('%s', 'fake-cell')" % (uuid,)) result = conn.execute("select * from cell_mappings") rows = result.fetchall() self.assertEqual(1, len(rows), "Rows %s" % rows) # Manually do the cleanup that addCleanup will do fix.cleanup() # Ensure the db contains nothing engine = session.get_api_engine() conn = engine.connect() schema = "".join(line for line in conn.connection.iterdump()) self.assertEqual("BEGIN TRANSACTION;COMMIT;", schema) class TestDatabaseAtVersionFixture(testtools.TestCase): def test_fixture_schema_version(self): self.useFixture(conf_fixture.ConfFixture()) # In/after 317 aggregates did have uuid self.useFixture(fixtures.DatabaseAtVersion(318)) engine = session.get_engine() engine.connect() meta = sqlalchemy.MetaData(engine) aggregate = sqlalchemy.Table('aggregates', meta, autoload=True) self.assertTrue(hasattr(aggregate.c, 'uuid')) # Before 317, aggregates had no uuid self.useFixture(fixtures.DatabaseAtVersion(316)) engine = session.get_engine() engine.connect() meta = sqlalchemy.MetaData(engine) aggregate = sqlalchemy.Table('aggregates', meta, autoload=True) self.assertFalse(hasattr(aggregate.c, 'uuid')) engine.dispose() def test_fixture_after_database_fixture(self): self.useFixture(conf_fixture.ConfFixture()) self.useFixture(fixtures.Database()) self.useFixture(fixtures.DatabaseAtVersion(318)) class TestIndirectionAPIFixture(testtools.TestCase): def test_indirection_api(self): # Should initially be None self.assertIsNone(obj_base.NovaObject.indirection_api) # make sure the fixture correctly sets the value fix = fixtures.IndirectionAPIFixture('foo') self.useFixture(fix) self.assertEqual('foo', obj_base.NovaObject.indirection_api) # manually do the cleanup that addCleanup will do fix.cleanup() # ensure the initial value is restored self.assertIsNone(obj_base.NovaObject.indirection_api) class TestSpawnIsSynchronousFixture(testtools.TestCase): def test_spawn_patch(self): orig_spawn = utils.spawn_n fix = 
fixtures.SpawnIsSynchronousFixture() self.useFixture(fix) self.assertNotEqual(orig_spawn, utils.spawn_n) def test_spawn_passes_through(self): self.useFixture(fixtures.SpawnIsSynchronousFixture()) tester = mock.MagicMock() utils.spawn_n(tester.function, 'foo', bar='bar') tester.function.assert_called_once_with('foo', bar='bar') def test_spawn_return_has_wait(self): self.useFixture(fixtures.SpawnIsSynchronousFixture()) gt = utils.spawn(lambda x: '%s' % x, 'foo') foo = gt.wait() self.assertEqual('foo', foo) def test_spawn_n_return_has_wait(self): self.useFixture(fixtures.SpawnIsSynchronousFixture()) gt = utils.spawn_n(lambda x: '%s' % x, 'foo') foo = gt.wait() self.assertEqual('foo', foo) def test_spawn_has_link(self): self.useFixture(fixtures.SpawnIsSynchronousFixture()) gt = utils.spawn(mock.MagicMock) passed_arg = 'test' call_count = [] def fake(thread, param): self.assertEqual(gt, thread) self.assertEqual(passed_arg, param) call_count.append(1) gt.link(fake, passed_arg) self.assertEqual(1, len(call_count)) def test_spawn_n_has_link(self): self.useFixture(fixtures.SpawnIsSynchronousFixture()) gt = utils.spawn_n(mock.MagicMock) passed_arg = 'test' call_count = [] def fake(thread, param): self.assertEqual(gt, thread) self.assertEqual(passed_arg, param) call_count.append(1) gt.link(fake, passed_arg) self.assertEqual(1, len(call_count)) class TestBannedDBSchemaOperations(testtools.TestCase): def test_column(self): column = sqlalchemy.Column() with fixtures.BannedDBSchemaOperations(['Column']): self.assertRaises(exception.DBNotAllowed, column.drop) self.assertRaises(exception.DBNotAllowed, column.alter) def test_table(self): table = sqlalchemy.Table() with fixtures.BannedDBSchemaOperations(['Table']): self.assertRaises(exception.DBNotAllowed, table.drop) self.assertRaises(exception.DBNotAllowed, table.alter) class TestStableObjectJsonFixture(testtools.TestCase): def test_changes_sort(self): class TestObject(obj_base.NovaObject): def obj_what_changed(self): return ['z', 
'a'] obj = TestObject() self.assertEqual(['z', 'a'], obj.obj_to_primitive()['nova_object.changes']) with fixtures.StableObjectJsonFixture(): self.assertEqual(['a', 'z'], obj.obj_to_primitive()['nova_object.changes']) nova-13.0.0/nova/tests/unit/fake_network_cache_model.py0000664000567000056710000000707412701407773024337 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.network import model def new_ip(ip_dict=None, version=4): if version == 6: new_ip = dict(address='fd00::1:100', version=6) elif version == 4: new_ip = dict(address='192.168.1.100') ip_dict = ip_dict or {} new_ip.update(ip_dict) return model.IP(**new_ip) def new_fixed_ip(ip_dict=None, version=4): if version == 6: new_fixed_ip = dict(address='fd00::1:100', version=6) elif version == 4: new_fixed_ip = dict(address='192.168.1.100') ip_dict = ip_dict or {} new_fixed_ip.update(ip_dict) return model.FixedIP(**new_fixed_ip) def new_route(route_dict=None, version=4): if version == 6: new_route = dict( cidr='::/48', gateway=new_ip(dict(address='fd00::1:1'), version=6), interface='eth0') elif version == 4: new_route = dict( cidr='0.0.0.0/24', gateway=new_ip(dict(address='192.168.1.1')), interface='eth0') route_dict = route_dict or {} new_route.update(route_dict) return model.Route(**new_route) def new_subnet(subnet_dict=None, version=4): if version == 6: new_subnet = dict( cidr='fd00::/48', 
dns=[new_ip(dict(address='1:2:3:4::'), version=6), new_ip(dict(address='2:3:4:5::'), version=6)], gateway=new_ip(dict(address='fd00::1'), version=6), ips=[new_fixed_ip(dict(address='fd00::2'), version=6), new_fixed_ip(dict(address='fd00::3'), version=6)], routes=[new_route(version=6)], version=6) elif version == 4: new_subnet = dict( cidr='10.10.0.0/24', dns=[new_ip(dict(address='1.2.3.4')), new_ip(dict(address='2.3.4.5'))], gateway=new_ip(dict(address='10.10.0.1')), ips=[new_fixed_ip(dict(address='10.10.0.2')), new_fixed_ip(dict(address='10.10.0.3'))], routes=[new_route()]) subnet_dict = subnet_dict or {} new_subnet.update(subnet_dict) return model.Subnet(**new_subnet) def new_network(network_dict=None, version=4): if version == 6: new_net = dict( id=1, bridge='br0', label='public', subnets=[new_subnet(version=6), new_subnet(dict(cidr='ffff:ffff:ffff:ffff::'), version=6)]) elif version == 4: new_net = dict( id=1, bridge='br0', label='public', subnets=[new_subnet(), new_subnet(dict(cidr='255.255.255.255'))]) network_dict = network_dict or {} new_net.update(network_dict) return model.Network(**new_net) def new_vif(vif_dict=None, version=4): vif = dict( id=1, address='aa:aa:aa:aa:aa:aa', type='bridge', network=new_network(version=version)) vif_dict = vif_dict or {} vif.update(vif_dict) return model.VIF(**vif) nova-13.0.0/nova/tests/unit/fake_ldap.py0000664000567000056710000002206112701407773021254 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Fake LDAP server for test harness.

This class does very little error checking, and knows nothing about ldap
class definitions. It implements the minimum emulation of the python ldap
library to work with nova.
"""

import fnmatch

from oslo_serialization import jsonutils
import six
from six.moves import range


class Store(object):
    """Singleton holder for the process-wide fake backing store.

    The store itself is a _StorageDict; it is created lazily on first
    access and shared by every FakeLDAP connection.
    """

    def __init__(self):
        # Direct construction is forbidden; use Store.instance().
        if hasattr(self.__class__, '_instance'):
            raise Exception('Attempted to instantiate singleton')

    @classmethod
    def instance(cls):
        """Return the shared _StorageDict, creating it on first use."""
        if not hasattr(cls, '_instance'):
            cls._instance = _StorageDict()
        return cls._instance


class _StorageDict(dict):
    """In-memory dict exposing a small Redis-style hash API.

    Each top-level key maps to a dict of field -> value (the "hash"),
    which is what the h* methods below operate on.
    """

    def keys(self, pat=None):
        # Unlike dict.keys(), accepts an optional glob pattern and
        # returns only the keys matching it (fnmatch semantics).
        ret = super(_StorageDict, self).keys()
        if pat is not None:
            ret = fnmatch.filter(ret, pat)
        return ret

    def delete(self, key):
        # Remove key if present; silently ignore a missing key.
        try:
            del self[key]
        except KeyError:
            pass

    def flushdb(self):
        # Drop everything in the store.
        self.clear()

    def hgetall(self, key):
        """Returns the hash for the given key.

        Creates the hash if the key doesn't exist.
        """
        try:
            return self[key]
        except KeyError:
            self[key] = {}
            return self[key]

    def hget(self, key, field):
        # Return one field of the hash at key, creating an empty dict
        # entry for the field if it does not exist yet.
        hashdict = self.hgetall(key)
        try:
            return hashdict[field]
        except KeyError:
            hashdict[field] = {}
            return hashdict[field]

    def hset(self, key, field, val):
        # Set a single field of the hash at key.
        hashdict = self.hgetall(key)
        hashdict[field] = val

    def hmset(self, key, value_dict):
        # Set multiple fields of the hash at key from value_dict.
        hashdict = self.hgetall(key)
        for field, val in value_dict.items():
            hashdict[field] = val


# Search-scope constants mirroring the real python-ldap module.
SCOPE_BASE = 0
SCOPE_ONELEVEL = 1  # Not implemented
SCOPE_SUBTREE = 2
# Modify-operation constants mirroring the real python-ldap module.
MOD_ADD = 0
MOD_DELETE = 1
MOD_REPLACE = 2


class NO_SUCH_OBJECT(Exception):
    """Duplicate exception class from real LDAP module."""
    pass


class OBJECT_CLASS_VIOLATION(Exception):
    """Duplicate exception class from real LDAP module."""
    pass


class SERVER_DOWN(Exception):
    """Duplicate exception class from real LDAP module."""
    pass


def initialize(_uri):
    """Opens a fake connection with an LDAP server.

    The uri argument is accepted for API compatibility but ignored.
    """
    return FakeLDAP()


def _match_query(query, attrs):
    """Match an ldap query to an attribute dictionary.

    The characters &, |, and ! are supported in the query. No syntax checking
    is performed, so malformed queries will not work correctly.
    """
    # cut off the parentheses
    inner = query[1:-1]
    if inner.startswith('&'):
        # cut off the &
        l, r = _paren_groups(inner[1:])
        return _match_query(l, attrs) and _match_query(r, attrs)
    if inner.startswith('|'):
        # cut off the |
        l, r = _paren_groups(inner[1:])
        return _match_query(l, attrs) or _match_query(r, attrs)
    if inner.startswith('!'):
        # cut off the ! and the nested parentheses
        return not _match_query(query[2:-1], attrs)

    # base case: a single "(key=value)" term
    (k, _sep, v) = inner.partition('=')
    return _match(k, v, attrs)


def _paren_groups(source):
    """Split a string into parenthesized groups."""
    count = 0
    start = 0
    result = []
    for pos in range(len(source)):
        if source[pos] == '(':
            if count == 0:
                start = pos
            count += 1
        if source[pos] == ')':
            count -= 1
            if count == 0:
                result.append(source[start:pos + 1])
    return result


def _match(key, value, attrs):
    """Match a given key and value against an attribute list."""
    if key not in attrs:
        return False
    # This is a wild card search. Implemented as all or nothing for now.
    if value == "*":
        return True
    if key != "objectclass":
        return value in attrs[key]
    # it is an objectclass check, so check subclasses
    values = _subs(value)
    for v in values:
        if v in attrs[key]:
            return True
    return False


def _subs(value):
    """Returns a list of subclass strings.

    The strings represent the ldap object class plus any subclasses that
    inherit from it. Fakeldap doesn't know about the ldap object structure,
    so subclasses need to be defined manually in the dictionary below.
    """
    subs = {'groupOfNames': ['novaProject']}

    if value in subs:
        return [value] + subs[value]

    return [value]


def _from_json(encoded):
    """Convert attribute values from json representation.

    Args:
    encoded -- a json encoded string

    Returns a list of strings
    """
    return [str(x) for x in jsonutils.loads(encoded)]


def _to_json(unencoded):
    """Convert attribute values into json representation.

    Args:
    unencoded -- an unencoded string or list of strings.  If it
                 is a single string, it will be converted into a list.

    Returns a json string
    """
    return jsonutils.dumps(list(unencoded))


# Module-level flag: when set to True by a test, every FakeLDAP call
# raises SERVER_DOWN to simulate an unreachable server.
server_fail = False


class FakeLDAP(object):
    """Fake LDAP connection."""

    def simple_bind_s(self, dn, password):
        """This method is ignored, but provided for compatibility."""
        if server_fail:
            raise SERVER_DOWN()
        pass

    def unbind_s(self):
        """This method is ignored, but provided for compatibility."""
        if server_fail:
            raise SERVER_DOWN()
        pass

    def add_s(self, dn, attr):
        """Add an object with the specified attributes at dn."""
        if server_fail:
            raise SERVER_DOWN()

        key = "%s%s" % (self.__prefix, dn)
        # attr is an iterable of (attribute, values) pairs; values are
        # stored json-encoded in the backing store.
        value_dict = {k: _to_json(v) for k, v in attr}
        Store.instance().hmset(key, value_dict)

    def delete_s(self, dn):
        """Remove the ldap object at specified dn."""
        if server_fail:
            raise SERVER_DOWN()

        Store.instance().delete("%s%s" % (self.__prefix, dn))

    def modify_s(self, dn, attrs):
        """Modify the object at dn using the attribute list.

        :param dn: a dn
        :param attrs: a list of tuples in the following form::

            ([MOD_ADD | MOD_DELETE | MOD_REPLACE], attribute, value)
        """
        if server_fail:
            raise SERVER_DOWN()

        store = Store.instance()
        key = "%s%s" % (self.__prefix, dn)

        for cmd, k, v in attrs:
            values = _from_json(store.hget(key, k))
            if cmd == MOD_ADD:
                values.append(v)
            elif cmd == MOD_REPLACE:
                values = [v]
            else:
                # MOD_DELETE: drop the single value from the list
                values.remove(v)

            store.hset(key, k, _to_json(values))

    def modrdn_s(self, dn, newrdn):
        # Rename an entry: look up the old entry, re-add its attributes
        # under the new rdn (keeping the parent dn), then delete the old
        # entry.  Raises NO_SUCH_OBJECT if dn does not exist.
        oldobj = self.search_s(dn, SCOPE_BASE)
        if not oldobj:
            raise NO_SUCH_OBJECT()
        newdn = "%s,%s" % (newrdn, dn.partition(',')[2])
        newattrs = oldobj[0][1]

        modlist = []
        for attrtype in newattrs.keys():
            modlist.append((attrtype, newattrs[attrtype]))

        self.add_s(newdn, modlist)
        self.delete_s(dn)

    def search_s(self, dn, scope, query=None, fields=None):
        """Search for all matching objects under dn using the query.

        Args:
        dn -- dn to search under
        scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported
        query -- query to filter objects by
        fields -- fields to return. Returns all fields if not specified
        """
        if server_fail:
            raise SERVER_DOWN()

        if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
            raise NotImplementedError(str(scope))

        store = Store.instance()
        if scope == SCOPE_BASE:
            pattern = "%s%s" % (self.__prefix, dn)
            keys = store.keys(pattern)
        else:
            # SCOPE_SUBTREE: any key whose dn ends with the search base
            keys = store.keys("%s*%s" % (self.__prefix, dn))

        if not keys:
            raise NO_SUCH_OBJECT()

        objects = []
        for key in keys:
            # get the attributes from the store
            attrs = store.hgetall(key)
            # turn the values from the store into lists
            attrs = {k: _from_json(v)
                     for k, v in six.iteritems(attrs)}
            # filter the objects by query
            if not query or _match_query(query, attrs):
                # filter the attributes by fields
                attrs = {k: v for k, v in six.iteritems(attrs)
                         if not fields or k in fields}
                objects.append((key[len(self.__prefix):], attrs))
        return objects

    @property
    def __prefix(self):
        """Get the prefix to use for all keys."""
        return 'ldap:'
nova-13.0.0/nova/tests/unit/virt/0000775000567000056710000000000012701410205017737 5ustar  jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/virt/test_driver.py0000664000567000056710000000525512701407773022662 0ustar  jenkinsjenkins00000000000000# Copyright (c) 2013 Citrix Systems, Inc.
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import fixture as fixture_config

from nova import test
from nova.virt import driver


class FakeDriver(object):
    """Minimal stand-in driver that records its constructor arguments.

    Used below to verify that driver_dict_from_config passes positional
    and keyword arguments through to the configured driver classes.
    """

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs


class FakeDriver2(FakeDriver):
    """Second fake driver class, to check per-key class selection."""
    pass


class ToDriverRegistryTestCase(test.NoDBTestCase):
    """Tests for nova.virt.driver.driver_dict_from_config."""

    def assertDriverInstance(self, inst, class_, *args, **kwargs):
        # Helper: the instance must be exactly class_ and must have
        # received the given constructor args/kwargs.
        self.assertEqual(class_, inst.__class__)
        self.assertEqual(args, inst.args)
        self.assertEqual(kwargs, inst.kwargs)

    def test_driver_dict_from_config(self):
        # Each "key=import.path" entry should yield one instantiated
        # driver under that key, sharing the common args/kwargs.
        drvs = driver.driver_dict_from_config(
            [
                'key1=nova.tests.unit.virt.test_driver.FakeDriver',
                'key2=nova.tests.unit.virt.test_driver.FakeDriver2',
            ], 'arg1', 'arg2', param1='value1', param2='value2'
        )

        self.assertEqual(
            sorted(['key1', 'key2']),
            sorted(drvs.keys())
        )

        self.assertDriverInstance(
            drvs['key1'],
            FakeDriver, 'arg1', 'arg2', param1='value1',
            param2='value2')

        self.assertDriverInstance(
            drvs['key2'],
            FakeDriver2, 'arg1', 'arg2', param1='value1',
            param2='value2')


class DriverMethodTestCase(test.NoDBTestCase):
    """Tests for nova.virt.driver.is_xenapi()."""

    def setUp(self):
        super(DriverMethodTestCase, self).setUp()
        # Config fixture so compute_driver can be overridden per test.
        self.CONF = self.useFixture(fixture_config.Config()).conf

    def test_is_xenapi_true(self):
        # is_xenapi() should be True only for the XenAPI driver.
        self.CONF.set_override('compute_driver', 'xenapi.XenAPIDriver',
                               enforce_type=True)
        self.assertTrue(driver.is_xenapi())

    def test_is_xenapi_false(self):
        # Every other driver name (and unset) must report False.
        driver_names = ('libvirt.LibvirtDriver', 'fake.FakeDriver',
                        'ironic.IronicDriver', 'vmwareapi.VMwareVCDriver',
                        'hyperv.HyperVDriver', None)
        for driver_name in driver_names:
            self.CONF.set_override('compute_driver', driver_name,
                                   enforce_type=True)
            self.assertFalse(driver.is_xenapi())
nova-13.0.0/nova/tests/unit/virt/test_hardware.py0000664000567000056710000034417512701407773023173 0ustar  jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import uuid import mock from oslo_serialization import jsonutils import six from nova import context from nova import exception from nova import objects from nova.objects import base as base_obj from nova.objects import fields from nova.pci import stats from nova import test from nova.virt import hardware as hw class InstanceInfoTests(test.NoDBTestCase): def test_instance_info_default(self): ii = hw.InstanceInfo() self.assertIsNone(ii.state) self.assertIsNone(ii.id) self.assertEqual(0, ii.max_mem_kb) self.assertEqual(0, ii.mem_kb) self.assertEqual(0, ii.num_cpu) self.assertEqual(0, ii.cpu_time_ns) def test_instance_info(self): ii = hw.InstanceInfo(state='fake-state', max_mem_kb=1, mem_kb=2, num_cpu=3, cpu_time_ns=4, id='fake-id') self.assertEqual('fake-state', ii.state) self.assertEqual('fake-id', ii.id) self.assertEqual(1, ii.max_mem_kb) self.assertEqual(2, ii.mem_kb) self.assertEqual(3, ii.num_cpu) self.assertEqual(4, ii.cpu_time_ns) def test_instance_infoi_equals(self): ii1 = hw.InstanceInfo(state='fake-state', max_mem_kb=1, mem_kb=2, num_cpu=3, cpu_time_ns=4, id='fake-id') ii2 = hw.InstanceInfo(state='fake-state', max_mem_kb=1, mem_kb=2, num_cpu=3, cpu_time_ns=4, id='fake-id') ii3 = hw.InstanceInfo(state='fake-estat', max_mem_kb=11, mem_kb=22, num_cpu=33, cpu_time_ns=44, id='fake-di') self.assertEqual(ii1, ii2) self.assertNotEqual(ii1, ii3) class CpuSetTestCase(test.NoDBTestCase): def test_get_vcpu_pin_set(self): self.flags(vcpu_pin_set="1-3,5,^2") cpuset_ids = hw.get_vcpu_pin_set() self.assertEqual(set([1, 3, 5]), cpuset_ids) def 
test_parse_cpu_spec_none_returns_none(self): self.flags(vcpu_pin_set=None) cpuset_ids = hw.get_vcpu_pin_set() self.assertIsNone(cpuset_ids) def test_parse_cpu_spec_valid_syntax_works(self): cpuset_ids = hw.parse_cpu_spec("1") self.assertEqual(set([1]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1,2") self.assertEqual(set([1, 2]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec(", , 1 , ,, 2, ,") self.assertEqual(set([1, 2]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1-1") self.assertEqual(set([1]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec(" 1 - 1, 1 - 2 , 1 -3") self.assertEqual(set([1, 2, 3]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1,^2") self.assertEqual(set([1]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1-2, ^1") self.assertEqual(set([2]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1-3,5,^2") self.assertEqual(set([1, 3, 5]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec(" 1 - 3 , ^2, 5") self.assertEqual(set([1, 3, 5]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec(" 1,1, ^1") self.assertEqual(set([]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("^0-1") self.assertEqual(set([]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("0-3,^1-2") self.assertEqual(set([0, 3]), cpuset_ids) def test_parse_cpu_spec_invalid_syntax_raises(self): self.assertRaises(exception.Invalid, hw.parse_cpu_spec, " -1-3,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3-,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "-3,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3,5,^2^") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3,5,^2-") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "--13,^^5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "a-3,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-a,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3,b,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, 
"1-3,5,^c") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "3 - 1, 5 , ^ 2 ") def test_format_cpu_spec(self): cpus = set([]) spec = hw.format_cpu_spec(cpus) self.assertEqual("", spec) cpus = [] spec = hw.format_cpu_spec(cpus) self.assertEqual("", spec) cpus = set([1, 3]) spec = hw.format_cpu_spec(cpus) self.assertEqual("1,3", spec) cpus = [1, 3] spec = hw.format_cpu_spec(cpus) self.assertEqual("1,3", spec) cpus = set([1, 2, 4, 6]) spec = hw.format_cpu_spec(cpus) self.assertEqual("1-2,4,6", spec) cpus = [1, 2, 4, 6] spec = hw.format_cpu_spec(cpus) self.assertEqual("1-2,4,6", spec) cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]) spec = hw.format_cpu_spec(cpus) self.assertEqual("10-11,13-16,19-20,40,42,48", spec) cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48] spec = hw.format_cpu_spec(cpus) self.assertEqual("10-11,13-16,19-20,40,42,48", spec) cpus = set([1, 2, 4, 6]) spec = hw.format_cpu_spec(cpus, allow_ranges=False) self.assertEqual("1,2,4,6", spec) cpus = [1, 2, 4, 6] spec = hw.format_cpu_spec(cpus, allow_ranges=False) self.assertEqual("1,2,4,6", spec) cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]) spec = hw.format_cpu_spec(cpus, allow_ranges=False) self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec) cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48] spec = hw.format_cpu_spec(cpus, allow_ranges=False) self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec) class VCPUTopologyTest(test.NoDBTestCase): def test_validate_config(self): testdata = [ { # Flavor sets preferred topology only "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", }), "image": { "properties": {} }, "expect": ( 8, 2, 1, 65536, 65536, 65536 ) }, { # Image topology overrides flavor "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", "hw:cpu_max_threads": "2", }), "image": { "properties": { 
"hw_cpu_sockets": "4", "hw_cpu_cores": "2", "hw_cpu_threads": "2", } }, "expect": ( 4, 2, 2, 65536, 65536, 2, ) }, { # Partial image topology overrides flavor "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", }), "image": { "properties": { "hw_cpu_sockets": "2", } }, "expect": ( 2, -1, -1, 65536, 65536, 65536, ) }, { # Restrict use of threads "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_threads": "2", }), "image": { "properties": { "hw_cpu_max_threads": "1", } }, "expect": ( -1, -1, -1, 65536, 65536, 1, ) }, { # Force use of at least two sockets "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": {} }, "expect": ( -1, -1, -1, 65536, 8, 1 ) }, { # Image limits reduce flavor "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": { "hw_cpu_max_cores": "4", } }, "expect": ( -1, -1, -1, 65536, 4, 1 ) }, { # Image limits kill flavor preferred "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "2", "hw:cpu_cores": "8", "hw:cpu_threads": "1", }), "image": { "properties": { "hw_cpu_max_cores": "4", } }, "expect": ( -1, -1, -1, 65536, 4, 65536 ) }, { # Image limits cannot exceed flavor "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": { "hw_cpu_max_cores": "16", } }, "expect": exception.ImageVCPULimitsRangeExceeded, }, { # Image preferred cannot exceed flavor "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": { "hw_cpu_cores": "16", } }, "expect": exception.ImageVCPUTopologyRangeExceeded, }, ] for topo_test in testdata: image_meta = 
objects.ImageMeta.from_dict(topo_test["image"]) if type(topo_test["expect"]) == tuple: (preferred, maximum) = hw._get_cpu_topology_constraints( topo_test["flavor"], image_meta) self.assertEqual(topo_test["expect"][0], preferred.sockets) self.assertEqual(topo_test["expect"][1], preferred.cores) self.assertEqual(topo_test["expect"][2], preferred.threads) self.assertEqual(topo_test["expect"][3], maximum.sockets) self.assertEqual(topo_test["expect"][4], maximum.cores) self.assertEqual(topo_test["expect"][5], maximum.threads) else: self.assertRaises(topo_test["expect"], hw._get_cpu_topology_constraints, topo_test["flavor"], image_meta) def test_possible_topologies(self): testdata = [ { "allow_threads": True, "vcpus": 8, "maxsockets": 8, "maxcores": 8, "maxthreads": 2, "expect": [ [8, 1, 1], [4, 2, 1], [2, 4, 1], [1, 8, 1], [4, 1, 2], [2, 2, 2], [1, 4, 2], ] }, { "allow_threads": False, "vcpus": 8, "maxsockets": 8, "maxcores": 8, "maxthreads": 2, "expect": [ [8, 1, 1], [4, 2, 1], [2, 4, 1], [1, 8, 1], ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 1024, "maxcores": 1024, "maxthreads": 2, "expect": [ [8, 1, 1], [4, 2, 1], [2, 4, 1], [1, 8, 1], [4, 1, 2], [2, 2, 2], [1, 4, 2], ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 1024, "maxcores": 1, "maxthreads": 2, "expect": [ [8, 1, 1], [4, 1, 2], ] }, { "allow_threads": True, "vcpus": 7, "maxsockets": 8, "maxcores": 8, "maxthreads": 2, "expect": [ [7, 1, 1], [1, 7, 1], ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 2, "maxcores": 1, "maxthreads": 1, "expect": exception.ImageVCPULimitsRangeImpossible, }, { "allow_threads": False, "vcpus": 8, "maxsockets": 2, "maxcores": 1, "maxthreads": 4, "expect": exception.ImageVCPULimitsRangeImpossible, }, ] for topo_test in testdata: if type(topo_test["expect"]) == list: actual = [] for topology in hw._get_possible_cpu_topologies( topo_test["vcpus"], objects.VirtCPUTopology( sockets=topo_test["maxsockets"], cores=topo_test["maxcores"], 
threads=topo_test["maxthreads"]), topo_test["allow_threads"]): actual.append([topology.sockets, topology.cores, topology.threads]) self.assertEqual(topo_test["expect"], actual) else: self.assertRaises(topo_test["expect"], hw._get_possible_cpu_topologies, topo_test["vcpus"], objects.VirtCPUTopology( sockets=topo_test["maxsockets"], cores=topo_test["maxcores"], threads=topo_test["maxthreads"]), topo_test["allow_threads"]) def test_sorting_topologies(self): testdata = [ { "allow_threads": True, "vcpus": 8, "maxsockets": 8, "maxcores": 8, "maxthreads": 2, "sockets": 4, "cores": 2, "threads": 1, "expect": [ [4, 2, 1], # score = 2 [8, 1, 1], # score = 1 [2, 4, 1], # score = 1 [1, 8, 1], # score = 1 [4, 1, 2], # score = 1 [2, 2, 2], # score = 1 [1, 4, 2], # score = 1 ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 1024, "maxcores": 1024, "maxthreads": 2, "sockets": -1, "cores": 4, "threads": -1, "expect": [ [2, 4, 1], # score = 1 [1, 4, 2], # score = 1 [8, 1, 1], # score = 0 [4, 2, 1], # score = 0 [1, 8, 1], # score = 0 [4, 1, 2], # score = 0 [2, 2, 2], # score = 0 ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 1024, "maxcores": 1, "maxthreads": 2, "sockets": -1, "cores": -1, "threads": 2, "expect": [ [4, 1, 2], # score = 1 [8, 1, 1], # score = 0 ] }, { "allow_threads": False, "vcpus": 8, "maxsockets": 1024, "maxcores": 1, "maxthreads": 2, "sockets": -1, "cores": -1, "threads": 2, "expect": [ [8, 1, 1], # score = 0 ] }, ] for topo_test in testdata: actual = [] possible = hw._get_possible_cpu_topologies( topo_test["vcpus"], objects.VirtCPUTopology(sockets=topo_test["maxsockets"], cores=topo_test["maxcores"], threads=topo_test["maxthreads"]), topo_test["allow_threads"]) tops = hw._sort_possible_cpu_topologies( possible, objects.VirtCPUTopology(sockets=topo_test["sockets"], cores=topo_test["cores"], threads=topo_test["threads"])) for topology in tops: actual.append([topology.sockets, topology.cores, topology.threads]) self.assertEqual(topo_test["expect"], 
actual) def test_best_config(self): testdata = [ { # Flavor sets preferred topology only "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1" }), "image": { "properties": {} }, "expect": [8, 2, 1], }, { # Image topology overrides flavor "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", "hw:cpu_maxthreads": "2", }), "image": { "properties": { "hw_cpu_sockets": "4", "hw_cpu_cores": "2", "hw_cpu_threads": "2", } }, "expect": [4, 2, 2], }, { # Image topology overrides flavor "allow_threads": False, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", "hw:cpu_maxthreads": "2", }), "image": { "properties": { "hw_cpu_sockets": "4", "hw_cpu_cores": "2", "hw_cpu_threads": "2", } }, "expect": [8, 2, 1], }, { # Partial image topology overrides flavor "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1" }), "image": { "properties": { "hw_cpu_sockets": "2" } }, "expect": [2, 8, 1], }, { # Restrict use of threads "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_threads": "1" }), "image": { "properties": {} }, "expect": [16, 1, 1] }, { # Force use of at least two sockets "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": {} }, "expect": [16, 1, 1] }, { # Image limits reduce flavor "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_sockets": "8", "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": { "hw_cpu_max_sockets": 4, } }, "expect": [4, 4, 1] }, { # 
Image limits kill flavor preferred "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "2", "hw:cpu_cores": "8", "hw:cpu_threads": "1", }), "image": { "properties": { "hw_cpu_max_cores": 4, } }, "expect": [16, 1, 1] }, { # NUMA needs threads, only cores requested by flavor "allow_threads": True, "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_cores": "2", }), "image": { "properties": { "hw_cpu_max_cores": 2, } }, "numa_topology": objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1]), memory=1024, cpu_topology=objects.VirtCPUTopology( sockets=1, cores=1, threads=2)), objects.InstanceNUMACell( id=1, cpuset=set([2, 3]), memory=1024)]), "expect": [1, 2, 2] }, { # NUMA needs threads, but more than requested by flavor - the # least amount of threads wins "allow_threads": True, "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_threads": "2", }), "image": { "properties": {} }, "numa_topology": objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=objects.VirtCPUTopology( sockets=1, cores=1, threads=4))]), "expect": [2, 1, 2] }, { # NUMA needs threads, but more than limit in flavor - the # least amount of threads which divides into the vcpu # count wins. So with desired 4, max of 3, and # vcpu count of 4, we should get 2 threads. 
"allow_threads": True, "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_max_sockets": "5", "hw:cpu_max_cores": "2", "hw:cpu_max_threads": "3", }), "image": { "properties": {} }, "numa_topology": objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=objects.VirtCPUTopology( sockets=1, cores=1, threads=4))]), "expect": [2, 1, 2] }, { # NUMA needs threads, but thread count does not # divide into flavor vcpu count, so we must # reduce thread count to closest divisor "allow_threads": True, "flavor": objects.Flavor(vcpus=6, memory_mb=2048, extra_specs={ }), "image": { "properties": {} }, "numa_topology": objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=objects.VirtCPUTopology( sockets=1, cores=1, threads=4))]), "expect": [2, 1, 3] }, { # NUMA needs different number of threads per cell - the least # amount of threads wins "allow_threads": True, "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={}), "image": { "properties": {} }, "numa_topology": objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=1024, cpu_topology=objects.VirtCPUTopology( sockets=1, cores=2, threads=2)), objects.InstanceNUMACell( id=1, cpuset=set([4, 5, 6, 7]), memory=1024, cpu_topology=objects.VirtCPUTopology( sockets=1, cores=1, threads=4))]), "expect": [4, 1, 2] }, ] for topo_test in testdata: image_meta = objects.ImageMeta.from_dict(topo_test["image"]) topology = hw._get_desirable_cpu_topologies( topo_test["flavor"], image_meta, topo_test["allow_threads"], topo_test.get("numa_topology"))[0] self.assertEqual(topo_test["expect"][0], topology.sockets) self.assertEqual(topo_test["expect"][1], topology.cores) self.assertEqual(topo_test["expect"][2], topology.threads) class NUMATopologyTest(test.NoDBTestCase): def test_topology_constraints(self): testdata = [ { "flavor": 
objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ }), "image": { }, "expect": None, }, { "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2 }), "image": { }, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=1024), objects.InstanceNUMACell( id=1, cpuset=set([4, 5, 6, 7]), memory=1024), ]), }, { "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:mem_page_size": 2048 }), "image": { }, "expect": objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048, pagesize=2048) ]), }, { # vcpus is not a multiple of nodes, so it # is an error to not provide cpu/mem mapping "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 3 }), "image": { }, "expect": exception.ImageNUMATopologyAsymmetric, }, { "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 3, "hw:numa_cpus.0": "0-3", "hw:numa_mem.0": "1024", "hw:numa_cpus.1": "4,6", "hw:numa_mem.1": "512", "hw:numa_cpus.2": "5,7", "hw:numa_mem.2": "512", }), "image": { }, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=1024), objects.InstanceNUMACell( id=1, cpuset=set([4, 6]), memory=512), objects.InstanceNUMACell( id=2, cpuset=set([5, 7]), memory=512) ]), }, { "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ }), "image": { "properties": { "hw_numa_nodes": 3, "hw_numa_cpus.0": "0-3", "hw_numa_mem.0": "1024", "hw_numa_cpus.1": "4,6", "hw_numa_mem.1": "512", "hw_numa_cpus.2": "5,7", "hw_numa_mem.2": "512", }, }, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=1024), objects.InstanceNUMACell( id=1, cpuset=set([4, 6]), memory=512), objects.InstanceNUMACell( id=2, cpuset=set([5, 7]), memory=512) ]), }, { # Request a CPU that is out of range # wrt vCPU count "flavor": 
objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 1, "hw:numa_cpus.0": "0-16", "hw:numa_mem.0": "2048", }), "image": { }, "expect": exception.ImageNUMATopologyCPUOutOfRange, }, { # Request the same CPU in two nodes "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-7", "hw:numa_mem.0": "1024", "hw:numa_cpus.1": "0-7", "hw:numa_mem.1": "1024", }), "image": { }, "expect": exception.ImageNUMATopologyCPUDuplicates, }, { # Request with some CPUs not assigned "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-2", "hw:numa_mem.0": "1024", "hw:numa_cpus.1": "3-4", "hw:numa_mem.1": "1024", }), "image": { }, "expect": exception.ImageNUMATopologyCPUsUnassigned, }, { # Request too little memory vs flavor total "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-3", "hw:numa_mem.0": "512", "hw:numa_cpus.1": "4-7", "hw:numa_mem.1": "512", }), "image": { }, "expect": exception.ImageNUMATopologyMemoryOutOfRange, }, { # Request too much memory vs flavor total "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-3", "hw:numa_mem.0": "1576", "hw:numa_cpus.1": "4-7", "hw:numa_mem.1": "1576", }), "image": { }, "expect": exception.ImageNUMATopologyMemoryOutOfRange, }, { # Request missing mem.0 "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-3", "hw:numa_mem.1": "1576", }), "image": { }, "expect": exception.ImageNUMATopologyIncomplete, }, { # Request missing cpu.0 "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_mem.0": "1576", "hw:numa_cpus.1": "4-7", }), "image": { }, "expect": exception.ImageNUMATopologyIncomplete, }, { # Image attempts to override flavor "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, }), "image": { 
"properties": { "hw_numa_nodes": 4} }, "expect": exception.ImageNUMATopologyForbidden, }, { # NUMA + CPU pinning requested in the flavor "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED }), "image": { }, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( id=1, cpuset=set([2, 3]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) }, { # no NUMA + CPU pinning requested in the flavor "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED }), "image": { }, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) }, { # NUMA + CPU pinning requested in the image "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2 }), "image": { "properties": { "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED }}, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( id=1, cpuset=set([2, 3]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) }, { # no NUMA + CPU pinning requested in the image "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={}), "image": { "properties": { "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED }}, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) }, { # Invalid CPU pinning override "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED }), "image": { 
"properties": { "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED} }, "expect": exception.ImageCPUPinningForbidden, }, { # Invalid CPU pinning policy with realtime "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED, "hw:cpu_realtime": "yes", }), "image": { "properties": {} }, "expect": exception.RealtimeConfigurationInvalid, }, { # Invalid CPU thread pinning override "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED, "hw:cpu_thread_policy": fields.CPUThreadAllocationPolicy.ISOLATE, }), "image": { "properties": { "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED, "hw_cpu_thread_policy": fields.CPUThreadAllocationPolicy.REQUIRE, } }, "expect": exception.ImageCPUThreadPolicyForbidden, }, { # Invalid CPU pinning policy with CPU thread pinning "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED, "hw:cpu_thread_policy": fields.CPUThreadAllocationPolicy.ISOLATE, }), "image": { "properties": {} }, "expect": exception.CPUThreadPolicyConfigurationInvalid, }, { # Invalid vCPUs mask with realtime "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_policy": "dedicated", "hw:cpu_realtime": "yes", }), "image": { "properties": {} }, "expect": exception.RealtimeMaskNotFoundOrInvalid, }, ] for testitem in testdata: image_meta = objects.ImageMeta.from_dict(testitem["image"]) if testitem["expect"] is None: topology = hw.numa_get_constraints( testitem["flavor"], image_meta) self.assertIsNone(topology) elif type(testitem["expect"]) == type: self.assertRaises(testitem["expect"], hw.numa_get_constraints, testitem["flavor"], image_meta) else: topology = hw.numa_get_constraints( testitem["flavor"], image_meta) self.assertIsNotNone(topology) self.assertEqual(len(testitem["expect"].cells), len(topology.cells)) for i in range(len(topology.cells)): 
self.assertEqual(testitem["expect"].cells[i].id, topology.cells[i].id) self.assertEqual(testitem["expect"].cells[i].cpuset, topology.cells[i].cpuset) self.assertEqual(testitem["expect"].cells[i].memory, topology.cells[i].memory) self.assertEqual(testitem["expect"].cells[i].pagesize, topology.cells[i].pagesize) self.assertEqual(testitem["expect"].cells[i].cpu_pinning, topology.cells[i].cpu_pinning) def test_host_usage_contiguous(self): hpages0_4K = objects.NUMAPagesTopology(size_kb=4, total=256, used=0) hpages0_2M = objects.NUMAPagesTopology(size_kb=2048, total=0, used=1) hpages1_4K = objects.NUMAPagesTopology(size_kb=4, total=128, used=2) hpages1_2M = objects.NUMAPagesTopology(size_kb=2048, total=0, used=3) hosttopo = objects.NUMATopology(cells=[ objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[ hpages0_4K, hpages0_2M], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([4, 6]), memory=512, cpu_usage=0, memory_usage=0, mempages=[ hpages1_4K, hpages1_2M], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=2, cpuset=set([5, 7]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), ]) instance1 = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256), objects.InstanceNUMACell(id=1, cpuset=set([4]), memory=256), ]) instance2 = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256), objects.InstanceNUMACell(id=1, cpuset=set([5, 7]), memory=256), ]) hostusage = hw.numa_usage_from_instances( hosttopo, [instance1, instance2]) self.assertEqual(len(hosttopo), len(hostusage)) self.assertIsInstance(hostusage.cells[0], objects.NUMACell) self.assertEqual(hosttopo.cells[0].cpuset, hostusage.cells[0].cpuset) self.assertEqual(hosttopo.cells[0].memory, hostusage.cells[0].memory) self.assertEqual(hostusage.cells[0].cpu_usage, 5) self.assertEqual(hostusage.cells[0].memory_usage, 512) 
self.assertEqual(hostusage.cells[0].mempages, [ hpages0_4K, hpages0_2M]) self.assertIsInstance(hostusage.cells[1], objects.NUMACell) self.assertEqual(hosttopo.cells[1].cpuset, hostusage.cells[1].cpuset) self.assertEqual(hosttopo.cells[1].memory, hostusage.cells[1].memory) self.assertEqual(hostusage.cells[1].cpu_usage, 3) self.assertEqual(hostusage.cells[1].memory_usage, 512) self.assertEqual(hostusage.cells[1].mempages, [ hpages1_4K, hpages1_2M]) self.assertEqual(256, hpages0_4K.total) self.assertEqual(0, hpages0_4K.used) self.assertEqual(0, hpages0_2M.total) self.assertEqual(1, hpages0_2M.used) self.assertIsInstance(hostusage.cells[2], objects.NUMACell) self.assertEqual(hosttopo.cells[2].cpuset, hostusage.cells[2].cpuset) self.assertEqual(hosttopo.cells[2].memory, hostusage.cells[2].memory) self.assertEqual(hostusage.cells[2].cpu_usage, 0) self.assertEqual(hostusage.cells[2].memory_usage, 0) self.assertEqual(128, hpages1_4K.total) self.assertEqual(2, hpages1_4K.used) self.assertEqual(0, hpages1_2M.total) self.assertEqual(3, hpages1_2M.used) def test_host_usage_sparse(self): hosttopo = objects.NUMATopology(cells=[ objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=5, cpuset=set([4, 6]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=6, cpuset=set([5, 7]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), ]) instance1 = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256), objects.InstanceNUMACell(id=6, cpuset=set([4]), memory=256), ]) instance2 = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256, cpu_usage=0, memory_usage=0, mempages=[]), objects.InstanceNUMACell(id=5, cpuset=set([5, 7]), memory=256, cpu_usage=0, memory_usage=0, mempages=[]), ]) hostusage = 
hw.numa_usage_from_instances( hosttopo, [instance1, instance2]) self.assertEqual(len(hosttopo), len(hostusage)) self.assertIsInstance(hostusage.cells[0], objects.NUMACell) self.assertEqual(hosttopo.cells[0].id, hostusage.cells[0].id) self.assertEqual(hosttopo.cells[0].cpuset, hostusage.cells[0].cpuset) self.assertEqual(hosttopo.cells[0].memory, hostusage.cells[0].memory) self.assertEqual(hostusage.cells[0].cpu_usage, 5) self.assertEqual(hostusage.cells[0].memory_usage, 512) self.assertIsInstance(hostusage.cells[1], objects.NUMACell) self.assertEqual(hosttopo.cells[1].id, hostusage.cells[1].id) self.assertEqual(hosttopo.cells[1].cpuset, hostusage.cells[1].cpuset) self.assertEqual(hosttopo.cells[1].memory, hostusage.cells[1].memory) self.assertEqual(hostusage.cells[1].cpu_usage, 2) self.assertEqual(hostusage.cells[1].memory_usage, 256) self.assertIsInstance(hostusage.cells[2], objects.NUMACell) self.assertEqual(hosttopo.cells[2].cpuset, hostusage.cells[2].cpuset) self.assertEqual(hosttopo.cells[2].memory, hostusage.cells[2].memory) self.assertEqual(hostusage.cells[2].cpu_usage, 1) self.assertEqual(hostusage.cells[2].memory_usage, 256) def test_host_usage_culmulative_with_free(self): hosttopo = objects.NUMATopology(cells=[ objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024, cpu_usage=2, memory_usage=512, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([4, 6]), memory=512, cpu_usage=1, memory_usage=512, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=2, cpuset=set([5, 7]), memory=256, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), ]) instance1 = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=512), objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=256), objects.InstanceNUMACell(id=2, cpuset=set([4]), memory=256)]) hostusage = hw.numa_usage_from_instances( hosttopo, [instance1]) self.assertIsInstance(hostusage.cells[0], 
objects.NUMACell)
        # Cell usage is cumulative: pre-existing usage (cpu_usage=2,
        # memory_usage=512 on cell 0) plus instance1's per-cell demand.
        self.assertEqual(hostusage.cells[0].cpu_usage, 5)
        self.assertEqual(hostusage.cells[0].memory_usage, 1024)

        self.assertIsInstance(hostusage.cells[1], objects.NUMACell)
        self.assertEqual(hostusage.cells[1].cpu_usage, 2)
        self.assertEqual(hostusage.cells[1].memory_usage, 768)

        self.assertIsInstance(hostusage.cells[2], objects.NUMACell)
        self.assertEqual(hostusage.cells[2].cpu_usage, 1)
        self.assertEqual(hostusage.cells[2].memory_usage, 256)

        # Test freeing of resources
        # free=True subtracts instance1's usage again, restoring every
        # cell to its pre-existing usage figures.
        hostusage = hw.numa_usage_from_instances(
            hostusage, [instance1], free=True)
        self.assertEqual(hostusage.cells[0].cpu_usage, 2)
        self.assertEqual(hostusage.cells[0].memory_usage, 512)
        self.assertEqual(hostusage.cells[1].cpu_usage, 1)
        self.assertEqual(hostusage.cells[1].memory_usage, 512)
        self.assertEqual(hostusage.cells[2].cpu_usage, 0)
        self.assertEqual(hostusage.cells[2].memory_usage, 0)

    def test_topo_usage_none(self):
        """Degenerate inputs to numa_usage_from_instances().

        A None host topology yields None; an empty or None instance list
        leaves the host usage untouched at zero.
        """
        hosttopo = objects.NUMATopology(cells=[
            objects.NUMACell(id=0, cpuset=set([0, 1]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            objects.NUMACell(id=1, cpuset=set([2, 3]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([0, 1]),
                                     memory=256),
            objects.InstanceNUMACell(id=2, cpuset=set([2]),
                                     memory=256),
        ])

        # No host topology: nothing to compute usage against.
        hostusage = hw.numa_usage_from_instances(
            None, [instance1])
        self.assertIsNone(hostusage)

        # Empty instance list: usage stays at zero.
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [])
        self.assertEqual(hostusage.cells[0].cpu_usage, 0)
        self.assertEqual(hostusage.cells[0].memory_usage, 0)
        self.assertEqual(hostusage.cells[1].cpu_usage, 0)
        self.assertEqual(hostusage.cells[1].memory_usage, 0)

        # None instead of an instance list behaves like an empty list.
        hostusage = hw.numa_usage_from_instances(
            hosttopo, None)
        self.assertEqual(hostusage.cells[0].cpu_usage, 0)
        self.assertEqual(hostusage.cells[0].memory_usage, 0)
        self.assertEqual(hostusage.cells[1].cpu_usage, 0)
self.assertEqual(hostusage.cells[1].memory_usage, 0) def assertNUMACellMatches(self, expected_cell, got_cell): attrs = ('cpuset', 'memory', 'id') if isinstance(expected_cell, objects.NUMATopology): attrs += ('cpu_usage', 'memory_usage') for attr in attrs: self.assertEqual(getattr(expected_cell, attr), getattr(got_cell, attr)) def test_json(self): expected = objects.NUMATopology( cells=[ objects.NUMACell(id=1, cpuset=set([1, 2]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=2, cpuset=set([3, 4]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([]))]) got = objects.NUMATopology.obj_from_db_obj(expected._to_json()) for exp_cell, got_cell in zip(expected.cells, got.cells): self.assertNUMACellMatches(exp_cell, got_cell) class VirtNUMATopologyCellUsageTestCase(test.NoDBTestCase): def test_fit_instance_cell_success_no_limit(self): host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])) instance_cell = objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=1024) fitted_cell = hw._numa_fit_instance_cell(host_cell, instance_cell) self.assertIsInstance(fitted_cell, objects.InstanceNUMACell) self.assertEqual(host_cell.id, fitted_cell.id) def test_fit_instance_cell_success_w_limit(self): host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024, cpu_usage=2, memory_usage=1024, mempages=[], siblings=[], pinned_cpus=set([])) limit_cell = objects.NUMATopologyLimits( cpu_allocation_ratio=2, ram_allocation_ratio=2) instance_cell = objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=1024) fitted_cell = hw._numa_fit_instance_cell( host_cell, instance_cell, limit_cell=limit_cell) self.assertIsInstance(fitted_cell, objects.InstanceNUMACell) self.assertEqual(host_cell.id, fitted_cell.id) def test_fit_instance_cell_self_overcommit(self): host_cell = objects.NUMACell(id=4, 
cpuset=set([1, 2]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])) limit_cell = objects.NUMATopologyLimits( cpu_allocation_ratio=2, ram_allocation_ratio=2) instance_cell = objects.InstanceNUMACell( id=0, cpuset=set([1, 2, 3]), memory=4096) fitted_cell = hw._numa_fit_instance_cell( host_cell, instance_cell, limit_cell=limit_cell) self.assertIsNone(fitted_cell) def test_fit_instance_cell_fail_w_limit(self): host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024, cpu_usage=2, memory_usage=1024, mempages=[], siblings=[], pinned_cpus=set([])) instance_cell = objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=4096) limit_cell = objects.NUMATopologyLimits( cpu_allocation_ratio=2, ram_allocation_ratio=2) fitted_cell = hw._numa_fit_instance_cell( host_cell, instance_cell, limit_cell=limit_cell) self.assertIsNone(fitted_cell) instance_cell = objects.InstanceNUMACell( id=0, cpuset=set([1, 2, 3, 4, 5]), memory=1024) fitted_cell = hw._numa_fit_instance_cell( host_cell, instance_cell, limit_cell=limit_cell) self.assertIsNone(fitted_cell) class VirtNUMAHostTopologyTestCase(test.NoDBTestCase): def setUp(self): super(VirtNUMAHostTopologyTestCase, self).setUp() self.host = objects.NUMATopology( cells=[ objects.NUMACell(id=1, cpuset=set([1, 2]), memory=2048, cpu_usage=2, memory_usage=2048, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=2, cpuset=set([3, 4]), memory=2048, cpu_usage=2, memory_usage=2048, mempages=[], siblings=[], pinned_cpus=set([]))]) self.limits = objects.NUMATopologyLimits( cpu_allocation_ratio=2, ram_allocation_ratio=2) self.instance1 = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=2048)]) self.instance2 = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([1, 2, 3, 4]), memory=1024)]) self.instance3 = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), 
memory=1024)]) def test_get_fitting_success_no_limits(self): fitted_instance1 = hw.numa_fit_instance_to_host( self.host, self.instance1) self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology) self.host = hw.numa_usage_from_instances(self.host, [fitted_instance1]) fitted_instance2 = hw.numa_fit_instance_to_host( self.host, self.instance3) self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology) def test_get_fitting_success_limits(self): fitted_instance = hw.numa_fit_instance_to_host( self.host, self.instance3, self.limits) self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology) self.assertEqual(1, fitted_instance.cells[0].id) def test_get_fitting_fails_no_limits(self): fitted_instance = hw.numa_fit_instance_to_host( self.host, self.instance2, self.limits) self.assertIsNone(fitted_instance) def test_get_fitting_culmulative_fails_limits(self): fitted_instance1 = hw.numa_fit_instance_to_host( self.host, self.instance1, self.limits) self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology) self.assertEqual(1, fitted_instance1.cells[0].id) self.host = hw.numa_usage_from_instances(self.host, [fitted_instance1]) fitted_instance2 = hw.numa_fit_instance_to_host( self.host, self.instance2, self.limits) self.assertIsNone(fitted_instance2) def test_get_fitting_culmulative_success_limits(self): fitted_instance1 = hw.numa_fit_instance_to_host( self.host, self.instance1, self.limits) self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology) self.assertEqual(1, fitted_instance1.cells[0].id) self.host = hw.numa_usage_from_instances(self.host, [fitted_instance1]) fitted_instance2 = hw.numa_fit_instance_to_host( self.host, self.instance3, self.limits) self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology) self.assertEqual(2, fitted_instance2.cells[0].id) def test_get_fitting_pci_success(self): pci_request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': '8086'}]) pci_reqs = [pci_request] pci_stats 
= stats.PciDeviceStats() with mock.patch.object(stats.PciDeviceStats, 'support_requests', return_value= True): fitted_instance1 = hw.numa_fit_instance_to_host(self.host, self.instance1, pci_requests=pci_reqs, pci_stats=pci_stats) self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology) def test_get_fitting_pci_fail(self): pci_request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': '8086'}]) pci_reqs = [pci_request] pci_stats = stats.PciDeviceStats() with mock.patch.object(stats.PciDeviceStats, 'support_requests', return_value= False): fitted_instance1 = hw.numa_fit_instance_to_host( self.host, self.instance1, pci_requests=pci_reqs, pci_stats=pci_stats) self.assertIsNone(fitted_instance1) class NumberOfSerialPortsTest(test.NoDBTestCase): def test_flavor(self): flavor = objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={"hw:serial_port_count": 3}) image_meta = objects.ImageMeta.from_dict({}) num_ports = hw.get_number_of_serial_ports(flavor, image_meta) self.assertEqual(3, num_ports) def test_image_meta(self): flavor = objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={}) image_meta = objects.ImageMeta.from_dict( {"properties": {"hw_serial_port_count": 2}}) num_ports = hw.get_number_of_serial_ports(flavor, image_meta) self.assertEqual(2, num_ports) def test_flavor_invalid_value(self): flavor = objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={"hw:serial_port_count": 'foo'}) image_meta = objects.ImageMeta.from_dict({}) self.assertRaises(exception.ImageSerialPortNumberInvalid, hw.get_number_of_serial_ports, flavor, image_meta) def test_image_meta_smaller_than_flavor(self): flavor = objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={"hw:serial_port_count": 3}) image_meta = objects.ImageMeta.from_dict( {"properties": {"hw_serial_port_count": 2}}) num_ports = hw.get_number_of_serial_ports(flavor, image_meta) self.assertEqual(2, num_ports) def test_flavor_smaller_than_image_meta(self): flavor = objects.Flavor(vcpus=8, memory_mb=2048, 
extra_specs={"hw:serial_port_count": 3}) image_meta = objects.ImageMeta.from_dict( {"properties": {"hw_serial_port_count": 4}}) self.assertRaises(exception.ImageSerialPortNumberExceedFlavorValue, hw.get_number_of_serial_ports, flavor, image_meta) class HelperMethodsTestCase(test.NoDBTestCase): def setUp(self): super(HelperMethodsTestCase, self).setUp() self.hosttopo = objects.NUMATopology(cells=[ objects.NUMACell(id=0, cpuset=set([0, 1]), memory=512, memory_usage=0, cpu_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([2, 3]), memory=512, memory_usage=0, cpu_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), ]) self.instancetopo = objects.InstanceNUMATopology( instance_uuid='fake-uuid', cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1]), memory=256, pagesize=2048, cpu_pinning={0: 0, 1: 1}), objects.InstanceNUMACell( id=1, cpuset=set([2]), memory=256, pagesize=2048, cpu_pinning={2: 3}), ]) self.context = context.RequestContext('fake-user', 'fake-project') def _check_usage(self, host_usage): self.assertEqual(2, host_usage.cells[0].cpu_usage) self.assertEqual(256, host_usage.cells[0].memory_usage) self.assertEqual(1, host_usage.cells[1].cpu_usage) self.assertEqual(256, host_usage.cells[1].memory_usage) def test_dicts_json(self): host = {'numa_topology': self.hosttopo._to_json()} instance = {'numa_topology': self.instancetopo._to_json()} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_dicts_instance_json(self): host = {'numa_topology': self.hosttopo} instance = {'numa_topology': self.instancetopo._to_json()} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, objects.NUMATopology) self._check_usage(res) def test_dicts_instance_json_old(self): host = {'numa_topology': self.hosttopo} instance = {'numa_topology': jsonutils.dumps(self.instancetopo._to_dict())} 
res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, objects.NUMATopology) self._check_usage(res) def test_dicts_host_json(self): host = {'numa_topology': self.hosttopo._to_json()} instance = {'numa_topology': self.instancetopo} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_dicts_host_json_old(self): host = {'numa_topology': jsonutils.dumps( self.hosttopo._to_dict())} instance = {'numa_topology': self.instancetopo} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_object_host_instance_json(self): host = objects.ComputeNode(numa_topology=self.hosttopo._to_json()) instance = {'numa_topology': self.instancetopo._to_json()} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_object_host_instance(self): host = objects.ComputeNode(numa_topology=self.hosttopo._to_json()) instance = {'numa_topology': self.instancetopo} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_instance_with_fetch(self): host = objects.ComputeNode(numa_topology=self.hosttopo._to_json()) fake_uuid = str(uuid.uuid4()) instance = {'uuid': fake_uuid} with mock.patch.object(objects.InstanceNUMATopology, 'get_by_instance_uuid', return_value=None) as get_mock: res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self.assertTrue(get_mock.called) def test_object_instance_with_load(self): host = objects.ComputeNode(numa_topology=self.hosttopo._to_json()) fake_uuid = str(uuid.uuid4()) instance = objects.Instance(context=self.context, 
uuid=fake_uuid) with mock.patch.object(objects.InstanceNUMATopology, 'get_by_instance_uuid', return_value=None) as get_mock: res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self.assertTrue(get_mock.called) def test_instance_serialized_by_build_request_spec(self): host = objects.ComputeNode(numa_topology=self.hosttopo._to_json()) fake_uuid = str(uuid.uuid4()) instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid, numa_topology=self.instancetopo) # NOTE (ndipanov): This emulates scheduler.utils.build_request_spec # We can remove this test once we no longer use that method. instance_raw = jsonutils.to_primitive( base_obj.obj_to_primitive(instance)) res = hw.get_host_numa_usage_from_instance(host, instance_raw) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_attr_host(self): class Host(object): def __init__(obj): obj.numa_topology = self.hosttopo._to_json() host = Host() instance = {'numa_topology': self.instancetopo._to_json()} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_never_serialize_result(self): host = {'numa_topology': self.hosttopo._to_json()} instance = {'numa_topology': self.instancetopo} res = hw.get_host_numa_usage_from_instance(host, instance, never_serialize_result=True) self.assertIsInstance(res, objects.NUMATopology) self._check_usage(res) def test_dict_numa_topology_to_obj(self): fake_uuid = str(uuid.uuid4()) instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid, numa_topology=self.instancetopo) instance_dict = base_obj.obj_to_primitive(instance) instance_numa_topo = hw.instance_topology_from_instance(instance_dict) for expected_cell, actual_cell in zip(self.instancetopo.cells, instance_numa_topo.cells): for k in expected_cell.fields: 
self.assertEqual(getattr(expected_cell, k), getattr(actual_cell, k)) class VirtMemoryPagesTestCase(test.NoDBTestCase): def test_cell_instance_pagesize(self): cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=2048) self.assertEqual(0, cell.id) self.assertEqual(set([0]), cell.cpuset) self.assertEqual(1024, cell.memory) self.assertEqual(2048, cell.pagesize) def test_numa_pagesize_usage_from_cell(self): instcell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=512, pagesize=2048) hostcell = objects.NUMACell( id=0, cpuset=set([0]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[objects.NUMAPagesTopology( size_kb=2048, total=512, used=0)], siblings=[], pinned_cpus=set([])) topo = hw._numa_pagesize_usage_from_cell(hostcell, instcell, 1) self.assertEqual(2048, topo[0].size_kb) self.assertEqual(512, topo[0].total) self.assertEqual(256, topo[0].used) def _test_get_requested_mempages_pagesize(self, spec=None, props=None): flavor = objects.Flavor(vcpus=16, memory_mb=2048, extra_specs=spec or {}) image_meta = objects.ImageMeta.from_dict({"properties": props or {}}) return hw._numa_get_pagesize_constraints(flavor, image_meta) def test_get_requested_mempages_pagesize_from_flavor_swipe(self): self.assertEqual( hw.MEMPAGES_SMALL, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "small"})) self.assertEqual( hw.MEMPAGES_LARGE, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "large"})) self.assertEqual( hw.MEMPAGES_ANY, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "any"})) def test_get_requested_mempages_pagesize_from_flavor_specific(self): self.assertEqual( 2048, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "2048"})) def test_get_requested_mempages_pagesize_from_flavor_invalid(self): self.assertRaises( exception.MemoryPageSizeInvalid, self._test_get_requested_mempages_pagesize, {"hw:mem_page_size": "foo"}) self.assertRaises( 
exception.MemoryPageSizeInvalid, self._test_get_requested_mempages_pagesize, {"hw:mem_page_size": "-42"}) def test_get_requested_mempages_pagesizes_from_flavor_suffix_sweep(self): self.assertEqual( 2048, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "2048KB"})) self.assertEqual( 2048, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "2MB"})) self.assertEqual( 1048576, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "1GB"})) def test_get_requested_mempages_pagesize_from_image_flavor_any(self): self.assertEqual( 2048, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "any"}, props={"hw_mem_page_size": "2048"})) def test_get_requested_mempages_pagesize_from_image_flavor_large(self): self.assertEqual( 2048, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "large"}, props={"hw_mem_page_size": "2048"})) def test_get_requested_mempages_pagesize_from_image_forbidden(self): self.assertRaises( exception.MemoryPageSizeForbidden, self._test_get_requested_mempages_pagesize, {"hw:mem_page_size": "small"}, {"hw_mem_page_size": "2048"}) def test_get_requested_mempages_pagesize_from_image_forbidden2(self): self.assertRaises( exception.MemoryPageSizeForbidden, self._test_get_requested_mempages_pagesize, {}, {"hw_mem_page_size": "2048"}) def test_cell_accepts_request_wipe(self): host_cell = objects.NUMACell( id=0, cpuset=set([0]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=262144, used=0), ], siblings=[], pinned_cpus=set([])) inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_SMALL) self.assertEqual( 4, hw._numa_cell_supports_pagesize_request(host_cell, inst_cell)) inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_ANY) self.assertEqual( 4, hw._numa_cell_supports_pagesize_request(host_cell, inst_cell)) inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), 
memory=1024, pagesize=hw.MEMPAGES_LARGE) self.assertIsNone(hw._numa_cell_supports_pagesize_request( host_cell, inst_cell)) def test_cell_accepts_request_large_pass(self): inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_LARGE) host_cell = objects.NUMACell( id=0, cpuset=set([0]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=256, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0) ], siblings=[], pinned_cpus=set([])) self.assertEqual( 2048, hw._numa_cell_supports_pagesize_request(host_cell, inst_cell)) def test_cell_accepts_request_custom_pass(self): inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=2048) host_cell = objects.NUMACell( id=0, cpuset=set([0]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=256, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0) ], siblings=[], pinned_cpus=set([])) self.assertEqual( 2048, hw._numa_cell_supports_pagesize_request(host_cell, inst_cell)) def test_cell_accepts_request_remainder_memory(self): # Test memory can't be divided with no rem by mempage's size_kb inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024 + 1, pagesize=2048) host_cell = objects.NUMACell( id=0, cpuset=set([0]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=256, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0) ], siblings=[], pinned_cpus=set([])) self.assertIsNone(hw._numa_cell_supports_pagesize_request( host_cell, inst_cell)) def test_cell_accepts_request_host_mempages(self): # Test pagesize not in host's mempages inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=4096) host_cell = objects.NUMACell( id=0, cpuset=set([0]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=256, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0) ], siblings=[], pinned_cpus=set([])) 
self.assertRaises(exception.MemoryPageSizeNotSupported, hw._numa_cell_supports_pagesize_request, host_cell, inst_cell) class _CPUPinningTestCaseBase(object): def assertEqualTopology(self, expected, got): for attr in ('sockets', 'cores', 'threads'): self.assertEqual(getattr(expected, attr), getattr(got, attr), "Mismatch on %s" % attr) def assertInstanceCellPinned(self, instance_cell, cell_ids=None): default_cell_id = 0 self.assertIsNotNone(instance_cell) if cell_ids is None: self.assertEqual(default_cell_id, instance_cell.id) else: self.assertIn(instance_cell.id, cell_ids) self.assertEqual(len(instance_cell.cpuset), len(instance_cell.cpu_pinning)) def assertPinningPreferThreads(self, instance_cell, host_cell): """Make sure we are preferring threads. We do this by assessing that at least 2 CPUs went to the same core if that was even possible to begin with. """ max_free_siblings = max(map(len, host_cell.free_siblings)) if len(instance_cell) > 1 and max_free_siblings > 1: cpu_to_sib = {} for sib in host_cell.free_siblings: for cpu in sib: cpu_to_sib[cpu] = tuple(sorted(sib)) pins_per_sib = collections.defaultdict(int) for inst_p, host_p in instance_cell.cpu_pinning.items(): pins_per_sib[cpu_to_sib[host_p]] += 1 self.assertTrue(max(pins_per_sib.values()) > 1, "Seems threads were not preferred by the pinning " "logic.") class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase): def test_get_pinning_inst_too_large_cpu(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_inst_too_large_mem(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]), memory=2048, memory_usage=1024, siblings=[], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 
1, 2]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_inst_not_avail(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, memory_usage=0, pinned_cpus=set([0]), siblings=[], mempages=[]) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_no_sibling_fits_empty(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=3) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) got_pinning = {x: x for x in range(0, 3)} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_no_sibling_fits_w_usage(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, memory_usage=0, pinned_cpus=set([1]), mempages=[], siblings=[]) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=1024) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_pinning = {0: 0, 1: 2, 2: 3} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_instance_siblings_fits(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4) self.assertEqualTopology(got_topo, 
inst_pin.cpu_topology) got_pinning = {x: x for x in range(0, 4)} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_instance_siblings_host_siblings_fits_empty(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) got_pinning = {x: x for x in range(0, 4)} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_instance_siblings_host_siblings_fits_empty_2(self): host_pin = objects.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) got_pinning = {x: x for x in range(0, 8)} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_instance_siblings_host_siblings_fits_w_usage(self): host_pin = objects.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, pinned_cpus=set([1, 2, 5, 6]), siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])], mempages=[]) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2) self.assertEqualTopology(got_topo, 
inst_pin.cpu_topology) got_pinning = {0: 0, 1: 3, 2: 4, 3: 7} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_host_siblings_fit_single_core(self): host_pin = objects.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) got_pinning = {x: x for x in range(0, 4)} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_host_siblings_fit(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) got_pinning = {x: x for x in range(0, 4)} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_require_policy_too_few_siblings(self): host_pin = objects.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, pinned_cpus=set([0, 1, 2]), siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])], mempages=[]) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_require_policy_fits(self): host_pin = 
objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_require_policy_fits_w_usage(self): host_pin = objects.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, pinned_cpus=set([0, 1]), siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])], mempages=[]) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_instance_odd_fit(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3, 4]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=5, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_instance_fit_optimize_threads(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, 
siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3, 4, 5]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_instance_odd_fit_w_usage(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])], mempages=[], pinned_cpus=set([0, 2, 5])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_instance_odd_fit_orphan_only(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])], mempages=[], pinned_cpus=set([0, 2, 5, 6])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_large_instance_odd_fit(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), memory=4096, memory_usage=0, siblings=[set([0, 8]), set([1, 9]), set([2, 10]), set([3, 11]), set([4, 12]), set([5, 13]), set([6, 14]), set([7, 15])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3, 4]), memory=2048) 
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) self.assertPinningPreferThreads(inst_pin, host_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=5, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_isolate_policy_too_few_fully_free_cores(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([1])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_isolate_policy_no_fully_free_cores(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([1, 2])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_isolate_policy_fits(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_isolate_policy_fits_w_usage(self): host_pin = 
objects.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, pinned_cpus=set([0, 1]), siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])], mempages=[]) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase): def test_host_numa_fit_instance_to_host_single_cell(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))] ) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) for cell in inst_topo.cells: self.assertInstanceCellPinned(cell, cell_ids=(0, 1)) def test_host_numa_fit_instance_to_host_single_cell_w_usage(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1]), pinned_cpus=set([0]), memory=2048, memory_usage=0, siblings=[], mempages=[]), objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) for cell in inst_topo.cells: self.assertInstanceCellPinned(cell, cell_ids=(1,)) def 
test_host_numa_fit_instance_to_host_single_cell_fail(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1]), memory=2048, pinned_cpus=set([0]), memory_usage=0, siblings=[], mempages=[]), objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048, pinned_cpus=set([2]), memory_usage=0, siblings=[], mempages=[])]) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) self.assertIsNone(inst_topo) def test_host_numa_fit_instance_to_host_fit(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( cpuset=set([2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) for cell in inst_topo.cells: self.assertInstanceCellPinned(cell, cell_ids=(0, 1)) def test_host_numa_fit_instance_to_host_barely_fit(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, pinned_cpus=set([0]), siblings=[], mempages=[], memory_usage=0), objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([4, 5, 6])), objects.NUMACell(id=2, cpuset=set([8, 9, 10, 11]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([10, 11]))]) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, 
cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( cpuset=set([2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) for cell in inst_topo.cells: self.assertInstanceCellPinned(cell, cell_ids=(0, 2)) def test_host_numa_fit_instance_to_host_fail_capacity(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([0])), objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([4, 5, 6]))]) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( cpuset=set([2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) self.assertIsNone(inst_topo) def test_host_numa_fit_instance_to_host_fail_topology(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( cpuset=set([2, 3]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( cpuset=set([4, 5]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) self.assertIsNone(inst_topo) def test_cpu_pinning_usage_from_instances(self): host_pin = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), 
memory=4096, cpu_usage=0, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_pin_1 = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), id=0, memory=2048, cpu_pinning={0: 0, 1: 3}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_pin_2 = objects.InstanceNUMATopology( cells = [objects.InstanceNUMACell( cpuset=set([0, 1]), id=0, memory=2048, cpu_pinning={0: 1, 1: 2}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) host_pin = hw.numa_usage_from_instances( host_pin, [inst_pin_1, inst_pin_2]) self.assertEqual(set([0, 1, 2, 3]), host_pin.cells[0].pinned_cpus) def test_cpu_pinning_usage_from_instances_free(self): host_pin = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([0, 1, 3]))]) inst_pin_1 = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0]), memory=1024, cpu_pinning={0: 1}, id=0, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_pin_2 = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=1024, id=0, cpu_pinning={0: 0, 1: 3}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) host_pin = hw.numa_usage_from_instances( host_pin, [inst_pin_1, inst_pin_2], free=True) self.assertEqual(set(), host_pin.cells[0].pinned_cpus) def test_host_usage_from_instances_fail(self): host_pin = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_pin_1 = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, id=0, cpu_pinning={0: 0, 1: 3}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_pin_2 = objects.InstanceNUMATopology( cells = [objects.InstanceNUMACell( cpuset=set([0, 1]), id=0, memory=2048, cpu_pinning={0: 0, 1: 2}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) 
self.assertRaises(exception.CPUPinningInvalid, hw.numa_usage_from_instances, host_pin, [inst_pin_1, inst_pin_2]) def test_host_usage_from_instances_isolate(self): host_pin = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0, memory_usage=0, siblings=[set([0, 2]), set([1, 3])], mempages=[], pinned_cpus=set([]))]) inst_pin_1 = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, id=0, cpu_pinning={0: 0, 1: 1}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE )]) new_cell = hw.numa_usage_from_instances(host_pin, [inst_pin_1]) self.assertEqual(host_pin.cells[0].cpuset, new_cell.cells[0].pinned_cpus) self.assertEqual(new_cell.cells[0].cpu_usage, 4) def test_host_usage_from_instances_isolate_free(self): host_pin = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=4, memory_usage=0, siblings=[set([0, 2]), set([1, 3])], mempages=[], pinned_cpus=set([0, 1, 2, 3]))]) inst_pin_1 = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, id=0, cpu_pinning={0: 0, 1: 1}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE )]) new_cell = hw.numa_usage_from_instances(host_pin, [inst_pin_1], free=True) self.assertEqual(set([]), new_cell.cells[0].pinned_cpus) self.assertEqual(new_cell.cells[0].cpu_usage, 0) class CPURealtimeTestCase(test.NoDBTestCase): def test_success_flavor(self): flavor = {"extra_specs": {"hw:cpu_realtime_mask": "^1"}} image = objects.ImageMeta.from_dict({}) rt, em = hw.vcpus_realtime_topology(set([0, 1, 2]), flavor, image) self.assertEqual(set([0, 2]), rt) self.assertEqual(set([1]), em) def test_success_image(self): flavor = {"extra_specs": {}} image = objects.ImageMeta.from_dict( {"properties": {"hw_cpu_realtime_mask": "^0-1"}}) rt, em = 
hw.vcpus_realtime_topology(set([0, 1, 2]), flavor, image) self.assertEqual(set([2]), rt) self.assertEqual(set([0, 1]), em) def test_no_mask_configured(self): flavor = {"extra_specs": {}} image = objects.ImageMeta.from_dict({"properties": {}}) self.assertRaises( exception.RealtimeMaskNotFoundOrInvalid, hw.vcpus_realtime_topology, set([0, 1, 2]), flavor, image) def test_mask_badly_configured(self): flavor = {"extra_specs": {"hw:cpu_realtime_mask": "^0-2"}} image = objects.ImageMeta.from_dict({"properties": {}}) self.assertRaises( exception.RealtimeMaskNotFoundOrInvalid, hw.vcpus_realtime_topology, set([0, 1, 2]), flavor, image) nova-13.0.0/nova/tests/unit/virt/test_imagecache.py0000664000567000056710000001634712701410011023424 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg

from nova import block_device
from nova.compute import vm_states
from nova import context
from nova import objects
from nova.objects import block_device as block_device_obj
from nova import test
from nova.tests.unit import fake_instance
from nova.virt import imagecache

CONF = cfg.CONF

# A single local, delete-on-termination swap BDM of 128MB...
swap_bdm_128 = [block_device.BlockDeviceDict(
    {'id': 1, 'instance_uuid': 'fake-instance',
     'device_name': '/dev/sdb1',
     'source_type': 'blank',
     'destination_type': 'local',
     'delete_on_termination': True,
     'guest_format': 'swap',
     'disk_bus': 'scsi',
     'volume_size': 128,
     'boot_index': -1})]

# ...and the same BDM at 256MB, so tests can distinguish swap sizes.
swap_bdm_256 = [block_device.BlockDeviceDict(
    {'id': 1, 'instance_uuid': 'fake-instance',
     'device_name': '/dev/sdb1',
     'source_type': 'blank',
     'destination_type': 'local',
     'delete_on_termination': True,
     'guest_format': 'swap',
     'disk_bus': 'scsi',
     'volume_size': 256,
     'boot_index': -1})]


class ImageCacheManagerTests(test.NoDBTestCase):
    """Tests for the abstract nova.virt.imagecache.ImageCacheManager base."""

    def test_configuration_defaults(self):
        """The image cache config options keep their documented defaults."""
        # NOTE: renamed from the original typo'd name
        # "test_configurationi_defaults" so the method name matches
        # what it tests; unittest still discovers it via the test_ prefix.
        self.assertEqual(2400, CONF.image_cache_manager_interval)
        self.assertEqual('_base', CONF.image_cache_subdirectory_name)
        self.assertTrue(CONF.remove_unused_base_images)
        self.assertEqual(24 * 3600,
                        CONF.remove_unused_original_minimum_age_seconds)

    def test_cache_manager(self):
        """Abstract hooks raise NotImplementedError; list helpers are empty."""
        cache_manager = imagecache.ImageCacheManager()
        self.assertTrue(cache_manager.remove_unused_base_images)
        self.assertRaises(NotImplementedError,
                          cache_manager.update, None, [])
        self.assertRaises(NotImplementedError,
                          cache_manager._get_base)
        base_images = cache_manager._list_base_images(None)
        self.assertEqual([], base_images['unexplained_images'])
        self.assertEqual([], base_images['originals'])
        self.assertRaises(NotImplementedError,
                          cache_manager._age_and_verify_cached_images,
                          None, [], None)

    def test_list_running_instances(self):
        """_list_running_instances aggregates image/swap usage per image.

        Two local instances plus one on a remote host (with kernel and
        ramdisk images) should yield per-image (local, remote, names)
        tuples, image popularity counts, and used swap image keys.
        """
        instances = [{'image_ref': '1',
                      'host': CONF.host,
                      'id': '1',
                      'uuid': '123',
                      'vm_state': '',
                      'task_state': ''},
                     {'image_ref': '2',
                      'host': CONF.host,
                      'id': '2',
                      'uuid': '456',
                      'vm_state': '',
                      'task_state': ''},
                     {'image_ref': '2',
                      'kernel_id': '21',
                      'ramdisk_id': '22',
                      'host': 'remotehost',
                      'id': '3',
                      'uuid': '789',
                      'vm_state': '',
                      'task_state': ''}]

        all_instances = [fake_instance.fake_instance_obj(None, **instance)
                         for instance in instances]

        image_cache_manager = imagecache.ImageCacheManager()

        self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList,
                                 'get_by_instance_uuid')

        ctxt = context.get_admin_context()
        swap_bdm_256_list = block_device_obj.block_device_make_list_from_dicts(
            ctxt, swap_bdm_256)
        swap_bdm_128_list = block_device_obj.block_device_make_list_from_dicts(
            ctxt, swap_bdm_128)
        objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, '123').AndReturn(swap_bdm_256_list)
        objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, '456').AndReturn(swap_bdm_128_list)
        objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, '789').AndReturn(swap_bdm_128_list)

        self.mox.ReplayAll()

        # The argument here should be a context, but it's mocked out
        running = image_cache_manager._list_running_instances(ctxt,
            all_instances)

        self.assertEqual(4, len(running['used_images']))
        self.assertEqual((1, 0, ['instance-00000001']),
                         running['used_images']['1'])
        self.assertEqual((1, 1, ['instance-00000002',
                                 'instance-00000003']),
                         running['used_images']['2'])
        self.assertEqual((0, 1, ['instance-00000003']),
                         running['used_images']['21'])
        self.assertEqual((0, 1, ['instance-00000003']),
                         running['used_images']['22'])

        self.assertIn('instance-00000001', running['instance_names'])
        self.assertIn('123', running['instance_names'])

        self.assertEqual(4, len(running['image_popularity']))
        self.assertEqual(1, running['image_popularity']['1'])
        self.assertEqual(2, running['image_popularity']['2'])
        self.assertEqual(1, running['image_popularity']['21'])
        self.assertEqual(1, running['image_popularity']['22'])

        self.assertEqual(len(running['used_swap_images']), 2)
        self.assertIn('swap_128', running['used_swap_images'])
        self.assertIn('swap_256', running['used_swap_images'])

    def test_list_resizing_instances(self):
        """A RESIZED instance also contributes its *_resize names."""
        instances = [{'image_ref': '1',
                      'host': CONF.host,
                      'id': '1',
                      'uuid': '123',
                      'vm_state': vm_states.RESIZED,
                      'task_state': None}]

        all_instances = [fake_instance.fake_instance_obj(None, **instance)
                         for instance in instances]

        image_cache_manager = imagecache.ImageCacheManager()
        self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList,
                                 'get_by_instance_uuid')

        ctxt = context.get_admin_context()
        bdms = block_device_obj.block_device_make_list_from_dicts(
            ctxt, swap_bdm_256)
        objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, '123').AndReturn(bdms)

        self.mox.ReplayAll()
        running = image_cache_manager._list_running_instances(ctxt,
            all_instances)

        self.assertEqual(1, len(running['used_images']))
        self.assertEqual((1, 0, ['instance-00000001']),
                         running['used_images']['1'])
        self.assertEqual(set(['instance-00000001', '123',
                              'instance-00000001_resize', '123_resize']),
                         running['instance_names'])

        self.assertEqual(1, len(running['image_popularity']))
        self.assertEqual(1, running['image_popularity']['1'])
import io import mock import six from nova import test from nova import utils from nova.virt.disk import api as disk_api from nova.virt.disk.mount import api as mount from nova.virt import driver PROC_MOUNTS_CONTENTS = """rootfs / rootfs rw 0 0 sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0 proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 udev /dev devtmpfs rw,relatime,size=1013160k,nr_inodes=253290,mode=755 0 0 devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620 0 0 tmpfs /run tmpfs rw,nosuid,relatime,size=408904k,mode=755 0 0""" class TestVirtDriver(test.NoDBTestCase): def test_block_device(self): swap = {'device_name': '/dev/sdb', 'swap_size': 1} ephemerals = [{'num': 0, 'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1', 'size': 1}] block_device_mapping = [{'mount_device': '/dev/sde', 'device_path': 'fake_device'}] block_device_info = { 'root_device_name': '/dev/sda', 'swap': swap, 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping} empty_block_device_info = {} self.assertEqual( driver.block_device_info_get_root(block_device_info), '/dev/sda') self.assertIsNone( driver.block_device_info_get_root(empty_block_device_info)) self.assertIsNone(driver.block_device_info_get_root(None)) self.assertEqual( driver.block_device_info_get_swap(block_device_info), swap) self.assertIsNone(driver.block_device_info_get_swap( empty_block_device_info)['device_name']) self.assertEqual(driver.block_device_info_get_swap( empty_block_device_info)['swap_size'], 0) self.assertIsNone( driver.block_device_info_get_swap({'swap': None})['device_name']) self.assertEqual( driver.block_device_info_get_swap({'swap': None})['swap_size'], 0) self.assertIsNone( driver.block_device_info_get_swap(None)['device_name']) self.assertEqual( driver.block_device_info_get_swap(None)['swap_size'], 0) self.assertEqual( driver.block_device_info_get_ephemerals(block_device_info), ephemerals) self.assertEqual( 
driver.block_device_info_get_ephemerals(empty_block_device_info), []) self.assertEqual( driver.block_device_info_get_ephemerals(None), []) def test_swap_is_usable(self): self.assertFalse(driver.swap_is_usable(None)) self.assertFalse(driver.swap_is_usable({'device_name': None})) self.assertFalse(driver.swap_is_usable({'device_name': '/dev/sdb', 'swap_size': 0})) self.assertTrue(driver.swap_is_usable({'device_name': '/dev/sdb', 'swap_size': 1})) class FakeMount(object): def __init__(self, image, mount_dir, partition=None, device=None): self.image = image self.partition = partition self.mount_dir = mount_dir self.linked = self.mapped = self.mounted = False self.device = device def do_mount(self): self.linked = True self.mapped = True self.mounted = True self.device = '/dev/fake' return True def do_umount(self): self.linked = True self.mounted = False def do_teardown(self): self.linked = False self.mapped = False self.mounted = False self.device = None class TestDiskImage(test.NoDBTestCase): def mock_proc_mounts(self, mock_open): response = io.StringIO(six.text_type(PROC_MOUNTS_CONTENTS)) mock_open.return_value = response @mock.patch.object(six.moves.builtins, 'open') def test_mount(self, mock_open): self.mock_proc_mounts(mock_open) image = '/tmp/fake-image' mountdir = '/mnt/fake_rootfs' fakemount = FakeMount(image, mountdir, None) def fake_instance_for_format(image, mountdir, partition): return fakemount self.stubs.Set(mount.Mount, 'instance_for_format', staticmethod(fake_instance_for_format)) diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir) dev = diskimage.mount() self.assertEqual(diskimage._mounter, fakemount) self.assertEqual(dev, '/dev/fake') @mock.patch.object(six.moves.builtins, 'open') def test_umount(self, mock_open): self.mock_proc_mounts(mock_open) image = '/tmp/fake-image' mountdir = '/mnt/fake_rootfs' fakemount = FakeMount(image, mountdir, None) def fake_instance_for_format(image, mountdir, partition): return fakemount 
self.stubs.Set(mount.Mount, 'instance_for_format', staticmethod(fake_instance_for_format)) diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir) dev = diskimage.mount() self.assertEqual(diskimage._mounter, fakemount) self.assertEqual(dev, '/dev/fake') diskimage.umount() self.assertIsNone(diskimage._mounter) @mock.patch.object(six.moves.builtins, 'open') def test_teardown(self, mock_open): self.mock_proc_mounts(mock_open) image = '/tmp/fake-image' mountdir = '/mnt/fake_rootfs' fakemount = FakeMount(image, mountdir, None) def fake_instance_for_format(image, mountdir, partition): return fakemount self.stubs.Set(mount.Mount, 'instance_for_format', staticmethod(fake_instance_for_format)) diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir) dev = diskimage.mount() self.assertEqual(diskimage._mounter, fakemount) self.assertEqual(dev, '/dev/fake') diskimage.teardown() self.assertIsNone(diskimage._mounter) class TestVirtDisk(test.NoDBTestCase): def setUp(self): super(TestVirtDisk, self).setUp() self.executes = [] def fake_execute(*cmd, **kwargs): self.executes.append(cmd) return None, None self.stubs.Set(utils, 'execute', fake_execute) def test_lxc_setup_container(self): image = '/tmp/fake-image' container_dir = '/mnt/fake_rootfs/' def proc_mounts(self, mount_point): return None def fake_instance_for_format(image, mountdir, partition): return FakeMount(image, mountdir, partition) self.stub_out('os.path.exists', lambda _: True) self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts) self.stubs.Set(mount.Mount, 'instance_for_format', staticmethod(fake_instance_for_format)) self.assertEqual(disk_api.setup_container(image, container_dir), '/dev/fake') def test_lxc_teardown_container(self): def proc_mounts(self, mount_point): mount_points = { '/mnt/loop/nopart': '/dev/loop0', '/mnt/loop/part': '/dev/mapper/loop0p1', '/mnt/nbd/nopart': '/dev/nbd15', '/mnt/nbd/part': '/dev/mapper/nbd15p1', } return mount_points[mount_point] 
self.stub_out('os.path.exists', lambda _: True) self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts) expected_commands = [] disk_api.teardown_container('/mnt/loop/nopart') expected_commands += [ ('umount', '/dev/loop0'), ('losetup', '--detach', '/dev/loop0'), ] disk_api.teardown_container('/mnt/loop/part') expected_commands += [ ('umount', '/dev/mapper/loop0p1'), ('kpartx', '-d', '/dev/loop0'), ('losetup', '--detach', '/dev/loop0'), ] disk_api.teardown_container('/mnt/nbd/nopart') expected_commands += [ ('blockdev', '--flushbufs', '/dev/nbd15'), ('umount', '/dev/nbd15'), ('qemu-nbd', '-d', '/dev/nbd15'), ] disk_api.teardown_container('/mnt/nbd/part') expected_commands += [ ('blockdev', '--flushbufs', '/dev/nbd15'), ('umount', '/dev/mapper/nbd15p1'), ('kpartx', '-d', '/dev/nbd15'), ('qemu-nbd', '-d', '/dev/nbd15'), ] # NOTE(thomasem): Not adding any commands in this case, because we're # not expecting an additional umount for LocalBlockImages. This is to # assert that no additional commands are run in this case. 
disk_api.teardown_container('/dev/volume-group/uuid_disk') self.assertEqual(self.executes, expected_commands) def test_lxc_teardown_container_with_namespace_cleaned(self): def proc_mounts(self, mount_point): return None self.stub_out('os.path.exists', lambda _: True) self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts) expected_commands = [] disk_api.teardown_container('/mnt/loop/nopart', '/dev/loop0') expected_commands += [ ('losetup', '--detach', '/dev/loop0'), ] disk_api.teardown_container('/mnt/loop/part', '/dev/loop0') expected_commands += [ ('losetup', '--detach', '/dev/loop0'), ] disk_api.teardown_container('/mnt/nbd/nopart', '/dev/nbd15') expected_commands += [ ('qemu-nbd', '-d', '/dev/nbd15'), ] disk_api.teardown_container('/mnt/nbd/part', '/dev/nbd15') expected_commands += [ ('qemu-nbd', '-d', '/dev/nbd15'), ] self.assertEqual(self.executes, expected_commands) nova-13.0.0/nova/tests/unit/virt/test_diagnostics.py0000664000567000056710000002413712701407773023706 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova import exception from nova import test from nova.virt import diagnostics class DiagnosticsTests(test.NoDBTestCase): def test_cpu_diagnostics_default(self): cpu = diagnostics.CpuDiagnostics() self.assertEqual(0, cpu.time) def test_cpu_diagnostics(self): cpu = diagnostics.CpuDiagnostics(time=7) self.assertEqual(7, cpu.time) def test_nic_diagnostics_default(self): nic = diagnostics.NicDiagnostics() self.assertEqual('00:00:00:00:00:00', nic.mac_address) self.assertEqual(0, nic.rx_octets) self.assertEqual(0, nic.rx_errors) self.assertEqual(0, nic.rx_drop) self.assertEqual(0, nic.rx_packets) self.assertEqual(0, nic.tx_octets) self.assertEqual(0, nic.tx_errors) self.assertEqual(0, nic.tx_drop) self.assertEqual(0, nic.tx_packets) def test_nic_diagnostics(self): nic = diagnostics.NicDiagnostics(mac_address='00:00:ca:fe:00:00', rx_octets=1, rx_errors=2, rx_drop=3, rx_packets=4, tx_octets=5, tx_errors=6, tx_drop=7, tx_packets=8) self.assertEqual('00:00:ca:fe:00:00', nic.mac_address) self.assertEqual(1, nic.rx_octets) self.assertEqual(2, nic.rx_errors) self.assertEqual(3, nic.rx_drop) self.assertEqual(4, nic.rx_packets) self.assertEqual(5, nic.tx_octets) self.assertEqual(6, nic.tx_errors) self.assertEqual(7, nic.tx_drop) self.assertEqual(8, nic.tx_packets) def test_disk_diagnostics_default(self): disk = diagnostics.DiskDiagnostics() self.assertEqual('', disk.id) self.assertEqual(0, disk.read_bytes) self.assertEqual(0, disk.read_requests) self.assertEqual(0, disk.write_bytes) self.assertEqual(0, disk.write_requests) self.assertEqual(0, disk.errors_count) def test_disk_diagnostics(self): disk = diagnostics.DiskDiagnostics(id='fake_disk_id', read_bytes=1, read_requests=2, write_bytes=3, write_requests=4, errors_count=5) self.assertEqual('fake_disk_id', disk.id) self.assertEqual(1, disk.read_bytes) self.assertEqual(2, disk.read_requests) self.assertEqual(3, disk.write_bytes) self.assertEqual(4, disk.write_requests) self.assertEqual(5, disk.errors_count) def 
test_memory_diagnostics_default(self): memory = diagnostics.MemoryDiagnostics() self.assertEqual(0, memory.maximum) self.assertEqual(0, memory.used) def test_memory_diagnostics(self): memory = diagnostics.MemoryDiagnostics(maximum=1, used=2) self.assertEqual(1, memory.maximum) self.assertEqual(2, memory.used) def test_diagnostics_default(self): diags = diagnostics.Diagnostics() self.assertIsNone(diags.state) self.assertIsNone(diags.driver) self.assertIsNone(diags.hypervisor_os) self.assertEqual(0, diags.uptime) self.assertFalse(diags.config_drive) self.assertEqual([], diags.cpu_details) self.assertEqual([], diags.nic_details) self.assertEqual([], diags.disk_details) self.assertEqual(0, diags.memory_details.maximum) self.assertEqual(0, diags.memory_details.used) self.assertEqual('1.0', diags.version) def test_diagnostics(self): cpu_details = [diagnostics.CpuDiagnostics()] nic_details = [diagnostics.NicDiagnostics()] disk_details = [diagnostics.DiskDiagnostics()] diags = diagnostics.Diagnostics( state='fake-state', driver='fake-driver', hypervisor_os='fake-os', uptime=1, cpu_details=cpu_details, nic_details=nic_details, disk_details=disk_details, config_drive=True) self.assertEqual('fake-state', diags.state) self.assertEqual('fake-driver', diags.driver) self.assertEqual('fake-os', diags.hypervisor_os) self.assertEqual(1, diags.uptime) self.assertTrue(diags.config_drive) self.assertEqual(1, len(diags.cpu_details)) self.assertEqual(1, len(diags.nic_details)) self.assertEqual(1, len(diags.disk_details)) self.assertEqual(0, diags.memory_details.maximum) self.assertEqual(0, diags.memory_details.used) self.assertEqual('1.0', diags.version) def test_add_cpu(self): diags = diagnostics.Diagnostics() self.assertEqual([], diags.cpu_details) diags.add_cpu(time=7) self.assertEqual(1, len(diags.cpu_details)) self.assertEqual(7, diags.cpu_details[0].time) def test_add_nic(self): diags = diagnostics.Diagnostics() self.assertEqual([], diags.nic_details) 
diags.add_nic(mac_address='00:00:ca:fe:00:00', rx_octets=1, rx_errors=2, rx_drop=3, rx_packets=4, tx_octets=5, tx_errors=6, tx_drop=7, tx_packets=8) self.assertEqual(1, len(diags.nic_details)) self.assertEqual('00:00:ca:fe:00:00', diags.nic_details[0].mac_address) self.assertEqual(1, diags.nic_details[0].rx_octets) self.assertEqual(2, diags.nic_details[0].rx_errors) self.assertEqual(3, diags.nic_details[0].rx_drop) self.assertEqual(4, diags.nic_details[0].rx_packets) self.assertEqual(5, diags.nic_details[0].tx_octets) self.assertEqual(6, diags.nic_details[0].tx_errors) self.assertEqual(7, diags.nic_details[0].tx_drop) self.assertEqual(8, diags.nic_details[0].tx_packets) def test_add_disk(self): diags = diagnostics.Diagnostics() self.assertEqual([], diags.disk_details) diags.add_disk(id='fake_disk_id', read_bytes=1, read_requests=2, write_bytes=3, write_requests=4, errors_count=5) self.assertEqual(1, len(diags.disk_details)) self.assertEqual('fake_disk_id', diags.disk_details[0].id) self.assertEqual(1, diags.disk_details[0].read_bytes) self.assertEqual(2, diags.disk_details[0].read_requests) self.assertEqual(3, diags.disk_details[0].write_bytes) self.assertEqual(4, diags.disk_details[0].write_requests) self.assertEqual(5, diags.disk_details[0].errors_count) def test_diagnostics_serialize_default(self): diags = diagnostics.Diagnostics() expected = {'config_drive': False, 'cpu_details': [], 'disk_details': [], 'driver': None, 'hypervisor_os': None, 'memory_details': {'maximum': 0, 'used': 0}, 'nic_details': [], 'state': None, 'uptime': 0, 'version': '1.0'} result = diags.serialize() self.assertEqual(expected, result) def test_diagnostics_serialize(self): cpu_details = [diagnostics.CpuDiagnostics()] nic_details = [diagnostics.NicDiagnostics()] disk_details = [diagnostics.DiskDiagnostics()] diags = diagnostics.Diagnostics( state='fake-state', driver='fake-driver', hypervisor_os='fake-os', uptime=1, cpu_details=cpu_details, nic_details=nic_details, 
disk_details=disk_details, config_drive=True) expected = {'config_drive': True, 'cpu_details': [{'time': 0}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 0, 'read_requests': 0, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'fake-driver', 'hypervisor_os': 'fake-os', 'memory_details': {'maximum': 0, 'used': 0}, 'nic_details': [{'mac_address': '00:00:00:00:00:00', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 0, 'rx_packets': 0, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'fake-state', 'uptime': 1, 'version': '1.0'} result = diags.serialize() self.assertEqual(expected, result) def test_diagnostics_invalid_input(self): self.assertRaises(exception.InvalidInput, diagnostics.Diagnostics, cpu_details='invalid type') self.assertRaises(exception.InvalidInput, diagnostics.Diagnostics, cpu_details=['invalid entry']) self.assertRaises(exception.InvalidInput, diagnostics.Diagnostics, nic_details='invalid type') self.assertRaises(exception.InvalidInput, diagnostics.Diagnostics, nic_details=['invalid entry']) self.assertRaises(exception.InvalidInput, diagnostics.Diagnostics, disk_details='invalid type') self.assertRaises(exception.InvalidInput, diagnostics.Diagnostics, disk_details=['invalid entry']) nova-13.0.0/nova/tests/unit/virt/test_volumeutils.py0000664000567000056710000000273212701407773023764 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # Copyright 2012 University Of Minho # Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Tests for virt volumeutils. """ import mock from os_brick.initiator import connector from nova import test from nova.virt import volumeutils class VolumeUtilsTestCase(test.NoDBTestCase): @mock.patch.object(connector.ISCSIConnector, 'get_initiator', return_value='fake.initiator.iqn') def test_get_iscsi_initiator(self, fake_initiator): initiator = 'fake.initiator.iqn' # Start test result = volumeutils.get_iscsi_initiator() self.assertEqual(initiator, result) @mock.patch.object(connector.ISCSIConnector, 'get_initiator', return_value=None) def test_get_missing_iscsi_initiator(self, fake_initiator): result = volumeutils.get_iscsi_initiator() self.assertIsNone(result) nova-13.0.0/nova/tests/unit/virt/test_configdrive.py0000664000567000056710000000373612701407773023700 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova import objects from nova import test from nova.virt import configdrive class ConfigDriveTestCase(test.NoDBTestCase): def test_instance_force(self): self.flags(force_config_drive=False) instance = objects.Instance( config_drive="yes", system_metadata={ "image_img_config_drive": "mandatory", } ) self.assertTrue(configdrive.required_by(instance)) def test_image_meta_force(self): self.flags(force_config_drive=False) instance = objects.Instance( config_drive=None, system_metadata={ "image_img_config_drive": "mandatory", } ) self.assertTrue(configdrive.required_by(instance)) def test_config_flag_force(self): self.flags(force_config_drive=True) instance = objects.Instance( config_drive=None, system_metadata={ "image_img_config_drive": "optional", } ) self.assertTrue(configdrive.required_by(instance)) def test_no_config_drive(self): self.flags(force_config_drive=False) instance = objects.Instance( config_drive=None, system_metadata={ "image_img_config_drive": "optional", } ) self.assertFalse(configdrive.required_by(instance)) nova-13.0.0/nova/tests/unit/virt/test_block_device.py0000664000567000056710000013125512701407773024010 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_serialization import jsonutils import six from nova import block_device from nova import context from nova import exception from nova import objects from nova.objects import fields from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import matchers from nova.virt import block_device as driver_block_device from nova.virt import driver from nova.volume import cinder from nova.volume import encryptors class TestDriverBlockDevice(test.NoDBTestCase): driver_classes = { 'swap': driver_block_device.DriverSwapBlockDevice, 'ephemeral': driver_block_device.DriverEphemeralBlockDevice, 'volume': driver_block_device.DriverVolumeBlockDevice, 'snapshot': driver_block_device.DriverSnapshotBlockDevice, 'image': driver_block_device.DriverImageBlockDevice, 'blank': driver_block_device.DriverBlankBlockDevice } swap_bdm_dict = block_device.BlockDeviceDict( {'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': 'swap', 'disk_bus': 'scsi', 'volume_size': 2, 'boot_index': -1}) swap_driver_bdm = { 'device_name': '/dev/sdb1', 'swap_size': 2, 'disk_bus': 'scsi'} swap_legacy_driver_bdm = { 'device_name': '/dev/sdb1', 'swap_size': 2} ephemeral_bdm_dict = block_device.BlockDeviceDict( {'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdc1', 'source_type': 'blank', 'destination_type': 'local', 'disk_bus': 'scsi', 'device_type': 'disk', 'volume_size': 4, 'guest_format': 'ext4', 'delete_on_termination': True, 'boot_index': -1}) ephemeral_driver_bdm = { 'device_name': '/dev/sdc1', 'size': 4, 'device_type': 'disk', 'guest_format': 'ext4', 'disk_bus': 'scsi'} ephemeral_legacy_driver_bdm = { 'device_name': '/dev/sdc1', 'size': 4, 'virtual_name': 'ephemeral0', 'num': 0} volume_bdm_dict = block_device.BlockDeviceDict( {'id': 3, 'instance_uuid': 'fake-instance', 
'device_name': '/dev/sda1', 'source_type': 'volume', 'disk_bus': 'scsi', 'device_type': 'disk', 'volume_size': 8, 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'guest_format': 'ext4', 'connection_info': '{"fake": "connection_info"}', 'delete_on_termination': False, 'boot_index': 0}) volume_driver_bdm = { 'mount_device': '/dev/sda1', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': False, 'disk_bus': 'scsi', 'device_type': 'disk', 'guest_format': 'ext4', 'boot_index': 0} volume_legacy_driver_bdm = { 'mount_device': '/dev/sda1', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': False} snapshot_bdm_dict = block_device.BlockDeviceDict( {'id': 4, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'delete_on_termination': True, 'volume_size': 3, 'disk_bus': 'scsi', 'device_type': 'disk', 'source_type': 'snapshot', 'destination_type': 'volume', 'connection_info': '{"fake": "connection_info"}', 'snapshot_id': 'fake-snapshot-id-1', 'volume_id': 'fake-volume-id-2', 'boot_index': -1}) snapshot_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True, 'disk_bus': 'scsi', 'device_type': 'disk', 'guest_format': None, 'boot_index': -1} snapshot_legacy_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True} image_bdm_dict = block_device.BlockDeviceDict( {'id': 5, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'delete_on_termination': True, 'volume_size': 1, 'disk_bus': 'scsi', 'device_type': 'disk', 'source_type': 'image', 'destination_type': 'volume', 'connection_info': '{"fake": "connection_info"}', 'image_id': 'fake-image-id-1', 'volume_id': 'fake-volume-id-2', 'boot_index': -1}) image_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True, 'disk_bus': 'scsi', 'device_type': 'disk', 
'guest_format': None, 'boot_index': -1} image_legacy_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True} blank_bdm_dict = block_device.BlockDeviceDict( {'id': 6, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'delete_on_termination': True, 'volume_size': 3, 'disk_bus': 'scsi', 'device_type': 'disk', 'source_type': 'blank', 'destination_type': 'volume', 'connection_info': '{"fake": "connection_info"}', 'snapshot_id': 'fake-snapshot-id-1', 'volume_id': 'fake-volume-id-2', 'boot_index': -1}) blank_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True, 'disk_bus': 'scsi', 'device_type': 'disk', 'guest_format': None, 'boot_index': -1} blank_legacy_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True} def setUp(self): super(TestDriverBlockDevice, self).setUp() self.volume_api = self.mox.CreateMock(cinder.API) self.virt_driver = self.mox.CreateMock(driver.ComputeDriver) self.context = context.RequestContext('fake_user', 'fake_project') # create bdm objects for testing self.swap_bdm = fake_block_device.fake_bdm_object( self.context, self.swap_bdm_dict) self.ephemeral_bdm = fake_block_device.fake_bdm_object( self.context, self.ephemeral_bdm_dict) self.volume_bdm = fake_block_device.fake_bdm_object( self.context, self.volume_bdm_dict) self.snapshot_bdm = fake_block_device.fake_bdm_object( self.context, self.snapshot_bdm_dict) self.image_bdm = fake_block_device.fake_bdm_object( self.context, self.image_bdm_dict) self.blank_bdm = fake_block_device.fake_bdm_object( self.context, self.blank_bdm_dict) def test_no_device_raises(self): for name, cls in self.driver_classes.items(): bdm = fake_block_device.fake_bdm_object( self.context, {'no_device': True}) self.assertRaises(driver_block_device._NotTransformable, cls, bdm) def _test_driver_device(self, name): 
db_bdm = getattr(self, "%s_bdm" % name) test_bdm = self.driver_classes[name](db_bdm) self.assertThat(test_bdm, matchers.DictMatches( getattr(self, "%s_driver_bdm" % name))) for k, v in six.iteritems(db_bdm): field_val = getattr(test_bdm._bdm_obj, k) if isinstance(field_val, bool): v = bool(v) self.assertEqual(field_val, v) self.assertThat(test_bdm.legacy(), matchers.DictMatches( getattr(self, "%s_legacy_driver_bdm" % name))) # Test passthru attributes for passthru in test_bdm._proxy_as_attr: self.assertEqual(getattr(test_bdm, passthru), getattr(test_bdm._bdm_obj, passthru)) # Make sure that all others raise _invalidType for other_name, cls in six.iteritems(self.driver_classes): if other_name == name: continue self.assertRaises(driver_block_device._InvalidType, cls, getattr(self, '%s_bdm' % name)) # Test the save method with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock: for fld, alias in six.iteritems(test_bdm._update_on_save): # We can't set fake values on enums, like device_type, # so skip those. 
if not isinstance(test_bdm._bdm_obj.fields[fld], fields.BaseEnumField): test_bdm[alias or fld] = 'fake_changed_value' test_bdm.save() for fld, alias in six.iteritems(test_bdm._update_on_save): self.assertEqual(test_bdm[alias or fld], getattr(test_bdm._bdm_obj, fld)) save_mock.assert_called_once_with() def check_save(): self.assertEqual(set([]), test_bdm._bdm_obj.obj_what_changed()) # Test that nothing is set on the object if there are no actual changes test_bdm._bdm_obj.obj_reset_changes() with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock: save_mock.side_effect = check_save test_bdm.save() def _test_driver_default_size(self, name): size = 'swap_size' if name == 'swap' else 'size' no_size_bdm = getattr(self, "%s_bdm_dict" % name).copy() no_size_bdm['volume_size'] = None driver_bdm = self.driver_classes[name]( fake_block_device.fake_bdm_object(self.context, no_size_bdm)) self.assertEqual(driver_bdm[size], 0) del no_size_bdm['volume_size'] driver_bdm = self.driver_classes[name]( fake_block_device.fake_bdm_object(self.context, no_size_bdm)) self.assertEqual(driver_bdm[size], 0) def test_driver_swap_block_device(self): self._test_driver_device("swap") def test_driver_swap_default_size(self): self._test_driver_default_size('swap') def test_driver_ephemeral_block_device(self): self._test_driver_device("ephemeral") def test_driver_ephemeral_default_size(self): self._test_driver_default_size('ephemeral') def test_driver_volume_block_device(self): self._test_driver_device("volume") test_bdm = self.driver_classes['volume']( self.volume_bdm) self.assertEqual(test_bdm['connection_info'], jsonutils.loads(test_bdm._bdm_obj.connection_info)) self.assertEqual(test_bdm._bdm_obj.id, 3) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1') self.assertEqual(test_bdm.volume_size, 8) def test_driver_snapshot_block_device(self): self._test_driver_device("snapshot") test_bdm = self.driver_classes['snapshot']( self.snapshot_bdm) self.assertEqual(test_bdm._bdm_obj.id, 4) 
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1') self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') self.assertEqual(test_bdm.volume_size, 3) def test_driver_image_block_device(self): self._test_driver_device('image') test_bdm = self.driver_classes['image']( self.image_bdm) self.assertEqual(test_bdm._bdm_obj.id, 5) self.assertEqual(test_bdm.image_id, 'fake-image-id-1') self.assertEqual(test_bdm.volume_size, 1) def test_driver_image_block_device_destination_local(self): self._test_driver_device('image') bdm = self.image_bdm_dict.copy() bdm['destination_type'] = 'local' self.assertRaises(driver_block_device._InvalidType, self.driver_classes['image'], fake_block_device.fake_bdm_object(self.context, bdm)) def test_driver_blank_block_device(self): self._test_driver_device('blank') test_bdm = self.driver_classes['blank']( self.blank_bdm) self.assertEqual(6, test_bdm._bdm_obj.id) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) self.assertEqual(3, test_bdm.volume_size) def _test_call_wait_func(self, delete_on_termination, delete_fail=False): test_bdm = self.driver_classes['volume'](self.volume_bdm) test_bdm['delete_on_termination'] = delete_on_termination with mock.patch.object(self.volume_api, 'delete') as vol_delete: wait_func = mock.MagicMock() mock_exception = exception.VolumeNotCreated(volume_id='fake-id', seconds=1, attempts=1, volume_status='error') wait_func.side_effect = mock_exception if delete_on_termination and delete_fail: vol_delete.side_effect = Exception() self.assertRaises(exception.VolumeNotCreated, test_bdm._call_wait_func, context=self.context, wait_func=wait_func, volume_api=self.volume_api, volume_id='fake-id') self.assertEqual(delete_on_termination, vol_delete.called) def test_call_wait_delete_volume(self): self._test_call_wait_func(True) def test_call_wait_delete_volume_fail(self): self._test_call_wait_func(True, True) def test_call_wait_no_delete_volume(self): self._test_call_wait_func(False) def _test_volume_attach(self, 
driver_bdm, bdm_dict, fake_volume, check_attach=True, fail_check_attach=False, driver_attach=False, fail_driver_attach=False, volume_attach=True, fail_volume_attach=False, access_mode='rw', availability_zone=None): elevated_context = self.context.elevated() self.stubs.Set(self.context, 'elevated', lambda: elevated_context) self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save') self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata') instance_detail = {'id': '123', 'uuid': 'fake_uuid', 'availability_zone': availability_zone} instance = fake_instance.fake_instance_obj(self.context, **instance_detail) connector = {'ip': 'fake_ip', 'host': 'fake_host'} connection_info = {'data': {'access_mode': access_mode}} expected_conn_info = {'data': {'access_mode': access_mode}, 'serial': fake_volume['id']} enc_data = {'fake': 'enc_data'} self.volume_api.get(self.context, fake_volume['id']).AndReturn(fake_volume) if check_attach: if not fail_check_attach: self.volume_api.check_attach(self.context, fake_volume, instance=instance).AndReturn(None) else: self.volume_api.check_attach(self.context, fake_volume, instance=instance).AndRaise( test.TestingException) driver_bdm._bdm_obj.save().AndReturn(None) return instance, expected_conn_info self.virt_driver.get_volume_connector(instance).AndReturn(connector) self.volume_api.initialize_connection( elevated_context, fake_volume['id'], connector).AndReturn(connection_info) if driver_attach: encryptors.get_encryption_metadata( elevated_context, self.volume_api, fake_volume['id'], connection_info).AndReturn(enc_data) if not fail_driver_attach: self.virt_driver.attach_volume( elevated_context, expected_conn_info, instance, bdm_dict['device_name'], disk_bus=bdm_dict['disk_bus'], device_type=bdm_dict['device_type'], encryption=enc_data).AndReturn(None) else: self.virt_driver.attach_volume( elevated_context, expected_conn_info, instance, bdm_dict['device_name'], disk_bus=bdm_dict['disk_bus'], device_type=bdm_dict['device_type'], 
encryption=enc_data).AndRaise(test.TestingException) self.volume_api.terminate_connection( elevated_context, fake_volume['id'], connector).AndReturn(None) driver_bdm._bdm_obj.save().AndReturn(None) return instance, expected_conn_info if volume_attach: driver_bdm._bdm_obj.save().AndReturn(None) if not fail_volume_attach: self.volume_api.attach(elevated_context, fake_volume['id'], 'fake_uuid', bdm_dict['device_name'], mode=access_mode).AndReturn(None) else: self.volume_api.attach(elevated_context, fake_volume['id'], 'fake_uuid', bdm_dict['device_name'], mode=access_mode).AndRaise( test.TestingException) if driver_attach: self.virt_driver.detach_volume( expected_conn_info, instance, bdm_dict['device_name'], encryption=enc_data).AndReturn(None) self.volume_api.terminate_connection( elevated_context, fake_volume['id'], connector).AndReturn(None) self.volume_api.detach(elevated_context, fake_volume['id']).AndReturn(None) driver_bdm._bdm_obj.save().AndReturn(None) return instance, expected_conn_info def test_volume_attach(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_volume_attach_ro(self): test_bdm = self.driver_classes['volume'](self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume, access_mode='ro') self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_volume_attach_update_size(self): test_bdm = self.driver_classes['volume'](self.volume_bdm) 
test_bdm.volume_size = None volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached', 'size': 42} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertEqual(expected_conn_info, test_bdm['connection_info']) self.assertEqual(42, test_bdm.volume_size) def test_volume_attach_check_attach_fails(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1'} instance, _ = self._test_volume_attach( test_bdm, self.volume_bdm, volume, fail_check_attach=True) self.mox.ReplayAll() self.assertRaises(test.TestingException, test_bdm.attach, self.context, instance, self.volume_api, self.virt_driver) def test_volume_no_volume_attach(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume, check_attach=False, driver_attach=False) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=False, do_driver_attach=False) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_volume_attach_no_check_driver_attach(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume, check_attach=False, driver_attach=True) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=False, do_driver_attach=True) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_volume_attach_driver_attach_fails(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1'} 
instance, _ = self._test_volume_attach( test_bdm, self.volume_bdm, volume, driver_attach=True, fail_driver_attach=True) self.mox.ReplayAll() self.assertRaises(test.TestingException, test_bdm.attach, self.context, instance, self.volume_api, self.virt_driver, do_driver_attach=True) def test_volume_attach_volume_attach_fails(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, _ = self._test_volume_attach( test_bdm, self.volume_bdm, volume, driver_attach=True, fail_volume_attach=True) self.mox.ReplayAll() self.assertRaises(test.TestingException, test_bdm.attach, self.context, instance, self.volume_api, self.virt_driver, do_driver_attach=True) def test_volume_attach_no_driver_attach_volume_attach_fails(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, _ = self._test_volume_attach( test_bdm, self.volume_bdm, volume, fail_volume_attach=True) self.mox.ReplayAll() self.assertRaises(test.TestingException, test_bdm.attach, self.context, instance, self.volume_api, self.virt_driver, do_driver_attach=False) def test_refresh_connection(self): test_bdm = self.driver_classes['snapshot']( self.snapshot_bdm) instance = {'id': 'fake_id', 'uuid': 'fake_uuid'} connector = {'ip': 'fake_ip', 'host': 'fake_host'} connection_info = {'data': {'multipath_id': 'fake_multipath_id'}} expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'}, 'serial': 'fake-volume-id-2'} self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save') self.virt_driver.get_volume_connector(instance).AndReturn(connector) self.volume_api.initialize_connection( self.context, test_bdm.volume_id, connector).AndReturn(connection_info) test_bdm._bdm_obj.save().AndReturn(None) self.mox.ReplayAll() test_bdm.refresh_connection_info(self.context, instance, self.volume_api, self.virt_driver) self.assertThat(test_bdm['connection_info'], 
matchers.DictMatches(expected_conn_info)) def test_snapshot_attach_no_volume(self): no_volume_snapshot = self.snapshot_bdm_dict.copy() no_volume_snapshot['volume_id'] = None test_bdm = self.driver_classes['snapshot']( fake_block_device.fake_bdm_object( self.context, no_volume_snapshot)) snapshot = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() self.volume_api.get_snapshot(self.context, 'fake-snapshot-id-1').AndReturn(snapshot) self.volume_api.create(self.context, 3, '', '', snapshot, availability_zone=None).AndReturn(volume) wait_func(self.context, 'fake-volume-id-2').AndReturn(None) instance, expected_conn_info = self._test_volume_attach( test_bdm, no_volume_snapshot, volume) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, wait_func) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') def test_snapshot_attach_no_volume_cinder_cross_az_attach_false(self): # Tests that the volume created from the snapshot has the same AZ as # the instance. 
self.flags(cross_az_attach=False, group='cinder') no_volume_snapshot = self.snapshot_bdm_dict.copy() no_volume_snapshot['volume_id'] = None test_bdm = self.driver_classes['snapshot']( fake_block_device.fake_bdm_object( self.context, no_volume_snapshot)) snapshot = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() self.volume_api.get_snapshot(self.context, 'fake-snapshot-id-1').AndReturn(snapshot) self.volume_api.create(self.context, 3, '', '', snapshot, availability_zone='test-az').AndReturn(volume) wait_func(self.context, 'fake-volume-id-2').AndReturn(None) instance, expected_conn_info = self._test_volume_attach( test_bdm, no_volume_snapshot, volume, availability_zone='test-az') self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, wait_func) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) def test_snapshot_attach_fail_volume(self): fail_volume_snapshot = self.snapshot_bdm_dict.copy() fail_volume_snapshot['volume_id'] = None test_bdm = self.driver_classes['snapshot']( fake_block_device.fake_bdm_object( self.context, fail_volume_snapshot)) snapshot = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': 'fake-uuid'}) with test.nested( mock.patch.object(self.volume_api, 'get_snapshot', return_value=snapshot), mock.patch.object(self.volume_api, 'create', return_value=volume), mock.patch.object(self.volume_api, 'delete'), ) as (vol_get_snap, vol_create, vol_delete): wait_func = mock.MagicMock() mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], seconds=1, attempts=1, volume_status='error') wait_func.side_effect = mock_exception self.assertRaises(exception.VolumeNotCreated, test_bdm.attach, context=self.context, instance=instance, 
volume_api=self.volume_api, virt_driver=self.virt_driver, wait_func=wait_func) vol_get_snap.assert_called_once_with( self.context, 'fake-snapshot-id-1') vol_create.assert_called_once_with( self.context, 3, '', '', snapshot, availability_zone=None) vol_delete.assert_called_once_with(self.context, volume['id']) def test_snapshot_attach_volume(self): test_bdm = self.driver_classes['snapshot']( self.snapshot_bdm) instance = {'id': 'fake_id', 'uuid': 'fake_uuid'} volume_class = self.driver_classes['volume'] self.mox.StubOutWithMock(volume_class, 'attach') # Make sure theses are not called self.mox.StubOutWithMock(self.volume_api, 'get_snapshot') self.mox.StubOutWithMock(self.volume_api, 'create') volume_class.attach(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=True ).AndReturn(None) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') def test_image_attach_no_volume(self): no_volume_image = self.image_bdm_dict.copy() no_volume_image['volume_id'] = None test_bdm = self.driver_classes['image']( fake_block_device.fake_bdm_object( self.context, no_volume_image)) image = {'id': 'fake-image-id-1'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() self.volume_api.create(self.context, 1, '', '', image_id=image['id'], availability_zone=None).AndReturn(volume) wait_func(self.context, 'fake-volume-id-2').AndReturn(None) instance, expected_conn_info = self._test_volume_attach( test_bdm, no_volume_image, volume) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, wait_func) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') def test_image_attach_no_volume_cinder_cross_az_attach_false(self): # Tests that the volume created from the image has the same AZ as the # instance. 
self.flags(cross_az_attach=False, group='cinder') no_volume_image = self.image_bdm_dict.copy() no_volume_image['volume_id'] = None test_bdm = self.driver_classes['image']( fake_block_device.fake_bdm_object( self.context, no_volume_image)) image = {'id': 'fake-image-id-1'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() self.volume_api.create(self.context, 1, '', '', image_id=image['id'], availability_zone='test-az').AndReturn(volume) wait_func(self.context, 'fake-volume-id-2').AndReturn(None) instance, expected_conn_info = self._test_volume_attach( test_bdm, no_volume_image, volume, availability_zone='test-az') self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, wait_func) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) def test_image_attach_fail_volume(self): fail_volume_image = self.image_bdm_dict.copy() fail_volume_image['volume_id'] = None test_bdm = self.driver_classes['image']( fake_block_device.fake_bdm_object( self.context, fail_volume_image)) image = {'id': 'fake-image-id-1'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': 'fake-uuid'}) with test.nested( mock.patch.object(self.volume_api, 'create', return_value=volume), mock.patch.object(self.volume_api, 'delete'), ) as (vol_create, vol_delete): wait_func = mock.MagicMock() mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], seconds=1, attempts=1, volume_status='error') wait_func.side_effect = mock_exception self.assertRaises(exception.VolumeNotCreated, test_bdm.attach, context=self.context, instance=instance, volume_api=self.volume_api, virt_driver=self.virt_driver, wait_func=wait_func) vol_create.assert_called_once_with( self.context, 1, '', '', image_id=image['id'], availability_zone=None) vol_delete.assert_called_once_with(self.context, volume['id']) def test_image_attach_volume(self): 
test_bdm = self.driver_classes['image']( self.image_bdm) instance = {'id': 'fake_id', 'uuid': 'fake_uuid'} volume_class = self.driver_classes['volume'] self.mox.StubOutWithMock(volume_class, 'attach') # Make sure theses are not called self.mox.StubOutWithMock(self.volume_api, 'get_snapshot') self.mox.StubOutWithMock(self.volume_api, 'create') volume_class.attach(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=True ).AndReturn(None) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') def test_blank_attach_fail_volume(self): no_blank_volume = self.blank_bdm_dict.copy() no_blank_volume['volume_id'] = None test_bdm = self.driver_classes['blank']( fake_block_device.fake_bdm_object( self.context, no_blank_volume)) instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': 'fake-uuid'}) volume = {'id': 'fake-volume-id-2', 'display_name': 'fake-uuid-blank-vol'} with test.nested( mock.patch.object(self.volume_api, 'create', return_value=volume), mock.patch.object(self.volume_api, 'delete'), ) as (vol_create, vol_delete): wait_func = mock.MagicMock() mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], seconds=1, attempts=1, volume_status='error') wait_func.side_effect = mock_exception self.assertRaises(exception.VolumeNotCreated, test_bdm.attach, context=self.context, instance=instance, volume_api=self.volume_api, virt_driver=self.virt_driver, wait_func=wait_func) vol_create.assert_called_once_with( self.context, test_bdm.volume_size, 'fake-uuid-blank-vol', '', availability_zone=None) vol_delete.assert_called_once_with( self.context, volume['id']) def test_blank_attach_volume(self): no_blank_volume = self.blank_bdm_dict.copy() no_blank_volume['volume_id'] = None test_bdm = self.driver_classes['blank']( fake_block_device.fake_bdm_object( self.context, no_blank_volume)) instance = 
fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': 'fake-uuid'}) volume_class = self.driver_classes['volume'] volume = {'id': 'fake-volume-id-2', 'display_name': 'fake-uuid-blank-vol'} with test.nested( mock.patch.object(self.volume_api, 'create', return_value=volume), mock.patch.object(volume_class, 'attach') ) as (vol_create, vol_attach): test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) vol_create.assert_called_once_with( self.context, test_bdm.volume_size, 'fake-uuid-blank-vol', '', availability_zone=None) vol_attach.assert_called_once_with(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=True) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) def test_blank_attach_volume_cinder_cross_az_attach_false(self): # Tests that the blank volume created is in the same availability zone # as the instance. self.flags(cross_az_attach=False, group='cinder') no_blank_volume = self.blank_bdm_dict.copy() no_blank_volume['volume_id'] = None test_bdm = self.driver_classes['blank']( fake_block_device.fake_bdm_object( self.context, no_blank_volume)) updates = {'uuid': 'fake-uuid', 'availability_zone': 'test-az'} instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **updates) volume_class = self.driver_classes['volume'] volume = {'id': 'fake-volume-id-2', 'display_name': 'fake-uuid-blank-vol'} with mock.patch.object(self.volume_api, 'create', return_value=volume) as vol_create: with mock.patch.object(volume_class, 'attach') as vol_attach: test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) vol_create.assert_called_once_with( self.context, test_bdm.volume_size, 'fake-uuid-blank-vol', '', availability_zone='test-az') vol_attach.assert_called_once_with(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=True) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) def test_convert_block_devices(self): bdms = objects.BlockDeviceMappingList( 
objects=[self.volume_bdm, self.ephemeral_bdm]) converted = driver_block_device._convert_block_devices( self.driver_classes['volume'], bdms) self.assertEqual(converted, [self.volume_driver_bdm]) def test_convert_all_volumes(self): converted = driver_block_device.convert_all_volumes() self.assertEqual([], converted) converted = driver_block_device.convert_all_volumes( self.volume_bdm, self.ephemeral_bdm, self.image_bdm, self.blank_bdm, self.snapshot_bdm) self.assertEqual(converted, [self.volume_driver_bdm, self.image_driver_bdm, self.blank_driver_bdm, self.snapshot_driver_bdm]) def test_convert_volume(self): self.assertIsNone(driver_block_device.convert_volume(self.swap_bdm)) self.assertEqual(self.volume_driver_bdm, driver_block_device.convert_volume(self.volume_bdm)) self.assertEqual(self.snapshot_driver_bdm, driver_block_device.convert_volume(self.snapshot_bdm)) def test_legacy_block_devices(self): test_snapshot = self.driver_classes['snapshot']( self.snapshot_bdm) block_device_mapping = [test_snapshot, test_snapshot] legacy_bdm = driver_block_device.legacy_block_devices( block_device_mapping) self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm, self.snapshot_legacy_driver_bdm]) # Test that the ephemerals work as expected test_ephemerals = [self.driver_classes['ephemeral']( self.ephemeral_bdm) for _ in range(2)] expected = [self.ephemeral_legacy_driver_bdm.copy() for _ in range(2)] expected[0]['virtual_name'] = 'ephemeral0' expected[0]['num'] = 0 expected[1]['virtual_name'] = 'ephemeral1' expected[1]['num'] = 1 legacy_ephemerals = driver_block_device.legacy_block_devices( test_ephemerals) self.assertEqual(expected, legacy_ephemerals) def test_get_swap(self): swap = [self.swap_driver_bdm] legacy_swap = [self.swap_legacy_driver_bdm] no_swap = [self.volume_driver_bdm] self.assertEqual(swap[0], driver_block_device.get_swap(swap)) self.assertEqual(legacy_swap[0], driver_block_device.get_swap(legacy_swap)) 
self.assertIsNone(driver_block_device.get_swap(no_swap)) self.assertIsNone(driver_block_device.get_swap([])) def test_is_implemented(self): for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm, self.ephemeral_bdm, self.snapshot_bdm): self.assertTrue(driver_block_device.is_implemented(bdm)) local_image = self.image_bdm_dict.copy() local_image['destination_type'] = 'local' self.assertFalse(driver_block_device.is_implemented( fake_block_device.fake_bdm_object(self.context, local_image))) def test_is_block_device_mapping(self): test_swap = self.driver_classes['swap'](self.swap_bdm) test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm) test_image = self.driver_classes['image'](self.image_bdm) test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm) test_volume = self.driver_classes['volume'](self.volume_bdm) test_blank = self.driver_classes['blank'](self.blank_bdm) for bdm in (test_image, test_snapshot, test_volume, test_blank): self.assertTrue(driver_block_device.is_block_device_mapping( bdm._bdm_obj)) for bdm in (test_swap, test_ephemeral): self.assertFalse(driver_block_device.is_block_device_mapping( bdm._bdm_obj)) def test_get_volume_create_az_cinder_cross_az_attach_true(self): # Tests that we get None back if cinder.cross_az_attach=True even if # the instance has an AZ assigned. Note that since cross_az_attach # defaults to True we don't need to set a flag explicitly for the test. 
updates = {'availability_zone': 'test-az'} instance = fake_instance.fake_instance_obj(self.context, **updates) self.assertIsNone( driver_block_device._get_volume_create_az_value(instance)) nova-13.0.0/nova/tests/unit/virt/disk/0000775000567000056710000000000012701410205020671 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/virt/disk/mount/0000775000567000056710000000000012701410205022033 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/virt/disk/mount/__init__.py0000664000567000056710000000000012701407773024152 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/virt/disk/mount/test_api.py0000664000567000056710000001674712701407773024254 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import test from nova.virt.disk.mount import api from nova.virt.disk.mount import block from nova.virt.disk.mount import loop from nova.virt.disk.mount import nbd from nova.virt.image import model as imgmodel PARTITION = 77 ORIG_DEVICE = "/dev/null" AUTOMAP_PARTITION = "/dev/nullp77" MAP_PARTITION = "/dev/mapper/nullp77" class MountTestCase(test.NoDBTestCase): def setUp(self): super(MountTestCase, self).setUp() def _test_map_dev(self, partition): mount = api.Mount(mock.sentinel.image, mock.sentinel.mount_dir) mount.device = ORIG_DEVICE mount.partition = partition mount.map_dev() return mount @mock.patch('nova.utils.trycmd') def _test_map_dev_with_trycmd(self, partition, trycmd): trycmd.return_value = [None, None] mount = self._test_map_dev(partition) self.assertEqual(1, trycmd.call_count) # don't care about args return mount def _exists_effect(self, data): def exists_effect(filename): try: v = data[filename] if isinstance(v, list): if len(v) > 0: return v.pop(0) self.fail("Out of items for: %s" % filename) return v except KeyError: self.fail("Unexpected call with: %s" % filename) return exists_effect def _check_calls(self, exists, filenames): self.assertEqual([mock.call(x) for x in filenames], exists.call_args_list) @mock.patch('os.path.exists') def test_map_dev_partition_search(self, exists): exists.side_effect = self._exists_effect({ ORIG_DEVICE: True}) mount = self._test_map_dev(-1) self._check_calls(exists, [ORIG_DEVICE]) self.assertNotEqual("", mount.error) self.assertFalse(mount.mapped) @mock.patch('os.path.exists') def test_map_dev_good(self, exists): exists.side_effect = self._exists_effect({ ORIG_DEVICE: True, AUTOMAP_PARTITION: False, MAP_PARTITION: [False, True]}) mount = self._test_map_dev_with_trycmd(PARTITION) self._check_calls(exists, [ORIG_DEVICE, AUTOMAP_PARTITION, MAP_PARTITION, MAP_PARTITION]) self.assertEqual("", mount.error) self.assertTrue(mount.mapped) @mock.patch('os.path.exists') def test_map_dev_error(self, exists): 
exists.side_effect = self._exists_effect({ ORIG_DEVICE: True, AUTOMAP_PARTITION: False, MAP_PARTITION: False}) mount = self._test_map_dev_with_trycmd(PARTITION) self._check_calls(exists, [ORIG_DEVICE, AUTOMAP_PARTITION, MAP_PARTITION, MAP_PARTITION]) self.assertNotEqual("", mount.error) self.assertFalse(mount.mapped) @mock.patch('os.path.exists') def test_map_dev_automap(self, exists): exists.side_effect = self._exists_effect({ ORIG_DEVICE: True, AUTOMAP_PARTITION: True}) mount = self._test_map_dev(PARTITION) self._check_calls(exists, [ORIG_DEVICE, AUTOMAP_PARTITION, AUTOMAP_PARTITION]) self.assertEqual(AUTOMAP_PARTITION, mount.mapped_device) self.assertTrue(mount.automapped) self.assertTrue(mount.mapped) @mock.patch('os.path.exists') def test_map_dev_else(self, exists): exists.side_effect = self._exists_effect({ ORIG_DEVICE: True, AUTOMAP_PARTITION: True}) mount = self._test_map_dev(None) self._check_calls(exists, [ORIG_DEVICE]) self.assertEqual(ORIG_DEVICE, mount.mapped_device) self.assertFalse(mount.automapped) self.assertTrue(mount.mapped) def test_instance_for_format_raw(self): image = imgmodel.LocalFileImage("/some/file.raw", imgmodel.FORMAT_RAW) mount_dir = '/mount/dir' partition = -1 inst = api.Mount.instance_for_format(image, mount_dir, partition) self.assertIsInstance(inst, loop.LoopMount) def test_instance_for_format_qcow2(self): image = imgmodel.LocalFileImage("/some/file.qcows", imgmodel.FORMAT_QCOW2) mount_dir = '/mount/dir' partition = -1 inst = api.Mount.instance_for_format(image, mount_dir, partition) self.assertIsInstance(inst, nbd.NbdMount) def test_instance_for_format_block(self): image = imgmodel.LocalBlockImage( "/dev/mapper/instances--instance-0000001_disk",) mount_dir = '/mount/dir' partition = -1 inst = api.Mount.instance_for_format(image, mount_dir, partition) self.assertIsInstance(inst, block.BlockMount) def test_instance_for_device_loop(self): image = mock.MagicMock() mount_dir = '/mount/dir' partition = -1 device = '/dev/loop0' inst = 
api.Mount.instance_for_device(image, mount_dir, partition, device) self.assertIsInstance(inst, loop.LoopMount) def test_instance_for_device_loop_partition(self): image = mock.MagicMock() mount_dir = '/mount/dir' partition = 1 device = '/dev/mapper/loop0p1' inst = api.Mount.instance_for_device(image, mount_dir, partition, device) self.assertIsInstance(inst, loop.LoopMount) def test_instance_for_device_nbd(self): image = mock.MagicMock() mount_dir = '/mount/dir' partition = -1 device = '/dev/nbd0' inst = api.Mount.instance_for_device(image, mount_dir, partition, device) self.assertIsInstance(inst, nbd.NbdMount) def test_instance_for_device_nbd_partition(self): image = mock.MagicMock() mount_dir = '/mount/dir' partition = 1 device = '/dev/mapper/nbd0p1' inst = api.Mount.instance_for_device(image, mount_dir, partition, device) self.assertIsInstance(inst, nbd.NbdMount) def test_instance_for_device_block(self): image = mock.MagicMock() mount_dir = '/mount/dir' partition = -1 device = '/dev/mapper/instances--instance-0000001_disk' inst = api.Mount.instance_for_device(image, mount_dir, partition, device) self.assertIsInstance(inst, block.BlockMount) def test_instance_for_device_block_partiton(self,): image = mock.MagicMock() mount_dir = '/mount/dir' partition = 1 device = '/dev/mapper/instances--instance-0000001_diskp1' inst = api.Mount.instance_for_device(image, mount_dir, partition, device) self.assertIsInstance(inst, block.BlockMount) nova-13.0.0/nova/tests/unit/virt/disk/mount/test_block.py0000664000567000056710000000272612701407773024565 0ustar jenkinsjenkins00000000000000# Copyright 2015 Rackspace Hosting, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures from nova import test from nova.virt.disk.mount import block from nova.virt.image import model as imgmodel class LoopTestCase(test.NoDBTestCase): def setUp(self): super(LoopTestCase, self).setUp() device_path = '/dev/mapper/instances--instance-0000001_disk' self.image = imgmodel.LocalBlockImage(device_path) def test_get_dev(self): tempdir = self.useFixture(fixtures.TempDir()).path b = block.BlockMount(self.image, tempdir) self.assertTrue(b.get_dev()) self.assertTrue(b.linked) self.assertEqual(self.image.path, b.device) def test_unget_dev(self): tempdir = self.useFixture(fixtures.TempDir()).path b = block.BlockMount(self.image, tempdir) b.unget_dev() self.assertIsNone(b.device) self.assertFalse(b.linked) nova-13.0.0/nova/tests/unit/virt/disk/mount/test_nbd.py0000664000567000056710000003123612701407773024234 0ustar jenkinsjenkins00000000000000# Copyright 2012 Michael Still and Canonical Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for the qemu-nbd based image mount helper.

These tests never touch a real /dev/nbd* device: os.path.exists,
os.listdir, random.shuffle and the nova command-execution helpers are all
monkey-patched so device discovery and allocation run entirely in-process.
"""

import os
import tempfile
import time

import eventlet
import fixtures

from nova import test
from nova.virt.disk.mount import nbd
from nova.virt.image import model as imgmodel

# Keep references to the real implementations so the fakes below can fall
# through to them for any path outside the emulated /sys/block tree.
ORIG_EXISTS = os.path.exists
ORIG_LISTDIR = os.listdir


def _fake_exists_no_users(path):
    # Every nbd device node exists, but no device has a pid file -- i.e.
    # all devices are free for allocation.
    if path.startswith('/sys/block/nbd'):
        if path.endswith('pid'):
            return False
        return True
    return ORIG_EXISTS(path)


def _fake_listdir_nbd_devices(path):
    # Pretend the kernel exposes exactly two nbd devices.
    if path.startswith('/sys/block'):
        return ['nbd0', 'nbd1']
    return ORIG_LISTDIR(path)


def _fake_exists_all_used(path):
    # Every nbd path exists, including pid files -- all devices busy.
    if path.startswith('/sys/block/nbd'):
        return True
    return ORIG_EXISTS(path)


def _fake_detect_nbd_devices_none(self):
    # Simulate the nbd kernel module exposing no devices at all.
    return []


def _fake_detect_nbd_devices(self):
    return ['nbd0', 'nbd1']


def _fake_noop(*args, **kwargs):
    return


class NbdTestCase(test.NoDBTestCase):
    """Exercise NbdMount allocation, setup and teardown paths."""

    def setUp(self):
        super(NbdTestCase, self).setUp()
        self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
                       _fake_detect_nbd_devices)
        self.useFixture(fixtures.MonkeyPatch('os.listdir',
                                             _fake_listdir_nbd_devices))
        self.file = imgmodel.LocalFileImage("/some/file.qcow2",
                                            imgmodel.FORMAT_QCOW2)

    def test_nbd_no_devices(self):
        """Allocation returns None when no nbd devices are detected."""
        tempdir = self.useFixture(fixtures.TempDir()).path
        self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
                       _fake_detect_nbd_devices_none)
        n = nbd.NbdMount(self.file, tempdir)
        self.assertIsNone(n._allocate_nbd())

    def test_nbd_no_free_devices(self):
        """Allocation returns None when every device already has a pid."""
        tempdir = self.useFixture(fixtures.TempDir()).path
        n = nbd.NbdMount(self.file, tempdir)
        self.useFixture(fixtures.MonkeyPatch('os.path.exists',
                                             _fake_exists_all_used))
        self.assertIsNone(n._allocate_nbd())

    def test_nbd_not_loaded(self):
        tempdir = self.useFixture(fixtures.TempDir()).path
        n = nbd.NbdMount(self.file, tempdir)

        # Fake out os.path.exists: no /sys/block/nbd* entries at all means
        # the nbd kernel module is not loaded.
        def fake_exists(path):
            if path.startswith('/sys/block/nbd'):
                return False
            return ORIG_EXISTS(path)
        self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists))

        # This should fail, as we don't have the module "loaded"
        # TODO(mikal): work out how to force english as the gettext language
        # so that the error check always passes
        self.assertIsNone(n._allocate_nbd())
        self.assertEqual('nbd unavailable: module not loaded', n.error)

    def test_nbd_allocation(self):
        tempdir = self.useFixture(fixtures.TempDir()).path
        n = nbd.NbdMount(self.file, tempdir)
        self.useFixture(fixtures.MonkeyPatch('os.path.exists',
                                             _fake_exists_no_users))
        # shuffle is disabled so the allocation order is deterministic.
        self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))

        # Allocate a nbd device
        self.assertEqual('/dev/nbd0', n._allocate_nbd())

    def test_nbd_allocation_one_in_use(self):
        tempdir = self.useFixture(fixtures.TempDir()).path
        n = nbd.NbdMount(self.file, tempdir)
        self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))

        # Fake out os.path.exists: nbd0 has a pid file (in use), nbd1 is free.
        def fake_exists(path):
            if path.startswith('/sys/block/nbd'):
                if path == '/sys/block/nbd0/pid':
                    return True
                if path.endswith('pid'):
                    return False
                return True
            return ORIG_EXISTS(path)
        self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists))

        # Allocate a nbd device, should not be the in use one
        # TODO(mikal): Note that there is a leak here, as the in use nbd
        # device is removed from the list, but not returned so it will never
        # be re-added. I will fix this in a later patch.
        self.assertEqual('/dev/nbd1', n._allocate_nbd())

    def test_inner_get_dev_no_devices(self):
        tempdir = self.useFixture(fixtures.TempDir()).path
        self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
                       _fake_detect_nbd_devices_none)
        n = nbd.NbdMount(self.file, tempdir)
        self.assertFalse(n._inner_get_dev())

    def test_inner_get_dev_qemu_fails(self):
        tempdir = self.useFixture(fixtures.TempDir()).path
        n = nbd.NbdMount(self.file, tempdir)
        self.useFixture(fixtures.MonkeyPatch('os.path.exists',
                                             _fake_exists_no_users))

        # We have a trycmd that always fails
        def fake_trycmd(*args, **kwargs):
            return '', 'broken'
        self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
                                             fake_trycmd))

        # Error logged, no device consumed
        self.assertFalse(n._inner_get_dev())
        self.assertTrue(n.error.startswith('qemu-nbd error'))

    def test_inner_get_dev_qemu_timeout(self):
        tempdir = self.useFixture(fixtures.TempDir()).path
        n = nbd.NbdMount(self.file, tempdir)
        self.useFixture(fixtures.MonkeyPatch('os.path.exists',
                                             _fake_exists_no_users))

        # We have a trycmd that always succeeds, but the pid file for the
        # chosen device never appears, so the wait loop must time out.
        def fake_trycmd(*args, **kwargs):
            return '', ''
        self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
                                             fake_trycmd))
        self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))

        # Error logged, no device consumed
        self.assertFalse(n._inner_get_dev())
        self.assertTrue(n.error.endswith('did not show up'))

    def fake_exists_one(self, path):
        # We need the pid file for the device which is allocated to exist,
        # but only once it is allocated to us
        if path.startswith('/sys/block/nbd'):
            if path == '/sys/block/nbd1/pid':
                return False
            if path.endswith('pid'):
                return False
            return True
        return ORIG_EXISTS(path)

    def fake_trycmd_creates_pid(self, *args, **kwargs):
        # A trycmd stand-in that emulates qemu-nbd succeeding: from this
        # point on, nbd0's pid file exists (the device is attached).
        def fake_exists_two(path):
            if path.startswith('/sys/block/nbd'):
                if path == '/sys/block/nbd0/pid':
                    return True
                if path.endswith('pid'):
                    return False
                return True
            return ORIG_EXISTS(path)
        self.useFixture(fixtures.MonkeyPatch('os.path.exists',
                                             fake_exists_two))
        return '', ''

    def test_inner_get_dev_works(self):
        tempdir = self.useFixture(fixtures.TempDir()).path
        n = nbd.NbdMount(self.file, tempdir)
        self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
        self.useFixture(fixtures.MonkeyPatch('os.path.exists',
                                             self.fake_exists_one))
        self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
                                             self.fake_trycmd_creates_pid))
        self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
                                             _fake_noop))

        # No error logged, device consumed
        self.assertTrue(n._inner_get_dev())
        self.assertTrue(n.linked)
        self.assertEqual('', n.error)
        self.assertEqual('/dev/nbd0', n.device)

        # Free
        n.unget_dev()
        self.assertFalse(n.linked)
        self.assertEqual('', n.error)
        self.assertIsNone(n.device)

    def test_unget_dev_simple(self):
        # This test is just checking we don't get an exception when we unget
        # something we don't have
        tempdir = self.useFixture(fixtures.TempDir()).path
        n = nbd.NbdMount(self.file, tempdir)
        self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
                                             _fake_noop))
        n.unget_dev()

    def test_get_dev(self):
        """The public get_dev() happy path: allocate, attach, then free."""
        tempdir = self.useFixture(fixtures.TempDir()).path
        n = nbd.NbdMount(self.file, tempdir)
        self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
        self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
                                             _fake_noop))
        self.useFixture(fixtures.MonkeyPatch('os.path.exists',
                                             self.fake_exists_one))
        self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
                                             self.fake_trycmd_creates_pid))

        # No error logged, device consumed
        self.assertTrue(n.get_dev())
        self.assertTrue(n.linked)
        self.assertEqual('', n.error)
        self.assertEqual('/dev/nbd0', n.device)

        # Free
        n.unget_dev()
        self.assertFalse(n.linked)
        self.assertEqual('', n.error)
        self.assertIsNone(n.device)

    def test_get_dev_timeout(self):
        # Always fail to get a device
        def fake_get_dev_fails(self):
            return False
        self.stubs.Set(nbd.NbdMount, '_inner_get_dev', fake_get_dev_fails)

        tempdir = self.useFixture(fixtures.TempDir()).path
        n = nbd.NbdMount(self.file, tempdir)
        self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
        self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
        self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
                                             _fake_noop))
        self.useFixture(fixtures.MonkeyPatch('os.path.exists',
                                             self.fake_exists_one))
        self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
                                             self.fake_trycmd_creates_pid))
        # A negative deadline guarantees the retry loop expires immediately.
        self.useFixture(fixtures.MonkeyPatch(('nova.virt.disk.mount.api.'
                                              'MAX_DEVICE_WAIT'), -10))

        # No error logged, device consumed
        self.assertFalse(n.get_dev())

    def test_do_mount_need_to_specify_fs_type(self):
        # NOTE(mikal): Bug 1094373 saw a regression where we failed to
        # communicate a failed mount properly.
        def fake_trycmd(*args, **kwargs):
            return '', 'broken'
        self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
                                             fake_trycmd))

        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        tempdir = self.useFixture(fixtures.TempDir()).path
        # NOTE(review): this passes a plain path string where the other
        # tests pass an imgmodel image; do_mount() never inspects it here,
        # but confirm before reusing this pattern.
        mount = nbd.NbdMount(imgfile.name, tempdir)

        def fake_returns_true(*args, **kwargs):
            return True
        mount.get_dev = fake_returns_true
        mount.map_dev = fake_returns_true

        self.assertFalse(mount.do_mount())

    def test_device_creation_race(self):
        # Make sure that even if two threads create instances at the same
        # time they cannot choose the same nbd number (see bug 1207422)

        tempdir = self.useFixture(fixtures.TempDir()).path
        free_devices = _fake_detect_nbd_devices(None)[:]
        chosen_devices = []

        def fake_find_unused(self):
            return os.path.join('/dev', free_devices[-1])

        def delay_and_remove_device(*args, **kwargs):
            # Ensure that context switch happens before the device is marked
            # as used. This will cause a failure without nbd-allocation-lock
            # in place.
            time.sleep(0.1)

            # We always choose the top device in find_unused - remove it
            # now.
            free_devices.pop()

            return '', ''

        def pid_exists(pidfile):
            return pidfile not in [os.path.join('/sys/block', dev, 'pid')
                                   for dev in free_devices]

        self.stubs.Set(nbd.NbdMount, '_allocate_nbd', fake_find_unused)
        self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
                                             delay_and_remove_device))
        self.useFixture(fixtures.MonkeyPatch('os.path.exists', pid_exists))

        def get_a_device():
            n = nbd.NbdMount(self.file, tempdir)
            n.get_dev()
            chosen_devices.append(n.device)

        thread1 = eventlet.spawn(get_a_device)
        thread2 = eventlet.spawn(get_a_device)
        thread1.wait()
        thread2.wait()

        self.assertEqual(2, len(chosen_devices))
        self.assertNotEqual(chosen_devices[0], chosen_devices[1])
import fixtures

from nova import test
from nova.virt.disk.mount import loop
from nova.virt.image import model as imgmodel


def _fake_noop(*args, **kwargs):
    """Stand-in that accepts and ignores any call."""
    return


def _fake_trycmd_losetup_works(*args, **kwargs):
    """Emulate a losetup run that binds /dev/loop0 successfully."""
    return '/dev/loop0', ''


def _fake_trycmd_losetup_fails(*args, **kwargs):
    """Emulate a losetup run that fails with an error on stderr."""
    return '', 'doh'


class LoopTestCase(test.NoDBTestCase):
    """Tests for the loopback-device mount helper."""

    def setUp(self):
        super(LoopTestCase, self).setUp()
        self.file = imgmodel.LocalFileImage("/some/file.qcow2",
                                            imgmodel.FORMAT_QCOW2)

    def _make_mount(self):
        # Build a LoopMount rooted in a throwaway temp directory.
        mount_dir = self.useFixture(fixtures.TempDir()).path
        return loop.LoopMount(self.file, mount_dir)

    def test_get_dev(self):
        """A successful losetup yields a linked device; freeing resets it."""
        mount = self._make_mount()
        self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
                                             _fake_trycmd_losetup_works))
        self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
                                             _fake_noop))

        # No error logged, device consumed
        self.assertTrue(mount.get_dev())
        self.assertTrue(mount.linked)
        self.assertEqual('', mount.error)
        self.assertEqual('/dev/loop0', mount.device)

        # Free
        mount.unget_dev()
        self.assertFalse(mount.linked)
        self.assertEqual('', mount.error)
        self.assertIsNone(mount.device)

    def test_inner_get_dev_fails(self):
        """A losetup failure leaves the mount unlinked with an error set."""
        mount = self._make_mount()
        self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
                                             _fake_trycmd_losetup_fails))

        # No device consumed, error recorded
        self.assertFalse(mount._inner_get_dev())
        self.assertFalse(mount.linked)
        self.assertNotEqual('', mount.error)
        self.assertIsNone(mount.device)

        # Freeing a device we never acquired must be a no-op
        mount.unget_dev()
        self.assertFalse(mount.linked)
        self.assertIsNone(mount.device)

    def test_get_dev_timeout(self):
        """get_dev() gives up once the retry deadline is exhausted."""
        mount = self._make_mount()
        self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
        self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
                                             _fake_trycmd_losetup_fails))
        # Negative deadline: the retry loop expires on the first check.
        self.useFixture(fixtures.MonkeyPatch(('nova.virt.disk.mount.api.'
                                              'MAX_DEVICE_WAIT'), -10))

        def fake_get_dev_fails():
            # Always fail to get a device
            return False

        mount._inner_get_dev = fake_get_dev_fails

        # Fail to get a device
        self.assertFalse(mount.get_dev())

    def test_unget_dev(self):
        """Releasing a device we never acquired must not raise."""
        mount = self._make_mount()
        self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
                                             _fake_noop))
        mount.unget_dev()
"""Tests for the disk API helpers (resize / extend / filesystem probing).

Most tests use mox record/replay on nova.utils.execute, so the exact
commands and argument values asserted here mirror what the production code
is expected to run.
"""

import tempfile

import fixtures
import mock
from oslo_concurrency import processutils

from nova import test
from nova import utils
from nova.virt.disk import api
from nova.virt.disk.mount import api as mount
from nova.virt.image import model as imgmodel


class FakeMount(object):
    """Minimal stand-in for a mount.Mount with a settable device."""

    # Device path the fake pretends to have attached; None until set.
    device = None

    @staticmethod
    def instance_for_format(image, mountdir, partition):
        return FakeMount()

    def get_dev(self):
        pass

    def unget_dev(self):
        pass


class APITestCase(test.NoDBTestCase):

    def test_can_resize_need_fs_type_specified(self):
        # NOTE(mikal): Bug 1094373 saw a regression where we failed to
        # treat a failure to mount as a failure to be able to resize the
        # filesystem
        def _fake_get_disk_size(path):
            return 10
        self.useFixture(fixtures.MonkeyPatch(
                'nova.virt.disk.api.get_disk_size', _fake_get_disk_size))

        def fake_trycmd(*args, **kwargs):
            return '', 'broken'
        self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
                                             fake_trycmd))

        def fake_returns_true(*args, **kwargs):
            return True

        def fake_returns_nothing(*args, **kwargs):
            return ''
        self.useFixture(fixtures.MonkeyPatch(
                'nova.virt.disk.mount.nbd.NbdMount.get_dev',
                fake_returns_true))
        self.useFixture(fixtures.MonkeyPatch(
                'nova.virt.disk.mount.nbd.NbdMount.map_dev',
                fake_returns_true))
        self.useFixture(fixtures.MonkeyPatch(
                'nova.virt.disk.vfs.localfs.VFSLocalFS.get_image_fs',
                fake_returns_nothing))

        # Force the use of localfs, which is what was used during the
        # failure reported in the bug
        def fake_import_fails(*args, **kwargs):
            raise Exception('Failed')
        # NOTE(review): 'oslo_utils.import_module' sets an attribute on the
        # oslo_utils package itself; verify this is the attribute the code
        # under test resolves (vs oslo_utils.importutils.import_module).
        self.useFixture(fixtures.MonkeyPatch(
                'oslo_utils.import_module', fake_import_fails))

        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        image = imgmodel.LocalFileImage(imgfile.name, imgmodel.FORMAT_QCOW2)
        self.assertFalse(api.is_image_extendable(image))

    def test_is_image_extendable_raw(self):
        # NOTE(review): the NamedTemporaryFile object (not imgfile.name) is
        # passed through as the image path here; mox only needs the recorded
        # argument to compare equal, but confirm this is intentional.
        imgfile = tempfile.NamedTemporaryFile()

        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('e2label', imgfile)

        self.mox.ReplayAll()
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_RAW)

        self.addCleanup(imgfile.close)
        self.assertTrue(api.is_image_extendable(image))

    def test_resize2fs_success(self):
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)

        self.mox.StubOutWithMock(utils, 'execute')
        # resize2fs must be preceded by a forced filesystem check.
        utils.execute('e2fsck',
                      '-fp',
                      imgfile,
                      check_exit_code=[0, 1, 2],
                      run_as_root=False)
        utils.execute('resize2fs',
                      imgfile,
                      check_exit_code=False,
                      run_as_root=False)

        self.mox.ReplayAll()
        api.resize2fs(imgfile)

    def test_resize2fs_e2fsck_fails(self):
        # A failing e2fsck must abort quietly: no resize2fs call is recorded
        # and no exception propagates out of api.resize2fs().
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)

        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('e2fsck',
                      '-fp',
                      imgfile,
                      check_exit_code=[0, 1, 2],
                      run_as_root=False).AndRaise(
                          processutils.ProcessExecutionError("fs error"))
        self.mox.ReplayAll()
        api.resize2fs(imgfile)

    def test_extend_qcow_success(self):
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        imgsize = 10
        device = "/dev/sdh"
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_QCOW2)

        # With resize_fs_using_block_device the qcow2 must be attached to a
        # block device and resize2fs run against that device as root.
        self.flags(resize_fs_using_block_device=True)
        mounter = FakeMount.instance_for_format(image, None, None)
        mounter.device = device

        self.mox.StubOutWithMock(api, 'can_resize_image')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(api, 'is_image_extendable')
        self.mox.StubOutWithMock(mounter, 'get_dev')
        self.mox.StubOutWithMock(mounter, 'unget_dev')
        self.mox.StubOutWithMock(api, 'resize2fs')
        self.mox.StubOutWithMock(mount.Mount, 'instance_for_format',
                                 use_mock_anything=True)

        api.can_resize_image(imgfile, imgsize).AndReturn(True)
        utils.execute('qemu-img', 'resize', imgfile, imgsize)
        api.is_image_extendable(image).AndReturn(True)
        mount.Mount.instance_for_format(image, None, None).AndReturn(mounter)
        mounter.get_dev().AndReturn(True)
        api.resize2fs(mounter.device, run_as_root=True, check_exit_code=[0])
        mounter.unget_dev()

        self.mox.ReplayAll()
        api.extend(image, imgsize)

    @mock.patch.object(api, 'can_resize_image', return_value=True)
    @mock.patch.object(api, 'is_image_extendable')
    @mock.patch.object(utils, 'execute')
    def test_extend_qcow_no_resize(self, mock_execute, mock_extendable,
                                   mock_can_resize_image):
        # With resize_fs_using_block_device disabled only the qemu-img
        # resize runs; extendability is never even checked for qcow2.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        imgsize = 10
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_QCOW2)

        self.flags(resize_fs_using_block_device=False)

        api.extend(image, imgsize)

        mock_can_resize_image.assert_called_once_with(imgfile, imgsize)
        mock_execute.assert_called_once_with('qemu-img', 'resize', imgfile,
                                             imgsize)
        self.assertFalse(mock_extendable.called)

    def test_extend_raw_success(self):
        # Raw images are resized and resize2fs'd in place, unprivileged.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        imgsize = 10
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_RAW)

        self.mox.StubOutWithMock(api, 'can_resize_image')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(api, 'resize2fs')

        api.can_resize_image(imgfile, imgsize).AndReturn(True)
        utils.execute('qemu-img', 'resize', imgfile, imgsize)
        utils.execute('e2label', image.path)
        api.resize2fs(imgfile, run_as_root=False, check_exit_code=[0])

        self.mox.ReplayAll()
        api.extend(image, imgsize)

    # Expected extension values: first 7 chars of the hash of each mkfs
    # format string.
    HASH_VFAT = utils.get_hash_str(api.FS_FORMAT_VFAT)[:7]
    HASH_EXT4 = utils.get_hash_str(api.FS_FORMAT_EXT4)[:7]
    HASH_NTFS = utils.get_hash_str(api.FS_FORMAT_NTFS)[:7]

    def test_get_file_extension_for_os_type(self):
        self.assertEqual(self.HASH_VFAT,
                         api.get_file_extension_for_os_type(None, None))
        self.assertEqual(self.HASH_EXT4,
                         api.get_file_extension_for_os_type('linux', None))
        self.assertEqual(self.HASH_NTFS,
                         api.get_file_extension_for_os_type(
                             'windows', None))

    def test_get_file_extension_for_os_type_with_overrides(self):
        # A custom default mkfs command changes the extension for every OS.
        with mock.patch('nova.virt.disk.api._DEFAULT_MKFS_COMMAND',
                        'custom mkfs command'):
            self.assertEqual("a74d253",
                             api.get_file_extension_for_os_type(
                                 'linux', None))
            self.assertEqual("a74d253",
                             api.get_file_extension_for_os_type(
                                 'windows', None))
            self.assertEqual("a74d253",
                             api.get_file_extension_for_os_type('osx',
                                                                None))

        # A per-OS override only changes the extension for that OS.
        with mock.patch.dict(api._MKFS_COMMAND,
                             {'osx': 'custom mkfs command'}, clear=True):
            self.assertEqual(self.HASH_VFAT,
                             api.get_file_extension_for_os_type(None, None))
            self.assertEqual(self.HASH_EXT4,
                             api.get_file_extension_for_os_type('linux',
                                                                None))
            self.assertEqual(self.HASH_NTFS,
                             api.get_file_extension_for_os_type(
                                 'windows', None))
            self.assertEqual("a74d253",
                             api.get_file_extension_for_os_type(
                                 'osx', None))
import tempfile

import mock
from oslo_concurrency import processutils

from nova import exception
from nova import test
from nova.tests.unit import utils as tests_utils
import nova.utils
from nova.virt.disk.mount import nbd
from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt.image import model as imgmodel

# Module-level state shared between fake_execute and the test cases below:
# directories "created", fake file metadata, and every command recorded.
dirs = []
files = {}
commands = []


def _new_file_entry():
    # Default metadata for a path the fakes have not seen before.
    return {
        "content": "Hello World",
        "gid": 100,
        "uid": 100,
        "mode": 0o700,
    }


def fake_execute(*args, **kwargs):
    """Replacement for processutils.execute used by VFSLocalFS tests.

    Records every invocation in ``commands`` and emulates the handful of
    shell tools the code under test runs (readlink, mkdir, chown, chgrp,
    chmod, cat, tee) against the in-memory ``dirs``/``files`` state.
    """
    commands.append({"args": args, "kwargs": kwargs})
    tool = args[0]

    if tool == "readlink":
        flag, target = args[1], args[2]
        if flag == "-nm":
            # Canonicalisation succeeds only for the well-known test paths.
            if target in ["/scratch/dir/some/file",
                          "/scratch/dir/some/dir",
                          "/scratch/dir/other/dir",
                          "/scratch/dir/other/file"]:
                return target, ""
        elif flag == "-e":
            # Existence check resolves against the fake filesystem.
            if target in files:
                return target, ""
        return "", "No such file"

    elif tool == "mkdir":
        dirs.append(args[2])

    elif tool == "chown":
        owner, path = args[1], args[2]
        if path not in files:
            raise Exception("No such file: " + path)
        user, colon, group = owner.partition(':')
        if not colon:
            group = None
        if user:
            # Only two fake accounts exist: fred (105) and everyone else.
            files[path]["uid"] = 105 if user == "fred" else 110
        if group:
            files[path]["gid"] = 500 if group == "users" else 600

    elif tool == "chgrp":
        group, path = args[1], args[2]
        if path not in files:
            raise Exception("No such file: " + path)
        files[path]["gid"] = 500 if group == "users" else 600

    elif tool == "chmod":
        mode, path = args[1], args[2]
        if path not in files:
            raise Exception("No such file: " + path)
        files[path]["mode"] = int(mode, 8)

    elif tool == "cat":
        path = args[1]
        if path not in files:
            files[path] = _new_file_entry()
        return files[path]["content"], ""

    elif tool == "tee":
        appending = args[1] == "-a"
        path = args[2] if appending else args[1]
        if path not in files:
            files[path] = _new_file_entry()
        if appending:
            files[path]["content"] += kwargs["process_input"]
        else:
            files[path]["content"] = kwargs["process_input"]
class VirtDiskVFSLocalFSTestPaths(test.NoDBTestCase):
    """Path canonicalisation tests that run the real readlink binary."""

    def setUp(self):
        super(VirtDiskVFSLocalFSTestPaths, self).setUp()

        real_execute = processutils.execute

        # Strip run_as_root so the real commands work unprivileged.
        def nonroot_execute(*cmd_parts, **kwargs):
            kwargs.pop('run_as_root', None)
            return real_execute(*cmd_parts, **kwargs)

        self.stubs.Set(processutils, 'execute', nonroot_execute)
        self.rawfile = imgmodel.LocalFileImage("/dummy.img",
                                               imgmodel.FORMAT_RAW)

    def test_check_safe_path(self):
        if not tests_utils.coreutils_readlink_available():
            self.skipTest("coreutils readlink(1) unavailable")
        vfs = vfsimpl.VFSLocalFS(self.rawfile)
        vfs.imgdir = "/foo"
        ret = vfs._canonical_path('etc/something.conf')
        self.assertEqual(ret, '/foo/etc/something.conf')

    def test_check_unsafe_path(self):
        # A path escaping the image root via ../ must be rejected.
        if not tests_utils.coreutils_readlink_available():
            self.skipTest("coreutils readlink(1) unavailable")
        vfs = vfsimpl.VFSLocalFS(self.rawfile)
        vfs.imgdir = "/foo"
        self.assertRaises(exception.Invalid,
                          vfs._canonical_path,
                          'etc/../../../something.conf')


class VirtDiskVFSLocalFSTest(test.NoDBTestCase):
    """VFSLocalFS tests driven by the module-level fake_execute shims.

    Each test resets the module globals (dirs/files/commands), performs a
    VFS operation, then asserts both the resulting fake-filesystem state
    and the exact command sequence that was executed.
    """

    def setUp(self):
        super(VirtDiskVFSLocalFSTest, self).setUp()
        self.qcowfile = imgmodel.LocalFileImage("/dummy.qcow2",
                                                imgmodel.FORMAT_QCOW2)
        self.rawfile = imgmodel.LocalFileImage("/dummy.img",
                                               imgmodel.FORMAT_RAW)

    def test_makepath(self):
        global dirs, commands
        dirs = []
        commands = []
        self.stubs.Set(processutils, 'execute', fake_execute)

        vfs = vfsimpl.VFSLocalFS(self.qcowfile)
        vfs.imgdir = "/scratch/dir"
        vfs.make_path("/some/dir")
        vfs.make_path("/other/dir")

        # NOTE(review): stray trailing comma makes this statement a 1-tuple
        # expression; harmless, but worth cleaning up.
        self.assertEqual(dirs,
                         ["/scratch/dir/some/dir",
                          "/scratch/dir/other/dir"]),

        root_helper = nova.utils.get_root_helper()
        self.assertEqual(commands,
                         [{'args': ('readlink', '-nm',
                                    '/scratch/dir/some/dir'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('mkdir', '-p',
                                    '/scratch/dir/some/dir'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('readlink', '-nm',
                                    '/scratch/dir/other/dir'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('mkdir', '-p',
                                    '/scratch/dir/other/dir'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}}])

    def test_append_file(self):
        global files, commands
        files = {}
        commands = []
        self.stubs.Set(processutils, 'execute', fake_execute)

        vfs = vfsimpl.VFSLocalFS(self.qcowfile)
        vfs.imgdir = "/scratch/dir"
        vfs.append_file("/some/file", " Goodbye")

        self.assertIn("/scratch/dir/some/file", files)
        # fake_execute seeds unknown files with "Hello World" content.
        self.assertEqual(files["/scratch/dir/some/file"]["content"],
                         "Hello World Goodbye")

        root_helper = nova.utils.get_root_helper()
        self.assertEqual(commands,
                         [{'args': ('readlink', '-nm',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('tee', '-a',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'process_input': ' Goodbye',
                                      'run_as_root': True,
                                      'root_helper': root_helper}}])

    def test_replace_file(self):
        global files, commands
        files = {}
        commands = []
        self.stubs.Set(processutils, 'execute', fake_execute)

        vfs = vfsimpl.VFSLocalFS(self.qcowfile)
        vfs.imgdir = "/scratch/dir"
        vfs.replace_file("/some/file", "Goodbye")

        self.assertIn("/scratch/dir/some/file", files)
        self.assertEqual(files["/scratch/dir/some/file"]["content"],
                         "Goodbye")

        root_helper = nova.utils.get_root_helper()
        self.assertEqual(commands,
                         [{'args': ('readlink', '-nm',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('tee', '/scratch/dir/some/file'),
                           'kwargs': {'process_input': 'Goodbye',
                                      'run_as_root': True,
                                      'root_helper': root_helper}}])

    def test_read_file(self):
        global commands, files
        files = {}
        commands = []
        self.stubs.Set(processutils, 'execute', fake_execute)

        vfs = vfsimpl.VFSLocalFS(self.qcowfile)
        vfs.imgdir = "/scratch/dir"
        self.assertEqual(vfs.read_file("/some/file"), "Hello World")

        root_helper = nova.utils.get_root_helper()
        self.assertEqual(commands,
                         [{'args': ('readlink', '-nm',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('cat', '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}}])

    def test_has_file(self):
        global commands, files
        files = {}
        commands = []
        self.stubs.Set(processutils, 'execute', fake_execute)

        vfs = vfsimpl.VFSLocalFS(self.qcowfile)
        vfs.imgdir = "/scratch/dir"
        # The read creates the file in the fake state, so the first
        # has_file sees it and the second (never-read path) does not.
        vfs.read_file("/some/file")

        self.assertTrue(vfs.has_file("/some/file"))
        self.assertFalse(vfs.has_file("/other/file"))

        root_helper = nova.utils.get_root_helper()
        self.assertEqual(commands,
                         [{'args': ('readlink', '-nm',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('cat', '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('readlink', '-nm',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('readlink', '-e',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('readlink', '-nm',
                                    '/scratch/dir/other/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('readlink', '-e',
                                    '/scratch/dir/other/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          ])

    def test_set_permissions(self):
        global commands, files
        commands = []
        files = {}
        self.stubs.Set(processutils, 'execute', fake_execute)

        vfs = vfsimpl.VFSLocalFS(self.qcowfile)
        vfs.imgdir = "/scratch/dir"
        vfs.read_file("/some/file")

        vfs.set_permissions("/some/file", 0o777)
        self.assertEqual(files["/scratch/dir/some/file"]["mode"], 0o777)

        root_helper = nova.utils.get_root_helper()
        self.assertEqual(commands,
                         [{'args': ('readlink', '-nm',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('cat', '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('readlink', '-nm',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('chmod', '777',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}}])

    def test_set_ownership(self):
        global commands, files
        commands = []
        files = {}
        self.stubs.Set(processutils, 'execute', fake_execute)

        vfs = vfsimpl.VFSLocalFS(self.qcowfile)
        vfs.imgdir = "/scratch/dir"
        vfs.read_file("/some/file")

        # Fake uid/gid mapping (see fake_execute): fred=105, joe=110,
        # users=500, admins=600; new files start as 100:100.
        self.assertEqual(files["/scratch/dir/some/file"]["uid"], 100)
        self.assertEqual(files["/scratch/dir/some/file"]["gid"], 100)

        vfs.set_ownership("/some/file", "fred", None)
        self.assertEqual(files["/scratch/dir/some/file"]["uid"], 105)
        self.assertEqual(files["/scratch/dir/some/file"]["gid"], 100)

        vfs.set_ownership("/some/file", None, "users")
        self.assertEqual(files["/scratch/dir/some/file"]["uid"], 105)
        self.assertEqual(files["/scratch/dir/some/file"]["gid"], 500)

        vfs.set_ownership("/some/file", "joe", "admins")
        self.assertEqual(files["/scratch/dir/some/file"]["uid"], 110)
        self.assertEqual(files["/scratch/dir/some/file"]["gid"], 600)

        root_helper = nova.utils.get_root_helper()
        self.assertEqual(commands,
                         [{'args': ('readlink', '-nm',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('cat', '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('readlink', '-nm',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('chown', 'fred',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('readlink', '-nm',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('chgrp', 'users',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('readlink', '-nm',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}},
                          {'args': ('chown', 'joe:admins',
                                    '/scratch/dir/some/file'),
                           'kwargs': {'run_as_root': True,
                                      'root_helper': root_helper}}])

    @mock.patch.object(nova.utils, 'execute')
    def test_get_format_fs(self, execute):
        vfs = vfsimpl.VFSLocalFS(self.rawfile)
        vfs.setup = mock.MagicMock()
        vfs.teardown = mock.MagicMock()

        # Emulate the setup/get_dev/teardown lifecycle so get_image_fs()
        # sees an attached device at /dev/xyz.
        def fake_setup():
            vfs.mount = mock.MagicMock()
            vfs.mount.device = None
            vfs.mount.get_dev.side_effect = fake_get_dev

        def fake_teardown():
            vfs.mount.device = None

        def fake_get_dev():
            vfs.mount.device = '/dev/xyz'
            return True

        vfs.setup.side_effect = fake_setup
        vfs.teardown.side_effect = fake_teardown
        execute.return_value = ('ext3\n', '')

        vfs.setup()
        self.assertEqual('ext3', vfs.get_image_fs())
        vfs.teardown()
        vfs.mount.get_dev.assert_called_once_with()
        execute.assert_called_once_with('blkid', '-o',
                                        'value', '-s',
                                        'TYPE', '/dev/xyz',
                                        run_as_root=True,
                                        check_exit_code=[0, 2])

    @mock.patch.object(tempfile, 'mkdtemp')
    @mock.patch.object(nbd, 'NbdMount')
    def test_setup_mount(self, NbdMount, mkdtemp):
        vfs = vfsimpl.VFSLocalFS(self.qcowfile)

        mounter = mock.MagicMock()
        mkdtemp.return_value = 'tmp/'
        NbdMount.return_value = mounter

        vfs.setup()

        self.assertTrue(mkdtemp.called)
        NbdMount.assert_called_once_with(self.qcowfile, "tmp/", None)
        mounter.do_mount.assert_called_once_with()

    @mock.patch.object(tempfile, 'mkdtemp')
    @mock.patch.object(nbd, 'NbdMount')
    def test_setup_mount_false(self, NbdMount, mkdtemp):
        # setup(mount=False) prepares the mount object without mounting.
        vfs = vfsimpl.VFSLocalFS(self.qcowfile)

        mounter = mock.MagicMock()
        mkdtemp.return_value = 'tmp/'
        NbdMount.return_value = mounter

        vfs.setup(mount=False)

        self.assertTrue(mkdtemp.called)
        NbdMount.assert_called_once_with(self.qcowfile, "tmp/", None)
        self.assertFalse(mounter.do_mount.called)
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Fake counterparts of the libguestfs event constants referenced by the
# nova.virt.disk.vfs.guestfs driver under test.
EVENT_APPLIANCE = 0x1
EVENT_LIBRARY = 0x2
EVENT_WARNING = 0x3
EVENT_TRACE = 0x4


class GuestFS(object):
    """In-memory stand-in for a libguestfs ``GuestFS`` handle.

    Records drives, mounts and file operations in plain Python data
    structures so that the guestfs-based VFS driver can be unit tested
    without a real libguestfs appliance.
    """

    # Toggled by tests to simulate older libguestfs python bindings that
    # reject these constructor keywords with TypeError.
    SUPPORT_CLOSE_ON_EXIT = True
    SUPPORT_RETURN_DICT = True

    def __init__(self, **kwargs):
        if not self.SUPPORT_CLOSE_ON_EXIT and 'close_on_exit' in kwargs:
            raise TypeError('close_on_exit')
        if not self.SUPPORT_RETURN_DICT and 'python_return_dict' in kwargs:
            raise TypeError('python_return_dict')
        self._python_return_dict = kwargs.get('python_return_dict', False)

        self.kwargs = kwargs
        self.drives = []
        self.running = False
        self.closed = False
        self.mounts = []
        self.files = {}
        self.auginit = False
        self.root_mounted = False
        self.backend_settings = None
        self.trace_enabled = False
        self.verbose_enabled = False
        self.event_callback = None

    def launch(self):
        self.running = True

    def shutdown(self):
        self.running = False
        self.mounts = []
        self.drives = []

    def set_backend_settings(self, settings):
        self.backend_settings = settings

    def close(self):
        self.closed = True

    def add_drive_opts(self, file, *args, **kwargs):
        """Record a drive; the magic path /some/fail/file always fails."""
        if file == "/some/fail/file":
            # NOTE: format the message eagerly -- RuntimeError does not
            # interpolate printf-style arguments itself, so passing
            # (fmt, arg) as two args would produce an unformatted tuple.
            raise RuntimeError("%s: No such file or directory" % file)
        self.drives.append((file, kwargs))

    def add_drive(self, file, format=None, *args, **kwargs):
        # BUG FIX: forward the caller's format instead of hard-coding
        # format=None, which silently discarded the requested disk format.
        self.add_drive_opts(file, format=format, *args, **kwargs)

    def inspect_os(self):
        return ["/dev/guestvgf/lv_root"]

    def inspect_get_mountpoints(self, dev):
        """Return fixed mountpoints; a dict only when the caller opted in
        via python_return_dict and this fake supports it."""
        mountpoints = [("/home", "/dev/mapper/guestvgf-lv_home"),
                       ("/", "/dev/mapper/guestvgf-lv_root"),
                       ("/boot", "/dev/vda1")]

        if self.SUPPORT_RETURN_DICT and self._python_return_dict:
            return dict(mountpoints)
        else:
            return mountpoints

    def mount_options(self, options, device, mntpoint):
        # "/" must be mounted before any other mountpoint, mirroring the
        # real appliance's behaviour.
        if mntpoint == "/":
            self.root_mounted = True
        else:
            if not self.root_mounted:
                raise RuntimeError(
                    "mount: %s: No such file or directory" % mntpoint)
        self.mounts.append((options, device, mntpoint))

    def mkdir_p(self, path):
        if path not in self.files:
            self.files[path] = {
                "isdir": True,
                "gid": 100,
                "uid": 100,
                "mode": 0o700
            }

    def read_file(self, path):
        # Reading a missing file auto-creates it with canned content so
        # tests have a predictable baseline.
        if path not in self.files:
            self.files[path] = {
                "isdir": False,
                "content": "Hello World",
                "gid": 100,
                "uid": 100,
                "mode": 0o700
            }

        return self.files[path]["content"]

    def write(self, path, content):
        if path not in self.files:
            self.files[path] = {
                "isdir": False,
                "content": "Hello World",
                "gid": 100,
                "uid": 100,
                "mode": 0o700
            }

        self.files[path]["content"] = content

    def write_append(self, path, content):
        if path not in self.files:
            self.files[path] = {
                "isdir": False,
                "content": "Hello World",
                "gid": 100,
                "uid": 100,
                "mode": 0o700
            }

        self.files[path]["content"] = self.files[path]["content"] + content

    def stat(self, path):
        if path not in self.files:
            raise RuntimeError("No such file: " + path)

        return self.files[path]["mode"]

    def chown(self, uid, gid, path):
        """Set owner/group; -1 means "leave unchanged", as with chown(2)."""
        if path not in self.files:
            raise RuntimeError("No such file: " + path)

        if uid != -1:
            self.files[path]["uid"] = uid
        if gid != -1:
            self.files[path]["gid"] = gid

    def chmod(self, mode, path):
        if path not in self.files:
            raise RuntimeError("No such file: " + path)

        self.files[path]["mode"] = mode

    def aug_init(self, root, flags):
        self.auginit = True

    def aug_close(self):
        self.auginit = False

    def aug_get(self, cfgpath):
        """Return canned Augeas lookups for the uids/gids the tests use."""
        if not self.auginit:
            # BUG FIX: spelling ("Augeas") and eager message formatting.
            raise RuntimeError("Augeas not initialized")

        if cfgpath == "/files/etc/passwd/root/uid":
            return 0
        elif cfgpath == "/files/etc/passwd/fred/uid":
            return 105
        elif cfgpath == "/files/etc/passwd/joe/uid":
            return 110
        elif cfgpath == "/files/etc/group/root/gid":
            return 0
        elif cfgpath == "/files/etc/group/users/gid":
            return 500
        elif cfgpath == "/files/etc/group/admins/gid":
            return 600
        raise RuntimeError("Unknown path %s" % cfgpath)

    def set_trace(self, enabled):
        self.trace_enabled = enabled

    def set_verbose(self, enabled):
        self.verbose_enabled = enabled

    def set_event_callback(self, func, events):
        self.event_callback = (func, events)

    def vfs_type(self, dev):
        return 'ext3'
vfsimpl.force_tcg(False) else: self.assertIsNone(vfs.handle.backend_settings) self.assertTrue(vfs.handle.running) self.assertEqual(drives, vfs.handle.drives) self.assertEqual(3, len(vfs.handle.mounts)) self.assertEqual("/dev/mapper/guestvgf-lv_root", vfs.handle.mounts[0][1]) self.assertEqual("/dev/vda1", vfs.handle.mounts[1][1]) self.assertEqual("/dev/mapper/guestvgf-lv_home", vfs.handle.mounts[2][1]) self.assertEqual("/", vfs.handle.mounts[0][2]) self.assertEqual("/boot", vfs.handle.mounts[1][2]) self.assertEqual("/home", vfs.handle.mounts[2][2]) handle = vfs.handle vfs.teardown() self.assertIsNone(vfs.handle) self.assertFalse(handle.running) self.assertTrue(handle.closed) self.assertEqual(0, len(handle.mounts)) def test_appliance_setup_inspect_auto(self): drives = [("/dummy.qcow2", {"format": "qcow2"})] self._do_test_appliance_setup_inspect(self.qcowfile, drives, False) def test_appliance_setup_inspect_tcg(self): drives = [("/dummy.qcow2", {"format": "qcow2"})] self._do_test_appliance_setup_inspect(self.qcowfile, drives, True) def test_appliance_setup_inspect_raw(self): drives = [("/dummy.img", {"format": "raw"})] self._do_test_appliance_setup_inspect(self.rawfile, drives, True) def test_appliance_setup_inspect_lvm(self): drives = [("/dev/volgroup/myvol", {"format": "raw"})] self._do_test_appliance_setup_inspect(self.lvmfile, drives, True) def test_appliance_setup_inspect_rbd(self): drives = [("mypool/myvol", {"format": "raw", "protocol": "rbd", "username": "cthulu", "secret": "arrrrrgh", "server": ["server1:123", "server2:123"]})] self._do_test_appliance_setup_inspect(self.rbdfile, drives, True) def test_appliance_setup_inspect_no_root_raises(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile, partition=-1) # call setup to init the handle so we can stub it vfs.setup() self.assertIsNone(vfs.handle.backend_settings) def fake_inspect_os(): return [] self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os) self.assertRaises(exception.NovaException, vfs.setup_os_inspect) 
def test_appliance_setup_inspect_multi_boots_raises(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile, partition=-1) # call setup to init the handle so we can stub it vfs.setup() self.assertIsNone(vfs.handle.backend_settings) def fake_inspect_os(): return ['fake1', 'fake2'] self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os) self.assertRaises(exception.NovaException, vfs.setup_os_inspect) def test_appliance_setup_static_nopart(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile, partition=None) vfs.setup() self.assertIsNone(vfs.handle.backend_settings) self.assertTrue(vfs.handle.running) self.assertEqual(1, len(vfs.handle.mounts)) self.assertEqual("/dev/sda", vfs.handle.mounts[0][1]) self.assertEqual("/", vfs.handle.mounts[0][2]) handle = vfs.handle vfs.teardown() self.assertIsNone(vfs.handle) self.assertFalse(handle.running) self.assertTrue(handle.closed) self.assertEqual(0, len(handle.mounts)) def test_appliance_setup_static_part(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile, partition=2) vfs.setup() self.assertIsNone(vfs.handle.backend_settings) self.assertTrue(vfs.handle.running) self.assertEqual(1, len(vfs.handle.mounts)) self.assertEqual("/dev/sda2", vfs.handle.mounts[0][1]) self.assertEqual("/", vfs.handle.mounts[0][2]) handle = vfs.handle vfs.teardown() self.assertIsNone(vfs.handle) self.assertFalse(handle.running) self.assertTrue(handle.closed) self.assertEqual(0, len(handle.mounts)) def test_makepath(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() vfs.make_path("/some/dir") vfs.make_path("/other/dir") self.assertIn("/some/dir", vfs.handle.files) self.assertIn("/other/dir", vfs.handle.files) self.assertTrue(vfs.handle.files["/some/dir"]["isdir"]) self.assertTrue(vfs.handle.files["/other/dir"]["isdir"]) vfs.teardown() def test_append_file(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() vfs.append_file("/some/file", " Goodbye") self.assertIn("/some/file", vfs.handle.files) self.assertEqual("Hello World Goodbye", 
vfs.handle.files["/some/file"]["content"]) vfs.teardown() def test_replace_file(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() vfs.replace_file("/some/file", "Goodbye") self.assertIn("/some/file", vfs.handle.files) self.assertEqual("Goodbye", vfs.handle.files["/some/file"]["content"]) vfs.teardown() def test_read_file(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertEqual("Hello World", vfs.read_file("/some/file")) vfs.teardown() def test_has_file(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() vfs.read_file("/some/file") self.assertTrue(vfs.has_file("/some/file")) self.assertFalse(vfs.has_file("/other/file")) vfs.teardown() def test_set_permissions(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() vfs.read_file("/some/file") self.assertEqual(0o700, vfs.handle.files["/some/file"]["mode"]) vfs.set_permissions("/some/file", 0o7777) self.assertEqual(0o7777, vfs.handle.files["/some/file"]["mode"]) vfs.teardown() def test_set_ownership(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() vfs.read_file("/some/file") self.assertEqual(100, vfs.handle.files["/some/file"]["uid"]) self.assertEqual(100, vfs.handle.files["/some/file"]["gid"]) vfs.set_ownership("/some/file", "fred", None) self.assertEqual(105, vfs.handle.files["/some/file"]["uid"]) self.assertEqual(100, vfs.handle.files["/some/file"]["gid"]) vfs.set_ownership("/some/file", None, "users") self.assertEqual(105, vfs.handle.files["/some/file"]["uid"]) self.assertEqual(500, vfs.handle.files["/some/file"]["gid"]) vfs.set_ownership("/some/file", "joe", "admins") self.assertEqual(110, vfs.handle.files["/some/file"]["uid"]) self.assertEqual(600, vfs.handle.files["/some/file"]["gid"]) vfs.teardown() def test_close_on_error(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertFalse(vfs.handle.kwargs['close_on_exit']) vfs.teardown() self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_CLOSE_ON_EXIT', False) vfs = vfsimpl.VFSGuestFS(self.qcowfile) 
vfs.setup() self.assertNotIn('close_on_exit', vfs.handle.kwargs) vfs.teardown() def test_python_return_dict(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertFalse(vfs.handle.kwargs['python_return_dict']) vfs.teardown() self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_RETURN_DICT', False) vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertNotIn('python_return_dict', vfs.handle.kwargs) vfs.teardown() def test_setup_debug_disable(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertFalse(vfs.handle.trace_enabled) self.assertFalse(vfs.handle.verbose_enabled) self.assertIsNone(vfs.handle.event_callback) def test_setup_debug_enabled(self): self.flags(debug=True, group='guestfs') vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertTrue(vfs.handle.trace_enabled) self.assertTrue(vfs.handle.verbose_enabled) self.assertIsNotNone(vfs.handle.event_callback) def test_get_format_fs(self): vfs = vfsimpl.VFSGuestFS(self.rawfile) vfs.setup() self.assertIsNotNone(vfs.handle) self.assertEqual('ext3', vfs.get_image_fs()) vfs.teardown() @mock.patch.object(vfsimpl.VFSGuestFS, 'setup_os') def test_setup_mount(self, setup_os): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertTrue(setup_os.called) @mock.patch.object(vfsimpl.VFSGuestFS, 'setup_os') def test_setup_mount_false(self, setup_os): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup(mount=False) self.assertFalse(setup_os.called) nova-13.0.0/nova/tests/unit/virt/disk/test_inject.py0000664000567000056710000002704612701407773023607 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import OrderedDict import os import fixtures from nova import exception from nova import test from nova.tests.unit.virt.disk.vfs import fakeguestfs from nova.virt.disk import api as diskapi from nova.virt.disk.vfs import guestfs as vfsguestfs from nova.virt.image import model as imgmodel class VirtDiskTest(test.NoDBTestCase): def setUp(self): super(VirtDiskTest, self).setUp() self.useFixture( fixtures.MonkeyPatch('nova.virt.disk.vfs.guestfs.guestfs', fakeguestfs)) self.file = imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_QCOW2) def test_inject_data(self): self.assertTrue(diskapi.inject_data( imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_QCOW2))) self.assertTrue(diskapi.inject_data( imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW), mandatory=('files',))) self.assertTrue(diskapi.inject_data( imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW), key="mysshkey", mandatory=('key',))) os_name = os.name os.name = 'nt' # Cause password injection to fail self.assertRaises(exception.NovaException, diskapi.inject_data, imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW), admin_password="p", mandatory=('admin_password',)) self.assertFalse(diskapi.inject_data( imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW), admin_password="p")) os.name = os_name self.assertFalse(diskapi.inject_data( imgmodel.LocalFileImage("/some/fail/file", imgmodel.FORMAT_RAW), key="mysshkey")) def test_inject_data_key(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() diskapi._inject_key_into_fs("mysshkey", vfs) 
self.assertIn("/root/.ssh", vfs.handle.files) self.assertEqual(vfs.handle.files["/root/.ssh"], {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700}) self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files) self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"], {'isdir': False, 'content': "Hello World\n# The following ssh " + "key was injected by Nova\nmysshkey\n", 'gid': 100, 'uid': 100, 'mode': 0o600}) vfs.teardown() def test_inject_data_key_with_selinux(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() vfs.make_path("etc/selinux") vfs.make_path("etc/rc.d") diskapi._inject_key_into_fs("mysshkey", vfs) self.assertIn("/etc/rc.d/rc.local", vfs.handle.files) self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"], {'isdir': False, 'content': "Hello World#!/bin/sh\n# Added by " + "Nova to ensure injected ssh keys " + "have the right context\nrestorecon " + "-RF root/.ssh 2>/dev/null || :\n", 'gid': 100, 'uid': 100, 'mode': 0o700}) self.assertIn("/root/.ssh", vfs.handle.files) self.assertEqual(vfs.handle.files["/root/.ssh"], {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700}) self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files) self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"], {'isdir': False, 'content': "Hello World\n# The following ssh " + "key was injected by Nova\nmysshkey\n", 'gid': 100, 'uid': 100, 'mode': 0o600}) vfs.teardown() def test_inject_data_key_with_selinux_append_with_newline(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() vfs.replace_file("/etc/rc.d/rc.local", "#!/bin/sh\necho done") vfs.make_path("etc/selinux") vfs.make_path("etc/rc.d") diskapi._inject_key_into_fs("mysshkey", vfs) self.assertIn("/etc/rc.d/rc.local", vfs.handle.files) self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"], {'isdir': False, 'content': "#!/bin/sh\necho done\n# Added " "by Nova to ensure injected ssh keys have " "the right context\nrestorecon -RF " "root/.ssh 2>/dev/null || :\n", 'gid': 100, 'uid': 100, 'mode': 
0o700}) vfs.teardown() def test_inject_net(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() diskapi._inject_net_into_fs("mynetconfig", vfs) self.assertIn("/etc/network/interfaces", vfs.handle.files) self.assertEqual(vfs.handle.files["/etc/network/interfaces"], {'content': 'mynetconfig', 'gid': 100, 'isdir': False, 'mode': 0o700, 'uid': 100}) vfs.teardown() def test_inject_metadata(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() metadata = {"foo": "bar", "eek": "wizz"} metadata = OrderedDict(sorted(metadata.items())) diskapi._inject_metadata_into_fs(metadata, vfs) self.assertIn("/meta.js", vfs.handle.files) self.assertEqual({'content': '{"eek": "wizz", ' + '"foo": "bar"}', 'gid': 100, 'isdir': False, 'mode': 0o700, 'uid': 100}, vfs.handle.files["/meta.js"]) vfs.teardown() def test_inject_admin_password(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() def fake_salt(): return "1234567890abcdef" self.stubs.Set(diskapi, '_generate_salt', fake_salt) vfs.handle.write("/etc/shadow", "root:$1$12345678$xxxxx:14917:0:99999:7:::\n" + "bin:*:14495:0:99999:7:::\n" + "daemon:*:14495:0:99999:7:::\n") vfs.handle.write("/etc/passwd", "root:x:0:0:root:/root:/bin/bash\n" + "bin:x:1:1:bin:/bin:/sbin/nologin\n" + "daemon:x:2:2:daemon:/sbin:/sbin/nologin\n") diskapi._inject_admin_password_into_fs("123456", vfs) self.assertEqual(vfs.handle.files["/etc/passwd"], {'content': "root:x:0:0:root:/root:/bin/bash\n" + "bin:x:1:1:bin:/bin:/sbin/nologin\n" + "daemon:x:2:2:daemon:/sbin:" + "/sbin/nologin\n", 'gid': 100, 'isdir': False, 'mode': 0o700, 'uid': 100}) shadow = vfs.handle.files["/etc/shadow"] # if the encrypted password is only 13 characters long, then # nova.virt.disk.api:_set_password fell back to DES. 
if len(shadow['content']) == 91: self.assertEqual(shadow, {'content': "root:12tir.zIbWQ3c" + ":14917:0:99999:7:::\n" + "bin:*:14495:0:99999:7:::\n" + "daemon:*:14495:0:99999:7:::\n", 'gid': 100, 'isdir': False, 'mode': 0o700, 'uid': 100}) else: self.assertEqual(shadow, {'content': "root:$1$12345678$a4ge4d5iJ5vw" + "vbFS88TEN0:14917:0:99999:7:::\n" + "bin:*:14495:0:99999:7:::\n" + "daemon:*:14495:0:99999:7:::\n", 'gid': 100, 'isdir': False, 'mode': 0o700, 'uid': 100}) vfs.teardown() def test_inject_files_into_fs(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() diskapi._inject_files_into_fs([("/path/to/not/exists/file", "inject-file-contents")], vfs) self.assertIn("/path/to/not/exists", vfs.handle.files) shadow_dir = vfs.handle.files["/path/to/not/exists"] self.assertEqual(shadow_dir, {"isdir": True, "gid": 0, "uid": 0, "mode": 0o744}) shadow_file = vfs.handle.files["/path/to/not/exists/file"] self.assertEqual(shadow_file, {"isdir": False, "content": "inject-file-contents", "gid": 100, "uid": 100, "mode": 0o700}) vfs.teardown() def test_inject_files_into_fs_dir_exists(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() called = {'make_path': False} def fake_has_file(*args, **kwargs): return True def fake_make_path(*args, **kwargs): called['make_path'] = True self.stubs.Set(vfs, 'has_file', fake_has_file) self.stubs.Set(vfs, 'make_path', fake_make_path) # test for already exists dir diskapi._inject_files_into_fs([("/path/to/exists/file", "inject-file-contents")], vfs) self.assertIn("/path/to/exists/file", vfs.handle.files) self.assertFalse(called['make_path']) # test for root dir diskapi._inject_files_into_fs([("/inject-file", "inject-file-contents")], vfs) self.assertIn("/inject-file", vfs.handle.files) self.assertFalse(called['make_path']) # test for null dir vfs.handle.files.pop("/inject-file") diskapi._inject_files_into_fs([("inject-file", "inject-file-contents")], vfs) self.assertIn("/inject-file", vfs.handle.files) 
self.assertFalse(called['make_path']) vfs.teardown() nova-13.0.0/nova/tests/unit/virt/test_fake.py0000664000567000056710000000161312701407773022277 0ustar jenkinsjenkins00000000000000# # Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import test from nova.virt import driver from nova.virt import fake class FakeDriverTest(test.NoDBTestCase): def test_public_api_signatures(self): baseinst = driver.ComputeDriver(None) inst = fake.FakeDriver(fake.FakeVirtAPI(), True) self.assertPublicAPISignatures(baseinst, inst) nova-13.0.0/nova/tests/unit/virt/test_events.py0000664000567000056710000000223212701407773022673 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import time

from nova import test
from nova.virt import event


class TestEvents(test.NoDBTestCase):
    """Checks the repr formats of the nova.virt.event classes."""

    def test_event_repr(self):
        t = time.time()
        uuid = '1234'
        lifecycle = event.EVENT_LIFECYCLE_RESUMED

        # BUG FIX: the expected strings had been destroyed by markup
        # stripping (everything between '<' and '>' was removed, leaving
        # "" % t etc.), so these assertions could never pass.  Restore
        # the real repr formats produced by nova.virt.event.
        e = event.Event(t)
        self.assertEqual(str(e), "<Event: %s>" % t)
        e = event.InstanceEvent(uuid, timestamp=t)
        self.assertEqual(str(e), "<InstanceEvent: %s, %s>" % (t, uuid))
        e = event.LifecycleEvent(uuid, lifecycle, timestamp=t)
        self.assertEqual(str(e),
                         "<LifecycleEvent: %s, %s => Resumed>" % (t, uuid))

# nova/tests/unit/virt/libvirt/test_driver.py
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib import copy import datetime import errno import glob import os import random import re import shutil import signal import threading import time import uuid import eventlet from eventlet import greenthread import fixtures from lxml import etree import mock from mox3 import mox from os_brick.initiator import connector from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_serialization import jsonutils from oslo_service import loopingcall from oslo_utils import encodeutils from oslo_utils import fileutils from oslo_utils import fixture as utils_fixture from oslo_utils import importutils from oslo_utils import units from oslo_utils import uuidutils from oslo_utils import versionutils import six from six.moves import builtins from six.moves import range from nova.api.metadata import base as instance_metadata from nova.compute import arch from nova.compute import cpumodel from nova.compute import manager from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_mode from nova.compute import vm_states from nova import context from nova import db from nova import exception from nova.network import model as network_model from nova import objects from nova.objects import block_device as block_device_obj from nova.objects import fields from nova.pci import manager as pci_manager from nova.pci import utils as pci_utils from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import fake_network import nova.tests.unit.image.fake from nova.tests.unit import matchers from nova.tests.unit.objects import test_pci_device from nova.tests.unit.objects import test_vcpu_model from nova.tests.unit.virt.libvirt import fake_imagebackend from nova.tests.unit.virt.libvirt import fake_libvirt_utils from nova.tests.unit.virt.libvirt import fakelibvirt from nova import utils from nova import 
version from nova.virt import block_device as driver_block_device from nova.virt import configdrive from nova.virt.disk import api as disk from nova.virt import driver from nova.virt import fake from nova.virt import firewall as base_firewall from nova.virt import hardware from nova.virt.image import model as imgmodel from nova.virt.libvirt import blockinfo from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import driver as libvirt_driver from nova.virt.libvirt import firewall from nova.virt.libvirt import guest as libvirt_guest from nova.virt.libvirt import host from nova.virt.libvirt import imagebackend from nova.virt.libvirt.storage import dmcrypt from nova.virt.libvirt.storage import lvm from nova.virt.libvirt.storage import rbd_utils from nova.virt.libvirt import utils as libvirt_utils from nova.virt.libvirt.volume import volume as volume_drivers libvirt_driver.libvirt = fakelibvirt host.libvirt = fakelibvirt libvirt_guest.libvirt = fakelibvirt CONF = cfg.CONF CONF.import_opt('compute_manager', 'nova.service') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache') CONF.import_opt('instances_path', 'nova.compute.manager') _fake_network_info = fake_network.fake_get_instance_nw_info _fake_NodeDevXml = \ {"pci_0000_04_00_3": """ pci_0000_04_00_3 pci_0000_00_01_1 igb 0 4 0 3 I350 Gigabit Network Connection Intel Corporation
""", "pci_0000_04_10_7": """ pci_0000_04_10_7 pci_0000_00_01_1 igbvf 0 4 16 7 I350 Ethernet Controller Virtual Function Intel Corporation
""", "pci_0000_04_11_7": """ pci_0000_04_11_7 pci_0000_00_01_1 igbvf 0 4 17 7 I350 Ethernet Controller Virtual Function Intel Corporation
""", "pci_0000_04_00_1": """ pci_0000_04_00_1 /sys/devices/pci0000:00/0000:00:02.0/0000:04:00.1 pci_0000_00_02_0 mlx5_core 0 4 0 1 MT27700 Family [ConnectX-4] Mellanox Technologies
""", # libvirt >= 1.3.0 nodedev-dumpxml "pci_0000_03_00_0": """ pci_0000_03_00_0 /sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0 pci_0000_00_02_0 mlx5_core 0 3 0 0 MT27700 Family [ConnectX-4] Mellanox Technologies
""", "pci_0000_03_00_1": """ pci_0000_03_00_1 /sys/devices/pci0000:00/0000:00:02.0/0000:03:00.1 pci_0000_00_02_0 mlx5_core 0 3 0 1 MT27700 Family [ConnectX-4] Mellanox Technologies
""", } _fake_cpu_info = { "arch": "test_arch", "model": "test_model", "vendor": "test_vendor", "topology": { "sockets": 1, "cores": 8, "threads": 16 }, "features": ["feature1", "feature2"] } def _concurrency(signal, wait, done, target, is_block_dev=False): signal.send() wait.wait() done.send() class FakeVirtDomain(object): def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None): if uuidstr is None: uuidstr = str(uuid.uuid4()) self.uuidstr = uuidstr self.id = id self.domname = name self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi, None, None] if fake_xml: self._fake_dom_xml = fake_xml else: self._fake_dom_xml = """ """ def name(self): if self.domname is None: return "fake-domain %s" % self else: return self.domname def ID(self): return self.id def info(self): return self._info def create(self): pass def managedSave(self, *args): pass def createWithFlags(self, launch_flags): pass def XMLDesc(self, flags): return self._fake_dom_xml def UUIDString(self): return self.uuidstr def attachDeviceFlags(self, xml, flags): pass def attachDevice(self, xml): pass def detachDeviceFlags(self, xml, flags): pass def snapshotCreateXML(self, xml, flags): pass def blockCommit(self, disk, base, top, bandwidth=0, flags=0): pass def blockRebase(self, disk, base, bandwidth=0, flags=0): pass def blockJobInfo(self, path, flags): pass def resume(self): pass def destroy(self): pass def fsFreeze(self, disks=None, flags=0): pass def fsThaw(self, disks=None, flags=0): pass def isActive(self): return True class CacheConcurrencyTestCase(test.NoDBTestCase): def setUp(self): super(CacheConcurrencyTestCase, self).setUp() self.flags(instances_path=self.useFixture(fixtures.TempDir()).path) # utils.synchronized() will create the lock_path for us if it # doesn't already exist. It will also delete it when it's done, # which can cause race conditions with the multiple threads we # use for tests. 
So, create the path here so utils.synchronized() # won't delete it out from under one of the threads. self.lock_path = os.path.join(CONF.instances_path, 'locks') fileutils.ensure_tree(self.lock_path) def fake_exists(fname): basedir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) if fname == basedir or fname == self.lock_path: return True return False def fake_execute(*args, **kwargs): pass def fake_extend(image, size, use_cow=False): pass self.stub_out('os.path.exists', fake_exists) self.stubs.Set(utils, 'execute', fake_execute) self.stubs.Set(imagebackend.disk, 'extend', fake_extend) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) def _fake_instance(self, uuid): return objects.Instance(id=1, uuid=uuid) def test_same_fname_concurrency(self): # Ensures that the same fname cache runs at a sequentially. uuid = uuidutils.generate_uuid() backend = imagebackend.Backend(False) wait1 = eventlet.event.Event() done1 = eventlet.event.Event() sig1 = eventlet.event.Event() thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid), 'name').cache, _concurrency, 'fname', None, signal=sig1, wait=wait1, done=done1) eventlet.sleep(0) # Thread 1 should run before thread 2. sig1.wait() wait2 = eventlet.event.Event() done2 = eventlet.event.Event() sig2 = eventlet.event.Event() thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid), 'name').cache, _concurrency, 'fname', None, signal=sig2, wait=wait2, done=done2) wait2.send() eventlet.sleep(0) try: self.assertFalse(done2.ready()) finally: wait1.send() done1.wait() eventlet.sleep(0) self.assertTrue(done2.ready()) # Wait on greenthreads to assert they didn't raise exceptions # during execution thr1.wait() thr2.wait() def test_different_fname_concurrency(self): # Ensures that two different fname caches are concurrent. 
uuid = uuidutils.generate_uuid() backend = imagebackend.Backend(False) wait1 = eventlet.event.Event() done1 = eventlet.event.Event() sig1 = eventlet.event.Event() thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid), 'name').cache, _concurrency, 'fname2', None, signal=sig1, wait=wait1, done=done1) eventlet.sleep(0) # Thread 1 should run before thread 2. sig1.wait() wait2 = eventlet.event.Event() done2 = eventlet.event.Event() sig2 = eventlet.event.Event() thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid), 'name').cache, _concurrency, 'fname1', None, signal=sig2, wait=wait2, done=done2) eventlet.sleep(0) # Wait for thread 2 to start. sig2.wait() wait2.send() tries = 0 while not done2.ready() and tries < 10: eventlet.sleep(0) tries += 1 try: self.assertTrue(done2.ready()) finally: wait1.send() eventlet.sleep(0) # Wait on greenthreads to assert they didn't raise exceptions # during execution thr1.wait() thr2.wait() class FakeVolumeDriver(object): def __init__(self, *args, **kwargs): pass def attach_volume(self, *args): pass def detach_volume(self, *args): pass def get_xml(self, *args): return "" def get_config(self, *args): """Connect the volume to a fake device.""" conf = vconfig.LibvirtConfigGuestDisk() conf.source_type = "network" conf.source_protocol = "fake" conf.source_name = "fake" conf.target_dev = "fake" conf.target_bus = "fake" return conf def connect_volume(self, *args): """Connect the volume to a fake device.""" pass class FakeConfigGuestDisk(object): def __init__(self, *args, **kwargs): self.source_type = None self.driver_cache = None class FakeConfigGuest(object): def __init__(self, *args, **kwargs): self.driver_cache = None class FakeNodeDevice(object): def __init__(self, fakexml): self.xml = fakexml def XMLDesc(self, flags): return self.xml def _create_test_instance(): flavor = objects.Flavor(memory_mb=2048, swap=0, vcpu_weight=None, root_gb=1, id=2, name=u'm1.small', ephemeral_gb=0, rxtx_factor=1.0, flavorid=u'1', vcpus=1, 
extra_specs={}) return { 'id': 1, 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310', 'memory_kb': '1024000', 'basepath': '/some/path', 'bridge_name': 'br100', 'display_name': "Acme webserver", 'vcpus': 2, 'project_id': 'fake', 'bridge': 'br101', 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'root_gb': 10, 'ephemeral_gb': 20, 'instance_type_id': '5', # m1.small 'extra_specs': {}, 'system_metadata': { 'image_disk_format': 'raw', }, 'flavor': flavor, 'new_flavor': None, 'old_flavor': None, 'pci_devices': objects.PciDeviceList(), 'numa_topology': None, 'config_drive': None, 'vm_mode': None, 'kernel_id': None, 'ramdisk_id': None, 'os_type': 'linux', 'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb', 'ephemeral_key_uuid': None, 'vcpu_model': None, 'host': 'fake-host', 'task_state': None, } class LibvirtConnTestCase(test.NoDBTestCase): REQUIRES_LOCKING = True _EPHEMERAL_20_DEFAULT = ('ephemeral_20_%s' % utils.get_hash_str(disk._DEFAULT_FILE_SYSTEM)[:7]) def setUp(self): super(LibvirtConnTestCase, self).setUp() self.flags(fake_call=True) self.user_id = 'fake' self.project_id = 'fake' self.context = context.get_admin_context() temp_dir = self.useFixture(fixtures.TempDir()).path self.flags(instances_path=temp_dir) self.flags(snapshots_directory=temp_dir, group='libvirt') self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) self.flags(sysinfo_serial="hardware", group="libvirt") self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) def fake_extend(image, size, use_cow=False): pass self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend) self.stubs.Set(imagebackend.Image, 'resolve_driver_format', imagebackend.Image._get_driver_format) self.useFixture(fakelibvirt.FakeLibvirtFixture()) self.test_instance = _create_test_instance() self.test_image_meta = { "disk_format": "raw", } self.image_service = nova.tests.unit.image.fake.stub_out_image_service( self) self.device_xml_tmpl 
= """ 58a84f6d-3f0c-4e19-a0af-eb657b790657
""" def relpath(self, path): return os.path.relpath(path, CONF.instances_path) def tearDown(self): nova.tests.unit.image.fake.FakeImageService_reset() super(LibvirtConnTestCase, self).tearDown() def test_driver_capabilities(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertTrue(drvr.capabilities['has_imagecache'], 'Driver capabilities for \'has_imagecache\' ' 'is invalid') self.assertTrue(drvr.capabilities['supports_recreate'], 'Driver capabilities for \'supports_recreate\' ' 'is invalid') self.assertFalse(drvr.capabilities['supports_migrate_to_same_host'], 'Driver capabilities for ' '\'supports_migrate_to_same_host\' is invalid') def create_fake_libvirt_mock(self, **kwargs): """Defining mocks for LibvirtDriver(libvirt is not used).""" # A fake libvirt.virConnect class FakeLibvirtDriver(object): def defineXML(self, xml): return FakeVirtDomain() # Creating mocks volume_driver = ['iscsi=nova.tests.unit.virt.libvirt.test_driver' '.FakeVolumeDriver'] fake = FakeLibvirtDriver() # Customizing above fake if necessary for key, val in kwargs.items(): fake.__setattr__(key, val) self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake) self.stubs.Set(libvirt_driver.LibvirtDriver, '_get_volume_drivers', lambda x: volume_driver) self.stubs.Set(host.Host, 'get_connection', lambda x: fake) def fake_lookup(self, instance_name): return FakeVirtDomain() def fake_execute(self, *args, **kwargs): open(args[-1], "a").close() def _create_service(self, **kwargs): service_ref = {'host': kwargs.get('host', 'dummy'), 'disabled': kwargs.get('disabled', False), 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0} return objects.Service(**service_ref) def _get_pause_flag(self, drvr, network_info, power_on=True, vifs_already_plugged=False): timeout = CONF.vif_plugging_timeout events = [] if (drvr._conn_supports_start_paused and utils.is_neutron() and not vifs_already_plugged and power_on and timeout): events = drvr._get_neutron_events(network_info) 
        # Tail of _get_pause_flag(): only start the guest paused when all
        # of the preconditions above held and neutron events were
        # actually collected.
        return bool(events)

    def test_public_api_signatures(self):
        # The libvirt driver must not drift from the ComputeDriver
        # interface; assertPublicAPISignatures compares every public
        # method signature against the base class.
        baseinst = driver.ComputeDriver(None)
        inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertPublicAPISignatures(baseinst, inst)

    def test_legacy_block_device_info(self):
        # The libvirt driver consumes the new-style block device info.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertFalse(drvr.need_legacy_block_device_info)

    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_ok(self, mock_version):
        # init_host succeeds when the host reports at least the
        # minimum required libvirt version.
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_abort(self, mock_version):
        # init_host must refuse to start against a libvirt older than
        # the supported minimum.
        mock_version.return_value = False
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_deprecation_warning(self, mock_warning,
                                                  mock_get_libversion):
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_VERSION)):
            self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")

        # Test that a warning is logged if the libvirt version is less than
        # the next required minimum version.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

        # assert that the next min version is in a warning message
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
        version_arg_found = False
        # Scan every warning emitted during init_host for the one that
        # carries the NEXT_MIN version as its formatting argument.
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_ok(self, mock_warning, mock_get_libversion):
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_VERSION)):
            self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")

        # Test that a warning is not logged if the libvirt version is greater
        # than or equal to NEXT_MIN_LIBVIRT_VERSION.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

        # assert that the next min version is in a warning message
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
        version_arg_found = False
        # No warning should have mentioned the next minimum version,
        # since the (mocked) host already satisfies it.
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertFalse(version_arg_found)

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.PPC64)) - 1)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
    def test_min_version_ppc_old_libvirt(self, mock_libv, mock_qemu,
                                         mock_arch):
        # PPC64 has its own (higher) libvirt minimum; one version below
        # it must abort init_host even though qemu is new enough.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.PPC64)) - 1)
    @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
    def test_min_version_ppc_old_qemu(self, mock_libv, mock_qemu,
                                      mock_arch):
        # Conversely, a too-old qemu on PPC64 must also abort init_host
        # even when libvirt itself is new enough.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.PPC64)))
@mock.patch.object(arch, "from_host", return_value=arch.PPC64) def test_min_version_ppc_ok(self, mock_libv, mock_qemu, mock_arch): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get( arch.S390X)) - 1) @mock.patch.object(fakelibvirt.Connection, 'getVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_OTHER_ARCH.get( arch.S390X))) @mock.patch.object(arch, "from_host", return_value=arch.S390X) def test_min_version_s390_old_libvirt(self, mock_libv, mock_qemu, mock_arch): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.init_host, "dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get( arch.S390X))) @mock.patch.object(fakelibvirt.Connection, 'getVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_OTHER_ARCH.get( arch.S390X)) - 1) @mock.patch.object(arch, "from_host", return_value=arch.S390X) def test_min_version_s390_old_qemu(self, mock_libv, mock_qemu, mock_arch): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.init_host, "dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get( arch.S390X))) @mock.patch.object(fakelibvirt.Connection, 'getVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_OTHER_ARCH.get( arch.S390X))) @mock.patch.object(arch, "from_host", return_value=arch.S390X) def test_min_version_s390_ok(self, mock_libv, mock_qemu, mock_arch): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("dummyhost") def 
_do_test_parse_migration_flags(self, lm_config=None, lm_expected=None, bm_config=None, bm_expected=None): if lm_config is not None: self.flags(live_migration_flag=lm_config, group='libvirt') if bm_config is not None: self.flags(block_migration_flag=bm_config, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr._parse_migration_flags() if lm_expected is not None: self.assertEqual(lm_expected, drvr._live_migration_flags) if bm_expected is not None: self.assertEqual(bm_expected, drvr._block_migration_flags) def test_parse_live_migration_flags_default(self): self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED)) def test_parse_live_migration_flags(self): self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE)) def test_parse_block_migration_flags_default(self): self._do_test_parse_migration_flags( bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_NON_SHARED_INC'), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) def test_parse_block_migration_flags(self): self._do_test_parse_migration_flags( bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC'), 
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) @mock.patch('nova.virt.libvirt.driver.LOG') def test_parse_live_migration_flag_with_invalid_flag(self, mock_log): self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_FOO_BAR'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED), bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_NON_SHARED_INC, VIR_MIGRATE_FOO_BAR'), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) msg = mock_log.warning.call_args_list[0] self.assertIn("unknown libvirt live migration flag", msg[0][0]) msg = mock_log.warning.call_args_list[1] self.assertIn("unknown libvirt live migration flag", msg[0][0]) @mock.patch('nova.virt.libvirt.driver.LOG') def test_parse_migration_flags_unsafe_block(self, mock_log): '''Test if the driver logs a warning if the live_migration_flag and/or block_migration_flag config option uses a value which can cause potential damage. 
        '''
        # Live config wrongly carries NON_SHARED_INC (unsafe for live
        # migration); block config wrongly lacks it.  The driver must
        # strip it from the former and add it to the latter, warning
        # about each correction.
        self._do_test_parse_migration_flags(
            lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, '
                       'VIR_MIGRATE_TUNNELLED, '
                       'VIR_MIGRATE_NON_SHARED_INC'),
            bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, '
                       'VIR_MIGRATE_TUNNELLED'),
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

        msg = mock_log.warning.call_args_list[0]
        self.assertIn('Removing the VIR_MIGRATE_NON_SHARED_INC', msg[0][0])
        msg = mock_log.warning.call_args_list[1]
        self.assertIn('Adding the VIR_MIGRATE_NON_SHARED_INC', msg[0][0])

    @mock.patch('nova.virt.libvirt.driver.LOG')
    def test_parse_migration_flags_p2p_missing(self, mock_log):
        # When PEER2PEER is missing from either flag set the driver
        # adds it (with a warning per config option).
        self._do_test_parse_migration_flags(
            lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_LIVE, '
                       'VIR_MIGRATE_TUNNELLED'),
            bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_LIVE, '
                       'VIR_MIGRATE_TUNNELLED, '
                       'VIR_MIGRATE_NON_SHARED_INC'),
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

        msg = mock_log.warning.call_args_list[0]
        self.assertIn('Adding the VIR_MIGRATE_PEER2PEER flag', msg[0][0])
        msg = mock_log.warning.call_args_list[1]
        self.assertIn('Adding the VIR_MIGRATE_PEER2PEER flag', msg[0][0])
@mock.patch('nova.virt.libvirt.driver.LOG') def test_parse_migration_flags_p2p_xen(self, mock_log): self.flags(virt_type='xen', group='libvirt') self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, ' 'VIR_MIGRATE_TUNNELLED'), bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, ' 'VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_NON_SHARED_INC'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) msg = mock_log.warning.call_args_list[0] self.assertIn('Removing the VIR_MIGRATE_PEER2PEER flag', msg[0][0]) msg = mock_log.warning.call_args_list[1] self.assertIn('Removing the VIR_MIGRATE_PEER2PEER flag', msg[0][0]) @mock.patch('nova.virt.libvirt.driver.LOG') def test_parse_migration_flags_config_mgmt(self, mock_log): self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_PERSIST_DEST, ' 'VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, ' 'VIR_MIGRATE_TUNNELLED'), bm_config=('VIR_MIGRATE_PERSIST_DEST, ' 'VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, ' 'VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_NON_SHARED_INC'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) msg = mock_log.warning.call_args_list[0] self.assertIn('Adding the VIR_MIGRATE_UNDEFINE_SOURCE flag', msg[0][0]) msg = 
mock_log.warning.call_args_list[1] self.assertIn('Removing the VIR_MIGRATE_PERSIST_DEST flag', msg[0][0]) msg = mock_log.warning.call_args_list[2] self.assertIn('Adding the VIR_MIGRATE_UNDEFINE_SOURCE flag', msg[0][0]) msg = mock_log.warning.call_args_list[3] self.assertIn('Removing the VIR_MIGRATE_PERSIST_DEST flag', msg[0][0]) @mock.patch('nova.virt.libvirt.driver.LOG') def test_live_migration_tunnelled_true(self, mock_log): self.flags(live_migration_tunnelled=True, group='libvirt') self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_LIVE'), bm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED)) msg = mock_log.warning.call_args_list[0] self.assertIn('does not contain the VIR_MIGRATE_TUNNELLED', msg[0][0]) msg = mock_log.warning.call_args_list[1] self.assertIn('does not contain the VIR_MIGRATE_TUNNELLED', msg[0][0]) @mock.patch('nova.virt.libvirt.driver.LOG') def test_live_migration_tunnelled_false(self, mock_log): self.flags(live_migration_tunnelled=False, group='libvirt') self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED'), bm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC, ' 'VIR_MIGRATE_TUNNELLED'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | 
libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) msg = mock_log.warning.call_args_list[0] self.assertIn('contains the VIR_MIGRATE_TUNNELLED flag', msg[0][0]) msg = mock_log.warning.call_args_list[1] self.assertIn('contains the VIR_MIGRATE_TUNNELLED flag', msg[0][0]) @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.host.Host.get_guest') def test_set_admin_password(self, mock_get_guest, ver, mock_image): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_get_guest.return_value = mock_guest drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.set_admin_password(instance, "123") mock_guest.set_user_password.assert_called_once_with("root", "123") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.host.Host.get_guest') def test_set_admin_password_windows(self, mock_get_guest, ver, mock_image): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) instance.os_type = "windows" mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_get_guest.return_value = mock_guest drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.set_admin_password(instance, "123") mock_guest.set_user_password.assert_called_once_with( "Administrator", "123") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', 
                       return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_image(self, mock_get_guest, ver, mock_image):
        # An image-declared os_admin_user overrides the default account
        # name used for the password reset.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes",
            "os_admin_user": "foo"
        }}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        mock_guest.set_user_password.assert_called_once_with("foo", "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=False)
    def test_set_admin_password_bad_version(self, mock_svc, mock_image):
        # Too-old libvirt: the operation is reported as unsupported.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.SetAdminPasswdNotSupported,
                          drvr.set_admin_password, instance, "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_set_admin_password_bad_hyp(self, mock_svc, mock_image):
        # Unsupported virt_type: the operation is reported as
        # unsupported regardless of version.
        self.flags(virt_type='foo', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.SetAdminPasswdNotSupported,
                          drvr.set_admin_password, instance, "123")

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_set_admin_password_guest_agent_not_running(self, mock_svc):
        # Without the hw_qemu_guest_agent image property there is no
        # agent to talk to, so the reset must fail explicitly.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.QemuGuestAgentNotEnabled,
                          drvr.set_admin_password, instance, "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_error(self, mock_get_guest, ver, mock_image):
        # A libvirtError from the guest agent is surfaced to the caller
        # as a NovaException.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.set_user_password.side_effect = (
            fakelibvirt.libvirtError("error"))
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.set_admin_password, instance, "123")

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable(self, mock_svc):
        # Tests disabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable(self, mock_svc):
        # Tests enabling a disabled host.
        # NOTE(review): the service stays disabled here — presumably the
        # driver only re-enables hosts it disabled itself; confirm
        # against _set_host_enabled's implementation.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertTrue(svc.disabled)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable_state_enabled(self, mock_svc):
        # Tests enabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=False, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        # Enabling an already-enabled host is a no-op: it stays enabled.
        self.assertFalse(svc.disabled)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable_state_disabled(self, mock_svc):
        # Tests disabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)

    def test_set_host_enabled_swallows_exceptions(self):
        # Tests that set_host_enabled will swallow exceptions coming from the
        # db_api code so they don't break anything calling it, e.g. the
        # _get_new_connection method.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
            # Make db.service_get_by_compute_host raise NovaException; this
            # is more robust than just raising ComputeHostNotFound.
db_mock.side_effect = exception.NovaException drvr._set_host_enabled(False) @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName") def test_prepare_pci_device(self, mock_lookup): pci_devices = [dict(hypervisor_name='xxx')] self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conn = drvr._host.get_connection() mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn) drvr._prepare_pci_devices_for_use(pci_devices) @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName") @mock.patch.object(fakelibvirt.virNodeDevice, "dettach") def test_prepare_pci_device_exception(self, mock_detach, mock_lookup): pci_devices = [dict(hypervisor_name='xxx', id='id1', instance_uuid='uuid')] self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conn = drvr._host.get_connection() mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn) mock_detach.side_effect = fakelibvirt.libvirtError("xxxx") self.assertRaises(exception.PciDevicePrepareFailed, drvr._prepare_pci_devices_for_use, pci_devices) def test_detach_pci_devices_exception(self): pci_devices = [dict(hypervisor_name='xxx', id='id1', instance_uuid='uuid')] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.mox.StubOutWithMock(host.Host, 'has_min_version') host.Host.has_min_version = lambda x, y: False self.assertRaises(exception.PciDeviceDetachFailed, drvr._detach_pci_devices, None, pci_devices) def test_detach_pci_devices(self): fake_domXML1 =\ """
""" pci_devices = [dict(hypervisor_name='xxx', id='id1', instance_uuid='uuid', address="0001:04:10:1")] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.mox.StubOutWithMock(host.Host, 'has_min_version') host.Host.has_min_version = lambda x, y: True self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_get_guest_pci_device') class FakeDev(object): def to_xml(self): pass libvirt_driver.LibvirtDriver._get_guest_pci_device =\ lambda x, y: FakeDev() class FakeDomain(object): def detachDeviceFlags(self, xml, flags): pci_devices[0]['hypervisor_name'] = 'marked' pass def XMLDesc(self, flags): return fake_domXML1 guest = libvirt_guest.Guest(FakeDomain()) drvr._detach_pci_devices(guest, pci_devices) self.assertEqual(pci_devices[0]['hypervisor_name'], 'marked') def test_detach_pci_devices_timeout(self): fake_domXML1 =\ """
""" pci_devices = [dict(hypervisor_name='xxx', id='id1', instance_uuid='uuid', address="0000:04:10:1")] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.mox.StubOutWithMock(host.Host, 'has_min_version') host.Host.has_min_version = lambda x, y: True self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_get_guest_pci_device') class FakeDev(object): def to_xml(self): pass libvirt_driver.LibvirtDriver._get_guest_pci_device =\ lambda x, y: FakeDev() class FakeDomain(object): def detachDeviceFlags(self, xml, flags): pass def XMLDesc(self, flags): return fake_domXML1 guest = libvirt_guest.Guest(FakeDomain()) self.assertRaises(exception.PciDeviceDetachFailed, drvr._detach_pci_devices, guest, pci_devices) @mock.patch.object(connector, 'get_connector_properties') def test_get_connector(self, fake_get_connector): initiator = 'fake.initiator.iqn' ip = 'fakeip' host = 'fakehost' wwpns = ['100010604b019419'] wwnns = ['200010604b019419'] self.flags(my_ip=ip) self.flags(host=host) expected = { 'ip': ip, 'initiator': initiator, 'host': host, 'wwpns': wwpns, 'wwnns': wwnns } volume = { 'id': 'fake' } # TODO(walter-boring) add the fake in os-brick fake_get_connector.return_value = expected drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) result = drvr.get_volume_connector(volume) self.assertThat(expected, matchers.DictMatches(result)) @mock.patch.object(connector, 'get_connector_properties') def test_get_connector_storage_ip(self, fake_get_connector): ip = '100.100.100.100' storage_ip = '101.101.101.101' self.flags(my_block_storage_ip=storage_ip, my_ip=ip) volume = { 'id': 'fake' } expected = { 'ip': storage_ip } # TODO(walter-boring) add the fake in os-brick fake_get_connector.return_value = expected drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) result = drvr.get_volume_connector(volume) self.assertEqual(storage_ip, result['ip']) def test_lifecycle_event_registration(self): calls = [] def fake_registerErrorHandler(*args, **kwargs): 
calls.append('fake_registerErrorHandler') def fake_get_host_capabilities(**args): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.ARMV7 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu calls.append('fake_get_host_capabilities') return caps @mock.patch.object(fakelibvirt, 'registerErrorHandler', side_effect=fake_registerErrorHandler) @mock.patch.object(host.Host, "get_capabilities", side_effect=fake_get_host_capabilities) def test_init_host(get_host_capabilities, register_error_handler): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("test_host") test_init_host() # NOTE(dkliban): Will fail if get_host_capabilities is called before # registerErrorHandler self.assertEqual(['fake_registerErrorHandler', 'fake_get_host_capabilities'], calls) def test_sanitize_log_to_xml(self): # setup fake data data = {'auth_password': 'scrubme'} bdm = [{'connection_info': {'data': data}}] bdi = {'block_device_mapping': bdm} # Tests that the parameters to the _get_guest_xml method # are sanitized for passwords when logged. 
def fake_debug(*args, **kwargs): if 'auth_password' in args[0]: self.assertNotIn('scrubme', args[0]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conf = mock.Mock() with test.nested( mock.patch.object(libvirt_driver.LOG, 'debug', side_effect=fake_debug), mock.patch.object(drvr, '_get_guest_config', return_value=conf) ) as ( debug_mock, conf_mock ): drvr._get_guest_xml(self.context, self.test_instance, network_info={}, disk_info={}, image_meta={}, block_device_info=bdi) # we don't care what the log message is, we just want to make sure # our stub method is called which asserts the password is scrubbed self.assertTrue(debug_mock.called) @mock.patch.object(time, "time") def test_get_guest_config(self, time_mock): time_mock.return_value = 1234567.89 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) test_instance = copy.deepcopy(self.test_instance) test_instance["display_name"] = "purple tomatoes" ctxt = context.RequestContext(project_id=123, project_name="aubergine", user_id=456, user_name="pie") flavor = objects.Flavor(name='m1.small', memory_mb=6, vcpus=28, root_gb=496, ephemeral_gb=8128, swap=33550336, extra_specs={}) instance_ref = objects.Instance(**test_instance) instance_ref.flavor = flavor image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info, context=ctxt) self.assertEqual(cfg.uuid, instance_ref["uuid"]) self.assertEqual(2, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertEqual(cfg.memory, 6 * units.Ki) self.assertEqual(cfg.vcpus, 28) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_boot_dev, ["hd"]) self.assertIsNone(cfg.os_root) self.assertEqual(len(cfg.devices), 10) 
self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(len(cfg.metadata), 1) self.assertIsInstance(cfg.metadata[0], vconfig.LibvirtConfigGuestMetaNovaInstance) self.assertEqual(version.version_string_with_package(), cfg.metadata[0].package) self.assertEqual("purple tomatoes", cfg.metadata[0].name) self.assertEqual(1234567.89, cfg.metadata[0].creationTime) self.assertEqual("image", cfg.metadata[0].roottype) self.assertEqual(str(instance_ref["image_ref"]), cfg.metadata[0].rootid) self.assertIsInstance(cfg.metadata[0].owner, vconfig.LibvirtConfigGuestMetaNovaOwner) self.assertEqual(456, cfg.metadata[0].owner.userid) self.assertEqual("pie", cfg.metadata[0].owner.username) self.assertEqual(123, cfg.metadata[0].owner.projectid) self.assertEqual("aubergine", cfg.metadata[0].owner.projectname) self.assertIsInstance(cfg.metadata[0].flavor, vconfig.LibvirtConfigGuestMetaNovaFlavor) self.assertEqual("m1.small", cfg.metadata[0].flavor.name) self.assertEqual(6, cfg.metadata[0].flavor.memory) self.assertEqual(28, cfg.metadata[0].flavor.vcpus) self.assertEqual(496, cfg.metadata[0].flavor.disk) self.assertEqual(8128, cfg.metadata[0].flavor.ephemeral) self.assertEqual(33550336, cfg.metadata[0].flavor.swap) def test_get_guest_config_lxc(self): self.flags(virt_type='lxc', group='libvirt') drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, {'mapping': {}}) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline) self.assertIsNone(cfg.os_root) self.assertEqual(3, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) def test_get_guest_config_lxc_with_id_maps(self): self.flags(virt_type='lxc', group='libvirt') self.flags(uid_maps=['0:1000:100'], group='libvirt') self.flags(gid_maps=['0:1000:100'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, {'mapping': {}}) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline) self.assertIsNone(cfg.os_root) self.assertEqual(3, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) self.assertEqual(len(cfg.idmaps), 2) self.assertIsInstance(cfg.idmaps[0], vconfig.LibvirtConfigGuestUIDMap) 
self.assertIsInstance(cfg.idmaps[1], vconfig.LibvirtConfigGuestGIDMap) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_fits(self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps)): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_no_fit(self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object(host.Host, 
"get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(random, 'choice') ) as (get_host_cap_mock, get_vcpu_pin_set_mock, choice_mock): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertFalse(choice_mock.called) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) def _test_get_guest_memory_backing_config( self, host_topology, inst_topology, numatune): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) with mock.patch.object( drvr, "_get_host_numa_topology", return_value=host_topology): return drvr._get_guest_memory_backing_config( inst_topology, numatune, {}) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_guest_memory_backing_config_large_success(self, mock_version): host_topology = objects.NUMATopology( cells=[ objects.NUMACell( id=3, cpuset=set([1]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=2000, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0), objects.NUMAPagesTopology(size_kb=1048576, total=0, used=0), ])]) inst_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)]) numa_tune = vconfig.LibvirtConfigGuestNUMATune() numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()] numa_tune.memnodes[0].cellid = 0 numa_tune.memnodes[0].nodeset = [3] result = self._test_get_guest_memory_backing_config( host_topology, inst_topology, numa_tune) self.assertEqual(1, len(result.hugepages)) self.assertEqual(2048, result.hugepages[0].size_kb) self.assertEqual([0], result.hugepages[0].nodeset) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_guest_memory_backing_config_smallest(self, mock_version): host_topology = objects.NUMATopology( cells=[ objects.NUMACell( id=3, cpuset=set([1]), memory=1024, mempages=[ 
objects.NUMAPagesTopology(size_kb=4, total=2000, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0), objects.NUMAPagesTopology(size_kb=1048576, total=0, used=0), ])]) inst_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)]) numa_tune = vconfig.LibvirtConfigGuestNUMATune() numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()] numa_tune.memnodes[0].cellid = 0 numa_tune.memnodes[0].nodeset = [3] result = self._test_get_guest_memory_backing_config( host_topology, inst_topology, numa_tune) self.assertIsNone(result) def test_get_guest_memory_backing_config_realtime(self): flavor = {"extra_specs": { "hw:cpu_realtime": "yes", "hw:cpu_policy": "dedicated" }} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) membacking = drvr._get_guest_memory_backing_config( None, None, flavor) self.assertTrue(membacking.locked) self.assertFalse(membacking.sharedpages) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_pci_no_numa_info( self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.AVAILABLE, address='0000:00:00.1', instance_uuid=None, request_id=None, extra_info={}, numa_node=None) 
pci_device = objects.PciDevice(**pci_device_info) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object( host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), mock.patch.object(pci_manager, "get_instance_pci_devs", return_value=[pci_device])): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_2pci_no_fit(self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.AVAILABLE, address='0000:00:00.1', instance_uuid=None, request_id=None, extra_info={}, numa_node=1) pci_device = objects.PciDevice(**pci_device_info) pci_device_info.update(numa_node=0, address='0000:00:00.2') pci_device2 = objects.PciDevice(**pci_device_info) with test.nested( mock.patch.object( host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(random, 
'choice'), mock.patch.object(pci_manager, "get_instance_pci_devs", return_value=[pci_device, pci_device2]) ) as (get_host_cap_mock, get_vcpu_pin_set_mock, choice_mock, pci_mock): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertFalse(choice_mock.called) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') @mock.patch.object(host.Host, 'get_capabilities') @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled') def _test_get_guest_config_numa_unsupported(self, fake_lib_version, fake_version, fake_type, fake_arch, exception_class, pagesize, mock_host, mock_caps, mock_lib_version, mock_version, mock_type): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=pagesize)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = fake_arch caps.host.topology = self._fake_caps_numa_topology() mock_type.return_value = fake_type mock_version.return_value = fake_version mock_lib_version.return_value = fake_lib_version mock_caps.return_value = caps drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.assertRaises(exception_class, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) def 
test_get_guest_config_numa_old_version_libvirt(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1, versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.X86_64, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_old_version_libvirt_ppc(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION_PPC) - 1, versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.PPC64LE, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_bad_version_libvirt(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.BAD_LIBVIRT_NUMA_VERSIONS[0]), versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.X86_64, exception.NUMATopologyUnsupported, None) @mock.patch.object(libvirt_driver.LOG, 'warn') def test_has_numa_support_bad_version_libvirt_log(self, mock_warn): # Tests that a warning is logged once and only once when there is a bad # BAD_LIBVIRT_NUMA_VERSIONS detected. 
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertFalse(hasattr(drvr, '_bad_libvirt_numa_version_warn')) with mock.patch.object(drvr._host, 'has_version', return_value=True): for i in range(2): self.assertFalse(drvr._has_numa_support()) self.assertTrue(drvr._bad_libvirt_numa_version_warn) self.assertEqual(1, mock_warn.call_count) # assert the version is logged properly self.assertEqual('1.2.9.2', mock_warn.call_args[0][1]) def test_get_guest_config_numa_old_version_qemu(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION), versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1, host.HV_DRIVER_QEMU, arch.X86_64, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_other_arch_qemu(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION), versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.S390, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_xen(self): self.flags(virt_type='xen', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION), versionutils.convert_version_to_int((4, 5, 0)), 'XEN', arch.X86_64, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_old_pages_libvirt(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1, versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.X86_64, exception.MemoryPagesUnsupported, 2048) def test_get_guest_config_numa_old_pages_qemu(self): 
self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION), versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1, host.HV_DRIVER_QEMU, arch.X86_64, exception.NUMATopologyUnsupported, 2048) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset( self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology(kb_mem=4194304) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))) ) as (has_min_version_mock, get_host_cap_mock, get_vcpu_pin_set_mock, get_online_cpus_mock): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) # NOTE(ndipanov): we make sure that pin_set was taken into account # when choosing viable cells self.assertEqual(set([2, 3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_non_numa_host_instance_topo(self, is_able): instance_topology = 
objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024), objects.InstanceNUMACell( id=1, cpuset=set([2]), memory=1024)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps)): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.numatune) self.assertIsNotNone(cfg.cpu.numa) for instance_cell, numa_cfg_cell in zip( instance_topology.cells, cfg.cpu.numa.cells): self.assertEqual(instance_cell.id, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_topo(self, is_able): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=None), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=None)]) instance_ref = objects.Instance(**self.test_instance) 
instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) # Test that the pinning is correct and limited to allowed only self.assertEqual(0, cfg.cputune.vcpupin[0].id) self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset) self.assertEqual(1, cfg.cputune.vcpupin[1].id) self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset) self.assertEqual(2, cfg.cputune.vcpupin[2].id) self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset) self.assertEqual(3, cfg.cputune.vcpupin[3].id) self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset) self.assertIsNotNone(cfg.cpu.numa) self.assertIsInstance(cfg.cputune.emulatorpin, vconfig.LibvirtConfigGuestCPUTuneEmulatorPin) self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset) for instance_cell, numa_cfg_cell, index in zip( instance_topology.cells, cfg.cpu.numa.cells, range(len(instance_topology.cells))): self.assertEqual(index, 
numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) allnodes = [cell.id for cell in instance_topology.cells] self.assertEqual(allnodes, cfg.numatune.memory.nodeset) self.assertEqual("strict", cfg.numatune.memory.mode) for instance_cell, memnode, index in zip( instance_topology.cells, cfg.numatune.memnodes, range(len(instance_topology.cells))): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) def test_get_guest_config_numa_host_instance_topo_reordered(self): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024), objects.InstanceNUMACell( id=0, cpuset=set([2, 3]), memory=1024)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) # Test that the pinning is correct and limited to 
allowed only self.assertEqual(0, cfg.cputune.vcpupin[0].id) self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset) self.assertEqual(1, cfg.cputune.vcpupin[1].id) self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset) self.assertEqual(2, cfg.cputune.vcpupin[2].id) self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset) self.assertEqual(3, cfg.cputune.vcpupin[3].id) self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset) self.assertIsNotNone(cfg.cpu.numa) self.assertIsInstance(cfg.cputune.emulatorpin, vconfig.LibvirtConfigGuestCPUTuneEmulatorPin) self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset) for index, (instance_cell, numa_cfg_cell) in enumerate(zip( instance_topology.cells, cfg.cpu.numa.cells)): self.assertEqual(index, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertIsNone(numa_cfg_cell.memAccess) allnodes = set([cell.id for cell in instance_topology.cells]) self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset)) self.assertEqual("strict", cfg.numatune.memory.mode) for index, (instance_cell, memnode) in enumerate(zip( instance_topology.cells, cfg.numatune.memnodes)): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, cpu_pinning={0: 24, 1: 25}), objects.InstanceNUMACell( id=0, cpuset=set([2, 3]), memory=1024, cpu_pinning={2: 0, 3: 1})]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) 
instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology( sockets_per_cell=4, cores_per_socket=3, threads_per_core=2) conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) # Test that the pinning is correct and limited to allowed only self.assertEqual(0, cfg.cputune.vcpupin[0].id) self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset) self.assertEqual(1, cfg.cputune.vcpupin[1].id) self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset) self.assertEqual(2, cfg.cputune.vcpupin[2].id) self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset) self.assertEqual(3, cfg.cputune.vcpupin[3].id) self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset) self.assertIsNotNone(cfg.cpu.numa) # Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset self.assertIsInstance(cfg.cputune.emulatorpin, vconfig.LibvirtConfigGuestCPUTuneEmulatorPin) self.assertEqual(set([0, 1, 24, 25]), cfg.cputune.emulatorpin.cpuset) for i, (instance_cell, numa_cfg_cell) in enumerate(zip( instance_topology.cells, cfg.cpu.numa.cells)): self.assertEqual(i, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertIsNone(numa_cfg_cell.memAccess) allnodes = set([cell.id for cell in 
instance_topology.cells]) self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset)) self.assertEqual("strict", cfg.numatune.memory.mode) for i, (instance_cell, memnode) in enumerate(zip( instance_topology.cells, cfg.numatune.memnodes)): self.assertEqual(i, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) def test_get_guest_config_numa_host_mempages_shared(self): instance_topology = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=2048), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=2048)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for instance_cell, numa_cfg_cell, index in zip( instance_topology.cells, cfg.cpu.numa.cells, range(len(instance_topology.cells))): self.assertEqual(index, numa_cfg_cell.id) 
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertEqual("shared", numa_cfg_cell.memAccess) allnodes = [cell.id for cell in instance_topology.cells] self.assertEqual(allnodes, cfg.numatune.memory.nodeset) self.assertEqual("strict", cfg.numatune.memory.mode) for instance_cell, memnode, index in zip( instance_topology.cells, cfg.numatune.memnodes, range(len(instance_topology.cells))): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) self.assertEqual(0, len(cfg.cputune.vcpusched)) self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset) def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(self): instance_topology = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=2048), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=2048)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={ "hw:cpu_realtime": "yes", "hw:cpu_policy": "dedicated", "hw:cpu_realtime_mask": "^0-1" }) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), 
mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for instance_cell, numa_cfg_cell, index in zip( instance_topology.cells, cfg.cpu.numa.cells, range(len(instance_topology.cells))): self.assertEqual(index, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertEqual("shared", numa_cfg_cell.memAccess) allnodes = [cell.id for cell in instance_topology.cells] self.assertEqual(allnodes, cfg.numatune.memory.nodeset) self.assertEqual("strict", cfg.numatune.memory.mode) for instance_cell, memnode, index in zip( instance_topology.cells, cfg.numatune.memnodes, range(len(instance_topology.cells))): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) self.assertEqual(1, len(cfg.cputune.vcpusched)) self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler) self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus) self.assertEqual(set([0, 1]), cfg.cputune.emulatorpin.cpuset) def test_get_cpu_numa_config_from_instance(self): topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128), objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128), ]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conf = drvr._get_cpu_numa_config_from_instance(topology, True) self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA) self.assertEqual(0, conf.cells[0].id) self.assertEqual(set([1, 2]), conf.cells[0].cpus) self.assertEqual(131072, conf.cells[0].memory) self.assertEqual("shared", conf.cells[0].memAccess) self.assertEqual(1, conf.cells[1].id) self.assertEqual(set([3, 4]), 
                         conf.cells[1].cpus)
        self.assertEqual(131072, conf.cells[1].memory)
        self.assertEqual("shared", conf.cells[1].memAccess)

    def test_get_cpu_numa_config_from_instance_none(self):
        # A missing instance NUMA topology must yield no <cpu><numa>
        # config at all (None), not an empty element.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = drvr._get_cpu_numa_config_from_instance(None, False)
        self.assertIsNone(conf)

    @mock.patch.object(host.Host, 'has_version', return_value=True)
    def test_has_cpu_policy_support(self, mock_has_version):
        # With host.Host.has_version mocked True, _has_cpu_policy_support
        # is expected to raise CPUPinningNotSupported.
        # NOTE(review): has_version here presumably matches a blacklisted
        # libvirt release -- confirm against the driver implementation.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.CPUPinningNotSupported,
                          drvr._has_cpu_policy_support)

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support",
                       return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def test_does_not_want_hugepages(self, mock_caps, mock_numa, mock_hp):
        # pagesize=4 (small pages) in every instance cell means the
        # instance does not request huge pages, so _wants_hugepages must
        # be False for every host/instance topology combination.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=4),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=4)])

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()

        mock_caps.return_value = caps

        host_topology = drvr._get_host_numa_topology()

        self.assertFalse(drvr._wants_hugepages(None, None))
        self.assertFalse(drvr._wants_hugepages(host_topology, None))
        self.assertFalse(drvr._wants_hugepages(None, instance_topology))
        self.assertFalse(drvr._wants_hugepages(host_topology,
                                               instance_topology))

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support",
                       return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def test_does_want_hugepages(self, mock_caps, mock_numa, mock_hp):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_topology = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=2048), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=2048)]) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() mock_caps.return_value = caps host_topology = drvr._get_host_numa_topology() self.assertTrue(drvr._wants_hugepages(host_topology, instance_topology)) def test_get_guest_config_clock(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) hpet_map = { arch.X86_64: True, arch.I686: True, arch.PPC: False, arch.PPC64: False, arch.ARMV7: False, arch.AARCH64: False, } for guestarch, expect_hpet in hpet_map.items(): with mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch', return_value=guestarch): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsInstance(cfg.clock, vconfig.LibvirtConfigGuestClock) self.assertEqual(cfg.clock.offset, "utc") self.assertIsInstance(cfg.clock.timers[0], vconfig.LibvirtConfigGuestTimer) self.assertIsInstance(cfg.clock.timers[1], vconfig.LibvirtConfigGuestTimer) self.assertEqual(cfg.clock.timers[0].name, "pit") self.assertEqual(cfg.clock.timers[0].tickpolicy, "delay") self.assertEqual(cfg.clock.timers[1].name, "rtc") self.assertEqual(cfg.clock.timers[1].tickpolicy, "catchup") if expect_hpet: self.assertEqual(3, len(cfg.clock.timers)) self.assertIsInstance(cfg.clock.timers[2], vconfig.LibvirtConfigGuestTimer) self.assertEqual('hpet', cfg.clock.timers[2].name) 
                    # hpet is present but explicitly disabled on x86.
                    self.assertFalse(cfg.clock.timers[2].present)
                else:
                    self.assertEqual(2, len(cfg.clock.timers))

    @mock.patch.object(libvirt_utils, 'get_arch')
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows(self, mock_version, mock_get_arch):
        # Windows guests get a localtime clock with pit/rtc/hpet timers;
        # hpet is disabled.  has_min_version=False means the hypervclock
        # timer must NOT be added (only 3 timers expected).
        mock_version.return_value = False
        mock_get_arch.return_value = arch.I686
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)

        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(3, len(cfg.clock.timers), cfg.clock.timers)
        self.assertEqual("pit", cfg.clock.timers[0].name)
        self.assertEqual("rtc", cfg.clock.timers[1].name)
        self.assertEqual("hpet", cfg.clock.timers[2].name)
        self.assertFalse(cfg.clock.timers[2].present)

    @mock.patch.object(libvirt_utils, 'get_arch')
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_timer(self, mock_version,
                                            mock_get_arch):
        # Same as above but has_min_version=True: a 4th, enabled
        # hypervclock timer is expected.
        mock_version.return_value = True
        mock_get_arch.return_value = arch.I686
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)

        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(4, len(cfg.clock.timers), cfg.clock.timers)
        self.assertEqual("pit", cfg.clock.timers[0].name)
        self.assertEqual("rtc",
cfg.clock.timers[1].name) self.assertEqual("hpet", cfg.clock.timers[2].name) self.assertFalse(cfg.clock.timers[2].present) self.assertEqual("hypervclock", cfg.clock.timers[3].name) self.assertTrue(cfg.clock.timers[3].present) self.assertEqual(3, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertIsInstance(cfg.features[2], vconfig.LibvirtConfigGuestFeatureHyperV) @mock.patch.object(host.Host, 'has_min_version') def test_get_guest_config_windows_hyperv_feature1(self, mock_version): def fake_version(lv_ver=None, hv_ver=None, hv_type=None): if lv_ver == (1, 0, 0) and hv_ver == (1, 1, 0): return True return False mock_version.side_effect = fake_version drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref['os_type'] = 'windows' image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(cfg.clock, vconfig.LibvirtConfigGuestClock) self.assertEqual(cfg.clock.offset, "localtime") self.assertEqual(3, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertIsInstance(cfg.features[2], vconfig.LibvirtConfigGuestFeatureHyperV) self.assertTrue(cfg.features[2].relaxed) self.assertFalse(cfg.features[2].spinlocks) self.assertFalse(cfg.features[2].vapic) @mock.patch.object(host.Host, 'has_min_version') def test_get_guest_config_windows_hyperv_feature2(self, mock_version): mock_version.return_value = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref['os_type'] = 
'windows' image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(cfg.clock, vconfig.LibvirtConfigGuestClock) self.assertEqual(cfg.clock.offset, "localtime") self.assertEqual(3, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertIsInstance(cfg.features[2], vconfig.LibvirtConfigGuestFeatureHyperV) self.assertTrue(cfg.features[2].relaxed) self.assertTrue(cfg.features[2].spinlocks) self.assertEqual(8191, cfg.features[2].spinlock_retries) self.assertTrue(cfg.features[2].vapic) def test_get_guest_config_with_two_nics(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 2), image_meta, disk_info) self.assertEqual(2, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertEqual(cfg.memory, 2 * units.Mi) self.assertEqual(cfg.vcpus, 1) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_boot_dev, ["hd"]) self.assertIsNone(cfg.os_root) self.assertEqual(len(cfg.devices), 10) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[4], 
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)

    def test_get_guest_config_bug_1118829(self):
        # Regression test: building a guest config with a hand-rolled
        # disk_info mapping and an empty block_device_info must still set
        # the instance root_device_name to /dev/vda.
        self.flags(virt_type='uml', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)

        disk_info = {'disk_bus': 'virtio',
                     'cdrom_bus': 'ide',
                     'mapping': {u'vda': {'bus': 'virtio',
                                          'type': 'disk',
                                          'dev': u'vda'},
                                 'root': {'bus': 'virtio',
                                          'type': 'disk',
                                          'dev': 'vda'}}}

        # NOTE(jdg): For this specific test leave this blank
        # This will exercise the failed code path still,
        # and won't require fakes and stubs of the iscsi discovery
        block_device_info = {}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        drvr._get_guest_config(instance_ref, [], image_meta, disk_info,
                               None, block_device_info)
        self.assertEqual(instance_ref['root_device_name'], '/dev/vda')

    def test_get_guest_config_with_root_device_name(self):
        # An explicit root_device_name in block_device_info must flow
        # through to cfg.os_root (uml boots via os_root, not a boot dev).
        self.flags(virt_type='uml', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        block_device_info = {'root_device_name': '/dev/vdb'}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta,
                                            block_device_info)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                     disk_info, None, block_device_info)
        self.assertEqual(0, len(cfg.features))
        self.assertEqual(cfg.memory, 2 * units.Mi)
        self.assertEqual(cfg.vcpus, 1)
        self.assertEqual(cfg.os_type, "uml")
        self.assertEqual(cfg.os_boot_dev, [])
        self.assertEqual(cfg.os_root, '/dev/vdb')
        self.assertEqual(len(cfg.devices), 3)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)

    def test_has_uefi_support_with_invalid_version(self):
        # Too-old libvirt: no UEFI support regardless of arch/loader.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(drvr._host,
                               'has_min_version', return_value=False):
            self.assertFalse(drvr._has_uefi_support())

    def test_has_uefi_support_not_supported_arch(self):
        # NOTE(review): 'caps' is built with arch "alpha" but never wired
        # into the driver (host.Host.get_capabilities is not mocked here),
        # so the test may pass for the wrong reason -- confirm and add the
        # mock if so.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "alpha"
        self.assertFalse(drvr._has_uefi_support())

    @mock.patch('os.path.exists', return_value=False)
    def test_has_uefi_support_with_no_loader_existed(self, mock_exist):
        # Loader binary missing on disk: no UEFI support.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertFalse(drvr._has_uefi_support())

    @mock.patch('os.path.exists', return_value=True)
    def test_has_uefi_support(self, mock_has_version):
        # Loader present + new-enough libvirt: UEFI is supported.
        # NOTE(review): the 'caps' object below is unused (get_capabilities
        # is not mocked); the parameter name also shadows the os.path.exists
        # mock -- both look like copy/paste artifacts worth cleaning up.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"

        with mock.patch.object(drvr._host,
                               'has_min_version', return_value=True):
            self.assertTrue(drvr._has_uefi_support())

    def test_get_guest_config_with_uefi(self):
        # hw_firmware_type=uefi in the image properties selects the
        # pflash loader type in the generated guest config.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_firmware_type": "uefi"}})
        instance_ref = objects.Instance(**self.test_instance)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        with test.nested(
                mock.patch.object(drvr, "_has_uefi_support",
                                  return_value=True)):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertEqual(cfg.os_loader_type, "pflash")

    # NOTE(review): the next method definition continues in the following
    # (mangled) chunk of this dump.
    def
test_get_guest_config_with_block_device(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) conn_info = {'driver_volume_type': 'fake'} bdms = block_device_obj.block_device_make_list_from_dicts( self.context, [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vdc'}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vdd'}), ] ) info = {'block_device_mapping': driver_block_device.convert_volumes( bdms )} info['block_device_mapping'][0]['connection_info'] = conn_info info['block_device_mapping'][1]['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, info) with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save' ) as mock_save: cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info, None, info) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[2].target_dev, 'vdc') self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[3].target_dev, 'vdd') mock_save.assert_called_with() def test_get_guest_config_lxc_with_attached_volume(self): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) conn_info = {'driver_volume_type': 'fake'} bdms = block_device_obj.block_device_make_list_from_dicts( self.context, [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'boot_index': 0}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'source_type': 'volume', 'destination_type': 'volume', }), 
fake_block_device.FakeDbBlockDeviceDict( {'id': 3, 'source_type': 'volume', 'destination_type': 'volume', }), ] ) info = {'block_device_mapping': driver_block_device.convert_volumes( bdms )} info['block_device_mapping'][0]['connection_info'] = conn_info info['block_device_mapping'][1]['connection_info'] = conn_info info['block_device_mapping'][2]['connection_info'] = conn_info info['block_device_mapping'][0]['mount_device'] = '/dev/vda' info['block_device_mapping'][1]['mount_device'] = '/dev/vdc' info['block_device_mapping'][2]['mount_device'] = '/dev/vdd' with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save' ) as mock_save: disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, info) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info, None, info) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[1].target_dev, 'vdc') self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[2].target_dev, 'vdd') mock_save.assert_called_with() def test_get_guest_config_with_configdrive(self): # It's necessary to check if the architecture is power, because # power doesn't have support to ide, and so libvirt translate # all ide calls to scsi drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) # make configdrive.required_by() return True instance_ref['config_drive'] = True disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) # The last device is selected for this. on x86 is the last ide # device (hdd). 
Since power only support scsi, the last device # is sdz expect = {"ppc": "sdz", "ppc64": "sdz"} disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd") self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[2].target_dev, disk) def test_get_guest_config_with_virtio_scsi_bus(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_scsi_model": "virtio-scsi"}}) instance_ref = objects.Instance(**self.test_instance) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, []) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestController) self.assertEqual(cfg.devices[2].model, 'virtio-scsi') def test_get_guest_config_with_virtio_scsi_bus_bdm(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_scsi_model": "virtio-scsi"}}) instance_ref = objects.Instance(**self.test_instance) conn_info = {'driver_volume_type': 'fake'} bdms = block_device_obj.block_device_make_list_from_dicts( self.context, [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sdc', 'disk_bus': 'scsi'}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sdd', 'disk_bus': 'scsi'}), ] ) bd_info = { 'block_device_mapping': driver_block_device.convert_volumes(bdms)} bd_info['block_device_mapping'][0]['connection_info'] = conn_info bd_info['block_device_mapping'][1]['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, 
                                            image_meta, bd_info)
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                         disk_info, [], bd_info)
            # Both volumes appear as scsi disks, followed by the
            # virtio-scsi controller.
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'sdc')
            self.assertEqual(cfg.devices[2].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[3].target_dev, 'sdd')
            self.assertEqual(cfg.devices[3].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[4],
                                  vconfig.LibvirtConfigGuestController)
            self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
            mock_save.assert_called_with()

    def test_get_guest_config_with_vnc(self):
        # VNC enabled, spice disabled, no usb tablet: expect exactly 7
        # devices with a vnc graphics device and no input device.
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False, group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "vnc")

    def test_get_guest_config_with_vnc_and_tablet(self):
        # Same as above but with the usb tablet enabled: an extra input
        # device is expected before the graphics device.
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=False,
group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "vnc") def test_get_guest_config_with_spice_and_tablet(self): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=True, agent_enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], 
vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "spice") def test_get_guest_config_with_spice_and_agent(self): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestChannel) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0") self.assertEqual(cfg.devices[5].type, "spice") self.assertEqual(cfg.devices[6].type, "qxl") @mock.patch('nova.console.serial.acquire_port') @mock.patch('nova.virt.hardware.get_number_of_serial_ports', return_value=1) @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',) def test_create_serial_console_devices_based_on_arch(self, mock_get_arch, mock_get_port_number, mock_acquire_port): 
self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) expected = {arch.X86_64: vconfig.LibvirtConfigGuestSerial, arch.S390: vconfig.LibvirtConfigGuestConsole, arch.S390X: vconfig.LibvirtConfigGuestConsole} for guest_arch, device_type in expected.items(): mock_get_arch.return_value = guest_arch guest = vconfig.LibvirtConfigGuest() drvr._create_serial_console_devices(guest, instance=None, flavor={}, image_meta={}) self.assertEqual(1, len(guest.devices)) console_device = guest.devices[0] self.assertIsInstance(console_device, device_type) self.assertEqual("tcp", console_device.type) @mock.patch('nova.console.serial.acquire_port') def test_get_guest_config_serial_console(self, acquire_port): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) acquire_port.return_value = 11111 cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(8, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("tcp", cfg.devices[2].type) self.assertEqual(11111, cfg.devices[2].listen_port) def test_get_guest_config_serial_console_through_flavor(self): self.flags(enabled=True, group='serial_console') drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw:serial_port_count': 3}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        # hw:serial_port_count=3 in the flavor yields 4 serial devices?
        # No: devices 2-5 are serial, but only 2-4 are asserted tcp below;
        # 10 devices total (2 disks, 4 serial, input, graphics, video,
        # balloon).
        self.assertEqual(10, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual("tcp", cfg.devices[3].type)
        self.assertEqual("tcp", cfg.devices[4].type)

    def test_get_guest_config_serial_console_invalid_flavor(self):
        # A non-numeric hw:serial_port_count extra spec must be rejected
        # with ImageSerialPortNumberInvalid.
        self.flags(enabled=True, group='serial_console')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw:serial_port_count': "a"}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        self.assertRaises(
            exception.ImageSerialPortNumberInvalid,
            drvr._get_guest_config, instance_ref, [],
            image_meta, disk_info)

    def test_get_guest_config_serial_console_image_and_flavor(self):
        self.flags(enabled=True, group='serial_console')

        drvr =
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_serial_port_count": "3"}}) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw:serial_port_count': 4} disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(10, len(cfg.devices), cfg.devices) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("tcp", cfg.devices[2].type) self.assertEqual("tcp", cfg.devices[3].type) self.assertEqual("tcp", cfg.devices[4].type) @mock.patch('nova.console.serial.acquire_port') def test_get_guest_config_serial_console_through_port_rng_exhausted( self, acquire_port): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) acquire_port.side_effect = exception.SocketPortRangeExhaustedException( '127.0.0.1') self.assertRaises( exception.SocketPortRangeExhaustedException, drvr._get_guest_config, instance_ref, [], image_meta, 
disk_info) @mock.patch('os.path.getsize', return_value=0) # size doesn't matter @mock.patch('nova.virt.libvirt.storage.lvm.get_volume_size', return_value='fake-size') def test_detach_encrypted_volumes(self, mock_getsize, mock_get_volume_size): """Test that unencrypted volumes are not disconnected with dmcrypt.""" instance = objects.Instance(**self.test_instance) xml = """ """ dom = FakeVirtDomain(fake_xml=xml) instance.ephemeral_key_uuid = 'fake-id' # encrypted conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @mock.patch.object(dmcrypt, 'delete_volume') @mock.patch.object(conn._host, 'get_domain', return_value=dom) def detach_encrypted_volumes(block_device_info, mock_get_domain, mock_delete_volume): conn._detach_encrypted_volumes(instance, block_device_info) mock_get_domain.assert_called_once_with(instance) self.assertFalse(mock_delete_volume.called) block_device_info = {'root_device_name': '/dev/vda', 'ephemerals': [], 'block_device_mapping': []} detach_encrypted_volumes(block_device_info) @mock.patch.object(libvirt_guest.Guest, "get_xml_desc") def test_get_serial_ports_from_guest(self, mock_get_xml_desc): i = self._test_get_serial_ports_from_guest(None, mock_get_xml_desc) self.assertEqual([ ('127.0.0.1', 100), ('127.0.0.1', 101), ('127.0.0.2', 100), ('127.0.0.2', 101)], list(i)) @mock.patch.object(libvirt_guest.Guest, "get_xml_desc") def test_get_serial_ports_from_guest_bind_only(self, mock_get_xml_desc): i = self._test_get_serial_ports_from_guest('bind', mock_get_xml_desc) self.assertEqual([ ('127.0.0.1', 101), ('127.0.0.2', 100)], list(i)) @mock.patch.object(libvirt_guest.Guest, "get_xml_desc") def test_get_serial_ports_from_guest_connect_only(self, mock_get_xml_desc): i = self._test_get_serial_ports_from_guest('connect', mock_get_xml_desc) self.assertEqual([ ('127.0.0.1', 100), ('127.0.0.2', 101)], list(i)) @mock.patch.object(libvirt_guest.Guest, "get_xml_desc") def test_get_serial_ports_from_guest_on_s390(self, mock_get_xml_desc): i = 
self._test_get_serial_ports_from_guest(None, mock_get_xml_desc, 'console') self.assertEqual([ ('127.0.0.1', 100), ('127.0.0.1', 101), ('127.0.0.2', 100), ('127.0.0.2', 101)], list(i)) def _test_get_serial_ports_from_guest(self, mode, mock_get_xml_desc, dev_name='serial'): xml = """ <%(dev_name)s type="tcp"> <%(dev_name)s type="tcp"> <%(dev_name)s type="tcp"> <%(dev_name)s type="tcp"> """ % {'dev_name': dev_name} mock_get_xml_desc.return_value = xml drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) guest = libvirt_guest.Guest(FakeVirtDomain()) return drvr._get_serial_ports_from_guest(guest, mode=mode) def test_get_guest_config_with_type_xen(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='xen', use_usb_tablet=False, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 6) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[3].type, "vnc") self.assertEqual(cfg.devices[4].type, "xen") @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch', return_value=arch.S390X) def test_get_guest_config_with_type_kvm_on_s390(self, mock_get_arch): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=False, group='libvirt') 
self._stub_host_capabilities_cpu_arch(arch.S390X) instance_ref = objects.Instance(**self.test_instance) cfg = self._get_guest_config_via_fake_api(instance_ref) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) log_file_device = cfg.devices[2] self.assertIsInstance(log_file_device, vconfig.LibvirtConfigGuestConsole) self.assertEqual("sclplm", log_file_device.target_type) self.assertEqual("file", log_file_device.type) terminal_device = cfg.devices[3] self.assertIsInstance(terminal_device, vconfig.LibvirtConfigGuestConsole) self.assertEqual("sclp", terminal_device.target_type) self.assertEqual("pty", terminal_device.type) self.assertEqual("s390-ccw-virtio", cfg.os_mach_type) def _stub_host_capabilities_cpu_arch(self, cpu_arch): def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = cpu_arch caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) def _get_guest_config_via_fake_api(self, instance): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) return drvr._get_guest_config(instance, [], image_meta, disk_info) def test_get_guest_config_with_type_xen_pae_hvm(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='xen', use_usb_tablet=False, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref['vm_mode'] = vm_mode.HVM image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, 
disk_info) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path) self.assertEqual(3, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeaturePAE) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[2], vconfig.LibvirtConfigGuestFeatureAPIC) def test_get_guest_config_with_type_xen_pae_pvm(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='xen', use_usb_tablet=False, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(cfg.os_type, vm_mode.XEN) self.assertEqual(1, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeaturePAE) def test_get_guest_config_with_vnc_and_spice(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 10) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], 
vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestChannel) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0") self.assertEqual(cfg.devices[6].type, "vnc") self.assertEqual(cfg.devices[7].type, "spice") def test_get_guest_config_with_watchdog_action_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_watchdog_action": "none"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 9) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestWatchdog) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("none", cfg.devices[7].action) def _test_get_guest_usb_tablet(self, vnc_enabled, spice_enabled, os_type, agent_enabled=False): self.flags(enabled=vnc_enabled, group='vnc') 
self.flags(enabled=spice_enabled, agent_enabled=agent_enabled, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) return drvr._get_guest_usb_tablet(os_type) def test_get_guest_usb_tablet_wipe(self): self.flags(use_usb_tablet=True, group='libvirt') tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM) self.assertIsNotNone(tablet) tablet = self._test_get_guest_usb_tablet(True, False, vm_mode.HVM) self.assertIsNotNone(tablet) tablet = self._test_get_guest_usb_tablet(False, True, vm_mode.HVM) self.assertIsNotNone(tablet) tablet = self._test_get_guest_usb_tablet(False, False, vm_mode.HVM) self.assertIsNone(tablet) tablet = self._test_get_guest_usb_tablet(True, True, "foo") self.assertIsNone(tablet) tablet = self._test_get_guest_usb_tablet( False, True, vm_mode.HVM, True) self.assertIsNone(tablet) def _test_get_guest_config_with_watchdog_action_flavor(self, hw_watchdog_action="hw:watchdog_action"): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {hw_watchdog_action: 'none'} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(9, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestWatchdog) 
self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("none", cfg.devices[7].action) def test_get_guest_config_with_watchdog_action_through_flavor(self): self._test_get_guest_config_with_watchdog_action_flavor() # TODO(pkholkin): the test accepting old property name 'hw_watchdog_action' # should be removed in the next release def test_get_guest_config_with_watchdog_action_through_flavor_no_scope( self): self._test_get_guest_config_with_watchdog_action_flavor( hw_watchdog_action="hw_watchdog_action") def test_get_guest_config_with_watchdog_overrides_flavor(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_watchdog_action': 'none'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_watchdog_action": "pause"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(9, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestWatchdog) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("pause", cfg.devices[7].action) def test_get_guest_config_with_video_driver_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), 
True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "vmvga"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[5].type, "vnc") self.assertEqual(cfg.devices[6].type, "vmvga") def test_get_guest_config_with_qga_through_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_qemu_guest_agent": "yes"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 9) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) 
self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestChannel) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "vnc") self.assertEqual(cfg.devices[7].type, "unix") self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0") def test_get_guest_config_with_video_driver_vram(self): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_video:ram_max_mb': "100"} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "qxl", "hw_video_ram": "64"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestChannel) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[5].type, "spice") self.assertEqual(cfg.devices[6].type, "qxl") self.assertEqual(cfg.devices[6].vram, 64 * units.Mi / units.Ki) @mock.patch('nova.virt.disk.api.teardown_container') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info') 
@mock.patch('nova.virt.disk.api.setup_container') @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch.object(fake_libvirt_utils, 'get_instance_path') def test_unmount_fs_if_error_during_lxc_create_domain(self, mock_get_inst_path, mock_ensure_tree, mock_setup_container, mock_get_info, mock_teardown): """If we hit an error during a `_create_domain` call to `libvirt+lxc` we need to ensure the guest FS is unmounted from the host so that any future `lvremove` calls will work. """ self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) mock_instance = mock.MagicMock() mock_get_inst_path.return_value = '/tmp/' mock_image_backend = mock.MagicMock() drvr.image_backend = mock_image_backend mock_image = mock.MagicMock() mock_image.path = '/tmp/test.img' drvr.image_backend.image.return_value = mock_image mock_setup_container.return_value = '/dev/nbd0' mock_get_info.side_effect = exception.InstanceNotFound( instance_id='foo') drvr._conn.defineXML = mock.Mock() drvr._conn.defineXML.side_effect = ValueError('somethingbad') with test.nested( mock.patch.object(drvr, '_is_booted_from_volume', return_value=False), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr, 'firewall_driver'), mock.patch.object(drvr, 'cleanup')): self.assertRaises(ValueError, drvr._create_domain_and_network, self.context, 'xml', mock_instance, None, None) mock_teardown.assert_called_with(container_dir='/tmp/rootfs') def test_video_driver_flavor_limit_not_set(self): self.flags(virt_type='kvm', group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "qxl", "hw_video_ram": "64"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with mock.patch.object(objects.Instance, 'save'): 
self.assertRaises(exception.RequestedVRamTooHigh, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) def test_video_driver_ram_above_flavor_limit(self): self.flags(virt_type='kvm', group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') instance_ref = objects.Instance(**self.test_instance) instance_type = instance_ref.get_flavor() instance_type.extra_specs = {'hw_video:ram_max_mb': "50"} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "qxl", "hw_video_ram": "64"}}) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with mock.patch.object(objects.Instance, 'save'): self.assertRaises(exception.RequestedVRamTooHigh, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) def test_get_guest_config_without_qga_through_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_qemu_guest_agent": "no"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) 
self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "vnc") def test_get_guest_config_with_rng_device(self): self.flags(virt_type='kvm', use_usb_tablet=False, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestRng) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[6].model, 'random') self.assertIsNone(cfg.devices[6].backend) self.assertIsNone(cfg.devices[6].rate_bytes) self.assertIsNone(cfg.devices[6].rate_period) def test_get_guest_config_with_rng_not_allowed(self): self.flags(virt_type='kvm', use_usb_tablet=False, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 7) 
self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigMemoryBalloon) def test_get_guest_config_with_rng_limits(self): self.flags(virt_type='kvm', use_usb_tablet=False, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True', 'hw_rng:rate_bytes': '1024', 'hw_rng:rate_period': '2'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestRng) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[6].model, 'random') self.assertIsNone(cfg.devices[6].backend) self.assertEqual(cfg.devices[6].rate_bytes, 1024) self.assertEqual(cfg.devices[6].rate_period, 2) @mock.patch('nova.virt.libvirt.driver.os.path.exists') def 
test_get_guest_config_with_rng_backend(self, mock_path): self.flags(virt_type='kvm', use_usb_tablet=False, rng_dev_path='/dev/hw_rng', group='libvirt') mock_path.return_value = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestRng) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[6].model, 'random') self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng') self.assertIsNone(cfg.devices[6].rate_bytes) self.assertIsNone(cfg.devices[6].rate_period) @mock.patch('nova.virt.libvirt.driver.os.path.exists') def test_get_guest_config_with_rng_dev_not_present(self, mock_path): self.flags(virt_type='kvm', use_usb_tablet=False, rng_dev_path='/dev/hw_rng', group='libvirt') mock_path.return_value = False drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = 
blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.assertRaises(exception.RngDeviceNotExist, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_guest_cpu_shares_with_multi_vcpu(self, is_able): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.vcpus = 4 image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(4096, cfg.cputune.shares) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_with_cpu_quota(self, is_able): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000', 'quota:cpu_period': '20000'} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(10000, cfg.cputune.shares) self.assertEqual(20000, cfg.cputune.period) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_with_bogus_cpu_quota(self, is_able): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'quota:cpu_shares': 'fishfood', 'quota:cpu_period': '20000'} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = 
blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.assertRaises(ValueError, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=False) def test_get_update_guest_cputune(self, is_able): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000', 'quota:cpu_period': '20000'} self.assertRaises( exception.UnsupportedHostCPUControlPolicy, drvr._update_guest_cputune, {}, instance_ref.flavor, "kvm") def _test_get_guest_config_sysinfo_serial(self, expected_serial): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) cfg = drvr._get_guest_config_sysinfo(instance_ref) self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo) self.assertEqual(version.vendor_string(), cfg.system_manufacturer) self.assertEqual(version.product_string(), cfg.system_product) self.assertEqual(version.version_string_with_package(), cfg.system_version) self.assertEqual(expected_serial, cfg.system_serial) self.assertEqual(instance_ref['uuid'], cfg.system_uuid) self.assertEqual("Virtual Machine", cfg.system_family) def test_get_guest_config_sysinfo_serial_none(self): self.flags(sysinfo_serial="none", group="libvirt") self._test_get_guest_config_sysinfo_serial(None) @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_sysinfo_serial_hardware") def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid): self.flags(sysinfo_serial="hardware", group="libvirt") theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" mock_uuid.return_value = theuuid self._test_get_guest_config_sysinfo_serial(theuuid) @contextlib.contextmanager def patch_exists(self, result): real_exists = os.path.exists def fake_exists(filename): if filename == "/etc/machine-id": return result return real_exists(filename) with 
mock.patch.object(os.path, "exists") as mock_exists: mock_exists.side_effect = fake_exists yield mock_exists def test_get_guest_config_sysinfo_serial_os(self): self.flags(sysinfo_serial="os", group="libvirt") theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" with test.nested( mock.patch.object(six.moves.builtins, "open", mock.mock_open(read_data=theuuid)), self.patch_exists(True)): self._test_get_guest_config_sysinfo_serial(theuuid) def test_get_guest_config_sysinfo_serial_os_empty_machine_id(self): self.flags(sysinfo_serial="os", group="libvirt") with test.nested( mock.patch.object(six.moves.builtins, "open", mock.mock_open(read_data="")), self.patch_exists(True)): self.assertRaises(exception.NovaException, self._test_get_guest_config_sysinfo_serial, None) def test_get_guest_config_sysinfo_serial_os_no_machine_id_file(self): self.flags(sysinfo_serial="os", group="libvirt") with self.patch_exists(False): self.assertRaises(exception.NovaException, self._test_get_guest_config_sysinfo_serial, None) def test_get_guest_config_sysinfo_serial_auto_hardware(self): self.flags(sysinfo_serial="auto", group="libvirt") real_exists = os.path.exists with test.nested( mock.patch.object(os.path, "exists"), mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_sysinfo_serial_hardware") ) as (mock_exists, mock_uuid): def fake_exists(filename): if filename == "/etc/machine-id": return False return real_exists(filename) mock_exists.side_effect = fake_exists theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" mock_uuid.return_value = theuuid self._test_get_guest_config_sysinfo_serial(theuuid) def test_get_guest_config_sysinfo_serial_auto_os(self): self.flags(sysinfo_serial="auto", group="libvirt") real_exists = os.path.exists real_open = builtins.open with test.nested( mock.patch.object(os.path, "exists"), mock.patch.object(builtins, "open"), ) as (mock_exists, mock_open): def fake_exists(filename): if filename == "/etc/machine-id": return True return real_exists(filename) 
mock_exists.side_effect = fake_exists theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" def fake_open(filename, *args, **kwargs): if filename == "/etc/machine-id": h = mock.MagicMock() h.read.return_value = theuuid h.__enter__.return_value = h return h return real_open(filename, *args, **kwargs) mock_open.side_effect = fake_open self._test_get_guest_config_sysinfo_serial(theuuid) def _create_fake_service_compute(self): service_info = { 'id': 1729, 'host': 'fake', 'report_count': 0 } service_ref = objects.Service(**service_info) compute_info = { 'id': 1729, 'vcpus': 2, 'memory_mb': 1024, 'local_gb': 2048, 'vcpus_used': 0, 'memory_mb_used': 0, 'local_gb_used': 0, 'free_ram_mb': 1024, 'free_disk_gb': 2048, 'hypervisor_type': 'xen', 'hypervisor_version': 1, 'running_vms': 0, 'cpu_info': '', 'current_workload': 0, 'service_id': service_ref['id'], 'host': service_ref['host'] } compute_ref = objects.ComputeNode(**compute_info) return (service_ref, compute_ref) def test_get_guest_config_with_pci_passthrough_kvm(self): self.flags(virt_type='kvm', group='libvirt') service_ref, compute_ref = self._create_fake_service_compute() instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.ALLOCATED, address='0000:00:00.1', compute_id=compute_ref['id'], instance_uuid=instance.uuid, request_id=None, extra_info={}) pci_device = objects.PciDevice(**pci_device_info) pci_list = objects.PciDeviceList() pci_list.objects.append(pci_device) instance.pci_devices = pci_list drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) cfg = drvr._get_guest_config(instance, [], image_meta, disk_info) had_pci = 0 # care only about the PCI devices for dev in cfg.devices: if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI: 
had_pci += 1 self.assertEqual(dev.type, 'pci') self.assertEqual(dev.managed, 'yes') self.assertEqual(dev.mode, 'subsystem') self.assertEqual(dev.domain, "0000") self.assertEqual(dev.bus, "00") self.assertEqual(dev.slot, "00") self.assertEqual(dev.function, "1") self.assertEqual(had_pci, 1) def test_get_guest_config_with_pci_passthrough_xen(self): self.flags(virt_type='xen', group='libvirt') service_ref, compute_ref = self._create_fake_service_compute() instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.ALLOCATED, address='0000:00:00.2', compute_id=compute_ref['id'], instance_uuid=instance.uuid, request_id=None, extra_info={}) pci_device = objects.PciDevice(**pci_device_info) pci_list = objects.PciDeviceList() pci_list.objects.append(pci_device) instance.pci_devices = pci_list drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) cfg = drvr._get_guest_config(instance, [], image_meta, disk_info) had_pci = 0 # care only about the PCI devices for dev in cfg.devices: if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI: had_pci += 1 self.assertEqual(dev.type, 'pci') self.assertEqual(dev.managed, 'no') self.assertEqual(dev.mode, 'subsystem') self.assertEqual(dev.domain, "0000") self.assertEqual(dev.bus, "00") self.assertEqual(dev.slot, "00") self.assertEqual(dev.function, "2") self.assertEqual(had_pci, 1) def test_get_guest_config_os_command_line_through_image_meta(self): self.flags(virt_type="kvm", cpu_mode=None, group='libvirt') self.test_instance['kernel_id'] = "fake_kernel_id" drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": 
{"os_command_line": "fake_os_command_line"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_cmdline, "fake_os_command_line") def test_get_guest_config_os_command_line_without_kernel_id(self): self.flags(virt_type="kvm", cpu_mode=None, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"os_command_line": "fake_os_command_line"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsNone(cfg.os_cmdline) def test_get_guest_config_os_command_empty(self): self.flags(virt_type="kvm", cpu_mode=None, group='libvirt') self.test_instance['kernel_id'] = "fake_kernel_id" drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"os_command_line": ""}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) # the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by # default, so testing an empty string and None value in the # os_command_line image property must pass cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertNotEqual(cfg.os_cmdline, "") def test_get_guest_config_armv7(self): def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.ARMV7 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu return caps self.flags(virt_type="kvm", group="libvirt") instance_ref = objects.Instance(**self.test_instance) image_meta 
= objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "vexpress-a15") def test_get_guest_config_aarch64(self): def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.AARCH64 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu return caps self.flags(virt_type="kvm", group="libvirt") instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "virt") def test_get_guest_config_machine_type_s390(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigGuestCPU() image_meta = objects.ImageMeta.from_dict(self.test_image_meta) host_cpu_archs = (arch.S390, arch.S390X) for host_cpu_arch in host_cpu_archs: caps.host.cpu.arch = host_cpu_arch os_mach_type = drvr._get_machine_type(image_meta, caps) self.assertEqual('s390-ccw-virtio', os_mach_type) def test_get_guest_config_machine_type_through_image_meta(self): self.flags(virt_type="kvm", group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": 
"raw", "properties": {"hw_machine_type": "fake_machine_type"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "fake_machine_type") def test_get_guest_config_machine_type_from_config(self): self.flags(virt_type='kvm', group='libvirt') self.flags(hw_machine_type=['x86_64=fake_machine_type'], group='libvirt') def fake_getCapabilities(): return """ cef19ce0-0ca2-11df-855d-b19fbce37686 x86_64 Penryn Intel """ def fake_baselineCPU(cpu, flag): return """ Penryn Intel """ # Make sure the host arch is mocked as x86_64 self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities, baselineCPU=fake_baselineCPU, getVersion=lambda: 1005001) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "fake_machine_type") def _test_get_guest_config_ppc64(self, device_index): """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config. 
""" self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) expected = (arch.PPC64, arch.PPC) for guestarch in expected: with mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch', return_value=guestarch): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsInstance(cfg.devices[device_index], vconfig.LibvirtConfigGuestVideo) self.assertEqual(cfg.devices[device_index].type, 'vga') def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self): self.flags(enabled=True, group='vnc') self._test_get_guest_config_ppc64(6) def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self): self.flags(enabled=True, agent_enabled=True, group='spice') self._test_get_guest_config_ppc64(8) def _test_get_guest_config_bootmenu(self, image_meta, extra_specs): self.flags(virt_type='kvm', group='libvirt') conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = extra_specs disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertTrue(conf.os_bootmenu) def test_get_guest_config_bootmenu_via_image_meta(self): image_meta = objects.ImageMeta.from_dict( {"disk_format": "raw", "properties": {"hw_boot_menu": "True"}}) self._test_get_guest_config_bootmenu(image_meta, {}) def test_get_guest_config_bootmenu_via_extra_specs(self): image_meta = objects.ImageMeta.from_dict( self.test_image_meta) self._test_get_guest_config_bootmenu(image_meta, {'hw:boot_menu': 'True'}) def test_get_guest_cpu_config_none(self): self.flags(cpu_mode="none", 
group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertIsNone(conf.cpu.mode) self.assertIsNone(conf.cpu.model) self.assertEqual(conf.cpu.sockets, 1) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_config_default_kvm(self): self.flags(virt_type="kvm", cpu_mode=None, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "host-model") self.assertIsNone(conf.cpu.model) self.assertEqual(conf.cpu.sockets, 1) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_config_default_uml(self): self.flags(virt_type="uml", cpu_mode=None, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsNone(conf.cpu) def test_get_guest_cpu_config_default_lxc(self): self.flags(virt_type="lxc", cpu_mode=None, group='libvirt') drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsNone(conf.cpu) def test_get_guest_cpu_config_host_passthrough(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.flags(cpu_mode="host-passthrough", group='libvirt') disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "host-passthrough") self.assertIsNone(conf.cpu.model) self.assertEqual(conf.cpu.sockets, 1) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_config_host_model(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.flags(cpu_mode="host-model", group='libvirt') disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "host-model") self.assertIsNone(conf.cpu.model) self.assertEqual(conf.cpu.sockets, 1) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_config_custom(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) 
image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.flags(cpu_mode="custom", cpu_model="Penryn", group='libvirt') disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "custom") self.assertEqual(conf.cpu.model, "Penryn") self.assertEqual(conf.cpu.sockets, 1) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_topology(self): instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.vcpus = 8 instance_ref.flavor.extra_specs = {'hw:cpu_max_sockets': '4'} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "host-model") self.assertEqual(conf.cpu.sockets, 4) self.assertEqual(conf.cpu.cores, 2) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_memory_balloon_config_by_default(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for device in cfg.devices: if device.root_name == 'memballoon': self.assertIsInstance(device, vconfig.LibvirtConfigMemoryBalloon) self.assertEqual('virtio', device.model) self.assertEqual(10, device.period) def test_get_guest_memory_balloon_config_disable(self): self.flags(mem_stats_period_seconds=0, group='libvirt') 
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) no_exist = True for device in cfg.devices: if device.root_name == 'memballoon': no_exist = False break self.assertTrue(no_exist) def test_get_guest_memory_balloon_config_period_value(self): self.flags(mem_stats_period_seconds=21, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for device in cfg.devices: if device.root_name == 'memballoon': self.assertIsInstance(device, vconfig.LibvirtConfigMemoryBalloon) self.assertEqual('virtio', device.model) self.assertEqual(21, device.period) def test_get_guest_memory_balloon_config_qemu(self): self.flags(virt_type='qemu', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for device in cfg.devices: if device.root_name == 'memballoon': self.assertIsInstance(device, vconfig.LibvirtConfigMemoryBalloon) self.assertEqual('virtio', device.model) self.assertEqual(10, device.period) def test_get_guest_memory_balloon_config_xen(self): self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = 
objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for device in cfg.devices: if device.root_name == 'memballoon': self.assertIsInstance(device, vconfig.LibvirtConfigMemoryBalloon) self.assertEqual('xen', device.model) self.assertEqual(10, device.period) def test_get_guest_memory_balloon_config_lxc(self): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) no_exist = True for device in cfg.devices: if device.root_name == 'memballoon': no_exist = False break self.assertTrue(no_exist) def test_xml_and_uri_no_ramdisk_no_kernel(self): instance_data = dict(self.test_instance) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False) def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self): instance_data = dict(self.test_instance) instance_data.update({'vm_mode': vm_mode.HVM}) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False, expect_xen_hvm=True) def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self): instance_data = dict(self.test_instance) instance_data.update({'vm_mode': vm_mode.XEN}) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False, expect_xen_hvm=False, xen_only=True) def test_xml_and_uri_no_ramdisk(self): instance_data = dict(self.test_instance) instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=False) def test_xml_and_uri_no_kernel(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' 
self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False) def test_xml_and_uri(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=True) def test_xml_and_uri_rescue(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=True, rescue=instance_data) def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self): instance_data = dict(self.test_instance) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False, rescue=instance_data) def test_xml_and_uri_rescue_no_kernel(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=True, rescue=instance_data) def test_xml_and_uri_rescue_no_ramdisk(self): instance_data = dict(self.test_instance) instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=False, rescue=instance_data) def test_xml_uuid(self): self._check_xml_and_uuid(self.test_image_meta) def test_lxc_container_and_uri(self): instance_data = dict(self.test_instance) self._check_xml_and_container(instance_data) def test_xml_disk_prefix(self): instance_data = dict(self.test_instance) self._check_xml_and_disk_prefix(instance_data, None) def test_xml_user_specified_disk_prefix(self): instance_data = dict(self.test_instance) self._check_xml_and_disk_prefix(instance_data, 'sd') def test_xml_disk_driver(self): instance_data = dict(self.test_instance) self._check_xml_and_disk_driver(instance_data) def test_xml_disk_bus_virtio(self): image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self._check_xml_and_disk_bus(image_meta, None, (("disk", 
"virtio", "vda"),)) def test_xml_disk_bus_ide(self): # It's necessary to check if the architecture is power, because # power doesn't have support to ide, and so libvirt translate # all ide calls to scsi expected = {arch.PPC: ("cdrom", "scsi", "sda"), arch.PPC64: ("cdrom", "scsi", "sda")} expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}), ("cdrom", "ide", "hda")) image_meta = objects.ImageMeta.from_dict({ "disk_format": "iso"}) self._check_xml_and_disk_bus(image_meta, None, (expec_val,)) def test_xml_disk_bus_ide_and_virtio(self): # It's necessary to check if the architecture is power, because # power doesn't have support to ide, and so libvirt translate # all ide calls to scsi expected = {arch.PPC: ("cdrom", "scsi", "sda"), arch.PPC64: ("cdrom", "scsi", "sda")} swap = {'device_name': '/dev/vdc', 'swap_size': 1} ephemerals = [{'device_type': 'disk', 'disk_bus': 'virtio', 'device_name': '/dev/vdb', 'size': 1}] block_device_info = { 'swap': swap, 'ephemerals': ephemerals} expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}), ("cdrom", "ide", "hda")) image_meta = objects.ImageMeta.from_dict({ "disk_format": "iso"}) self._check_xml_and_disk_bus(image_meta, block_device_info, (expec_val, ("disk", "virtio", "vdb"), ("disk", "virtio", "vdc"))) @mock.patch.object(host.Host, "list_instance_domains") def test_list_instances(self, mock_list): vm1 = FakeVirtDomain(id=3, name="instance00000001") vm2 = FakeVirtDomain(id=17, name="instance00000002") vm3 = FakeVirtDomain(name="instance00000003") vm4 = FakeVirtDomain(name="instance00000004") mock_list.return_value = [vm1, vm2, vm3, vm4] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) names = drvr.list_instances() self.assertEqual(names[0], vm1.name()) self.assertEqual(names[1], vm2.name()) self.assertEqual(names[2], vm3.name()) self.assertEqual(names[3], vm4.name()) mock_list.assert_called_with(only_guests=True, only_running=False) @mock.patch.object(host.Host, "list_instance_domains") def 
test_list_instance_uuids(self, mock_list): vm1 = FakeVirtDomain(id=3, name="instance00000001") vm2 = FakeVirtDomain(id=17, name="instance00000002") vm3 = FakeVirtDomain(name="instance00000003") vm4 = FakeVirtDomain(name="instance00000004") mock_list.return_value = [vm1, vm2, vm3, vm4] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) uuids = drvr.list_instance_uuids() self.assertEqual(len(uuids), 4) self.assertEqual(uuids[0], vm1.UUIDString()) self.assertEqual(uuids[1], vm2.UUIDString()) self.assertEqual(uuids[2], vm3.UUIDString()) self.assertEqual(uuids[3], vm4.UUIDString()) mock_list.assert_called_with(only_guests=True, only_running=False) @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus') def test_get_host_vcpus(self, get_online_cpus): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.flags(vcpu_pin_set="4-5") get_online_cpus.return_value = set([4, 5, 6]) expected_vcpus = 2 vcpus = drvr._get_vcpu_total() self.assertEqual(expected_vcpus, vcpus) @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus') def test_get_host_vcpus_out_of_range(self, get_online_cpus): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.flags(vcpu_pin_set="4-6") get_online_cpus.return_value = set([4, 5]) self.assertRaises(exception.Invalid, drvr._get_vcpu_total) @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus') def test_get_host_vcpus_libvirt_error(self, get_online_cpus): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) not_supported_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'this function is not supported by the connection driver:' ' virNodeNumOfDevices', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) self.flags(vcpu_pin_set="4-6") get_online_cpus.side_effect = not_supported_exc self.assertRaises(exception.Invalid, drvr._get_vcpu_total) @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus') def test_get_host_vcpus_libvirt_error_success(self, get_online_cpus): drvr = 
# NOTE(review): mangled extraction — the first lines below are the tail of a
# vcpu-total test whose `def` lies before this chunk; the last line ends with a
# bare `def` whose name (`_test_check_discard`) continues on the next source
# line. Formatting reconstructed; tokens preserved.
        libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        # With an explicit vcpu_pin_set, the driver should not need the
        # (unsupported) online-CPU enumeration and still report 1 vcpu.
        self.flags(vcpu_pin_set="1")
        get_online_cpus.side_effect = not_supported_exc
        expected_vcpus = 1
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)

    @mock.patch('nova.virt.libvirt.host.Host.get_cpu_count')
    def test_get_host_vcpus_after_hotplug(self, get_cpu_count):
        """_get_vcpu_total must reflect a changed host CPU count (hotplug)."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        get_cpu_count.return_value = 2
        expected_vcpus = 2
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)
        # Simulate CPU hotplug: the count is re-read, not cached.
        get_cpu_count.return_value = 3
        expected_vcpus = 3
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)

    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_quiesce(self, mock_has_min_version):
        """quiesce() freezes guest filesystems via the qemu guest agent."""
        self.create_fake_libvirt_mock(lookupByName=self.fake_lookup)
        with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
            instance = objects.Instance(**self.test_instance)
            # hw_qemu_guest_agent=yes is what enables the freeze path.
            image_meta = objects.ImageMeta.from_dict(
                {"properties": {"hw_qemu_guest_agent": "yes"}})
            self.assertIsNone(drvr.quiesce(self.context, instance,
                                           image_meta))
            mock_fsfreeze.assert_called_once_with()

    def test_quiesce_not_supported(self):
        """quiesce() without guest-agent support raises a specific error."""
        self.create_fake_libvirt_mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(exception.InstanceQuiesceNotSupported,
                          drvr.quiesce, self.context, instance, None)

    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_unquiesce(self, mock_has_min_version):
        """unquiesce() thaws guest filesystems via the qemu guest agent."""
        self.create_fake_libvirt_mock(getLibVersion=lambda: 1002005,
                                      lookupByName=self.fake_lookup)
        with mock.patch.object(FakeVirtDomain, "fsThaw") as mock_fsthaw:
            drvr = \
                libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
            instance = objects.Instance(**self.test_instance)
            image_meta = objects.ImageMeta.from_dict(
                {"properties": {"hw_qemu_guest_agent": "yes"}})
            self.assertIsNone(drvr.unquiesce(self.context, instance,
                                             image_meta))
            mock_fsthaw.assert_called_once_with()

    def test_create_snapshot_metadata(self):
        """_create_snapshot_metadata copies instance ids into image props.

        Also verifies that a non-raw base image's disk/container formats
        override the defaults ('ami' / 'test_container' vs 'bare').
        """
        base = objects.ImageMeta.from_dict(
            {'disk_format': 'raw'})
        instance_data = {'kernel_id': 'kernel',
                         'project_id': 'prj_id',
                         'ramdisk_id': 'ram_id',
                         'os_type': None}
        instance = objects.Instance(**instance_data)
        img_fmt = 'raw'
        snp_name = 'snapshot_name'
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt,
                                             snp_name)
        expected = {'is_public': False,
                    'status': 'active',
                    'name': snp_name,
                    'properties': {
                        'kernel_id': instance['kernel_id'],
                        'image_location': 'snapshot',
                        'image_state': 'available',
                        'owner_id': instance['project_id'],
                        'ramdisk_id': instance['ramdisk_id'],
                    },
                    'disk_format': img_fmt,
                    'container_format': 'bare',
                    }
        self.assertEqual(ret, expected)

        # simulate an instance with os_type field defined
        # disk format equals to ami
        # container format not equals to bare
        instance['os_type'] = 'linux'
        base = objects.ImageMeta.from_dict(
            {'disk_format': 'ami',
             'container_format': 'test_container'})
        expected['properties']['os_type'] = instance['os_type']
        expected['disk_format'] = base.disk_format
        expected['container_format'] = base.container_format
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt,
                                             snp_name)
        self.assertEqual(ret, expected)

    def test_get_volume_driver(self):
        """'fake' volume type resolves to LibvirtFakeVolumeDriver."""
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'fake',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        driver = conn._get_volume_driver(connection_info)
        result = isinstance(driver, volume_drivers.LibvirtFakeVolumeDriver)
        self.assertTrue(result)

    def test_get_volume_driver_unknown(self):
        """An unregistered volume type raises VolumeDriverNotFound."""
        conn = \
            libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'unknown',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        self.assertRaises(
            exception.VolumeDriverNotFound,
            conn._get_volume_driver,
            connection_info
        )

    @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver,
                       'connect_volume')
    @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver, 'get_config')
    def test_get_volume_config(self, get_config, connect_volume):
        """_get_volume_config delegates to the volume driver's get_config."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'fake',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus',
               'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'],
                     'type': bdm['device_type'],
                     'dev': 'vdb'}
        mock_config = mock.MagicMock()

        get_config.return_value = mock_config
        config = drvr._get_volume_config(connection_info, disk_info)
        get_config.assert_called_once_with(connection_info, disk_info)
        self.assertEqual(mock_config, config)

    def test_attach_invalid_volume_type(self):
        """attach_volume with an unknown type raises VolumeDriverNotFound."""
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.VolumeDriverNotFound,
                          drvr.attach_volume,
                          None, {"driver_volume_type": "badtype"},
                          instance, "/dev/sda")

    def test_attach_blockio_invalid_hypervisor(self):
        """Block-size hints on an unsupported virt_type raise an error."""
        self.flags(virt_type='fake_type', group='libvirt')
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InvalidHypervisorType,
                          drvr.attach_volume,
                          None,
                          {"driver_volume_type": "fake",
                           "data": {"logical_block_size": "4096",
                                    "physical_block_size": "4096"}
                           },
                          instance, "/dev/sda")

    # NOTE(review): the method name for this `def` continues on the next
    # original source line (`_test_check_discard(...)`).
    def \
# NOTE(review): mangled extraction — the first line below completes a `def`
# token from the previous source line; the last line below is an open
# parenthesised expression completed on the next source line.
        _test_check_discard(self, mock_log, driver_discard=None, bus=None,
                            should_log=False):
        """Shared helper: run _check_discard_for_attach_volume and assert
        whether LOG.debug was called (should_log).
        """
        mock_config = mock.Mock()
        mock_config.driver_discard = driver_discard
        mock_config.target_bus = bus
        mock_instance = mock.Mock()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._check_discard_for_attach_volume(mock_config, mock_instance)

        self.assertEqual(should_log, mock_log.called)

    @mock.patch('nova.virt.libvirt.driver.LOG.debug')
    def test_check_discard_for_attach_volume_no_unmap(self, mock_log):
        """No discard requested: nothing is logged regardless of bus."""
        self._test_check_discard(mock_log, driver_discard=None, bus='scsi',
                                 should_log=False)

    @mock.patch('nova.virt.libvirt.driver.LOG.debug')
    def test_check_discard_for_attach_volume_blk_controller(self, mock_log):
        """discard=unmap on a virtio (blk) controller logs a warning."""
        self._test_check_discard(mock_log, driver_discard='unmap',
                                 bus='virtio', should_log=True)

    @mock.patch('nova.virt.libvirt.driver.LOG.debug')
    def test_check_discard_for_attach_volume_valid_controller(self,
                                                              mock_log):
        """discard=unmap on a scsi controller is valid: no log."""
        self._test_check_discard(mock_log, driver_discard='unmap',
                                 bus='scsi', should_log=False)

    @mock.patch('nova.virt.libvirt.driver.LOG.debug')
    def test_check_discard_for_attach_volume_blk_controller_no_unmap(
            self, mock_log):
        """virtio bus without discard requested: no log."""
        self._test_check_discard(mock_log, driver_discard=None,
                                 bus='virtio', should_log=False)

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_attach_volume_with_vir_domain_affect_live_flag(
            self, mock_get_domain, mock_get_info, get_image):
        """attach_volume on a RUNNING or PAUSED guest must pass both
        AFFECT_CONFIG and AFFECT_LIVE flags to attachDeviceFlags.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = {}
        get_image.return_value = image_meta
        mock_dom = mock.MagicMock()
        mock_get_domain.return_value = mock_dom

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus',
               'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'],
                     'type': bdm['device_type'],
                     'dev': 'vdb'}
        mock_get_info.return_value = disk_info
        mock_conf = mock.MagicMock()
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

        with test.nested(
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=mock_conf),
            mock.patch.object(drvr, '_set_cache_mode'),
            mock.patch.object(drvr, '_check_discard_for_attach_volume')
        ) as (mock_connect_volume, mock_get_volume_config,
              mock_set_cache_mode, mock_check_discard):
            # Both live-ish power states go through the same flag path.
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234,
                                              5678]
                drvr.attach_volume(self.context, connection_info, instance,
                                   "/dev/vdb", disk_bus=bdm['disk_bus'],
                                   device_type=bdm['device_type'])

                mock_get_domain.assert_called_with(instance)
                mock_get_info.assert_called_with(
                    instance, CONF.libvirt.virt_type,
                    test.MatchType(objects.ImageMeta), bdm)
                mock_connect_volume.assert_called_with(
                    connection_info, disk_info)
                mock_get_volume_config.assert_called_with(
                    connection_info, disk_info)
                mock_set_cache_mode.assert_called_with(mock_conf)
                mock_dom.attachDeviceFlags.assert_called_with(
                    mock_conf.to_xml(), flags=flags)
                mock_check_discard.assert_called_with(mock_conf, instance)

    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_detach_volume_with_vir_domain_affect_live_flag(
            self, mock_get_domain):
        """detach_volume on a live guest uses CONFIG|LIVE flags."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        # NOTE(review): the domain-XML fixture content appears to have been
        # lost in extraction (only whitespace remains between the triple
        # quotes) — verify against the upstream source before relying on it.
        mock_xml_with_disk = """ """
        mock_xml_without_disk = """ """
        mock_dom = mock.MagicMock()

        # Second time don't return anything about disk vdc so it looks
        # removed
        return_list = [mock_xml_with_disk, mock_xml_without_disk]
        # Doubling the size of return list because we test with two guest
        # power states
        mock_dom.XMLDesc.side_effect = return_list + return_list

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}

        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | \
# NOTE(review): mangled extraction — the first line below closes a flags
# expression opened on the previous source line; the last line below opens a
# `check = [` list completed on the next source line.
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

        with mock.patch.object(drvr, '_disconnect_volume') as \
                mock_disconnect_volume:
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234,
                                              5678]
                mock_get_domain.return_value = mock_dom
                drvr.detach_volume(connection_info, instance, '/dev/vdc')

                mock_get_domain.assert_called_with(instance)
                mock_dom.detachDeviceFlags.assert_called_with(""" """,
                                                              flags=flags)
                mock_disconnect_volume.assert_called_with(
                    connection_info, 'vdc')

    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_detach_volume_disk_not_found(self, mock_get_domain):
        """Detaching a device absent from the domain XML raises
        DiskNotFound.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_xml_without_disk = """ """
        mock_dom = mock.MagicMock(return_value=mock_xml_without_disk)

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}

        mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2,
                                      1234, 5678]
        mock_get_domain.return_value = mock_dom
        self.assertRaises(exception.DiskNotFound, drvr.detach_volume,
                          connection_info, instance, '/dev/vdc')

        mock_get_domain.assert_called_once_with(instance)

    def test_multi_nic(self):
        """Guest XML for an instance with two NICs has two bridge
        interfaces.
        """
        network_info = _fake_network_info(self, 2)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        xml = drvr._get_guest_xml(self.context, instance_ref,
                                  network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        interfaces = tree.findall("./devices/interface")
        self.assertEqual(len(interfaces), 2)
        self.assertEqual(interfaces[0].get('type'), 'bridge')

    def _behave_supports_direct_io(self, raise_open=False,
                                   raise_write=False, exc=ValueError()):
        """Record one mox open/write/close/unlink expectation sequence for
        a _supports_direct_io('.') probe, optionally failing at open or
        write with *exc*.
        """
        open_behavior = os.open(os.path.join('.', '.directio.test'),
                                os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
        if raise_open:
            open_behavior.AndRaise(exc)
        else:
            open_behavior.AndReturn(3)
            write_bahavior = os.write(3, mox.IgnoreArg())
            if raise_write:
                write_bahavior.AndRaise(exc)
            else:
                os.close(3)
        # The probe always cleans up its test file.
        os.unlink(3)

    def test_supports_direct_io(self):
        # O_DIRECT is not supported on all Python runtimes, so on platforms
        # where it's not supported (e.g. Mac), we can still test the
        # code-path by stubbing out the value.
        if not hasattr(os, 'O_DIRECT'):
            # `mock` seems to have trouble stubbing an attr that doesn't
            # originally exist, so falling back to stubbing out the
            # attribute directly.
            os.O_DIRECT = 16384
            self.addCleanup(delattr, os, 'O_DIRECT')

        einval = OSError()
        einval.errno = errno.EINVAL
        self.mox.StubOutWithMock(os, 'open')
        self.mox.StubOutWithMock(os, 'write')
        self.mox.StubOutWithMock(os, 'close')
        self.mox.StubOutWithMock(os, 'unlink')
        _supports_direct_io = \
            libvirt_driver.LibvirtDriver._supports_direct_io

        # Five recorded scenarios, replayed in order below:
        # success; non-EINVAL write/open errors propagate;
        # EINVAL at write/open means "no direct I/O" (False).
        self._behave_supports_direct_io()
        self._behave_supports_direct_io(raise_write=True)
        self._behave_supports_direct_io(raise_open=True)
        self._behave_supports_direct_io(raise_write=True, exc=einval)
        self._behave_supports_direct_io(raise_open=True, exc=einval)

        self.mox.ReplayAll()
        self.assertTrue(_supports_direct_io('.'))
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertFalse(_supports_direct_io('.'))
        self.assertFalse(_supports_direct_io('.'))
        self.mox.VerifyAll()

    def _check_xml_and_container(self, instance):
        """Assert LXC guest XML shape: exe os/type and a rootfs
        filesystem mount.
        """
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        self.assertEqual(drvr._uri(), 'lxc:///')

        network_info = _fake_network_info(self, 1)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        xml = drvr._get_guest_xml(self.context, instance_ref,
                                  network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)

        check = [ \
# NOTE(review): mangled extraction — the first lines below complete a
# `check = [` list opened on the previous source line; the last line below is
# an assignment (`got_disks =`) completed on the next source line.
            (lambda t: t.find('.').get('type'), 'lxc'),
            (lambda t: t.find('./os/type').text, 'exe'),
            (lambda t: t.find('./devices/filesystem/target').get('dir'),
             '/')]

        for i, (check, expected_result) in enumerate(check):
            self.assertEqual(check(tree),
                             expected_result,
                             '%s failed common check %d' % (xml, i))

        # The container rootfs source directory must be non-empty.
        target = tree.find('./devices/filesystem/source').get('dir')
        self.assertTrue(len(target) > 0)

    def _check_xml_and_disk_prefix(self, instance, prefix):
        """Assert the first disk's device name per virt_type, honouring an
        optional configured disk_prefix override.
        """
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        def _get_prefix(p, default):
            # Configured prefix wins; otherwise the virt_type default.
            if p:
                return p + 'a'
            return default

        type_disk_map = {
            'qemu': [
                (lambda t: t.find('.').get('type'), 'qemu'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'vda'))],
            'xen': [
                (lambda t: t.find('.').get('type'), 'xen'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'xvda'))],
            'kvm': [
                (lambda t: t.find('.').get('type'), 'kvm'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'vda'))],
            'uml': [
                (lambda t: t.find('.').get('type'), 'uml'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'ubda'))]
            }

        for (virt_type, checks) in six.iteritems(type_disk_map):
            self.flags(virt_type=virt_type, group='libvirt')
            if prefix:
                self.flags(disk_prefix=prefix, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

            network_info = _fake_network_info(self, 1)
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref, image_meta)

            xml = drvr._get_guest_xml(self.context, instance_ref,
                                      network_info, disk_info, image_meta)
            tree = etree.fromstring(xml)

            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed check %d' %
                                 (check(tree), expected_result, i))

    def _check_xml_and_disk_driver(self, image_meta):
        """Assert disk cache mode is 'none' with direct I/O available and
        'writethrough' once direct I/O is reported unsupported.
        """
        os_open = os.open
        directio_supported = True

        def os_open_stub(path, flags, *args, **kwargs):
            # Emulate a filesystem with/without O_DIRECT support, then
            # fall through to the real open without the flag.
            if flags & os.O_DIRECT:
                if not directio_supported:
                    raise OSError(errno.EINVAL,
                                  '%s: %s' % (os.strerror(errno.EINVAL),
                                              path))
                flags &= ~os.O_DIRECT
            return os_open(path, flags, *args, **kwargs)

        self.stub_out('os.open', os_open_stub)

        @staticmethod
        def connection_supports_direct_io_stub(dirpath):
            return directio_supported

        self.stubs.Set(libvirt_driver.LibvirtDriver,
                       '_supports_direct_io',
                       connection_supports_direct_io_stub)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        network_info = _fake_network_info(self, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "none")

        directio_supported = False

        # The O_DIRECT availability is cached on first use in
        # LibvirtDriver, hence we re-create it here
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "writethrough")

    def _check_xml_and_disk_bus(self, image_meta, block_device_info,
                                wantConfig):
        """Assert each generated disk's (device, bus, dev) triple matches
        wantConfig, in order.
        """
        instance_ref = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta,
                                            block_device_info)

        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta,
                                 block_device_info=block_device_info)
        tree = etree.fromstring(xml)

        got_disks = \
# NOTE(review): mangled extraction — the first line below completes the
# `got_disks =` assignment opened on the previous source line; the last line
# below (`instance_ref.vcpu_model`) is an assignment target completed on the
# next source line.
            tree.findall('./devices/disk')
        got_disk_targets = tree.findall('./devices/disk/target')
        for i in range(len(wantConfig)):
            want_device_type = wantConfig[i][0]
            want_device_bus = wantConfig[i][1]
            want_device_dev = wantConfig[i][2]

            got_device_type = got_disks[i].get('device')
            got_device_bus = got_disk_targets[i].get('bus')
            got_device_dev = got_disk_targets[i].get('dev')

            self.assertEqual(got_device_type, want_device_type)
            self.assertEqual(got_device_bus, want_device_bus)
            self.assertEqual(got_device_dev, want_device_dev)

    def _check_xml_and_uuid(self, image_meta):
        """Assert the generated guest XML carries the instance uuid."""
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        network_info = _fake_network_info(self, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        self.assertEqual(tree.find('./uuid').text,
                         instance_ref['uuid'])

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_get_host_sysinfo_serial_hardware",)
    def _check_xml_and_uri(self, instance, mock_serial,
                           expect_ramdisk=False, expect_kernel=False,
                           rescue=None, expect_xen_hvm=False,
                           xen_only=False):
        """Shared helper: generate guest XML per virt_type and assert URI,
        os/kernel/initrd/cmdline entries, sysinfo, serial devices, disks,
        and the nwfilter reference; finally verify a configured
        connection_uri is never overridden.
        """
        mock_serial.return_value = "cef19ce0-0ca2-11df-855d-b19fbce37686"
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        xen_vm_mode = vm_mode.XEN
        if expect_xen_hvm:
            xen_vm_mode = vm_mode.HVM

        # Per-virt_type: (expected connection URI, XML checks).
        type_uri_map = {'qemu': ('qemu:///system',
                                 [(lambda t: t.find('.').get('type'),
                                   'qemu'),
                                  (lambda t: t.find('./os/type').text,
                                   vm_mode.HVM),
                                  (lambda t: t.find('./devices/emulator'),
                                   None)]),
                        'kvm': ('qemu:///system',
                                [(lambda t: t.find('.').get('type'),
                                  'kvm'),
                                 (lambda t: t.find('./os/type').text,
                                  vm_mode.HVM),
                                 (lambda t: t.find('./devices/emulator'),
                                  None)]),
                        'uml': ('uml:///system',
                                [(lambda t: t.find('.').get('type'),
                                  'uml'),
                                 (lambda t:
                                  t.find('./os/type').text,
                                  vm_mode.UML)]),
                        'xen': ('xen:///',
                                [(lambda t: t.find('.').get('type'),
                                  'xen'),
                                 (lambda t: t.find('./os/type').text,
                                  xen_vm_mode)])}

        if expect_xen_hvm or xen_only:
            hypervisors_to_check = ['xen']
        else:
            hypervisors_to_check = ['qemu', 'kvm', 'xen']

        for hypervisor_type in hypervisors_to_check:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                suffix = '.rescue'
            else:
                suffix = ''
            if expect_kernel:
                check = (lambda t:
                         self.relpath(t.find('./os/kernel').text).
                         split('/')[1], 'kernel' + suffix)
            else:
                check = (lambda t: t.find('./os/kernel'), None)
            check_list.append(check)

            if expect_kernel:
                check = (lambda t:
                         "no_timer_check" in t.find('./os/cmdline').
                         text, hypervisor_type == "qemu")
                check_list.append(check)
            # Hypervisors that only support vm_mode.HVM and Xen
            # should not produce configuration that results in kernel
            # arguments
            if not expect_kernel and (hypervisor_type in
                                      ['qemu', 'kvm', 'xen']):
                check = (lambda t: t.find('./os/root'), None)
                check_list.append(check)
                check = (lambda t: t.find('./os/cmdline'), None)
                check_list.append(check)

            if expect_ramdisk:
                check = (lambda t:
                         self.relpath(t.find('./os/initrd').text).
                         split('/')[1], 'ramdisk' + suffix)
            else:
                check = (lambda t: t.find('./os/initrd'), None)
            check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                xpath = "./sysinfo/system/entry"
                check = (lambda t: t.findall(xpath)[0].get("name"),
                         "manufacturer")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[0].text,
                         version.vendor_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[1].get("name"),
                         "product")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[1].text,
                         version.product_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[2].get("name"),
                         "version")
                check_list.append(check)
                # NOTE(sirp): empty strings don't roundtrip in lxml (they
                # are converted to None), so we need an `or ''` to correct
                # for that
                check = (lambda t: t.findall(xpath)[2].text or '',
                         version.version_string_with_package())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[3].get("name"),
                         "serial")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[3].text,
                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[4].get("name"),
                         "uuid")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[4].text,
                         instance['uuid'])
                check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                check = (lambda t: t.findall('./devices/serial')[0].get(
                    'type'), 'file')
                check_list.append(check)
                check = (lambda t: t.findall('./devices/serial')[1].get(
                    'type'), 'pty')
                check_list.append(check)
                check = (lambda t: self.relpath(t.findall(
                    './devices/serial/source')[0].get('path')).
                    split('/')[1], 'console.log')
                check_list.append(check)
            else:
                check = (lambda t: t.find('./devices/console').get(
                    'type'), 'pty')
                check_list.append(check)

        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find('./memory').text, '2097152')]
        if rescue:
            common_checks += [
                (lambda t: self.relpath(t.findall(
                    './devices/disk/source')[0].
                    get('file')).split('/')[1], 'disk.rescue'),
                (lambda t: self.relpath(t.findall(
                    './devices/disk/source')[1].
                    get('file')).split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[0].get('file')).split('/')[1],
                'disk')]
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[1].get('file')).split('/')[1],
                'disk.local')]

        for virt_type in hypervisors_to_check:
            expected_uri = type_uri_map[virt_type][0]
            checks = type_uri_map[virt_type][1]
            self.flags(virt_type=virt_type, group='libvirt')

            with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
                del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

                drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(),
                                                    True)

                self.assertEqual(drvr._uri(), expected_uri)

                network_info = _fake_network_info(self, 1)
                disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                    instance_ref,
                                                    image_meta,
                                                    rescue=rescue)

                xml = drvr._get_guest_xml(self.context, instance_ref,
                                          network_info, disk_info,
                                          image_meta, rescue=rescue)
                tree = etree.fromstring(xml)
                for i, (check, expected_result) in enumerate(checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed check %d' %
                                     (check(tree), expected_result, i))

                for i, (check, expected_result) in enumerate(common_checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed common check %d' %
                                     (check(tree), expected_result, i))

                filterref = './devices/interface/filterref'
                vif = network_info[0]
                nic_id = vif['address'].replace(':', '')
                fw = firewall.NWFilterFirewall(drvr)
                instance_filter_name = fw._instance_filter_name(
                    instance_ref, nic_id)
                self.assertEqual(tree.find(filterref).get('filter'),
                                 instance_filter_name)

        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to
        # CONF.connection_uri and checking against that later on. This
        # way we make sure the implementation doesn't fiddle around
        # with the CONF.
        testuri = 'something completely different'
        self.flags(connection_uri=testuri, group='libvirt')
        for (virt_type, (expected_uri, checks)) in \
                six.iteritems(type_uri_map):
            self.flags(virt_type=virt_type, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            self.assertEqual(drvr._uri(), testuri)

    def test_ensure_filtering_rules_for_instance_timeout(self):
        # ensure_filtering_fules_for_instance() finishes with timeout.
        # Preparing mocks
        def fake_none(self, *args):
            return

        class FakeTime(object):
            # Accumulates requested sleep time instead of sleeping.
            def __init__(self):
                self.counter = 0

            def sleep(self, t):
                self.counter += t

        fake_timer = FakeTime()

        def fake_sleep(t):
            fake_timer.sleep(t)

        # _fake_network_info must be called before
        # create_fake_libvirt_mock(), as _fake_network_info calls
        # importutils.import_class() and create_fake_libvirt_mock()
        # mocks importutils.import_class().
        network_info = _fake_network_info(self, 1)
        self.create_fake_libvirt_mock()
        instance_ref = objects.Instance(**self.test_instance)

        # Start test
        self.mox.ReplayAll()
        try:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.stubs.Set(drvr.firewall_driver,
                           'setup_basic_filtering',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'prepare_instance_filter',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'instance_filter_exists',
                           fake_none)
            self.stubs.Set(greenthread,
                           'sleep',
                           fake_sleep)
            drvr.ensure_filtering_rules_for_instance(instance_ref,
                                                     network_info)
        except exception.NovaException as e:
            msg = ('The firewall filter for %s does not exist' %
                   instance_ref['name'])
            c1 = (0 <= six.text_type(e).find(msg))
            self.assertTrue(c1)

        # The retry loop is expected to sleep a total of 29 seconds
        # before giving up.
        self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                                                 "amount of time")

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_with_block_migration(
            self, mock_cpu, mock_test_file):
        """Destination check succeeds and reports block_migration=True."""
        instance_ref = objects.Instance(**self.test_instance)
        # NOTE(review): assignment continues on the next source line.
        instance_ref.vcpu_model \
# NOTE(review): mangled extraction — the first line below completes an
# `instance_ref.vcpu_model =` assignment begun on the previous source line;
# the final line below is an open call (`_is_shared_block_storage(...)`) whose
# remaining arguments lie beyond this chunk.
            = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename
        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, None, compute_info, True)
        return_value.is_volume_backed = False
        # disk_available_mb = disk_available_least (GB) * 1024.
        self.assertThat({"filename": "file",
                         'image_type': 'default',
                         'disk_available_mb': 409600,
                         "disk_over_commit": False,
                         "block_migration": True,
                         "is_volume_backed": False},
                        matchers.DictMatches(
                            return_value.to_legacy_dict()))

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_no_block_migration(
            self, mock_cpu, mock_test_file):
        """Destination check succeeds with block_migration=False."""
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename
        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, None, compute_info, False)
        return_value.is_volume_backed = False
        self.assertThat({"filename": "file",
                         "image_type": 'default',
                         "block_migration": False,
                         "disk_over_commit": False,
                         "disk_available_mb": 409600,
                         "is_volume_backed": False},
                        matchers.DictMatches(
                            return_value.to_legacy_dict()))

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file',
                       return_value='fake')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
    def test_check_can_live_migrate_guest_cpu_none_model(
            self, mock_cpu, mock_test_file):
        # Tests that when instance.vcpu_model.model is None, the host cpu
        # model is used for live migration.
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        instance_ref.vcpu_model.model = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
        result = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, compute_info, compute_info)
        result.is_volume_backed = False
        # None as the guest cpu -> host cpu_info is compared instead.
        mock_cpu.assert_called_once_with(None, 'asdf')
        expected_result = {"filename": 'fake',
                           "image_type": CONF.libvirt.images_type,
                           "block_migration": False,
                           "disk_over_commit": False,
                           "disk_available_mb": 1024,
                           "is_volume_backed": False}
        self.assertDictEqual(expected_result, result.to_legacy_dict())

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_no_instance_cpu_info(
            self, mock_cpu, mock_test_file):
        """Without an instance vcpu_model, the source host cpu_info is
        used for the compatibility check.
        """
        instance_ref = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': jsonutils.dumps({
            "vendor": "AMD",
            "arch": arch.I686,
            "features": ["sse3"],
            "model": "Opteron_G3",
            "topology": {"cores": 2, "threads": 1, "sockets": 4}
        }), 'disk_available_least': 1}
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename

        return_value = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, compute_info, compute_info, False)
        # NOTE(danms): Compute manager would have set this, so set it here
        return_value.is_volume_backed = False
        self.assertThat({"filename": "file",
                         "image_type": 'default',
                         "block_migration": False,
                         "disk_over_commit": False,
                         "disk_available_mb": 1024,
                         "is_volume_backed": False},
                        matchers.DictMatches(
                            return_value.to_legacy_dict()))

    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_incompatible_cpu_raises(
            self, mock_cpu):
        """An incompatible CPU makes the destination check raise."""
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}

        mock_cpu.side_effect = exception.InvalidCPUInfo(reason='foo')
        self.assertRaises(exception.InvalidCPUInfo,
                          drvr.check_can_live_migrate_destination,
                          self.context, instance_ref,
                          compute_info, compute_info, False)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_compatible_host_cpu(self, mock_vconfig,
                                             mock_compare):
        """A positive compare_cpu result means compatible: returns None."""
        mock_compare.return_value = 5
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info))
        self.assertIsNone(ret)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_handles_not_supported_error_gracefully(
            self, mock_vconfig, mock_compare):
        """VIR_ERR_NO_SUPPORT from compareCPU is swallowed (returns
        None), not propagated.
        """
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virCompareCPU',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        mock_compare.side_effect = not_supported_exc
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info))
        self.assertIsNone(ret)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt.LibvirtDriver,
                       '_vcpu_model_to_cpu_config')
    def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
                                              mock_compare):
        """A guest vcpu_model that compares compatible returns None."""
        mock_compare.return_value = 6
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(jsonutils.dumps(_fake_cpu_info), None)
        self.assertIsNone(ret)

    def test_compare_cpu_virt_type_xen(self):
        """CPU comparison is skipped entirely for the xen virt_type."""
        self.flags(virt_type='xen', group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = \
            conn._compare_cpu(None, None)
        self.assertIsNone(ret)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
                                                mock_compare):
        """A zero compare_cpu result (incompatible) raises
        InvalidCPUInfo.
        """
        mock_compare.return_value = 0
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InvalidCPUInfo,
                          conn._compare_cpu, None,
                          jsonutils.dumps(_fake_cpu_info))

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
                                                 mock_compare):
        """A libvirtError from compareCPU becomes
        MigrationPreCheckError.
        """
        mock_compare.side_effect = fakelibvirt.libvirtError('cpu')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationPreCheckError,
                          conn._compare_cpu, None,
                          jsonutils.dumps(_fake_cpu_info))

    def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
        """Cleanup removes the shared-storage test file by name."""
        objects.Instance(**self.test_instance)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename="file",
            block_migration=True,
            disk_over_commit=False,
            disk_available_mb=1024)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.mox.StubOutWithMock(drvr,
                                 '_cleanup_shared_storage_test_file')
        drvr._cleanup_shared_storage_test_file("file")

        self.mox.ReplayAll()
        drvr.check_can_live_migrate_destination_cleanup(self.context,
                                                        dest_check_data)

    def _mock_can_live_migrate_source(self, block_migration=False,
                                      is_shared_block_storage=False,
                                      is_shared_instance_path=False,
                                      is_booted_from_volume=False,
                                      disk_available_mb=1024,
                                      block_device_info=None,
                                      block_device_text=None):
        """Shared mox setup for check_can_live_migrate_source tests.

        NOTE(review): the remainder of this helper lies beyond this chunk
        of the file.
        """
        instance = objects.Instance(**self.test_instance)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            block_migration=block_migration,
            disk_over_commit=False,
            disk_available_mb=disk_available_mb)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.mox.StubOutWithMock(drvr, '_is_shared_block_storage')
        drvr._is_shared_block_storage(instance, dest_check_data, \
block_device_info).AndReturn(is_shared_block_storage) self.mox.StubOutWithMock(drvr, '_check_shared_storage_test_file') drvr._check_shared_storage_test_file('file').AndReturn( is_shared_instance_path) self.mox.StubOutWithMock(drvr, "get_instance_disk_info") drvr.get_instance_disk_info(instance, block_device_info=block_device_info).\ AndReturn(block_device_text) self.mox.StubOutWithMock(drvr, '_is_booted_from_volume') drvr._is_booted_from_volume(instance, block_device_text).AndReturn( is_booted_from_volume) return (instance, dest_check_data, drvr) def test_check_can_live_migrate_source_block_migration(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source( block_migration=True) self.mox.StubOutWithMock(drvr, "_assert_dest_node_has_enough_disk") drvr._assert_dest_node_has_enough_disk( self.context, instance, dest_check_data.disk_available_mb, False, None) self.mox.ReplayAll() ret = drvr.check_can_live_migrate_source(self.context, instance, dest_check_data) self.assertIsInstance(ret, objects.LibvirtLiveMigrateData) self.assertIn('is_shared_block_storage', ret) self.assertIn('is_shared_instance_path', ret) def test_check_can_live_migrate_source_shared_block_storage(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source( is_shared_block_storage=True) self.mox.ReplayAll() drvr.check_can_live_migrate_source(self.context, instance, dest_check_data) def test_check_can_live_migrate_source_shared_instance_path(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source( is_shared_instance_path=True) self.mox.ReplayAll() drvr.check_can_live_migrate_source(self.context, instance, dest_check_data) def test_check_can_live_migrate_source_non_shared_fails(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source() self.mox.ReplayAll() self.assertRaises(exception.InvalidSharedStorage, drvr.check_can_live_migrate_source, self.context, instance, dest_check_data) def 
test_check_can_live_migrate_source_shared_block_migration_fails(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source( block_migration=True, is_shared_block_storage=True) self.mox.ReplayAll() self.assertRaises(exception.InvalidLocalStorage, drvr.check_can_live_migrate_source, self.context, instance, dest_check_data) def test_check_can_live_migrate_shared_path_block_migration_fails(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source( block_migration=True, is_shared_instance_path=True) self.mox.ReplayAll() self.assertRaises(exception.InvalidLocalStorage, drvr.check_can_live_migrate_source, self.context, instance, dest_check_data, None) def test_check_can_live_migrate_non_shared_non_block_migration_fails(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source() self.mox.ReplayAll() self.assertRaises(exception.InvalidSharedStorage, drvr.check_can_live_migrate_source, self.context, instance, dest_check_data) def test_check_can_live_migrate_source_with_dest_not_enough_disk(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source( block_migration=True, disk_available_mb=0) drvr.get_instance_disk_info(instance, block_device_info=None).AndReturn( '[{"virt_disk_size":2}]') self.mox.ReplayAll() self.assertRaises(exception.MigrationError, drvr.check_can_live_migrate_source, self.context, instance, dest_check_data) def test_check_can_live_migrate_source_booted_from_volume(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source( is_booted_from_volume=True, block_device_text='[]') self.mox.ReplayAll() drvr.check_can_live_migrate_source(self.context, instance, dest_check_data) def test_check_can_live_migrate_source_booted_from_volume_with_swap(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source( is_booted_from_volume=True, block_device_text='[{"path":"disk.swap"}]') self.mox.ReplayAll() self.assertRaises(exception.InvalidSharedStorage, 
drvr.check_can_live_migrate_source, self.context, instance, dest_check_data) @mock.patch.object(host.Host, 'has_min_version', return_value=False) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_has_local_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_booted_from_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 'get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_shared_block_storage', return_value=False) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_check_shared_storage_test_file', return_value=False) def test_check_can_live_migrate_source_block_migration_with_bdm_error( self, mock_check, mock_shared_block, mock_get_bdi, mock_booted_from_volume, mock_has_local, mock_enough, mock_min_version): bdi = {'block_device_mapping': ['bdm']} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dest_check_data = objects.LibvirtLiveMigrateData( filename='file', image_type='default', block_migration=True, disk_over_commit=False, disk_available_mb=100) self.assertRaises(exception.MigrationPreCheckError, drvr.check_can_live_migrate_source, self.context, instance, dest_check_data, block_device_info=bdi) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_has_local_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_booted_from_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 'get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_shared_block_storage') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 
'_check_shared_storage_test_file') def _test_check_can_live_migrate_source_block_migration_none( self, block_migrate, is_shared_instance_path, is_share_block, mock_check, mock_shared_block, mock_get_bdi, mock_booted_from_volume, mock_has_local, mock_enough, mock_verson): mock_check.return_value = is_shared_instance_path mock_shared_block.return_value = is_share_block instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dest_check_data = objects.LibvirtLiveMigrateData( filename='file', image_type='default', disk_over_commit=False, disk_available_mb=100) dest_check_data_ret = drvr.check_can_live_migrate_source( self.context, instance, dest_check_data) self.assertEqual(block_migrate, dest_check_data_ret.block_migration) def test_check_can_live_migrate_source_block_migration_none_shared1(self): self._test_check_can_live_migrate_source_block_migration_none( False, True, False) def test_check_can_live_migrate_source_block_migration_none_shared2(self): self._test_check_can_live_migrate_source_block_migration_none( False, False, True) def test_check_can_live_migrate_source_block_migration_none_no_share(self): self._test_check_can_live_migrate_source_block_migration_none( True, False, False) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_has_local_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_booted_from_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 'get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_shared_block_storage') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 
'_check_shared_storage_test_file') def test_check_can_live_migration_source_disk_over_commit_none(self, mock_check, mock_shared_block, mock_get_bdi, mock_booted_from_volume, mock_has_local, mock_enough, mock_disk_check): mock_check.return_value = False mock_shared_block.return_value = False instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dest_check_data = objects.LibvirtLiveMigrateData( filename='file', image_type='default', disk_available_mb=100) drvr.check_can_live_migrate_source( self.context, instance, dest_check_data) self.assertFalse(mock_disk_check.called) def _is_shared_block_storage_test_create_mocks(self, disks): # Test data instance_xml = ("instance-0000000a" "{}") disks_xml = '' for dsk in disks: if dsk['type'] is not 'network': disks_xml = ''.join([disks_xml, "" "" "" "" "".format(**dsk)]) else: disks_xml = ''.join([disks_xml, "" "" "" "" "" "" "".format(**dsk)]) # Preparing mocks mock_virDomain = mock.Mock(fakelibvirt.virDomain) mock_virDomain.XMLDesc = mock.Mock() mock_virDomain.XMLDesc.return_value = (instance_xml.format(disks_xml)) mock_lookup = mock.Mock() def mock_lookup_side_effect(name): return mock_virDomain mock_lookup.side_effect = mock_lookup_side_effect mock_getsize = mock.Mock() mock_getsize.return_value = "10737418240" return (mock_getsize, mock_lookup) def test_is_shared_block_storage_rbd(self): self.flags(images_type='rbd', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_instance_disk_info = mock.Mock() data = objects.LibvirtLiveMigrateData(image_type='rbd') with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertTrue(drvr._is_shared_block_storage(instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) 
def test_is_shared_block_storage_lvm(self): self.flags(images_type='lvm', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(image_type='lvm', is_volume_backed=False, is_shared_instance_path=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_qcow2(self): self.flags(images_type='qcow2', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(image_type='qcow2', is_volume_backed=False, is_shared_instance_path=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_rbd_only_source(self): self.flags(images_type='rbd', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(is_shared_instance_path=False, is_volume_backed=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, 
block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_rbd_only_dest(self): bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(image_type='rbd', is_volume_backed=False, is_shared_instance_path=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_volume_backed(self): disks = [{'type': 'block', 'driver': 'raw', 'source': 'dev', 'source_path': '/dev/disk', 'target_dev': 'vda'}] bdi = {'block_device_mapping': [ {'connection_info': 'info', 'mount_device': '/dev/vda'}]} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) (mock_getsize, mock_lookup) =\ self._is_shared_block_storage_test_create_mocks(disks) data = objects.LibvirtLiveMigrateData(is_volume_backed=True, is_shared_instance_path=False) with mock.patch.object(host.Host, 'get_domain', mock_lookup): self.assertTrue(drvr._is_shared_block_storage(instance, data, block_device_info = bdi)) mock_lookup.assert_called_once_with(instance) def test_is_shared_block_storage_volume_backed_with_disk(self): disks = [{'type': 'block', 'driver': 'raw', 'source': 'dev', 'source_path': '/dev/disk', 'target_dev': 'vda'}, {'type': 'file', 'driver': 'raw', 'source': 'file', 'source_path': '/instance/disk.local', 'target_dev': 'vdb'}] bdi = {'block_device_mapping': [ {'connection_info': 'info', 'mount_device': '/dev/vda'}]} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) (mock_getsize, mock_lookup) 
=\ self._is_shared_block_storage_test_create_mocks(disks) data = objects.LibvirtLiveMigrateData(is_volume_backed=True, is_shared_instance_path=False) with test.nested( mock.patch.object(os.path, 'getsize', mock_getsize), mock.patch.object(host.Host, 'get_domain', mock_lookup)): self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info = bdi)) mock_getsize.assert_called_once_with('/instance/disk.local') mock_lookup.assert_called_once_with(instance) def test_is_shared_block_storage_nfs(self): bdi = {'block_device_mapping': []} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_image_backend = mock.MagicMock() drvr.image_backend = mock_image_backend mock_backend = mock.MagicMock() mock_image_backend.backend.return_value = mock_backend mock_backend.is_file_in_instance_path.return_value = True mock_get_instance_disk_info = mock.Mock() data = objects.LibvirtLiveMigrateData( is_shared_instance_path=True, image_type='foo') with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): self.assertTrue(drvr._is_shared_block_storage( 'instance', data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_live_migration_update_graphics_xml(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) xml_tmpl = ("" "" "" "" "" "" "" "" "" "") initial_xml = xml_tmpl.format(vnc='1.2.3.4', spice='5.6.7.8') target_xml = xml_tmpl.format(vnc='10.0.0.1', spice='10.0.0.2') target_xml = etree.tostring(etree.fromstring(target_xml)) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI2") _bandwidth = CONF.libvirt.live_migration_bandwidth 
vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn( initial_xml) vdmock.migrateToURI2(drvr._live_migration_uri('dest'), None, target_xml, mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError("ERR")) # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}} migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='10.0.0.1', graphics_listen_addr_spice='10.0.0.2', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) self.mox.ReplayAll() self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) def test_live_migration_update_volume_xml(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'cde.67890.opst-lun-Z') # start test connection_info = { u'driver_volume_type': u'iscsi', u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': { u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', 'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', }, } bdm = objects.LibvirtLiveMigrateBDMInfo( serial='58a84f6d-3f0c-4e19-a0af-eb657b790657', bus='virtio', type='disk', dev='vdb', connection_info=connection_info) migrate_data = objects.LibvirtLiveMigrateData( serial_listen_addr='', target_connect_addr=None, bdms=[bdm], block_migration=False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) test_mock = mock.MagicMock() with mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') as \ mget_info,\ mock.patch.object(drvr._host, 'get_domain') as mget_domain,\ mock.patch.object(fakelibvirt.virDomain, 'migrateToURI2'),\ mock.patch.object(drvr, '_update_xml') as mupdate: mget_info.side_effect = exception.InstanceNotFound( instance_id='foo') mget_domain.return_value = test_mock test_mock.XMLDesc.return_value = target_xml self.assertFalse(drvr._live_migration_operation( self.context, instance_ref, 'dest', False, migrate_data, test_mock, [])) mupdate.assert_called_once_with(target_xml, migrate_data.bdms, {}, '') def test_live_migration_with_valid_target_connect_addr(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'cde.67890.opst-lun-Z') # start test connection_info = { u'driver_volume_type': u'iscsi', u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': { u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', 'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', }, } bdm = objects.LibvirtLiveMigrateBDMInfo( serial='58a84f6d-3f0c-4e19-a0af-eb657b790657', bus='virtio', type='disk', dev='vdb', connection_info=connection_info) migrate_data = objects.LibvirtLiveMigrateData( serial_listen_addr='', target_connect_addr='127.0.0.2', bdms=[bdm], block_migration=False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) test_mock = mock.MagicMock() with mock.patch.object(drvr, '_update_xml') as mupdate: test_mock.XMLDesc.return_value = target_xml drvr._live_migration_operation(self.context, instance_ref, 'dest', False, migrate_data, test_mock, []) test_mock.migrateToURI2.assert_called_once_with( 'qemu+tcp://127.0.0.2/system', None, mupdate(), None, None, 0) def test_update_volume_xml(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) initial_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'cde.67890.opst-lun-Z') target_xml = etree.tostring(etree.fromstring(target_xml)) serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657" bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial, bus='virtio', type='disk', dev='vdb') bdmi.connection_info = {u'driver_volume_type': u'iscsi', 'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': {u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', 'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}} conf = vconfig.LibvirtConfigGuestDisk() conf.source_device = bdmi.type conf.driver_name = "qemu" conf.driver_format = "raw" conf.driver_cache = "none" conf.target_dev = bdmi.dev conf.target_bus = bdmi.bus conf.serial = bdmi.connection_info.get('serial') conf.source_type = "block" conf.source_path = bdmi.connection_info['data'].get('device_path') with mock.patch.object(drvr, '_get_volume_config', return_value=conf): parser = etree.XMLParser(remove_blank_text=True) xml_doc = etree.fromstring(initial_xml, parser) config = drvr._update_volume_xml(xml_doc, [bdmi]) xml_doc = etree.fromstring(target_xml, parser) self.assertEqual(etree.tostring(xml_doc), etree.tostring(config)) def test_live_migration_uri(self): hypervisor_uri_map = ( ('xen', 'xenmigr://%s/system'), ('kvm', 'qemu+tcp://%s/system'), ('qemu', 'qemu+tcp://%s/system'), # anything else will return None ('lxc', None), ('parallels', None), ('', None), ) dest = 'destination' for hyperv, uri in hypervisor_uri_map: self.flags(virt_type=hyperv, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) if uri is not None: uri = uri % dest self.assertEqual(uri, drvr._live_migration_uri(dest)) else: self.assertRaises(exception.LiveMigrationURINotAvailable, drvr._live_migration_uri, dest) def test_live_migration_uri_forced(self): dest = 'destination' for hyperv in ('kvm', 'xen'): self.flags(virt_type=hyperv, 
group='libvirt') forced_uri = 'foo://%s/bar' self.flags(live_migration_uri=forced_uri, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(forced_uri % dest, drvr._live_migration_uri(dest)) def test_update_volume_xml_no_serial(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) xml_tmpl = """
""" initial_xml = xml_tmpl.format(device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = xml_tmpl.format(device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = etree.tostring(etree.fromstring(target_xml)) serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657" connection_info = { u'driver_volume_type': u'iscsi', 'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': { u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', }, } bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial, bus='virtio', dev='vdb', type='disk') bdmi.connection_info = connection_info conf = vconfig.LibvirtConfigGuestDisk() conf.source_device = bdmi.type conf.driver_name = "qemu" conf.driver_format = "raw" conf.driver_cache = "none" conf.target_dev = bdmi.dev conf.target_bus = bdmi.bus conf.serial = bdmi.connection_info.get('serial') conf.source_type = "block" conf.source_path = bdmi.connection_info['data'].get('device_path') with mock.patch.object(drvr, '_get_volume_config', return_value=conf): xml_doc = etree.fromstring(initial_xml) config = drvr._update_volume_xml(xml_doc, [bdmi]) self.assertEqual(target_xml, etree.tostring(config)) def test_update_volume_xml_no_connection_info(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) initial_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'abc.12345.opst-lun-X') target_xml = etree.tostring(etree.fromstring(target_xml)) serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657" bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial, dev='vdb', type='disk', bus='scsi', format='qcow') bdmi.connection_info = {} conf = vconfig.LibvirtConfigGuestDisk() with mock.patch.object(drvr, '_get_volume_config', return_value=conf): xml_doc = etree.fromstring(initial_xml) config = drvr._update_volume_xml(xml_doc, [bdmi]) self.assertEqual(target_xml, etree.tostring(config)) @mock.patch.object(fakelibvirt.virDomain, "migrateToURI2") @mock.patch.object(fakelibvirt.virDomain, "XMLDesc") def test_live_migration_update_serial_console_xml(self, mock_xml, mock_migrate): self.compute = importutils.import_object(CONF.compute_manager) instance_ref = self.test_instance xml_tmpl = ("" "" "" "" "" "" "") initial_xml = xml_tmpl.format(addr='9.0.0.1') target_xml = xml_tmpl.format(addr='9.0.0.12') target_xml = etree.tostring(etree.fromstring(target_xml)) # Preparing mocks mock_xml.return_value = initial_xml mock_migrate.side_effect = fakelibvirt.libvirtError("ERR") # start test bandwidth = CONF.libvirt.live_migration_bandwidth migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='10.0.0.1', graphics_listen_addr_spice='10.0.0.2', serial_listen_addr='9.0.0.12', target_connect_addr=None, bdms=[], block_migration=False) dom = fakelibvirt.virDomain drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, dom, []) mock_xml.assert_called_once_with( flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE) mock_migrate.assert_called_once_with( drvr._live_migration_uri('dest'), None, target_xml, mock.ANY, None, bandwidth) @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True) def test_live_migration_fails_with_serial_console_without_migratable(self): self.compute = 
importutils.import_object(CONF.compute_manager) instance_ref = self.test_instance CONF.set_override("enabled", True, "serial_console") dom = fakelibvirt.virDomain migrate_data = objects.LibvirtLiveMigrateData( serial_listen_addr='', target_connect_addr=None, block_migration=False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, dom, []) @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True) def test_live_migration_uses_migrateToURI_without_migratable_flag(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.migrateToURI(drvr._live_migration_uri('dest'), mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError("ERR")) # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='0.0.0.0', graphics_listen_addr_spice='0.0.0.0', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) self.mox.ReplayAll() self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.migrateToURI(drvr._live_migration_uri('dest'), mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError("ERR")) # start test migrate_data = objects.LibvirtLiveMigrateData( serial_listen_addr='', target_connect_addr=None, bdms=[], block_migration=False) self.mox.ReplayAll() self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(fakelibvirt.virDomain, "migrateToURI3") @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._update_xml', return_value='') @mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='') def test_live_migration_uses_migrateToURI3( self, mock_old_xml, mock_new_xml, mock_migrateToURI3, mock_min_version): # Preparing mocks disk_paths = ['vda', 'vdb'] params = { 'migrate_disks': ['vda', 'vdb'], 'bandwidth': CONF.libvirt.live_migration_bandwidth, 'destination_xml': '', } mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR") # Start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='0.0.0.0', graphics_listen_addr_spice='0.0.0.0', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) dom = fakelibvirt.virDomain drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance, 'dest', False, migrate_data, dom, disk_paths) mock_migrateToURI3.assert_called_once_with( drvr._live_migration_uri('dest'), params, None) @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True) def 
test_live_migration_fails_without_migratable_flag_or_0_addr(self): self.flags(enabled=True, vncserver_listen='1.2.3.4', group='vnc') self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='1.2.3.4', graphics_listen_addr_spice='1.2.3.4', serial_listen_addr='127.0.0.1', target_connect_addr=None, block_migration=False) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) def test_live_migration_raises_exception(self): # Confirms recover method is called when exceptions are raised. 
# Preparing data self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI2") _bandwidth = CONF.libvirt.live_migration_bandwidth if getattr(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None: vdmock.migrateToURI(drvr._live_migration_uri('dest'), mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError('ERR')) else: vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE ).AndReturn(FakeVirtDomain().XMLDesc(flags=0)) vdmock.migrateToURI2(drvr._live_migration_uri('dest'), None, mox.IgnoreArg(), mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError('ERR')) # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='127.0.0.1', graphics_listen_addr_spice='127.0.0.1', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) self.mox.ReplayAll() self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state) self.assertEqual(power_state.RUNNING, instance_ref.power_state) def test_live_migration_raises_unsupported_config_exception(self): # Tests that when migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED, # migrateToURI is used instead. 
# Preparing data instance_ref = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, 'migrateToURI2') self.mox.StubOutWithMock(vdmock, 'migrateToURI') _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn( FakeVirtDomain().XMLDesc(flags=0)) unsupported_config_error = fakelibvirt.libvirtError('ERR') unsupported_config_error.err = ( fakelibvirt.VIR_ERR_CONFIG_UNSUPPORTED,) # This is the first error we hit but since the error code is # VIR_ERR_CONFIG_UNSUPPORTED we'll try migrateToURI. vdmock.migrateToURI2(drvr._live_migration_uri('dest'), None, mox.IgnoreArg(), mox.IgnoreArg(), None, _bandwidth).AndRaise(unsupported_config_error) # This is the second and final error that will actually kill the run, # we use TestingException to make sure it's not the same libvirtError # above. 
vdmock.migrateToURI(drvr._live_migration_uri('dest'), mox.IgnoreArg(), None, _bandwidth).AndRaise(test.TestingException('oops')) graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'} migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='0.0.0.0', graphics_listen_addr_spice='127.0.0.1', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock( drvr, '_check_graphics_addresses_can_live_migrate') drvr._check_graphics_addresses_can_live_migrate(graphics_listen_addrs) self.mox.ReplayAll() # start test self.assertRaises(test.TestingException, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) @mock.patch('shutil.rmtree') @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy') def test_rollback_live_migration_at_dest_not_shared(self, mock_destroy, mock_get_instance_path, mock_exist, mock_shutil ): # destroy method may raise InstanceTerminationFailure or # InstancePowerOffFailure, here use their base class Invalid. 
mock_destroy.side_effect = exception.Invalid(reason='just test') fake_instance_path = os.path.join(cfg.CONF.instances_path, '/fake_instance_uuid') mock_get_instance_path.return_value = fake_instance_path drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) migrate_data = objects.LibvirtLiveMigrateData( is_shared_instance_path=False, instance_relative_path=False) self.assertRaises(exception.Invalid, drvr.rollback_live_migration_at_destination, "context", "instance", [], None, True, migrate_data) mock_exist.assert_called_once_with(fake_instance_path) mock_shutil.assert_called_once_with(fake_instance_path) @mock.patch('shutil.rmtree') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy') def test_rollback_live_migration_at_dest_shared(self, mock_destroy, mock_get_instance_path, mock_exist, mock_shutil ): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) migrate_data = objects.LibvirtLiveMigrateData( is_shared_instance_path=True, instance_relative_path=False) drvr.rollback_live_migration_at_destination("context", "instance", [], None, True, migrate_data) mock_destroy.assert_called_once_with("context", "instance", [], None, True, migrate_data) self.assertFalse(mock_get_instance_path.called) self.assertFalse(mock_exist.called) self.assertFalse(mock_shutil.called) @mock.patch.object(host.Host, "has_min_version", return_value=False) @mock.patch.object(fakelibvirt.Domain, "XMLDesc") def test_live_migration_copy_disk_paths(self, mock_xml, mock_version): xml = """ dummy d4e13113-918e-42fe-9fc9-861693ffd432 """ mock_xml.return_value = xml drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dom = fakelibvirt.Domain(drvr._get_connection(), xml, False) guest = libvirt_guest.Guest(dom) paths = drvr._live_migration_copy_disk_paths(None, None, guest) self.assertEqual((["/var/lib/nova/instance/123/disk.root", "/dev/mapper/somevol"], ['vda', 
'vdd']), paths) @mock.patch.object(host.Host, "has_min_version", return_value=True) @mock.patch('nova.virt.driver.get_block_device_info') @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid') @mock.patch.object(fakelibvirt.Domain, "XMLDesc") def test_live_migration_copy_disk_paths_selective_block_migration( self, mock_xml, mock_get_instance, mock_block_device_info, mock_version): xml = """ dummy d4e13113-918e-42fe-9fc9-861693ffd432 """ mock_xml.return_value = xml instance = objects.Instance(**self.test_instance) instance.root_device_name = '/dev/vda' block_device_info = { 'swap': { 'disk_bus': u'virtio', 'swap_size': 10, 'device_name': u'/dev/vdc' }, 'root_device_name': u'/dev/vda', 'ephemerals': [{ 'guest_format': u'ext3', 'device_name': u'/dev/vdb', 'disk_bus': u'virtio', 'device_type': u'disk', 'size': 1 }], 'block_device_mapping': [{ 'guest_format': None, 'boot_index': None, 'mount_device': u'/dev/vdd', 'connection_info': { u'driver_volume_type': u'iscsi', 'serial': u'147df29f-aec2-4851-b3fe-f68dad151834', u'data': { u'access_mode': u'rw', u'target_discovered': False, u'encrypted': False, u'qos_specs': None, u'target_iqn': u'iqn.2010-10.org.openstack:' u'volume-147df29f-aec2-4851-b3fe-' u'f68dad151834', u'target_portal': u'10.102.44.141:3260', u'volume_id': u'147df29f-aec2-4851-b3fe-f68dad151834', u'target_lun': 1, u'auth_password': u'cXELT66FngwzTwpf', u'auth_username': u'QbQQjj445uWgeQkFKcVw', u'auth_method': u'CHAP' } }, 'disk_bus': None, 'device_type': None, 'delete_on_termination': False }] } mock_block_device_info.return_value = block_device_info drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dom = fakelibvirt.Domain(drvr._get_connection(), xml, False) guest = libvirt_guest.Guest(dom) return_value = drvr._live_migration_copy_disk_paths(context, instance, guest) expected = (['/var/lib/nova/instance/123/disk.root', '/var/lib/nova/instance/123/disk.shared', '/var/lib/nova/instance/123/disk.config'], ['vda', 'vdb', 'vdc']) 
self.assertEqual(expected, return_value) @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_copy_disk_paths") def test_live_migration_data_gb_plain(self, mock_paths): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) data_gb = drvr._live_migration_data_gb(instance, []) self.assertEqual(2, data_gb) self.assertEqual(0, mock_paths.call_count) def test_live_migration_data_gb_block(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) def fake_stat(path): class StatResult(object): def __init__(self, size): self._size = size @property def st_size(self): return self._size if path == "/var/lib/nova/instance/123/disk.root": return StatResult(10 * units.Gi) elif path == "/dev/mapper/somevol": return StatResult(1.5 * units.Gi) else: raise Exception("Should not be reached") disk_paths = ["/var/lib/nova/instance/123/disk.root", "/dev/mapper/somevol"] with mock.patch.object(os, "stat") as mock_stat: mock_stat.side_effect = fake_stat data_gb = drvr._live_migration_data_gb(instance, disk_paths) # Expecting 2 GB for RAM, plus 10 GB for disk.root # and 1.5 GB rounded to 2 GB for somevol, so 14 GB self.assertEqual(14, data_gb) EXPECT_SUCCESS = 1 EXPECT_FAILURE = 2 EXPECT_ABORT = 3 @mock.patch.object(time, "time") @mock.patch.object(time, "sleep", side_effect=lambda x: eventlet.sleep(0)) @mock.patch.object(host.DomainJobInfo, "for_domain") @mock.patch.object(objects.Instance, "save") @mock.patch.object(objects.Migration, "save") @mock.patch.object(fakelibvirt.Connection, "_mark_running") @mock.patch.object(fakelibvirt.virDomain, "abortJob") def _test_live_migration_monitoring(self, job_info_records, time_records, expect_result, mock_abort, mock_running, mock_save, mock_mig_save, mock_job_info, mock_sleep, mock_time, expected_mig_status=None): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = 
objects.Instance(**self.test_instance) dom = fakelibvirt.Domain(drvr._get_connection(), "", True) guest = libvirt_guest.Guest(dom) finish_event = eventlet.event.Event() def fake_job_info(hostself): while True: self.assertTrue(len(job_info_records) > 0) rec = job_info_records.pop(0) if type(rec) == str: if rec == "thread-finish": finish_event.send() elif rec == "domain-stop": dom.destroy() else: if len(time_records) > 0: time_records.pop(0) return rec return rec def fake_time(): if len(time_records) > 0: return time_records[0] else: return int( datetime.datetime(2001, 1, 20, 20, 1, 0) .strftime('%s')) mock_job_info.side_effect = fake_job_info mock_time.side_effect = fake_time dest = mock.sentinel.migrate_dest migration = objects.Migration(context=self.context, id=1) migrate_data = objects.LibvirtLiveMigrateData( migration=migration) fake_post_method = mock.MagicMock() fake_recover_method = mock.MagicMock() drvr._live_migration_monitor(self.context, instance, guest, dest, fake_post_method, fake_recover_method, False, migrate_data, dom, finish_event, []) mock_mig_save.assert_called_with() if expect_result == self.EXPECT_SUCCESS: self.assertFalse(fake_recover_method.called, 'Recover method called when success expected') self.assertFalse(mock_abort.called, 'abortJob not called when success expected') fake_post_method.assert_called_once_with( self.context, instance, dest, False, migrate_data) else: if expect_result == self.EXPECT_ABORT: self.assertTrue(mock_abort.called, 'abortJob called when abort expected') else: self.assertFalse(mock_abort.called, 'abortJob not called when failure expected') self.assertFalse(fake_post_method.called, 'Post method called when success not expected') if expected_mig_status: fake_recover_method.assert_called_once_with( self.context, instance, dest, False, migrate_data, migration_status=expected_mig_status) else: fake_recover_method.assert_called_once_with( self.context, instance, dest, False, migrate_data) def 
test_live_migration_monitor_success(self): # A normal sequence where see all the normal job states domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS) def test_live_migration_monitor_success_race(self): # A normalish sequence but we're too slow to see the # completed job state domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS) def test_live_migration_monitor_failed(self): # A failed sequence where we see all the expected events domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_FAILED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_FAILURE) def test_live_migration_monitor_failed_race(self): # A failed sequence where we are too slow to see the # failed event domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( 
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_FAILURE) def test_live_migration_monitor_cancelled(self): # A cancelled sequence where we see all the events domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_FAILURE, expected_mig_status='cancelled') @mock.patch.object(fakelibvirt.virDomain, "migrateSetMaxDowntime") @mock.patch.object(libvirt_driver.LibvirtDriver, "_migration_downtime_steps") def test_live_migration_monitor_downtime(self, mock_downtime_steps, mock_set_downtime): self.flags(live_migration_completion_timeout=1000000, live_migration_progress_timeout=1000000, group='libvirt') # We've setup 4 fake downtime steps - first value is the # time delay, second is the downtime value downtime_steps = [ (90, 10), (180, 50), (270, 200), (500, 300), ] mock_downtime_steps.return_value = downtime_steps # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. # Times are chosen so that only the first 3 downtime # steps are needed. 
fake_times = [0, 1, 30, 95, 150, 200, 300] # A normal sequence where see all the normal job states domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_SUCCESS) mock_set_downtime.assert_has_calls([mock.call(10), mock.call(50), mock.call(200)]) def test_live_migration_monitor_completion(self): self.flags(live_migration_completion_timeout=100, live_migration_progress_timeout=1000000, group='libvirt') # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. 
fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320] # A normal sequence where see all the normal job states domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_ABORT, expected_mig_status='cancelled') def test_live_migration_monitor_progress(self): self.flags(live_migration_completion_timeout=1000000, live_migration_progress_timeout=150, group='libvirt') # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320] # A normal sequence where see all the normal job states domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_ABORT, expected_mig_status='cancelled') def test_live_migration_downtime_steps(self): self.flags(live_migration_downtime=400, group='libvirt') self.flags(live_migration_downtime_steps=10, group='libvirt') self.flags(live_migration_downtime_delay=30, group='libvirt') drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) steps = drvr._migration_downtime_steps(3.0) self.assertEqual([ (0, 37), (90, 38), (180, 39), (270, 42), (360, 46), (450, 55), (540, 70), (630, 98), (720, 148), (810, 238), (900, 400), ], list(steps)) @mock.patch.object(utils, "spawn") @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor") @mock.patch.object(host.Host, "get_guest") @mock.patch.object(fakelibvirt.Connection, "_mark_running") @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_copy_disk_paths") def test_live_migration_main(self, mock_copy_disk_path, mock_running, mock_guest, mock_monitor, mock_thread): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) dom = fakelibvirt.Domain(drvr._get_connection(), "demo", True) guest = libvirt_guest.Guest(dom) migrate_data = objects.LibvirtLiveMigrateData(block_migration=True) disks_to_copy = (['/some/path/one', '/test/path/two'], ['vda', 'vdb']) mock_copy_disk_path.return_value = disks_to_copy mock_guest.return_value = guest def fake_post(): pass def fake_recover(): pass drvr._live_migration(self.context, instance, "fakehost", fake_post, fake_recover, True, migrate_data) mock_copy_disk_path.assert_called_once_with(self.context, instance, guest) class AnyEventletEvent(object): def __eq__(self, other): return type(other) == eventlet.event.Event mock_thread.assert_called_once_with( drvr._live_migration_operation, self.context, instance, "fakehost", True, migrate_data, dom, disks_to_copy[1]) mock_monitor.assert_called_once_with( self.context, instance, guest, "fakehost", fake_post, fake_recover, True, migrate_data, dom, AnyEventletEvent(), disks_to_copy[0]) def _do_test_create_images_and_backing(self, disk_type): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk') self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image') disk_info 
= {'path': 'foo', 'type': disk_type, 'disk_size': 1 * 1024 ** 3, 'virt_disk_size': 20 * 1024 ** 3, 'backing_file': None} libvirt_driver.libvirt_utils.create_image( disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size']) drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance, fallback_from_host=None) self.mox.ReplayAll() self.stub_out('os.path.exists', lambda *args: False) drvr._create_images_and_backing(self.context, self.test_instance, "/fake/instance/dir", [disk_info]) def test_create_images_and_backing_qcow2(self): self._do_test_create_images_and_backing('qcow2') def test_create_images_and_backing_raw(self): self._do_test_create_images_and_backing('raw') def test_create_images_and_backing_images_not_exist_no_fallback(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) disk_info = [ {u'backing_file': u'fake_image_backing_file', u'disk_size': 10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 25165824}] self.test_instance.update({'user_id': 'fake-user', 'os_type': None, 'project_id': 'fake-project'}) instance = objects.Instance(**self.test_instance) with mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image', side_effect=exception.ImageNotFound( image_id="fake_id")): self.assertRaises(exception.ImageNotFound, conn._create_images_and_backing, self.context, instance, "/fake/instance/dir", disk_info) def test_create_images_and_backing_images_not_exist_fallback(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) disk_info = [ {u'backing_file': u'fake_image_backing_file', u'disk_size': 10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 25165824}] base_dir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) self.test_instance.update({'user_id': 'fake-user', 'os_type': None, 'kernel_id': 'fake_kernel_id', 'ramdisk_id': 'fake_ramdisk_id', 'project_id': 'fake-project'}) instance = objects.Instance(**self.test_instance) with test.nested( 
mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'), mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image', side_effect=exception.ImageNotFound( image_id="fake_id")), ) as (copy_image_mock, fetch_image_mock): conn._create_images_and_backing(self.context, instance, "/fake/instance/dir", disk_info, fallback_from_host="fake_host") backfile_path = os.path.join(base_dir, 'fake_image_backing_file') kernel_path = os.path.join(CONF.instances_path, self.test_instance['uuid'], 'kernel') ramdisk_path = os.path.join(CONF.instances_path, self.test_instance['uuid'], 'ramdisk') copy_image_mock.assert_has_calls([ mock.call(dest=backfile_path, src=backfile_path, host='fake_host', receive=True), mock.call(dest=kernel_path, src=kernel_path, host='fake_host', receive=True), mock.call(dest=ramdisk_path, src=ramdisk_path, host='fake_host', receive=True) ]) fetch_image_mock.assert_has_calls([ mock.call(context=self.context, target=backfile_path, image_id=self.test_instance['image_ref'], user_id=self.test_instance['user_id'], project_id=self.test_instance['project_id'], max_size=25165824), mock.call(self.context, kernel_path, self.test_instance['kernel_id'], self.test_instance['user_id'], self.test_instance['project_id']), mock.call(self.context, ramdisk_path, self.test_instance['ramdisk_id'], self.test_instance['user_id'], self.test_instance['project_id']), ]) @mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image') @mock.patch.object(os.path, 'exists', return_value=True) def test_create_images_and_backing_images_exist(self, mock_exists, mock_fetch_image): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) disk_info = [ {u'backing_file': u'fake_image_backing_file', u'disk_size': 10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 25165824}] self.test_instance.update({'user_id': 'fake-user', 'os_type': None, 'kernel_id': 'fake_kernel_id', 'ramdisk_id': 'fake_ramdisk_id', 'project_id': 'fake-project'}) instance = 
objects.Instance(**self.test_instance) with mock.patch.object(imagebackend.Image, 'get_disk_size'): conn._create_images_and_backing(self.context, instance, '/fake/instance/dir', disk_info) self.assertFalse(mock_fetch_image.called) def test_create_images_and_backing_ephemeral_gets_created(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) disk_info = [ {u'backing_file': u'fake_image_backing_file', u'disk_size': 10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 25165824}, {u'backing_file': u'ephemeral_1_default', u'disk_size': 393216, u'over_committed_disk_size': 1073348608, u'path': u'disk_eph_path', u'type': u'qcow2', u'virt_disk_size': 1073741824}] base_dir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) instance = objects.Instance(**self.test_instance) with test.nested( mock.patch.object(drvr, '_fetch_instance_kernel_ramdisk'), mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'), mock.patch.object(drvr, '_create_ephemeral'), mock.patch.object(imagebackend.Image, 'verify_base_size'), mock.patch.object(imagebackend.Image, 'get_disk_size') ) as (fetch_kernel_ramdisk_mock, fetch_image_mock, create_ephemeral_mock, verify_base_size_mock, disk_size_mock): drvr._create_images_and_backing(self.context, instance, "/fake/instance/dir", disk_info) self.assertEqual(len(create_ephemeral_mock.call_args_list), 1) m_args, m_kwargs = create_ephemeral_mock.call_args_list[0] self.assertEqual( os.path.join(base_dir, 'ephemeral_1_default'), m_kwargs['target']) self.assertEqual(len(fetch_image_mock.call_args_list), 1) m_args, m_kwargs = fetch_image_mock.call_args_list[0] self.assertEqual( os.path.join(base_dir, 'fake_image_backing_file'), m_kwargs['target']) verify_base_size_mock.assert_has_calls([ mock.call(os.path.join(base_dir, 'fake_image_backing_file'), 25165824), mock.call(os.path.join(base_dir, 'ephemeral_1_default'), 1073741824) ]) def test_create_images_and_backing_disk_info_none(self): drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk') drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance, fallback_from_host=None) self.mox.ReplayAll() drvr._create_images_and_backing(self.context, self.test_instance, "/fake/instance/dir", None) def _generate_target_ret(self, target_connect_addr=None): target_ret = { 'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'}, 'target_connect_addr': target_connect_addr, 'serial_listen_addr': '127.0.0.1', 'volume': { '12345': {'connection_info': {u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}, 'serial': '12345'}, 'disk_info': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'}}, '67890': {'connection_info': {u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}, 'serial': '67890'}, 'disk_info': {'bus': 'scsi', 'dev': 'sdb', 'type': 'disk'}}}} return target_ret def test_pre_live_migration_works_correctly_mocked(self): self._test_pre_live_migration_works_correctly_mocked() def test_pre_live_migration_with_transport_ip(self): self.flags(live_migration_inbound_addr='127.0.0.2', group='libvirt') target_ret = self._generate_target_ret('127.0.0.2') self._test_pre_live_migration_works_correctly_mocked(target_ret) def _test_pre_live_migration_works_correctly_mocked(self, target_ret=None): # Creating testdata vol = {'block_device_mapping': [ {'connection_info': {'serial': '12345', u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}}, 'mount_device': '/dev/sda'}, {'connection_info': {'serial': '67890', u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}, 'mount_device': '/dev/sdb'}]} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) class FakeNetworkInfo(object): def fixed_ips(self): return ["test_ip_addr"] def fake_none(*args, **kwargs): return self.stubs.Set(drvr, 
'_create_images_and_backing', fake_none) instance = objects.Instance(**self.test_instance) c = context.get_admin_context() nw_info = FakeNetworkInfo() # Creating mocks self.mox.StubOutWithMock(driver, "block_device_info_get_mapping") driver.block_device_info_get_mapping(vol ).AndReturn(vol['block_device_mapping']) self.mox.StubOutWithMock(drvr, "_connect_volume") for v in vol['block_device_mapping']: disk_info = { 'bus': "scsi", 'dev': v['mount_device'].rpartition("/")[2], 'type': "disk" } drvr._connect_volume(v['connection_info'], disk_info) self.mox.StubOutWithMock(drvr, 'plug_vifs') drvr.plug_vifs(mox.IsA(instance), nw_info) self.mox.ReplayAll() migrate_data = { "block_migration": False, "instance_relative_path": "foo", "is_shared_block_storage": False, "is_shared_instance_path": False, } result = drvr.pre_live_migration( c, instance, vol, nw_info, None, migrate_data=migrate_data) if not target_ret: target_ret = self._generate_target_ret() self.assertEqual( result.to_legacy_dict( pre_migration_result=True)['pre_live_migration_result'], target_ret) def test_pre_live_migration_block_with_config_drive_mocked(self): # Creating testdata vol = {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sda'}, {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) def fake_true(*args, **kwargs): return True self.stubs.Set(configdrive, 'required_by', fake_true) instance = objects.Instance(**self.test_instance) c = context.get_admin_context() self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt, drvr.pre_live_migration, c, instance, vol, None, None, {'is_shared_instance_path': False, 'is_shared_block_storage': False, 'block_migration': False, 'instance_relative_path': 'foo'}) @mock.patch('nova.virt.driver.block_device_info_get_mapping', return_value=()) @mock.patch('nova.virt.configdrive.required_by', return_value=True) def 
test_pre_live_migration_block_with_config_drive_mocked_with_vfat( self, mock_required_by, block_device_info_get_mapping): self.flags(config_drive_format='vfat') # Creating testdata vol = {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sda'}, {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) res_data = drvr.pre_live_migration( self.context, instance, vol, [], None, {'is_shared_instance_path': False, 'is_shared_block_storage': False, 'block_migration': False, 'instance_relative_path': 'foo'}) res_data = res_data.to_legacy_dict(pre_migration_result=True) block_device_info_get_mapping.assert_called_once_with( {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sda'}, {'connection_info': 'dummy', 'mount_device': '/dev/sdb'} ]} ) self.assertEqual({'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'}, 'target_connect_addr': None, 'serial_listen_addr': '127.0.0.1', 'volume': {}}, res_data['pre_live_migration_result']) def test_pre_live_migration_vol_backed_works_correctly_mocked(self): # Creating testdata, using temp dir. 
with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) vol = {'block_device_mapping': [ {'connection_info': {'serial': '12345', u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}}, 'mount_device': '/dev/sda'}, {'connection_info': {'serial': '67890', u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}, 'mount_device': '/dev/sdb'}]} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) def fake_none(*args, **kwargs): return self.stubs.Set(drvr, '_create_images_and_backing', fake_none) class FakeNetworkInfo(object): def fixed_ips(self): return ["test_ip_addr"] inst_ref = objects.Instance(**self.test_instance) c = context.get_admin_context() nw_info = FakeNetworkInfo() # Creating mocks self.mox.StubOutWithMock(drvr, "_connect_volume") for v in vol['block_device_mapping']: disk_info = { 'bus': "scsi", 'dev': v['mount_device'].rpartition("/")[2], 'type': "disk" } drvr._connect_volume(v['connection_info'], disk_info) self.mox.StubOutWithMock(drvr, 'plug_vifs') drvr.plug_vifs(mox.IsA(inst_ref), nw_info) self.mox.ReplayAll() migrate_data = {'is_shared_instance_path': False, 'is_shared_block_storage': False, 'is_volume_backed': True, 'block_migration': False, 'instance_relative_path': inst_ref['name'], 'disk_over_commit': False, 'disk_available_mb': 123, 'image_type': 'qcow2', 'filename': 'foo', } ret = drvr.pre_live_migration(c, inst_ref, vol, nw_info, None, migrate_data) target_ret = { 'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'}, 'target_connect_addr': None, 'serial_listen_addr': '127.0.0.1', 'volume': { '12345': {'connection_info': {u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}, 'serial': '12345'}, 'disk_info': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'}}, '67890': {'connection_info': {u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}, 'serial': '67890'}, 'disk_info': {'bus': 
    def test_pre_live_migration_plug_vifs_retry_fails(self):
        """pre_live_migration propagates the error when plug_vifs fails on
        every one of the configured retries.
        """
        self.flags(live_migration_retry_count=3)
        instance = objects.Instance(**self.test_instance)

        def fake_plug_vifs(instance, network_info):
            # Always fail, so every retry attempt raises.
            raise processutils.ProcessExecutionError()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
        # Replace the inter-retry sleep with a zero-length yield so the test
        # does not actually wait between attempts.
        self.stubs.Set(eventlet.greenthread, 'sleep',
                       lambda x: eventlet.sleep(0))
        disk_info_json = jsonutils.dumps({})
        self.assertRaises(processutils.ProcessExecutionError,
                          drvr.pre_live_migration,
                          self.context, instance, block_device_info=None,
                          network_info=[], disk_info=disk_info_json)

    def test_pre_live_migration_plug_vifs_retry_works(self):
        """pre_live_migration succeeds when plug_vifs only succeeds on the
        final allowed retry attempt.
        """
        self.flags(live_migration_retry_count=3)
        called = {'count': 0}
        instance = objects.Instance(**self.test_instance)

        def fake_plug_vifs(instance, network_info):
            # Fail on every attempt but the last permitted one.
            called['count'] += 1
            if called['count'] < CONF.live_migration_retry_count:
                raise processutils.ProcessExecutionError()
            else:
                return

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
        # Replace the inter-retry sleep with a zero-length yield so the test
        # does not actually wait between attempts.
        self.stubs.Set(eventlet.greenthread, 'sleep',
                       lambda x: eventlet.sleep(0))
        disk_info_json = jsonutils.dumps({})
        drvr.pre_live_migration(self.context, instance,
                                block_device_info=None,
                                network_info=[], disk_info=disk_info_json)
'bar', 'disk_over_commit': False, 'disk_available_mb': 123, 'image_type': 'qcow2', 'block_migration': False}, {'is_shared_block_storage': False, 'is_shared_instance_path': True, 'is_volume_backed': False, 'filename': 'foo', 'instance_relative_path': 'bar', 'disk_over_commit': False, 'disk_available_mb': 123, 'image_type': 'qcow2', 'block_migration': True}] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) # creating mocks with test.nested( mock.patch.object(drvr, '_create_images_and_backing'), mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'), mock.patch.object(drvr, 'plug_vifs'), ) as ( create_image_mock, rules_mock, plug_mock, ): disk_info_json = jsonutils.dumps({}) for migrate_data in migrate_data_set: res = drvr.pre_live_migration(self.context, instance, block_device_info=None, network_info=[], disk_info=disk_info_json, migrate_data=migrate_data) self.assertFalse(create_image_mock.called) self.assertIsInstance(res, objects.LibvirtLiveMigrateData) def test_pre_live_migration_with_not_shared_instance_path(self): migrate_data = {'is_shared_block_storage': False, 'is_shared_instance_path': False, 'block_migration': False, 'instance_relative_path': 'foo'} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) def check_instance_dir(context, instance, instance_dir, disk_info, fallback_from_host=False): self.assertTrue(instance_dir) # creating mocks with test.nested( mock.patch.object(drvr, '_create_images_and_backing', side_effect=check_instance_dir), mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'), mock.patch.object(drvr, 'plug_vifs'), ) as ( create_image_mock, rules_mock, plug_mock, ): disk_info_json = jsonutils.dumps({}) res = drvr.pre_live_migration(self.context, instance, block_device_info=None, network_info=[], disk_info=disk_info_json, migrate_data=migrate_data) create_image_mock.assert_has_calls( 
    def test_pre_live_migration_recreate_disk_info(self):
        """A block migration with a non-shared instance path rewrites the
        destination's disk.info file from the disk info sent by the source.
        """
        migrate_data = {'is_shared_block_storage': False,
                        'is_shared_instance_path': False,
                        'block_migration': True,
                        'instance_relative_path': '/some/path/'}
        # Two raw disks (root + eph0) as reported by the source host.
        disk_info = [{'disk_size': 5368709120, 'type': 'raw',
                      'virt_disk_size': 5368709120,
                      'path': '/some/path/disk',
                      'backing_file': '', 'over_committed_disk_size': 0},
                     {'disk_size': 1073741824, 'type': 'raw',
                      'virt_disk_size': 1073741824,
                      'path': '/some/path/disk.eph0',
                      'backing_file': '', 'over_committed_disk_size': 0}]
        # Expected path -> format mapping written into disk.info.
        image_disk_info = {'/some/path/disk': 'raw',
                           '/some/path/disk.eph0': 'raw'}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        instance_path = os.path.dirname(disk_info[0]['path'])
        disk_info_path = os.path.join(instance_path, 'disk.info')

        with test.nested(
            mock.patch.object(os, 'mkdir'),
            mock.patch.object(fake_libvirt_utils, 'write_to_file'),
            mock.patch.object(drvr, '_create_images_and_backing')
        ) as (
            mkdir, write_to_file, create_images_and_backing
        ):
            drvr.pre_live_migration(self.context, instance,
                                    block_device_info=None,
                                    network_info=[],
                                    disk_info=jsonutils.dumps(disk_info),
                                    migrate_data=migrate_data)
            # disk.info must be (re)written with the JSON-serialized mapping.
            write_to_file.assert_called_with(disk_info_path,
                                             jsonutils.dumps(image_disk_info))
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file' self.mox.StubOutWithMock(os.path, "getsize") os.path.getsize('/test/disk').AndReturn((10737418240)) os.path.getsize('/test/disk.local').AndReturn((3328599655)) ret = ("image: /test/disk\n" "file format: raw\n" "virtual size: 20G (21474836480 bytes)\n" "disk size: 3.1G\n" "cluster_size: 2097152\n" "backing file: /test/dummy (actual path: /backing/file)\n") self.mox.StubOutWithMock(os.path, "exists") os.path.exists('/test/disk.local').AndReturn(True) self.mox.StubOutWithMock(utils, "execute") utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', '/test/disk.local').AndReturn((ret, '')) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) info = drvr.get_instance_disk_info(instance) info = jsonutils.loads(info) self.assertEqual(info[0]['type'], 'raw') self.assertEqual(info[0]['path'], '/test/disk') self.assertEqual(info[0]['disk_size'], 10737418240) self.assertEqual(info[0]['backing_file'], "") self.assertEqual(info[0]['over_committed_disk_size'], 0) self.assertEqual(info[1]['type'], 'qcow2') self.assertEqual(info[1]['path'], '/test/disk.local') self.assertEqual(info[1]['virt_disk_size'], 21474836480) self.assertEqual(info[1]['backing_file'], "file") self.assertEqual(info[1]['over_committed_disk_size'], 18146236825) def test_post_live_migration(self): vol = {'block_device_mapping': [ {'connection_info': { 'data': {'multipath_id': 'dummy1'}, 'serial': 'fake_serial1'}, 'mount_device': '/dev/sda', }, {'connection_info': { 'data': {}, 'serial': 'fake_serial2'}, 'mount_device': '/dev/sdb', }]} def fake_initialize_connection(context, volume_id, connector): return {'data': {}} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) fake_connector = {'host': 'fake'} inst_ref = {'id': 'foo'} cntx = context.get_admin_context() # Set up the mock expectations with test.nested( mock.patch.object(driver, 
'block_device_info_get_mapping', return_value=vol['block_device_mapping']), mock.patch.object(drvr, "get_volume_connector", return_value=fake_connector), mock.patch.object(drvr._volume_api, "initialize_connection", side_effect=fake_initialize_connection), mock.patch.object(drvr, '_disconnect_volume') ) as (block_device_info_get_mapping, get_volume_connector, initialize_connection, _disconnect_volume): drvr.post_live_migration(cntx, inst_ref, vol) block_device_info_get_mapping.assert_has_calls([ mock.call(vol)]) get_volume_connector.assert_has_calls([ mock.call(inst_ref)]) _disconnect_volume.assert_has_calls([ mock.call({'data': {'multipath_id': 'dummy1'}}, 'sda'), mock.call({'data': {}}, 'sdb')]) def test_get_instance_disk_info_excludes_volumes(self): # Test data instance = objects.Instance(**self.test_instance) dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "" "" "" "" "" "" "") # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance.name: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file' self.mox.StubOutWithMock(os.path, "getsize") os.path.getsize('/test/disk').AndReturn((10737418240)) os.path.getsize('/test/disk.local').AndReturn((3328599655)) ret = ("image: /test/disk\n" "file format: raw\n" "virtual size: 20G (21474836480 bytes)\n" "disk size: 3.1G\n" "cluster_size: 2097152\n" "backing file: /test/dummy (actual path: /backing/file)\n") self.mox.StubOutWithMock(os.path, "exists") os.path.exists('/test/disk.local').AndReturn(True) self.mox.StubOutWithMock(utils, "execute") utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', '/test/disk.local').AndReturn((ret, '')) 
self.mox.ReplayAll() conn_info = {'driver_volume_type': 'fake'} info = {'block_device_mapping': [ {'connection_info': conn_info, 'mount_device': '/dev/vdc'}, {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) info = drvr.get_instance_disk_info(instance, block_device_info=info) info = jsonutils.loads(info) self.assertEqual(info[0]['type'], 'raw') self.assertEqual(info[0]['path'], '/test/disk') self.assertEqual(info[0]['disk_size'], 10737418240) self.assertEqual(info[0]['backing_file'], "") self.assertEqual(info[0]['over_committed_disk_size'], 0) self.assertEqual(info[1]['type'], 'qcow2') self.assertEqual(info[1]['path'], '/test/disk.local') self.assertEqual(info[1]['virt_disk_size'], 21474836480) self.assertEqual(info[1]['backing_file'], "file") self.assertEqual(info[1]['over_committed_disk_size'], 18146236825) def test_get_instance_disk_info_no_bdinfo_passed(self): # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method # without access to Nova's block device information. 
We want to make # sure that we guess volumes mostly correctly in that case as well instance = objects.Instance(**self.test_instance) dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "") # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance.name: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi self.mox.StubOutWithMock(os.path, "getsize") os.path.getsize('/test/disk').AndReturn((10737418240)) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) info = drvr.get_instance_disk_info(instance) info = jsonutils.loads(info) self.assertEqual(1, len(info)) self.assertEqual(info[0]['type'], 'raw') self.assertEqual(info[0]['path'], '/test/disk') self.assertEqual(info[0]['disk_size'], 10737418240) self.assertEqual(info[0]['backing_file'], "") self.assertEqual(info[0]['over_committed_disk_size'], 0) def test_spawn_with_network_info(self): # Preparing mocks def fake_none(*args, **kwargs): return def fake_getLibVersion(): return fakelibvirt.FAKE_LIBVIRT_VERSION def fake_getCapabilities(): return """ cef19ce0-0ca2-11df-855d-b19fbce37686 x86_64 Penryn Intel """ def fake_baselineCPU(cpu, flag): return """ Penryn Intel """ # _fake_network_info must be called before create_fake_libvirt_mock(), # as _fake_network_info calls importutils.import_class() and # create_fake_libvirt_mock() mocks importutils.import_class(). 
network_info = _fake_network_info(self, 1) self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion, getCapabilities=fake_getCapabilities, getVersion=lambda: 1005001, baselineCPU=fake_baselineCPU) instance_ref = self.test_instance instance_ref['image_ref'] = 123456 # we send an int to test sha1 call instance = objects.Instance(**instance_ref) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) # Mock out the get_info method of the LibvirtDriver so that the polling # in the spawn method of the LibvirtDriver returns immediately self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info') libvirt_driver.LibvirtDriver.get_info(instance ).AndReturn(hardware.InstanceInfo(state=power_state.RUNNING)) # Start test self.mox.ReplayAll() with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt: del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr.firewall_driver, 'setup_basic_filtering', fake_none) self.stubs.Set(drvr.firewall_driver, 'prepare_instance_filter', fake_none) self.stubs.Set(imagebackend.Image, 'cache', fake_none) drvr.spawn(self.context, instance, image_meta, [], 'herp', network_info=network_info) path = os.path.join(CONF.instances_path, instance['name']) if os.path.isdir(path): shutil.rmtree(path) path = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) if os.path.isdir(path): shutil.rmtree(os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name)) def test_spawn_without_image_meta(self): self.create_image_called = False def fake_none(*args, **kwargs): return def fake_create_image(*args, **kwargs): self.create_image_called = True def fake_get_info(instance): return hardware.InstanceInfo(state=power_state.RUNNING) instance_ref = self.test_instance instance_ref['image_ref'] = 1 instance = objects.Instance(**instance_ref) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', 
fake_none) self.stubs.Set(drvr, '_create_image', fake_create_image) self.stubs.Set(drvr, '_create_domain_and_network', fake_none) self.stubs.Set(drvr, 'get_info', fake_get_info) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) drvr.spawn(self.context, instance, image_meta, [], None) self.assertTrue(self.create_image_called) drvr.spawn(self.context, instance, image_meta, [], None) self.assertTrue(self.create_image_called) def test_spawn_from_volume_calls_cache(self): self.cache_called_for_disk = False def fake_none(*args, **kwargs): return def fake_cache(*args, **kwargs): if kwargs.get('image_id') == 'my_fake_image': self.cache_called_for_disk = True def fake_get_info(instance): return hardware.InstanceInfo(state=power_state.RUNNING) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', fake_none) self.stubs.Set(imagebackend.Image, 'cache', fake_cache) self.stubs.Set(drvr, '_create_domain_and_network', fake_none) self.stubs.Set(drvr, 'get_info', fake_get_info) block_device_info = {'root_device_name': '/dev/vda', 'block_device_mapping': [ {'mount_device': 'vda', 'boot_index': 0} ] } image_meta = objects.ImageMeta.from_dict(self.test_image_meta) # Volume-backed instance created without image instance_ref = self.test_instance instance_ref['image_ref'] = '' instance_ref['root_device_name'] = '/dev/vda' instance_ref['uuid'] = uuidutils.generate_uuid() instance = objects.Instance(**instance_ref) drvr.spawn(self.context, instance, image_meta, [], None, block_device_info=block_device_info) self.assertFalse(self.cache_called_for_disk) # Booted from volume but with placeholder image instance_ref = self.test_instance instance_ref['image_ref'] = 'my_fake_image' instance_ref['root_device_name'] = '/dev/vda' instance_ref['uuid'] = uuidutils.generate_uuid() instance = objects.Instance(**instance_ref) drvr.spawn(self.context, instance, image_meta, [], None, block_device_info=block_device_info) 
self.assertFalse(self.cache_called_for_disk) # Booted from an image instance_ref['image_ref'] = 'my_fake_image' instance_ref['uuid'] = uuidutils.generate_uuid() instance = objects.Instance(**instance_ref) drvr.spawn(self.context, instance, image_meta, [], None) self.assertTrue(self.cache_called_for_disk) def test_start_lxc_from_volume(self): self.flags(virt_type="lxc", group='libvirt') def check_setup_container(image, container_dir=None): self.assertIsInstance(image, imgmodel.LocalBlockImage) self.assertEqual(image.path, '/dev/path/to/dev') return '/dev/nbd1' bdm = { 'guest_format': None, 'boot_index': 0, 'mount_device': '/dev/sda', 'connection_info': { 'driver_volume_type': 'iscsi', 'serial': 'afc1', 'data': { 'access_mode': 'rw', 'target_discovered': False, 'encrypted': False, 'qos_specs': None, 'target_iqn': 'iqn: volume-afc1', 'target_portal': 'ip: 3260', 'volume_id': 'afc1', 'target_lun': 1, 'auth_password': 'uj', 'auth_username': '47', 'auth_method': 'CHAP' } }, 'disk_bus': 'scsi', 'device_type': 'disk', 'delete_on_termination': False } def _connect_volume_side_effect(connection_info, disk_info): bdm['connection_info']['data']['device_path'] = '/dev/path/to/dev' def _get(key, opt=None): return bdm.get(key, opt) def getitem(key): return bdm[key] def setitem(key, val): bdm[key] = val bdm_mock = mock.MagicMock() bdm_mock.__getitem__.side_effect = getitem bdm_mock.__setitem__.side_effect = setitem bdm_mock.get = _get disk_mock = mock.MagicMock() disk_mock.source_path = '/dev/path/to/dev' block_device_info = {'block_device_mapping': [bdm_mock], 'root_device_name': '/dev/sda'} # Volume-backed instance created without image instance_ref = self.test_instance instance_ref['image_ref'] = '' instance_ref['root_device_name'] = '/dev/sda' instance_ref['ephemeral_gb'] = 0 instance_ref['uuid'] = uuidutils.generate_uuid() inst_obj = objects.Instance(**instance_ref) image_meta = objects.ImageMeta.from_dict({}) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) 
with test.nested( mock.patch.object(drvr, '_create_images_and_backing'), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'), mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'), mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'), mock.patch.object(drvr, '_create_domain'), mock.patch.object(drvr, '_connect_volume', side_effect=_connect_volume_side_effect), mock.patch.object(drvr, '_get_volume_config', return_value=disk_mock), mock.patch.object(drvr, 'get_info', return_value=hardware.InstanceInfo( state=power_state.RUNNING)), mock.patch('nova.virt.disk.api.setup_container', side_effect=check_setup_container), mock.patch('nova.virt.disk.api.teardown_container'), mock.patch.object(objects.Instance, 'save')): drvr.spawn(self.context, inst_obj, image_meta, [], None, network_info=[], block_device_info=block_device_info) self.assertEqual('/dev/nbd1', inst_obj.system_metadata.get( 'rootfs_device_name')) def test_spawn_with_pci_devices(self): def fake_none(*args, **kwargs): return None def fake_get_info(instance): return hardware.InstanceInfo(state=power_state.RUNNING) class FakeLibvirtPciDevice(object): def dettach(self): return None def reset(self): return None def fake_node_device_lookup_by_name(address): pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}" % dict(hex='[\da-f]', oct='[0-8]')) pattern = re.compile(pattern) if pattern.match(address) is None: raise fakelibvirt.libvirtError() return FakeLibvirtPciDevice() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', fake_none) self.stubs.Set(drvr, '_create_image', fake_none) self.stubs.Set(drvr, '_create_domain_and_network', fake_none) self.stubs.Set(drvr, 'get_info', fake_get_info) drvr._conn.nodeDeviceLookupByName = \ fake_node_device_lookup_by_name instance_ref = self.test_instance instance_ref['image_ref'] = 'my_fake_image' instance = objects.Instance(**instance_ref) 
    def test_chown_disk_config_for_instance(self):
        """_chown_disk_config_for_instance chowns disk.config to the current
        uid when the file exists in the instance directory.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        # Record phase: stub the path lookup, the existence check and chown.
        self.mox.StubOutWithMock(fake_libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(fake_libvirt_utils, 'chown')
        fake_libvirt_utils.get_instance_path(instance).AndReturn('/tmp/uuid')
        os.path.exists('/tmp/uuid/disk.config').AndReturn(True)
        fake_libvirt_utils.chown('/tmp/uuid/disk.config', os.getuid())

        self.mox.ReplayAll()
        drvr._chown_disk_config_for_instance(instance)
self.stubs.Set(drvr, 'get_info', fake_get_info) if mkfs: self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND', {os_type: 'mkfs.ext4 --label %(fs_label)s %(target)s'}) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) drvr._create_image(context, instance, disk_info['mapping']) drvr._get_guest_xml(self.context, instance, None, disk_info, image_meta) wantFiles = [ {'filename': '356a192b7913b04c54574d18c28d46e6395428ab', 'size': 10 * units.Gi}, {'filename': filename, 'size': 20 * units.Gi}, ] self.assertEqual(gotFiles, wantFiles) def test_create_image_plain_os_type_blank(self): self._test_create_image_plain(os_type='', filename=self._EPHEMERAL_20_DEFAULT, mkfs=False) def test_create_image_plain_os_type_none(self): self._test_create_image_plain(os_type=None, filename=self._EPHEMERAL_20_DEFAULT, mkfs=False) def test_create_image_plain_os_type_set_no_fs(self): self._test_create_image_plain(os_type='test', filename=self._EPHEMERAL_20_DEFAULT, mkfs=False) def test_create_image_plain_os_type_set_with_fs(self): ephemeral_file_name = ('ephemeral_20_%s' % utils.get_hash_str( 'mkfs.ext4 --label %(fs_label)s %(target)s')[:7]) self._test_create_image_plain(os_type='test', filename=ephemeral_file_name, mkfs=True) @mock.patch('nova.virt.libvirt.driver.imagecache') def test_create_image_initrd(self, mock_imagecache): INITRD = self._EPHEMERAL_20_DEFAULT + '.initrd' KERNEL = 'vmlinuz.' 
+ self._EPHEMERAL_20_DEFAULT mock_imagecache.get_cache_fname.side_effect = \ [KERNEL, INITRD, self._EPHEMERAL_20_DEFAULT + '.img'] filename = self._EPHEMERAL_20_DEFAULT gotFiles = [] outer = self def fake_image(self, instance, name, image_type=''): class FakeImage(imagebackend.Image): def __init__(self, instance, name, is_block_dev=False): self.path = os.path.join(instance['name'], name) self.is_block_dev = is_block_dev def create_image(self, prepare_template, base, size, *args, **kwargs): pass def cache(self, fetch_func, filename, size=None, *args, **kwargs): gotFiles.append({'filename': filename, 'size': size}) if filename == INITRD: outer.assertEqual(fetch_func, fake_libvirt_utils.fetch_raw_image) if filename == KERNEL: outer.assertEqual(fetch_func, fake_libvirt_utils.fetch_raw_image) def resize_image(self, size): pass def snapshot(self, name): pass return FakeImage(instance, name) instance_ref = self.test_instance instance_ref['image_ref'] = 1 instance_ref['kernel_id'] = 2 instance_ref['ramdisk_id'] = 3 instance_ref['os_type'] = 'test' instance = objects.Instance(**instance_ref) driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with test.nested( mock.patch.object(nova.virt.libvirt.imagebackend.Backend, "image", fake_image), mock.patch.object(driver, '_get_guest_xml'), mock.patch.object(driver, '_create_domain_and_network'), mock.patch.object(driver, 'get_info', return_value=[hardware.InstanceInfo(state=power_state.RUNNING)]) ): image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) driver._create_image(context, instance, disk_info['mapping']) wantFiles = [ {'filename': KERNEL, 'size': None}, {'filename': INITRD, 'size': None}, {'filename': self._EPHEMERAL_20_DEFAULT + '.img', 'size': 10 * units.Gi}, {'filename': filename, 'size': 20 * units.Gi}, ] self.assertEqual(wantFiles, gotFiles) def _create_image_helper(self, callback, suffix=''): gotFiles = [] 
imported_files = [] def fake_image(self, instance, name, image_type=''): class FakeImage(imagebackend.Image): def __init__(self, instance, name, is_block_dev=False): self.path = os.path.join(instance['name'], name) self.is_block_dev = is_block_dev def create_image(self, prepare_template, base, size, *args, **kwargs): pass def resize_image(self, size): pass def cache(self, fetch_func, filename, size=None, *args, **kwargs): gotFiles.append({'filename': filename, 'size': size}) def import_file(self, instance, local_filename, remote_filename): imported_files.append((local_filename, remote_filename)) def snapshot(self, name): pass return FakeImage(instance, name) def fake_none(*args, **kwargs): return def fake_get_info(instance): return hardware.InstanceInfo(state=power_state.RUNNING) # Stop 'libvirt_driver._create_image' touching filesystem self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image", fake_image) instance_ref = self.test_instance instance_ref['image_ref'] = 1 # NOTE(mikal): use this callback to tweak the instance to match # what you're trying to test callback(instance_ref) instance = objects.Instance(**instance_ref) # Turn on some swap to exercise that codepath in _create_image instance.flavor.swap = 500 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', fake_none) self.stubs.Set(drvr, '_create_domain_and_network', fake_none) self.stubs.Set(drvr, 'get_info', fake_get_info) self.stubs.Set(instance_metadata, 'InstanceMetadata', fake_none) self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder, 'make_drive', fake_none) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) drvr._create_image(context, instance, disk_info['mapping'], suffix=suffix) drvr._get_guest_xml(self.context, instance, None, disk_info, image_meta) return gotFiles, imported_files def test_create_image_with_swap(self): def 
    def test_create_image_with_configdrive(self):
        """_create_image builds a config drive and imports it into the
        image backend store as 'disk.config'.
        """
        def enable_configdrive(instance_ref):
            instance_ref['config_drive'] = 'true'

        # Ensure that we create a config drive and then import it into the
        # image backend store
        _, imported_files = self._create_image_helper(enable_configdrive)
        self.assertTrue(imported_files[0][0].endswith('/disk.config'))
        self.assertEqual('disk.config', imported_files[0][1])

    def test_create_image_with_configdrive_rescue(self):
        """With a '.rescue' suffix, the imported config drive is named
        'disk.config.rescue'.
        """
        def enable_configdrive(instance_ref):
            instance_ref['config_drive'] = 'true'

        # Ensure that we create a config drive and then import it into the
        # image backend store
        _, imported_files = self._create_image_helper(enable_configdrive,
                                                      suffix='.rescue')
        self.assertTrue(imported_files[0][0].endswith('/disk.config.rescue'))
        self.assertEqual('disk.config.rescue', imported_files[0][1])

    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache',
                       side_effect=exception.ImageNotFound(image_id='fake-id'))
    def test_create_image_not_exist_no_fallback(self, mock_cache):
        """Without a fallback host, a missing backing image makes
        _create_image raise ImageNotFound.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        self.assertRaises(exception.ImageNotFound,
                          drvr._create_image,
                          self.context, instance, disk_info['mapping'])
size=None, *args, **kwargs): def second_call(fetch_func, filename, size=None, *args, **kwargs): # call copy_from_host ourselves because we mocked image.cache() fetch_func('fake-target', 'fake-max-size') # further calls have no side effect mock_cache.side_effect = None mock_cache.side_effect = second_call # raise an error only the first call raise exception.ImageNotFound(image_id='fake-id') mock_cache.side_effect = side_effect drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) with mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image') as mock_copy: drvr._create_image(self.context, instance, disk_info['mapping'], fallback_from_host='fake-source-host') mock_copy.assert_called_once_with(src='fake-target', dest='fake-target', host='fake-source-host', receive=True) @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache') def test_create_image_resize_snap_backend(self, mock_cache): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend instance = objects.Instance(**self.test_instance) instance.task_state = task_states.RESIZE_FINISH image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) with mock.patch.object(drvr.image_backend, 'create_snap') as mock_crt: drvr._create_image(self.context, instance, disk_info['mapping']) mock_crt.assert_called_once_with( libvirt_utils.RESIZE_SNAPSHOT_NAME) @mock.patch.object(utils, 'execute') def test_create_ephemeral_specified_fs(self, mock_exec): self.flags(default_ephemeral_format='ext3') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux', is_block_dev=True, 
                               max_size=20, specified_fs='ext4')
        mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L',
                                          'myVol', '/dev/something',
                                          run_as_root=True)

    def test_create_ephemeral_specified_fs_not_valid(self):
        # An ephemeral BDM requesting an unsupported guest_format must make
        # _create_image raise InvalidBDMFormat before any disk is built.
        CONF.set_override('default_ephemeral_format', 'ext4')
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': 'virtio',
                       'device_name': '/dev/vdb',
                       'guest_format': 'dummy',
                       'size': 1}]
        block_device_info = {
                'ephemerals': ephemerals}
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        # drop the ephemeral mapping so only the BDM-supplied one remains
        disk_info['mapping'].pop('disk.local')

        with test.nested(
            mock.patch.object(utils, 'execute'),
            mock.patch.object(drvr, 'get_info'),
            mock.patch.object(drvr, '_create_domain_and_network'),
            mock.patch.object(imagebackend.Image, 'verify_base_size'),
            mock.patch.object(imagebackend.Image, 'get_disk_size')):
            # NOTE(review): this passes the ``context`` module rather than
            # ``self.context``; the BDM format check fails before the
            # context is consulted, so the test still passes -- confirm
            # whether ``self.context`` was intended.
            self.assertRaises(exception.InvalidBDMFormat, drvr._create_image,
                              context, instance, disk_info['mapping'],
                              block_device_info=block_device_info)

    def test_create_ephemeral_default(self):
        # No specified_fs argument: mkfs uses the ext4 default.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                      '/dev/something', run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True, max_size=20)

    def test_create_ephemeral_with_conf(self):
        # default_ephemeral_format from config drives the mkfs -t value.
        CONF.set_override('default_ephemeral_format', 'ext4')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                      '/dev/something', run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)

    def test_create_ephemeral_with_arbitrary(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # An arbitrary _MKFS_COMMAND template must be used verbatim.
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {'linux': 'mkfs.ext4 --label %(fs_label)s %(target)s'})
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs.ext4', '--label', 'myVol', '/dev/something',
                      run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)

    def test_create_ephemeral_with_ext3(self):
        # Same as the arbitrary-template test, but with an ext3 command.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something',
                      run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)

    def test_create_swap_default(self):
        # mkswap is expected to run without root privileges.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkswap', '/dev/something', run_as_root=False)
        self.mox.ReplayAll()

        drvr._create_swap('/dev/something', 1, max_size=20)

    def test_get_console_output_file(self):
        # Only the last MAX_CONSOLE_BYTES of the console log are returned.
        fake_libvirt_utils.files['console.log'] = '01234567890'

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_dir = (os.path.join(tmpdir, instance['name']))
            console_log = '%s/console.log' % (console_dir)
            # NOTE(review): the domain XML payload appears to have been
            # lost in extraction; the original interpolates console_log
            # into a console-device definition -- confirm against VCS.
            fake_dom_xml = """
                """ % console_log

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            try:
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                with mock.patch('os.path.exists', return_value=True):
                    output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max
            self.assertEqual('67890', output)

    def test_get_console_output_file_missing(self):
        # A missing console log yields empty output, not an exception.
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_log = os.path.join(tmpdir, instance['name'],
                                       'non-existent.log')
            # NOTE(review): domain XML payload lost in extraction --
            # confirm against VCS.
            fake_dom_xml = """
                """ % console_log

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            with mock.patch('os.path.exists', return_value=False):
                output = drvr.get_console_output(self.context, instance)

            self.assertEqual('', output)

    def test_get_console_output_pty(self):
        # Console backed by a pty: the driver flushes the pty to a log
        # file and returns the last MAX_CONSOLE_BYTES of it.
        fake_libvirt_utils.files['pty'] = '01234567890'

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_dir = (os.path.join(tmpdir, instance['name']))
            pty_file = '%s/fake_pty' % (console_dir)
            # NOTE(review): domain XML payload lost in extraction --
            # confirm against VCS.
            fake_dom_xml = """
                """ % pty_file

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            def _fake_flush(self, fake_pty):
                return 'foo'

            def _fake_append_to_file(self, data, fpath):
                return 'pty'

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
            libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
            libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            try:
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max

            self.assertEqual('67890', output)

    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_console_output_not_available(self, mock_get_xml,
                                              get_domain):
        # Guest XML with no usable console device: ConsoleNotAvailable.
        xml = """
""" mock_get_xml.return_value = xml get_domain.return_value = mock.MagicMock() instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.ConsoleNotAvailable, drvr.get_console_output, self.context, instance) def test_get_host_ip_addr(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ip = drvr.get_host_ip_addr() self.assertEqual(ip, CONF.my_ip) @mock.patch.object(libvirt_driver.LOG, 'warn') @mock.patch('nova.compute.utils.get_machine_ips') def test_get_host_ip_addr_failure(self, mock_ips, mock_log): mock_ips.return_value = ['8.8.8.8', '75.75.75.75'] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.get_host_ip_addr() mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was ' u'not found on any of the ' u'interfaces: %(ifaces)s', {'ifaces': '8.8.8.8, 75.75.75.75', 'my_ip': mock.ANY}) def test_conn_event_handler(self): self.mox.UnsetStubs() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) service_mock = mock.MagicMock() service_mock.disabled.return_value = False with test.nested( mock.patch.object(drvr._host, "_connect", side_effect=fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "Failed to connect to host", error_code= fakelibvirt.VIR_ERR_INTERNAL_ERROR)), mock.patch.object(drvr._host, "_init_events", return_value=None), mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock)): # verify that the driver registers for the close callback # and re-connects after receiving the callback self.assertRaises(exception.HypervisorUnavailable, drvr.init_host, "wibble") self.assertTrue(service_mock.disabled) def test_command_with_broken_connection(self): self.mox.UnsetStubs() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) service_mock = mock.MagicMock() service_mock.disabled.return_value = False with test.nested( mock.patch.object(drvr._host, "_connect", 
                              side_effect=fakelibvirt.make_libvirtError(
                                  fakelibvirt.libvirtError,
                                  "Failed to connect to host",
                                  error_code=
                                  fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            drvr.init_host("wibble")
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr.get_num_instances)
            self.assertTrue(service_mock.disabled)

    def test_service_resume_after_broken_connection(self):
        # A successful reconnect should re-enable a previously disabled
        # service and clear its disabled_reason.
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = True
        with test.nested(
            mock.patch.object(drvr._host, "_connect",
                              return_value=mock.MagicMock()),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            drvr.init_host("wibble")
            drvr.get_num_instances()
            self.assertTrue(not service_mock.disabled and
                            service_mock.disabled_reason is None)

    @mock.patch.object(objects.Instance, 'save')
    def test_immediate_delete(self, mock_save):
        # Destroying an instance unknown to libvirt must still succeed
        # and persist the instance state change.
        def fake_get_domain(instance):
            raise exception.InstanceNotFound(instance_id=instance.uuid)

        def fake_delete_instance_files(instance):
            pass

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)

        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, {})
        mock_save.assert_called_once_with()

    @mock.patch.object(objects.Instance, 'get_by_uuid')
    @mock.patch.object(objects.Instance, 'obj_load_attr', autospec=True)
@mock.patch.object(objects.Instance, 'save', autospec=True) @mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy') @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files') @mock.patch.object(libvirt_driver.LibvirtDriver, '_disconnect_volume') @mock.patch.object(driver, 'block_device_info_get_mapping') @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain') def _test_destroy_removes_disk(self, mock_undefine_domain, mock_mapping, mock_disconnect_volume, mock_delete_instance_files, mock_destroy, mock_inst_save, mock_inst_obj_load_attr, mock_get_by_uuid, volume_fail=False): instance = objects.Instance(self.context, **self.test_instance) vol = {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]} mock_mapping.return_value = vol['block_device_mapping'] mock_delete_instance_files.return_value = True mock_get_by_uuid.return_value = instance if volume_fail: mock_disconnect_volume.return_value = ( exception.VolumeNotFound('vol')) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.destroy(self.context, instance, [], vol) def test_destroy_removes_disk(self): self._test_destroy_removes_disk(volume_fail=False) def test_destroy_removes_disk_volume_fails(self): self._test_destroy_removes_disk(volume_fail=True) @mock.patch.object(libvirt_driver.LibvirtDriver, 'unplug_vifs') @mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy') @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain') def test_destroy_not_removes_disk(self, mock_undefine_domain, mock_destroy, mock_unplug_vifs): instance = fake_instance.fake_instance_obj( None, name='instancename', id=1, uuid='875a8070-d0b9-4949-8b31-104d125c9a64') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.destroy(self.context, instance, [], None, False) @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup') @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container') @mock.patch.object(host.Host, 
                       'get_domain')
    def test_destroy_lxc_calls_teardown_container(self, mock_get_domain,
                                                  mock_teardown_container,
                                                  mock_cleanup):
        # LXC guests require the container filesystem to be torn down
        # once the domain has been destroyed.
        self.flags(virt_type='lxc', group='libvirt')
        fake_domain = FakeVirtDomain()

        def destroy_side_effect(*args, **kwargs):
            # flip the fake domain to SHUTDOWN so destroy() sees it stop
            fake_domain._info[0] = power_state.SHUTDOWN

        with mock.patch.object(fake_domain, 'destroy',
               side_effect=destroy_side_effect) as mock_domain_destroy:
            mock_get_domain.return_value = fake_domain
            instance = objects.Instance(**self.test_instance)

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            network_info = []
            drvr.destroy(self.context, instance, network_info, None, False)

            mock_get_domain.assert_has_calls([mock.call(instance),
                                              mock.call(instance)])
            mock_domain_destroy.assert_called_once_with()
            mock_teardown_container.assert_called_once_with(instance)
            mock_cleanup.assert_called_once_with(self.context, instance,
                                                 network_info, None, False,
                                                 None)

    @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
    @mock.patch.object(host.Host, 'get_domain')
    def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
            mock_get_domain, mock_teardown_container, mock_cleanup):
        # Teardown must run even when libvirt no longer knows the domain.
        self.flags(virt_type='lxc', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        inf_exception = exception.InstanceNotFound(instance_id=instance.uuid)
        mock_get_domain.side_effect = inf_exception

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = []
        drvr.destroy(self.context, instance, network_info, None, False)

        mock_get_domain.assert_has_calls([mock.call(instance),
                                          mock.call(instance)])
        mock_teardown_container.assert_called_once_with(instance)
        mock_cleanup.assert_called_once_with(self.context, instance,
                                             network_info, None, False,
                                             None)

    def test_reboot_different_ids(self):
        # Soft reboot succeeds when the domain ID changes, which shows
        # the guest actually restarted.
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake',
                      'fake', 'fake', 'also_fake')
        self.reboot_create_called = False

        # Mock domain: record/replay expectations -- the ID changes after
        # shutdown, signalling a successful soft reboot.
        mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
        mock_domain.info().AndReturn(
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.shutdown()
        mock_domain.info().AndReturn(
            (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple)
        mock_domain.ID().AndReturn('some_other_fake_id')
        mock_domain.ID().AndReturn('some_other_fake_id')

        self.mox.ReplayAll()

        def fake_get_domain(instance):
            return mock_domain

        def fake_create_domain(**kwargs):
            self.reboot_create_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, '_create_domain', fake_create_domain)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_create_called)

    @mock.patch.object(pci_manager, 'get_instance_pci_devs')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(greenthread, 'sleep')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
                             mock_sleep, mock_loopingcall,
                             mock_get_instance_pci_devs):
        # The domain ID stays the same across the soft-reboot window, so
        # the driver must fall back to a hard reboot.
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_hard_reboot_called = False

        # Mock domain
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        return_values = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple,
                         (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple]
        mock_domain.info.side_effect = return_values
        mock_domain.ID.return_value = 'some_fake_id'
        mock_domain.shutdown.side_effect = mock.Mock()

        def fake_hard_reboot(*args, **kwargs):
            self.reboot_hard_reboot_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_get_domain.return_value = mock_domain
        mock_hard_reboot.side_effect = fake_hard_reboot
        mock_loopingcall.return_value = FakeLoopingCall()
        mock_get_instance_pci_devs.return_value = []
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_hard_reboot_called)

    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_soft_reboot_libvirt_exception(self, mock_get_domain,
                                           mock_hard_reboot):
        # Tests that a hard reboot is performed when a soft reboot results
        # in raising a libvirtError.
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')

        # setup mocks
        mock_virDomain = mock.Mock(fakelibvirt.virDomain)
        mock_virDomain.info.return_value = (
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_virDomain.ID.return_value = 'some_fake_id'
        mock_virDomain.shutdown.side_effect = fakelibvirt.libvirtError('Err')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        context = None
        instance = objects.Instance(**self.test_instance)
        network_info = []
        mock_get_domain.return_value = mock_virDomain

        drvr.reboot(context, instance, network_info, 'SOFT')

    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def _test_resume_state_on_host_boot_with_state(self, state,
                                                   mock_get_domain,
                                                   mock_hard_reboot):
        # Helper: boot-time resume only hard-reboots guests whose power
        # state is outside the ignored set asserted by the caller.
        mock_virDomain = mock.Mock(fakelibvirt.virDomain)
        mock_virDomain.info.return_value = ([state, None, None, None, None])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.return_value = mock_virDomain
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)

        drvr.resume_state_on_host_boot(self.context, instance, network_info,
                                       block_device_info=None)
ignored_states = (power_state.RUNNING, power_state.SUSPENDED, power_state.NOSTATE, power_state.PAUSED) self.assertEqual(mock_hard_reboot.called, state not in ignored_states) def test_resume_state_on_host_boot_with_running_state(self): self._test_resume_state_on_host_boot_with_state(power_state.RUNNING) def test_resume_state_on_host_boot_with_suspended_state(self): self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED) def test_resume_state_on_host_boot_with_paused_state(self): self._test_resume_state_on_host_boot_with_state(power_state.PAUSED) def test_resume_state_on_host_boot_with_nostate(self): self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE) def test_resume_state_on_host_boot_with_shutdown_state(self): self._test_resume_state_on_host_boot_with_state(power_state.RUNNING) def test_resume_state_on_host_boot_with_crashed_state(self): self._test_resume_state_on_host_boot_with_state(power_state.CRASHED) @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot') @mock.patch.object(host.Host, 'get_domain') def test_resume_state_on_host_boot_with_instance_not_found_on_driver( self, mock_get_domain, mock_hard_reboot): instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_domain.side_effect = exception.InstanceNotFound( instance_id='fake') drvr.resume_state_on_host_boot(self.context, instance, network_info=[], block_device_info=None) mock_hard_reboot.assert_called_once_with(self.context, instance, [], None) @mock.patch('nova.virt.libvirt.LibvirtDriver.get_info') @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network') @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing') @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml') @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info') @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info') @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy') def 
test_hard_reboot(self, mock_destroy, mock_get_disk_info, mock_get_instance_disk_info, mock_get_guest_xml, mock_create_images_and_backing, mock_create_domain_and_network, mock_get_info): self.context.auth_token = True # any non-None value will suffice instance = objects.Instance(**self.test_instance) instance_path = libvirt_utils.get_instance_path(instance) network_info = _fake_network_info(self, 1) block_device_info = None dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "") drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN), hardware.InstanceInfo(state=power_state.RUNNING)] mock_get_info.side_effect = return_values backing_disk_info = [{"virt_disk_size": 2}] mock_get_disk_info.return_value = mock.sentinel.disk_info mock_get_guest_xml.return_value = dummyxml mock_get_instance_disk_info.return_value = backing_disk_info drvr._hard_reboot(self.context, instance, network_info, block_device_info) # make sure that _create_images_and_backing is passed the disk_info # returned from _get_instance_disk_info and not the one that is in # scope from blockinfo.get_disk_info mock_create_images_and_backing.assert_called_once_with(self.context, instance, instance_path, backing_disk_info) # make sure that _create_domain_and_network is passed the disk_info # returned from blockinfo.get_disk_info and not the one that's # returned from _get_instance_disk_info mock_create_domain_and_network.assert_called_once_with(self.context, dummyxml, instance, network_info, mock.sentinel.disk_info, block_device_info=block_device_info, reboot=True, vifs_already_plugged=True) @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall') @mock.patch('nova.pci.manager.get_instance_pci_devs') @mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use') @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network') 
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.utils.write_to_file')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
    @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
    def test_hard_reboot_does_not_call_glance_show(self,
            mock_destroy, mock_get_disk_info, mock_get_guest_config,
            mock_get_instance_path, mock_write_to_file,
            mock_get_instance_disk_info, mock_create_images_and_backing,
            mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
            mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
        """For a hard reboot, we shouldn't need an additional call to glance
        to get the image metadata.

        This is important for automatically spinning up instances on a
        host-reboot, since we won't have a user request context that'll
        allow the Glance request to go through. We have to rely on the
        cached image metadata, instead.

        https://bugs.launchpad.net/nova/+bug/1339386
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        network_info = mock.MagicMock()
        block_device_info = mock.MagicMock()
        mock_get_disk_info.return_value = {}
        mock_get_guest_config.return_value = mock.MagicMock()
        mock_get_instance_path.return_value = '/foo'
        mock_looping_call.return_value = mock.MagicMock()
        drvr._image_api = mock.MagicMock()

        drvr._hard_reboot(self.context, instance, network_info,
                          block_device_info)

        # the cached metadata must be used -- no Glance round-trip
        self.assertFalse(drvr._image_api.get.called)
        mock_ensure_tree.assert_called_once_with('/foo')

    def test_suspend(self):
        # suspend() must managed-save the guest without rescanning its
        # disks (no encrypted ephemeral key, so no dmcrypt volumes).
        guest = libvirt_guest.Guest(FakeVirtDomain(id=1))
        dom = guest._domain

        instance = objects.Instance(**self.test_instance)
        instance.ephemeral_key_uuid = None

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        @mock.patch.object(dmcrypt, 'delete_volume')
        @mock.patch.object(conn, '_get_instance_disk_info', return_value=[])
        @mock.patch.object(conn, '_detach_sriov_ports')
        @mock.patch.object(conn, '_detach_pci_devices')
        @mock.patch.object(pci_manager, 'get_instance_pci_devs',
                           return_value='pci devs')
        @mock.patch.object(conn._host, 'get_guest', return_value=guest)
        def suspend(mock_get_guest, mock_get_instance_pci_devs,
                    mock_detach_pci_devices, mock_detach_sriov_ports,
                    mock_get_instance_disk_info,
                    mock_delete_volume):
            mock_managedSave = mock.Mock()
            dom.managedSave = mock_managedSave

            conn.suspend(self.context, instance)

            mock_managedSave.assert_called_once_with(0)
            self.assertFalse(mock_get_instance_disk_info.called)
            mock_delete_volume.assert_has_calls([mock.call(disk['path'])
                for disk in mock_get_instance_disk_info.return_value], False)

        suspend()

    @mock.patch.object(time, 'sleep')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain')
    @mock.patch.object(host.Host, 'get_domain')
    def _test_clean_shutdown(self, mock_get_domain, mock_create_domain,
                             mock_sleep, seconds_to_shutdown,
                             timeout, retry_interval,
                             shutdown_attempts,
                             succeeds):
        # Helper: scripts a sequence of domain.info() results and counts
        # how many shutdown() attempts _clean_shutdown issues before the
        # guest stops (or the timeout expires).
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        shutdown_count = []

        # Mock domain
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        return_infos = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple]
        return_shutdowns = [shutdown_count.append("shutdown")]
        retry_countdown = retry_interval
        for x in range(min(seconds_to_shutdown, timeout)):
            return_infos.append(
                (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
            if retry_countdown == 0:
                # a retry interval elapsed: another shutdown() is expected
                return_shutdowns.append(shutdown_count.append("shutdown"))
                retry_countdown = retry_interval
            else:
                retry_countdown -= 1

        if seconds_to_shutdown < timeout:
            return_infos.append(
                (libvirt_guest.VIR_DOMAIN_SHUTDOWN,) + info_tuple)

        mock_domain.info.side_effect = return_infos
        mock_domain.shutdown.side_effect = return_shutdowns

        def fake_create_domain(**kwargs):
            self.reboot_create_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_get_domain.return_value = mock_domain
        mock_create_domain.side_effect = fake_create_domain
        result = drvr._clean_shutdown(instance, timeout, retry_interval)

        self.assertEqual(succeeds, result)
        self.assertEqual(shutdown_attempts, len(shutdown_count))

    def test_clean_shutdown_first_time(self):
        # Guest stops within the timeout after a single shutdown attempt.
        self._test_clean_shutdown(seconds_to_shutdown=2,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=1,
                                  succeeds=True)

    def test_clean_shutdown_with_retry(self):
        # Guest stops, but only after a second shutdown attempt.
        self._test_clean_shutdown(seconds_to_shutdown=4,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=2,
                                  succeeds=True)

    def test_clean_shutdown_failure(self):
        # Guest never stops within the timeout: clean shutdown fails.
        self._test_clean_shutdown(seconds_to_shutdown=6,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=2,
                                  succeeds=False)

    def test_clean_shutdown_no_wait(self):
        # Zero timeout: one attempt, no waiting, immediate failure.
        self._test_clean_shutdown(seconds_to_shutdown=6,
                                  timeout=0,
                                  retry_interval=3,
                                  shutdown_attempts=1,
                                  succeeds=False)

    @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
    @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
    @mock.patch.object(utils, 'get_image_from_system_metadata',
return_value=None) def test_attach_sriov_ports(self, mock_get_image_metadata, mock_ID, mock_attachDevice): instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 1) network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT guest = libvirt_guest.Guest(FakeVirtDomain()) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._attach_sriov_ports(self.context, instance, guest, network_info) mock_get_image_metadata.assert_called_once_with( instance.system_metadata) self.assertTrue(mock_attachDevice.called) @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags') @mock.patch.object(FakeVirtDomain, 'ID', return_value=1) @mock.patch.object(utils, 'get_image_from_system_metadata', return_value=None) def test_attach_sriov_ports_with_info_cache(self, mock_get_image_metadata, mock_ID, mock_attachDevice): instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 1) network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT instance.info_cache = objects.InstanceInfoCache( network_info=network_info) guest = libvirt_guest.Guest(FakeVirtDomain()) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._attach_sriov_ports(self.context, instance, guest, None) mock_get_image_metadata.assert_called_once_with( instance.system_metadata) self.assertTrue(mock_attachDevice.called) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def _test_detach_sriov_ports(self, mock_has_min_version, vif_type): instance = objects.Instance(**self.test_instance) expeted_pci_slot = "0000:00:00.0" network_info = _fake_network_info(self, 1) network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT # some more adjustments for the fake network_info so that # the correct get_config function will be executed (vif's # get_config_hw_veb - which is according to the real SRIOV vif) # and most importantly the pci_slot which is translated to # cfg.source_dev, then to PciDevice.address and sent to # 
_detach_pci_devices network_info[0]['profile'] = dict(pci_slot=expeted_pci_slot) network_info[0]['type'] = vif_type network_info[0]['details'] = dict(vlan="2145") instance.info_cache = objects.InstanceInfoCache( network_info=network_info) # fill the pci_devices of the instance so that # pci_manager.get_instance_pci_devs will not return an empty list # which will eventually fail the assertion for detachDeviceFlags expected_pci_device_obj = ( objects.PciDevice(address=expeted_pci_slot, request_id=None)) instance.pci_devices = objects.PciDeviceList() instance.pci_devices.objects = [expected_pci_device_obj] domain = FakeVirtDomain() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) guest = libvirt_guest.Guest(domain) with mock.patch.object(drvr, '_detach_pci_devices') as mock_detach_pci: drvr._detach_sriov_ports(self.context, instance, guest) mock_detach_pci.assert_called_once_with( guest, [expected_pci_device_obj]) def test_detach_sriov_ports_interface_interface_hostdev(self): # Note: test detach_sriov_ports method for vif with config # LibvirtConfigGuestInterface self._test_detach_sriov_ports(vif_type="hw_veb") def test_detach_sriov_ports_interface_pci_hostdev(self): # Note: test detach_sriov_ports method for vif with config # LibvirtConfigGuestHostdevPCI self._test_detach_sriov_ports(vif_type="ib_hostdev") @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags') def test_detach_duplicate_mac_sriov_ports(self, mock_detachDeviceFlags, mock_has_min_version): instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 2) for network_info_inst in network_info: network_info_inst['vnic_type'] = network_model.VNIC_TYPE_DIRECT network_info_inst['type'] = "hw_veb" network_info_inst['details'] = dict(vlan="2145") network_info_inst['address'] = "fa:16:3e:96:2a:48" network_info[0]['profile'] = dict(pci_slot="0000:00:00.0") network_info[1]['profile'] = 
dict(pci_slot="0000:00:00.1") instance.info_cache = objects.InstanceInfoCache( network_info=network_info) # fill the pci_devices of the instance so that # pci_manager.get_instance_pci_devs will not return an empty list # which will eventually fail the assertion for detachDeviceFlags instance.pci_devices = objects.PciDeviceList() instance.pci_devices.objects = [ objects.PciDevice(address='0000:00:00.0', request_id=None), objects.PciDevice(address='0000:00:00.1', request_id=None) ] domain = FakeVirtDomain() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) guest = libvirt_guest.Guest(domain) drvr._detach_sriov_ports(self.context, instance, guest) expected_xml = [ ('\n' ' \n' '
\n' ' \n' '\n'), ('\n' ' \n' '
\n' ' \n' '\n') ] mock_detachDeviceFlags.has_calls([ mock.call(expected_xml[0], flags=1), mock.call(expected_xml[1], flags=1) ]) def test_resume(self): dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "") instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 1) block_device_info = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) guest = libvirt_guest.Guest('fake_dom') with test.nested( mock.patch.object(drvr, '_get_existing_domain_xml', return_value=dummyxml), mock.patch.object(drvr, '_create_domain_and_network', return_value=guest), mock.patch.object(drvr, '_attach_pci_devices'), mock.patch.object(pci_manager, 'get_instance_pci_devs', return_value='fake_pci_devs'), mock.patch.object(utils, 'get_image_from_system_metadata'), mock.patch.object(blockinfo, 'get_disk_info'), ) as (_get_existing_domain_xml, _create_domain_and_network, _attach_pci_devices, get_instance_pci_devs, get_image_metadata, get_disk_info): get_image_metadata.return_value = {'bar': 234} disk_info = {'foo': 123} get_disk_info.return_value = disk_info drvr.resume(self.context, instance, network_info, block_device_info) _get_existing_domain_xml.assert_has_calls([mock.call(instance, network_info, block_device_info)]) _create_domain_and_network.assert_has_calls([mock.call( self.context, dummyxml, instance, network_info, disk_info, block_device_info=block_device_info, vifs_already_plugged=True)]) _attach_pci_devices.assert_has_calls([mock.call(guest, 'fake_pci_devs')]) @mock.patch.object(host.Host, 'get_domain') @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files') @mock.patch.object(objects.Instance, 'save') def test_destroy_undefines(self, mock_save, mock_delete_instance_files, mock_get_info, mock_get_domain): dom_mock = mock.MagicMock() dom_mock.undefineFlags.return_value = 1 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) 
mock_get_domain.return_value = dom_mock mock_get_info.return_value = hardware.InstanceInfo( state=power_state.SHUTDOWN, id=-1) mock_delete_instance_files.return_value = None instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, []) mock_save.assert_called_once_with() @mock.patch.object(rbd_utils, 'RBDDriver') def test_cleanup_rbd(self, mock_driver): driver = mock_driver.return_value driver.cleanup_volumes = mock.Mock() fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr._cleanup_rbd(fake_instance) driver.cleanup_volumes.assert_called_once_with(fake_instance) @mock.patch.object(objects.Instance, 'save') def test_destroy_undefines_no_undefine_flags(self, mock_save): mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy() mock.undefineFlags(1).AndRaise(fakelibvirt.libvirtError('Err')) mock.ID().AndReturn(123) mock.undefine() self.mox.ReplayAll() def fake_get_domain(instance): return mock def fake_get_info(instance_name): return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1) def fake_delete_instance_files(instance): return None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr._host, 'get_domain', fake_get_domain) self.stubs.Set(drvr, 'get_info', fake_get_info) self.stubs.Set(drvr, 'delete_instance_files', fake_delete_instance_files) instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, []) mock_save.assert_called_once_with() @mock.patch.object(objects.Instance, 'save') def test_destroy_undefines_no_attribute_with_managed_save(self, mock_save): mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy() mock.undefineFlags(1).AndRaise(AttributeError()) mock.hasManagedSaveImage(0).AndReturn(True) mock.managedSaveRemove(0) mock.undefine() self.mox.ReplayAll() def fake_get_domain(instance): return mock def 
fake_get_info(instance_name): return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1) def fake_delete_instance_files(instance): return None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr._host, 'get_domain', fake_get_domain) self.stubs.Set(drvr, 'get_info', fake_get_info) self.stubs.Set(drvr, 'delete_instance_files', fake_delete_instance_files) instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, []) mock_save.assert_called_once_with() @mock.patch.object(objects.Instance, 'save') def test_destroy_undefines_no_attribute_no_managed_save(self, mock_save): mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy() mock.undefineFlags(1).AndRaise(AttributeError()) mock.hasManagedSaveImage(0).AndRaise(AttributeError()) mock.undefine() self.mox.ReplayAll() def fake_get_domain(self, instance): return mock def fake_get_info(instance_name): return hardware.InstanceInfo(state=power_state.SHUTDOWN) def fake_delete_instance_files(instance): return None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(host.Host, 'get_domain', fake_get_domain) self.stubs.Set(drvr, 'get_info', fake_get_info) self.stubs.Set(drvr, 'delete_instance_files', fake_delete_instance_files) instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, []) mock_save.assert_called_once_with() def test_destroy_timed_out(self): mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy().AndRaise(fakelibvirt.libvirtError("timed out")) self.mox.ReplayAll() def fake_get_domain(self, instance): return mock def fake_get_error_code(self): return fakelibvirt.VIR_ERR_OPERATION_TIMEOUT drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(host.Host, 'get_domain', fake_get_domain) self.stubs.Set(fakelibvirt.libvirtError, 'get_error_code', fake_get_error_code) instance = objects.Instance(**self.test_instance) 
self.assertRaises(exception.InstancePowerOffFailure, drvr.destroy, self.context, instance, []) def test_private_destroy_not_found(self): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "No such domain", error_code=fakelibvirt.VIR_ERR_NO_DOMAIN) mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy().AndRaise(ex) mock.info().AndRaise(ex) mock.UUIDString() self.mox.ReplayAll() def fake_get_domain(instance): return mock drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr._host, 'get_domain', fake_get_domain) instance = objects.Instance(**self.test_instance) # NOTE(vish): verifies destroy doesn't raise if the instance disappears drvr._destroy(instance) def test_private_destroy_lxc_processes_refused_to_die(self): self.flags(virt_type='lxc', group='libvirt') ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "", error_message="internal error: Some processes refused to die", error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR) conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(conn._host, 'get_domain') as mock_get_domain, \ mock.patch.object(conn, 'get_info') as mock_get_info: mock_domain = mock.MagicMock() mock_domain.ID.return_value = 1 mock_get_domain.return_value = mock_domain mock_domain.destroy.side_effect = ex mock_info = mock.MagicMock() mock_info.id = 1 mock_info.state = power_state.SHUTDOWN mock_get_info.return_value = mock_info instance = objects.Instance(**self.test_instance) conn._destroy(instance) def test_private_destroy_processes_refused_to_die_still_raises(self): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "", error_message="internal error: Some processes refused to die", error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR) conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(conn._host, 'get_domain') as mock_get_domain: mock_domain = mock.MagicMock() mock_domain.ID.return_value = 1 
mock_get_domain.return_value = mock_domain mock_domain.destroy.side_effect = ex instance = objects.Instance(**self.test_instance) self.assertRaises(fakelibvirt.libvirtError, conn._destroy, instance) def test_private_destroy_ebusy_timeout(self): # Tests that _destroy will retry 3 times to destroy the guest when an # EBUSY is raised, but eventually times out and raises the libvirtError ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, ("Failed to terminate process 26425 with SIGKILL: " "Device or resource busy"), error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR, int1=errno.EBUSY) mock_guest = mock.Mock(libvirt_guest.Guest, id=1) mock_guest.poweroff = mock.Mock(side_effect=ex) instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(drvr._host, 'get_guest', return_value=mock_guest): self.assertRaises(fakelibvirt.libvirtError, drvr._destroy, instance) self.assertEqual(3, mock_guest.poweroff.call_count) def test_private_destroy_ebusy_multiple_attempt_ok(self): # Tests that the _destroy attempt loop is broken when EBUSY is no # longer raised. 
ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, ("Failed to terminate process 26425 with SIGKILL: " "Device or resource busy"), error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR, int1=errno.EBUSY) mock_guest = mock.Mock(libvirt_guest.Guest, id=1) mock_guest.poweroff = mock.Mock(side_effect=[ex, None]) inst_info = hardware.InstanceInfo(power_state.SHUTDOWN, id=1) instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(drvr._host, 'get_guest', return_value=mock_guest): with mock.patch.object(drvr, 'get_info', return_value=inst_info): drvr._destroy(instance) self.assertEqual(2, mock_guest.poweroff.call_count) def test_undefine_domain_with_not_found_instance(self): def fake_get_domain(self, instance): raise exception.InstanceNotFound(instance_id=instance.uuid) self.stubs.Set(host.Host, 'get_domain', fake_get_domain) self.mox.StubOutWithMock(fakelibvirt.libvirtError, "get_error_code") self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) # NOTE(wenjianhn): verifies undefine doesn't raise if the # instance disappears drvr._undefine_domain(instance) @mock.patch.object(host.Host, "list_instance_domains") @mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid") @mock.patch.object(objects.InstanceList, "get_by_filters") def test_disk_over_committed_size_total(self, mock_get, mock_bdms, mock_list): # Ensure destroy calls managedSaveRemove for saved instance. 
class DiagFakeDomain(object): def __init__(self, name): self._name = name self._uuid = str(uuid.uuid4()) def ID(self): return 1 def name(self): return self._name def UUIDString(self): return self._uuid def XMLDesc(self, flags): return "" instance_domains = [ DiagFakeDomain("instance0000001"), DiagFakeDomain("instance0000002")] mock_list.return_value = instance_domains drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) fake_disks = {'instance0000001': [{'type': 'qcow2', 'path': '/somepath/disk1', 'virt_disk_size': '10737418240', 'backing_file': '/somepath/disk1', 'disk_size': '83886080', 'over_committed_disk_size': '10653532160'}], 'instance0000002': [{'type': 'raw', 'path': '/somepath/disk2', 'virt_disk_size': '0', 'backing_file': '/somepath/disk2', 'disk_size': '10737418240', 'over_committed_disk_size': '0'}]} def get_info(instance_name, xml, **kwargs): return fake_disks.get(instance_name) instance_uuids = [dom.UUIDString() for dom in instance_domains] instances = [objects.Instance( uuid=instance_uuids[0], root_device_name='/dev/vda'), objects.Instance( uuid=instance_uuids[1], root_device_name='/dev/vdb') ] mock_get.return_value = instances with mock.patch.object(drvr, "_get_instance_disk_info") as mock_info: mock_info.side_effect = get_info result = drvr._get_disk_over_committed_size_total() self.assertEqual(result, 10653532160) mock_list.assert_called_once_with() self.assertEqual(2, mock_info.call_count) filters = {'uuid': instance_uuids} mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True) mock_bdms.assert_called_with(mock.ANY, instance_uuids) @mock.patch.object(host.Host, "list_instance_domains") @mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid") @mock.patch.object(objects.InstanceList, "get_by_filters") def test_disk_over_committed_size_total_eperm(self, mock_get, mock_bdms, mock_list): # Ensure destroy calls managedSaveRemove for saved instance. 
class DiagFakeDomain(object): def __init__(self, name): self._name = name self._uuid = str(uuid.uuid4()) def ID(self): return 1 def name(self): return self._name def UUIDString(self): return self._uuid def XMLDesc(self, flags): return "" instance_domains = [ DiagFakeDomain("instance0000001"), DiagFakeDomain("instance0000002")] mock_list.return_value = instance_domains drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) fake_disks = {'instance0000001': [{'type': 'qcow2', 'path': '/somepath/disk1', 'virt_disk_size': '10737418240', 'backing_file': '/somepath/disk1', 'disk_size': '83886080', 'over_committed_disk_size': '10653532160'}], 'instance0000002': [{'type': 'raw', 'path': '/somepath/disk2', 'virt_disk_size': '0', 'backing_file': '/somepath/disk2', 'disk_size': '10737418240', 'over_committed_disk_size': '21474836480'}]} def side_effect(name, dom, block_device_info): if name == 'instance0000001': self.assertEqual('/dev/vda', block_device_info['root_device_name']) raise OSError(errno.EACCES, 'Permission denied') if name == 'instance0000002': self.assertEqual('/dev/vdb', block_device_info['root_device_name']) return fake_disks.get(name) get_disk_info = mock.Mock() get_disk_info.side_effect = side_effect drvr._get_instance_disk_info = get_disk_info instance_uuids = [dom.UUIDString() for dom in instance_domains] instances = [objects.Instance( uuid=instance_uuids[0], root_device_name='/dev/vda'), objects.Instance( uuid=instance_uuids[1], root_device_name='/dev/vdb') ] mock_get.return_value = instances result = drvr._get_disk_over_committed_size_total() self.assertEqual(21474836480, result) mock_list.assert_called_once_with() self.assertEqual(2, get_disk_info.call_count) filters = {'uuid': instance_uuids} mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True) mock_bdms.assert_called_with(mock.ANY, instance_uuids) @mock.patch.object(host.Host, "list_instance_domains", return_value=[mock.MagicMock(name='foo')]) 
@mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info", side_effect=exception.VolumeBDMPathNotFound(path='bar')) @mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid") @mock.patch.object(objects.InstanceList, "get_by_filters") def test_disk_over_committed_size_total_bdm_not_found(self, mock_get, mock_bdms, mock_get_disk_info, mock_list_domains): # Tests that we handle VolumeBDMPathNotFound gracefully. drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(0, drvr._get_disk_over_committed_size_total()) def test_cpu_info(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigCPU() cpu.model = "Opteron_G4" cpu.vendor = "AMD" cpu.arch = arch.X86_64 cpu.cells = 1 cpu.cores = 2 cpu.threads = 1 cpu.sockets = 4 cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic")) cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow")) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu guest = vconfig.LibvirtConfigGuest() guest.ostype = vm_mode.HVM guest.arch = arch.X86_64 guest.domtype = ["kvm"] caps.guests.append(guest) guest = vconfig.LibvirtConfigGuest() guest.ostype = vm_mode.HVM guest.arch = arch.I686 guest.domtype = ["kvm"] caps.guests.append(guest) return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) want = {"vendor": "AMD", "features": set(["extapic", "3dnow"]), "model": "Opteron_G4", "arch": arch.X86_64, "topology": {"cells": 1, "cores": 2, "threads": 1, "sockets": 4}} got = drvr._get_cpu_info() self.assertEqual(want, got) def test_get_pcidev_info(self): def fake_nodeDeviceLookupByName(self, name): return FakeNodeDevice(_fake_NodeDevXml[name]) self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name') host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object( 
fakelibvirt.Connection, 'getLibVersion') as mock_lib_version: mock_lib_version.return_value = ( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION) - 1) actualvf = drvr._get_pcidev_info("pci_0000_04_00_3") expect_vf = { "dev_id": "pci_0000_04_00_3", "address": "0000:04:00.3", "product_id": '1521', "numa_node": None, "vendor_id": '8086', "label": 'label_8086_1521', "dev_type": fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) actualvf = drvr._get_pcidev_info("pci_0000_04_10_7") expect_vf = { "dev_id": "pci_0000_04_10_7", "address": "0000:04:10.7", "product_id": '1520', "numa_node": None, "vendor_id": '8086', "label": 'label_8086_1520', "dev_type": fields.PciDeviceType.SRIOV_VF, "parent_addr": '0000:04:00.3', } self.assertEqual(expect_vf, actualvf) actualvf = drvr._get_pcidev_info("pci_0000_04_11_7") expect_vf = { "dev_id": "pci_0000_04_11_7", "address": "0000:04:11.7", "product_id": '1520', "vendor_id": '8086', "numa_node": 0, "label": 'label_8086_1520', "dev_type": fields.PciDeviceType.SRIOV_VF, "parent_addr": '0000:04:00.3', } self.assertEqual(expect_vf, actualvf) with mock.patch.object( pci_utils, 'is_physical_function', return_value=True): actualvf = drvr._get_pcidev_info("pci_0000_04_00_1") expect_vf = { "dev_id": "pci_0000_04_00_1", "address": "0000:04:00.1", "product_id": '1013', "numa_node": 0, "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) with mock.patch.object( pci_utils, 'is_physical_function', return_value=False): actualvf = drvr._get_pcidev_info("pci_0000_04_00_1") expect_vf = { "dev_id": "pci_0000_04_00_1", "address": "0000:04:00.1", "product_id": '1013', "numa_node": 0, "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": fields.PciDeviceType.STANDARD, } self.assertEqual(expect_vf, actualvf) with mock.patch.object( fakelibvirt.Connection, 'getLibVersion') as mock_lib_version: 
mock_lib_version.return_value = ( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION)) actualvf = drvr._get_pcidev_info("pci_0000_03_00_0") expect_vf = { "dev_id": "pci_0000_03_00_0", "address": "0000:03:00.0", "product_id": '1013', "numa_node": 0, "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) actualvf = drvr._get_pcidev_info("pci_0000_03_00_1") expect_vf = { "dev_id": "pci_0000_03_00_1", "address": "0000:03:00.1", "product_id": '1013', "numa_node": 0, "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) def test_list_devices_not_supported(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Handle just the NO_SUPPORT error not_supported_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'this function is not supported by the connection driver:' ' virNodeNumOfDevices', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) with mock.patch.object(drvr._conn, 'listDevices', side_effect=not_supported_exc): self.assertEqual('[]', drvr._get_pci_passthrough_devices()) # We cache not supported status to avoid emitting too many logging # messages. Clear this value to test the other exception case. 
del drvr._list_devices_supported # Other errors should not be caught other_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'other exc', error_code=fakelibvirt.VIR_ERR_NO_DOMAIN) with mock.patch.object(drvr._conn, 'listDevices', side_effect=other_exc): self.assertRaises(fakelibvirt.libvirtError, drvr._get_pci_passthrough_devices) def test_get_pci_passthrough_devices(self): def fakelistDevices(caps, fakeargs=0): return ['pci_0000_04_00_3', 'pci_0000_04_10_7', 'pci_0000_04_11_7'] self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn') libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices def fake_nodeDeviceLookupByName(self, name): return FakeNodeDevice(_fake_NodeDevXml[name]) self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name') host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) actjson = drvr._get_pci_passthrough_devices() expectvfs = [ { "dev_id": "pci_0000_04_00_3", "address": "0000:04:00.3", "product_id": '1521', "vendor_id": '8086', "dev_type": fields.PciDeviceType.SRIOV_PF, "phys_function": None, "numa_node": None}, { "dev_id": "pci_0000_04_10_7", "domain": 0, "address": "0000:04:10.7", "product_id": '1520', "vendor_id": '8086', "numa_node": None, "dev_type": fields.PciDeviceType.SRIOV_VF, "phys_function": [('0x0000', '0x04', '0x00', '0x3')]}, { "dev_id": "pci_0000_04_11_7", "domain": 0, "address": "0000:04:11.7", "product_id": '1520', "vendor_id": '8086', "numa_node": 0, "dev_type": fields.PciDeviceType.SRIOV_VF, "phys_function": [('0x0000', '0x04', '0x00', '0x3')], } ] actualvfs = jsonutils.loads(actjson) for dev in range(len(actualvfs)): for key in actualvfs[dev].keys(): if key not in ['phys_function', 'virt_functions', 'label']: self.assertEqual(expectvfs[dev][key], actualvfs[dev][key]) def _fake_caps_numa_topology(self, cells_per_host=4, sockets_per_cell=1, cores_per_socket=1, threads_per_core=2, kb_mem=1048576): # Generate mempages list per 
cell cell_mempages = list() for cellid in range(cells_per_host): mempages_0 = vconfig.LibvirtConfigCapsNUMAPages() mempages_0.size = 4 mempages_0.total = 1024 * cellid mempages_1 = vconfig.LibvirtConfigCapsNUMAPages() mempages_1.size = 2048 mempages_1.total = 0 + cellid cell_mempages.append([mempages_0, mempages_1]) topology = fakelibvirt.HostInfo._gen_numa_topology(cells_per_host, sockets_per_cell, cores_per_socket, threads_per_core, kb_mem=kb_mem, numa_mempages_list=cell_mempages) return topology def _test_get_host_numa_topology(self, mempages): caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = arch.X86_64 caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) expected_topo_dict = {'cells': [ {'cpus': '0,1', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 0}, {'cpus': '3', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 1}, {'cpus': '', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 2}, {'cpus': '', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 3}]} with test.nested( mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([0, 1, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set([0, 1, 2, 3, 6])), ): got_topo = drvr._get_host_numa_topology() got_topo_dict = got_topo._to_dict() self.assertThat( expected_topo_dict, matchers.DictMatches(got_topo_dict)) if mempages: # cells 0 self.assertEqual(4, got_topo.cells[0].mempages[0].size_kb) self.assertEqual(0, got_topo.cells[0].mempages[0].total) self.assertEqual(2048, got_topo.cells[0].mempages[1].size_kb) self.assertEqual(0, got_topo.cells[0].mempages[1].total) # cells 1 self.assertEqual(4, got_topo.cells[1].mempages[0].size_kb) self.assertEqual(1024, got_topo.cells[1].mempages[0].total) self.assertEqual(2048, 
got_topo.cells[1].mempages[1].size_kb) self.assertEqual(1, got_topo.cells[1].mempages[1].total) else: self.assertEqual([], got_topo.cells[0].mempages) self.assertEqual([], got_topo.cells[1].mempages) self.assertEqual(expected_topo_dict, got_topo_dict) self.assertEqual(set([]), got_topo.cells[0].pinned_cpus) self.assertEqual(set([]), got_topo.cells[1].pinned_cpus) self.assertEqual(set([]), got_topo.cells[2].pinned_cpus) self.assertEqual(set([]), got_topo.cells[3].pinned_cpus) self.assertEqual([set([0, 1])], got_topo.cells[0].siblings) self.assertEqual([], got_topo.cells[1].siblings) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_host_numa_topology(self, mock_version): self._test_get_host_numa_topology(mempages=True) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') def test_get_host_numa_topology_no_mempages(self, mock_lib_version, mock_version, mock_type): self.flags(virt_type='kvm', group='libvirt') mock_lib_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1 mock_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) mock_type.return_value = host.HV_DRIVER_QEMU self._test_get_host_numa_topology(mempages=False) def test_get_host_numa_topology_empty(self): caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = arch.X86_64 caps.host.topology = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps) ) as (has_min_version, get_caps): self.assertIsNone(drvr._get_host_numa_topology()) self.assertEqual(2, get_caps.call_count) 
@mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') def test_get_host_numa_topology_old_version(self, mock_lib_version, mock_version, mock_type): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_lib_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1 mock_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) mock_type.return_value = host.HV_DRIVER_QEMU self.assertIsNone(drvr._get_host_numa_topology()) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') def test_get_host_numa_topology_xen(self, mock_lib_version, mock_version, mock_type): self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_lib_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) mock_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) mock_type.return_value = host.HV_DRIVER_XEN self.assertIsNone(drvr._get_host_numa_topology()) def test_diagnostic_vcpus_exception(self): xml = """ """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): raise fakelibvirt.libvirtError('vcpus missing') def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_blockstats_exception(self): xml = """ """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): raise fakelibvirt.libvirtError('blockStats missing') def interfaceStats(self, path): 
return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_interfacestats_exception(self): xml = """ """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): return (169, 688640, 0, 0, -1) def 
interfaceStats(self, path):
            # interfaceStats is stubbed to fail so the test can verify
            # that diagnostics are still collected without nic stats.
            raise fakelibvirt.libvirtError('interfaceStat missing')

        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}

        def maxMemory(self):
            return 280160

    def fake_get_domain(self, instance):
        return DiagFakeDomain()

    self.stubs.Set(host.Host, "get_domain", fake_get_domain)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    # Legacy diagnostics: no vnet* keys because interfaceStats raised,
    # but cpu/disk/memory figures are still present.
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              }
    self.assertEqual(actual, expect)

    # Pin "now" to 10 seconds after launch so reported uptime is 10.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    self.useFixture(utils_fixture.TimeFixture(diags_time))

    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())

def test_diagnostic_memorystats_exception(self):
    # A libvirtError from memoryStats() must not break diagnostics
    # collection; memory usage details are simply omitted.
    xml = """ """

    class DiagFakeDomain(FakeVirtDomain):

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])

        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)

        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)

        def memoryStats(self):
            # Simulated failure under test.
            raise fakelibvirt.libvirtError('memoryStats missing')

        def maxMemory(self):
            return 280160

    def fake_get_domain(self, instance):
        return DiagFakeDomain()

    self.stubs.Set(host.Host, "get_domain", fake_get_domain)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    # 'memory' (from maxMemory) is reported, but the memoryStats-derived
    # 'memory-actual'/'memory-rss' keys are absent.
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)

    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    self.useFixture(utils_fixture.TimeFixture(diags_time))

    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())

def test_diagnostic_full(self):
    # Happy path: every libvirt stats call succeeds, so both the legacy
    # and the versioned diagnostics contain cpu, disk, nic and memory
    # data.
    xml = """ """

    class DiagFakeDomain(FakeVirtDomain):

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])

        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)

        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)

        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}

        def maxMemory(self):
            return 280160

    def fake_get_domain(self, instance):
        return DiagFakeDomain()

    self.stubs.Set(host.Host, "get_domain", fake_get_domain)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)

    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    self.useFixture(utils_fixture.TimeFixture(diags_time))

    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())

@mock.patch.object(host.Host, 'get_domain')
def test_diagnostic_full_with_multiple_interfaces(self,
                                                  mock_get_domain):
    # Same as test_diagnostic_full, but with two guest interfaces
    # (vnet0 and br0); both must show up in the diagnostics.
    xml = """ """

    class DiagFakeDomain(FakeVirtDomain):

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])

        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)

        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)

        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}

        def maxMemory(self):
            return 280160

    def fake_get_domain(self):
        return DiagFakeDomain()

    mock_get_domain.side_effect = fake_get_domain

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              'br0_rx': 4408,
              'br0_rx_drop': 0,
              'br0_rx_errors': 0,
              'br0_rx_packets': 82,
              'br0_tx': 0,
              'br0_tx_drop': 0,
              'br0_tx_errors': 0,
              'br0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)

    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    self.useFixture(utils_fixture.TimeFixture(diags_time))

    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0},
                                {'mac_address': '53:55:00:a5:39:39',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                'uptime': 10.,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())

@mock.patch.object(host.Host, "list_instance_domains")
def test_failing_vcpu_count(self, mock_list):
    """Domain can fail to return the vcpu description in case it's
    just starting up or shutting down. Make sure None is handled
    gracefully.
""" class DiagFakeDomain(object): def __init__(self, vcpus): self._vcpus = vcpus def vcpus(self): if self._vcpus is None: raise fakelibvirt.libvirtError("fake-error") else: return ([[1, 2, 3, 4]] * self._vcpus, [True] * self._vcpus) def ID(self): return 1 def name(self): return "instance000001" def UUIDString(self): return "19479fee-07a5-49bb-9138-d3738280d63c" mock_list.return_value = [ DiagFakeDomain(None), DiagFakeDomain(5)] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(5, drvr._get_vcpu_used()) mock_list.assert_called_with(only_guests=True, only_running=True) @mock.patch.object(host.Host, "list_instance_domains") def test_failing_vcpu_count_none(self, mock_list): """Domain will return zero if the current number of vcpus used is None. This is in case of VM state starting up or shutting down. None type returned is counted as zero. """ class DiagFakeDomain(object): def __init__(self): pass def vcpus(self): return None def ID(self): return 1 def name(self): return "instance000001" mock_list.return_value = [DiagFakeDomain()] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(0, drvr._get_vcpu_used()) mock_list.assert_called_with(only_guests=True, only_running=True) def test_get_instance_capabilities(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) def get_host_capabilities_stub(self): caps = vconfig.LibvirtConfigCaps() guest = vconfig.LibvirtConfigGuest() guest.ostype = 'hvm' guest.arch = arch.X86_64 guest.domtype = ['kvm', 'qemu'] caps.guests.append(guest) guest = vconfig.LibvirtConfigGuest() guest.ostype = 'hvm' guest.arch = arch.I686 guest.domtype = ['kvm'] caps.guests.append(guest) return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) want = [(arch.X86_64, 'kvm', 'hvm'), (arch.X86_64, 'qemu', 'hvm'), (arch.I686, 'kvm', 'hvm')] got = drvr._get_instance_capabilities() self.assertEqual(want, got) def test_set_cache_mode(self): 
self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    fake_conf = FakeConfigGuestDisk()

    # A configured 'file=directsync' mapping is applied to file-backed
    # disk config objects.
    fake_conf.source_type = 'file'
    drvr._set_cache_mode(fake_conf)
    self.assertEqual(fake_conf.driver_cache, 'directsync')

def test_set_cache_mode_invalid_mode(self):
    # An unknown cache mode in disk_cachemodes is ignored: the disk
    # config ends up with no driver_cache set.
    self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    fake_conf = FakeConfigGuestDisk()

    fake_conf.source_type = 'file'
    drvr._set_cache_mode(fake_conf)
    self.assertIsNone(fake_conf.driver_cache)

def test_set_cache_mode_invalid_object(self):
    # A config object that is not a disk is left untouched.
    self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    fake_conf = FakeConfigGuest()

    fake_conf.driver_cache = 'fake'
    drvr._set_cache_mode(fake_conf)
    self.assertEqual(fake_conf.driver_cache, 'fake')

@mock.patch('os.unlink')
@mock.patch.object(os.path, 'exists')
def _test_shared_storage_detection(self, is_same, mock_exists,
                                   mock_unlink):
    # Shared storage is detected by creating a probe file on the remote
    # host via remotefs and checking whether it appears locally.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    drvr.get_host_ip_addr = mock.MagicMock(return_value='bar')
    mock_exists.return_value = is_same
    with test.nested(
            mock.patch.object(drvr._remotefs, 'create_file'),
            mock.patch.object(drvr._remotefs, 'remove_file')
    ) as (mock_rem_fs_create, mock_rem_fs_remove):
        result = drvr._is_storage_shared_with('host', '/path')
    mock_rem_fs_create.assert_any_call('host', mock.ANY)
    create_args, create_kwargs = mock_rem_fs_create.call_args
    self.assertTrue(create_args[1].startswith('/path'))
    if is_same:
        # Shared: the probe showed up locally, so it is removed locally.
        mock_unlink.assert_called_once_with(mock.ANY)
    else:
        # Not shared: the probe is cleaned up on the remote host.
        mock_rem_fs_remove.assert_called_with('host', mock.ANY)
        remove_args, remove_kwargs = mock_rem_fs_remove.call_args
        self.assertTrue(remove_args[1].startswith('/path'))
    return result

def test_shared_storage_detection_same_host(self):
    self.assertTrue(self._test_shared_storage_detection(True))

def test_shared_storage_detection_different_host(self):
    self.assertFalse(self._test_shared_storage_detection(False))

def test_shared_storage_detection_easy(self):
    # Same resolved host IP means shared storage without probing:
    # none of the stubbed-out file operations are replayed.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.mox.StubOutWithMock(drvr, 'get_host_ip_addr')
    self.mox.StubOutWithMock(utils, 'execute')
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(os, 'unlink')
    drvr.get_host_ip_addr().AndReturn('foo')
    self.mox.ReplayAll()
    self.assertTrue(drvr._is_storage_shared_with('foo', '/path'))

def test_store_pid_remove_pid(self):
    # The job tracker records a pid per instance uuid and drops the
    # uuid entry entirely once its last job is removed.
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    popen = mock.Mock(pid=3)
    drvr.job_tracker.add_job(instance, popen.pid)
    self.assertIn(3, drvr.job_tracker.jobs[instance.uuid])
    drvr.job_tracker.remove_job(instance, popen.pid)
    self.assertNotIn(instance.uuid, drvr.job_tracker.jobs)

@mock.patch('nova.virt.libvirt.host.Host.get_domain')
def test_get_domain_info_with_more_return(self, mock_get_domain):
    # domain.info() may return extra trailing fields; only the first
    # five plus the domain ID are consumed by get_info().
    instance = objects.Instance(**self.test_instance)
    dom_mock = mock.MagicMock()
    dom_mock.info.return_value = [
        1, 2048, 737, 8, 12345, 888888
    ]
    dom_mock.ID.return_value = mock.sentinel.instance_id
    mock_get_domain.return_value = dom_mock
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    info = drvr.get_info(instance)
    self.assertEqual(1, info.state)
    self.assertEqual(2048, info.max_mem_kb)
    self.assertEqual(737, info.mem_kb)
    self.assertEqual(8, info.num_cpu)
    self.assertEqual(12345, info.cpu_time_ns)
    self.assertEqual(mock.sentinel.instance_id, info.id)
    dom_mock.info.assert_called_once_with()
    dom_mock.ID.assert_called_once_with()
    mock_get_domain.assert_called_once_with(instance)

def test_create_domain(self):
    # _create_domain launches the domain with flags=0 and wraps it in a
    # Guest object.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_domain = mock.MagicMock()

    guest = drvr._create_domain(domain=mock_domain)

    self.assertEqual(mock_domain, guest._domain)
    mock_domain.createWithFlags.assert_has_calls([mock.call(0)])

@mock.patch('nova.virt.disk.api.clean_lxc_namespace')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
                           mock_setup_container, mock_get_info,
                           mock_clean):
    # LXC path: the container rootfs is set up, the nbd device name is
    # stored in system_metadata, and the namespace is cleaned up once
    # the guest is reported RUNNING.
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_instance = mock.MagicMock()
    inst_sys_meta = dict()
    mock_instance.system_metadata = inst_sys_meta
    mock_get_inst_path.return_value = '/tmp/'
    mock_image_backend = mock.MagicMock()
    drvr.image_backend = mock_image_backend
    mock_image = mock.MagicMock()
    mock_image.path = '/tmp/test.img'
    drvr.image_backend.image.return_value = mock_image
    mock_setup_container.return_value = '/dev/nbd0'
    mock_get_info.return_value = hardware.InstanceInfo(
        state=power_state.RUNNING)

    with test.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver,
                              'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver,
                              'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver,
                              'apply_instance_filter')):
        drvr._create_domain_and_network(self.context, 'xml',
                                        mock_instance, [], None)

    self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
    self.assertFalse(mock_instance.called)
    mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
    mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
    drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                         'disk')])

    setup_container_call = mock.call(
        mock_image.get_model(),
        container_dir='/tmp/rootfs')
    mock_setup_container.assert_has_calls([setup_container_call])
    mock_get_info.assert_has_calls([mock.call(mock_instance)])
    mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])

@mock.patch('nova.virt.disk.api.clean_lxc_namespace')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
                                   mock_ensure_tree,
                                   mock_setup_container,
                                   mock_chown, mock_get_info,
                                   mock_clean):
    # Same as test_create_domain_lxc, but with uid/gid maps configured:
    # the rootfs ownership must be remapped via chown_for_id_maps.
    self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
               gid_maps=["0:1000:100"], group='libvirt')

    def chown_side_effect(path, id_maps):
        # Verify the id map objects built from the flags above.
        self.assertEqual('/tmp/rootfs', path)
        self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
        self.assertEqual(0, id_maps[0].start)
        self.assertEqual(1000, id_maps[0].target)
        self.assertEqual(100, id_maps[0].count)
        self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
        self.assertEqual(0, id_maps[1].start)
        self.assertEqual(1000, id_maps[1].target)
        self.assertEqual(100, id_maps[1].count)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_instance = mock.MagicMock()
    inst_sys_meta = dict()
    mock_instance.system_metadata = inst_sys_meta
    mock_get_inst_path.return_value = '/tmp/'
    mock_image_backend = mock.MagicMock()
    drvr.image_backend = mock_image_backend
    mock_image = mock.MagicMock()
    mock_image.path = '/tmp/test.img'
    drvr.image_backend.image.return_value = mock_image
    mock_setup_container.return_value = '/dev/nbd0'
    mock_chown.side_effect = chown_side_effect
    mock_get_info.return_value = hardware.InstanceInfo(
        state=power_state.RUNNING)

    with test.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver,
                              'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver,
                              'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver,
                              'apply_instance_filter')
    ) as (
            mock_create_images_and_backing,
            mock_is_booted_from_volume,
            mock_create_domain,
            mock_plug_vifs,
            mock_setup_basic_filtering,
            mock_prepare_instance_filter,
            mock_apply_instance_filter
    ):
        drvr._create_domain_and_network(self.context, 'xml',
                                        mock_instance, [], None)

    self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
    self.assertFalse(mock_instance.called)
    mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
    mock_is_booted_from_volume.assert_called_once_with(mock_instance, {})
    mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
    drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                         'disk')])

    setup_container_call = mock.call(
        mock_image.get_model(),
        container_dir='/tmp/rootfs')
    mock_setup_container.assert_has_calls([setup_container_call])
    mock_get_info.assert_has_calls([mock.call(mock_instance)])
    mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])

@mock.patch('nova.virt.disk.api.teardown_container')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_create_domain_lxc_not_running(self, mock_get_inst_path,
                                       mock_ensure_tree,
                                       mock_setup_container,
                                       mock_get_info, mock_teardown):
    # If the LXC guest fails to come up (SHUTDOWN state), the container
    # is torn down instead of the namespace being cleaned.
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_instance = mock.MagicMock()
    inst_sys_meta = dict()
    mock_instance.system_metadata = inst_sys_meta
    mock_get_inst_path.return_value = '/tmp/'
    mock_image_backend = mock.MagicMock()
    drvr.image_backend = mock_image_backend
    mock_image = mock.MagicMock()
    mock_image.path = '/tmp/test.img'
    drvr.image_backend.image.return_value = mock_image
    mock_setup_container.return_value = '/dev/nbd0'
    mock_get_info.return_value = hardware.InstanceInfo(
        state=power_state.SHUTDOWN)

    with test.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver,
                              'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver,
                              'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver,
                              'apply_instance_filter')):
        drvr._create_domain_and_network(self.context, 'xml',
                                        mock_instance, [], None)

    self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
    self.assertFalse(mock_instance.called)
    mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
    mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
    drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                         'disk')])

    setup_container_call = mock.call(
        mock_image.get_model(),
        container_dir='/tmp/rootfs')
    mock_setup_container.assert_has_calls([setup_container_call])
    mock_get_info.assert_has_calls([mock.call(mock_instance)])
    teardown_call = mock.call(container_dir='/tmp/rootfs')
    mock_teardown.assert_has_calls([teardown_call])

def test_create_domain_define_xml_fails(self):
    """Tests that the xml is logged when defining the domain fails."""
    fake_xml = "this is a test"

    def fake_defineXML(xml):
        # Fail the define call after confirming the xml passed through.
        self.assertEqual(fake_xml, xml)
        raise fakelibvirt.libvirtError('virDomainDefineXML() failed')

    def fake_safe_decode(text, *args, **kwargs):
        return text + 'safe decoded'

    self.log_error_called = False

    def fake_error(msg, *args, **kwargs):
        self.log_error_called = True
        self.assertIn(fake_xml, msg % args)
        self.assertIn('safe decoded', msg % args)

    self.stubs.Set(encodeutils, 'safe_decode', fake_safe_decode)
    self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

    self.create_fake_libvirt_mock(defineXML=fake_defineXML)
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                      fake_xml)
    self.assertTrue(self.log_error_called)

def test_create_domain_with_flags_fails(self):
    """Tests that the xml is logged when creating the domain with flags
    fails
    """
    fake_xml = "this is a test"
    fake_domain = FakeVirtDomain(fake_xml)

    def fake_createWithFlags(launch_flags):
        raise fakelibvirt.libvirtError('virDomainCreateWithFlags() failed')

    self.log_error_called = False

    def fake_error(msg, *args, **kwargs):
        self.log_error_called = True
        self.assertIn(fake_xml, msg % args)

    self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
    self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

    self.create_fake_libvirt_mock()
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                      domain=fake_domain)
    self.assertTrue(self.log_error_called)

def test_create_domain_enable_hairpin_fails(self):
    """Tests that the xml is logged when enabling hairpin mode for the
    domain fails.
    """
    fake_xml = "this is a test"
    fake_domain = FakeVirtDomain(fake_xml)

    def fake_execute(*args, **kwargs):
        raise processutils.ProcessExecutionError('error')

    def fake_get_interfaces(*args):
        return ["dev"]

    self.log_error_called = False

    def fake_error(msg, *args, **kwargs):
        self.log_error_called = True
        self.assertIn(fake_xml, msg % args)

    self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

    self.create_fake_libvirt_mock()
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.stubs.Set(nova.utils, 'execute', fake_execute)
    self.stubs.Set(
        nova.virt.libvirt.guest.Guest, 'get_interfaces',
        fake_get_interfaces)

    # power_on=False skips domain start; the failure comes from the
    # hairpin-enabling execute() call.
    self.assertRaises(processutils.ProcessExecutionError,
                      drvr._create_domain,
                      domain=fake_domain,
                      power_on=False)
    self.assertTrue(self.log_error_called)

def test_get_vnc_console(self):
    instance = objects.Instance(**self.test_instance)
    # NOTE(review): the original XML fixture content was stripped by
    # extraction; presumably it carried a vnc graphics element with
    # port 5900 -- confirm against the upstream test.
    dummyxml = ("instance-0000000a"
                ""
                ""
                "")

    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        if instance_name == instance['name']:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    vnc_dict = drvr.get_vnc_console(self.context, instance)
    self.assertEqual(vnc_dict.port, '5900')

def test_get_vnc_console_unavailable(self):
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("instance-0000000a"
                "")

    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        if instance_name == instance['name']:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.ConsoleTypeUnavailable,
                      drvr.get_vnc_console, self.context, instance)

def test_get_spice_console(self):
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("instance-0000000a"
                ""
                ""
                "")

    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        if instance_name == instance['name']:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    spice_dict = drvr.get_spice_console(self.context, instance)
    self.assertEqual(spice_dict.port, '5950')

def test_get_spice_console_unavailable(self):
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("instance-0000000a"
                "")

    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        if instance_name == instance['name']:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.ConsoleTypeUnavailable,
                      drvr.get_spice_console, self.context, instance)

def test_detach_volume_with_instance_not_found(self):
    # Test that detach_volume() method does not raise exception,
    # if the instance does not exist.
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with test.nested(
            mock.patch.object(host.Host, 'get_domain',
                              side_effect=exception.InstanceNotFound(
                                  instance_id=instance.uuid)),
            mock.patch.object(drvr, '_disconnect_volume')
    ) as (_get_domain, _disconnect_volume):
        connection_info = {'driver_volume_type': 'fake'}
        drvr.detach_volume(connection_info, instance, '/dev/sda')
        _get_domain.assert_called_once_with(instance)
        _disconnect_volume.assert_called_once_with(connection_info,
                                                   'sda')

def _test_attach_detach_interface_get_config(self, method_name):
    """Tests that the get_config() method is properly called in
    attach_interface() and detach_interface().

    method_name: either \"attach_interface\" or \"detach_interface\"
                 depending on the method to test.
""" self.stubs.Set(host.Host, "get_domain", lambda a, b: FakeVirtDomain()) instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 1) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_image_meta = objects.ImageMeta.from_dict( {'id': instance['image_ref']}) if method_name == "attach_interface": self.mox.StubOutWithMock(drvr.firewall_driver, 'setup_basic_filtering') drvr.firewall_driver.setup_basic_filtering(instance, network_info) expected = drvr.vif_driver.get_config(instance, network_info[0], fake_image_meta, instance.get_flavor(), CONF.libvirt.virt_type, drvr._host) self.mox.StubOutWithMock(drvr.vif_driver, 'get_config') drvr.vif_driver.get_config(instance, network_info[0], mox.IsA(objects.ImageMeta), mox.IsA(objects.Flavor), CONF.libvirt.virt_type, drvr._host).\ AndReturn(expected) self.mox.ReplayAll() if method_name == "attach_interface": drvr.attach_interface(instance, fake_image_meta, network_info[0]) elif method_name == "detach_interface": drvr.detach_interface(instance, network_info[0]) else: raise ValueError("Unhandled method %s" % method_name) @mock.patch.object(lockutils, "external_lock") def test_attach_interface_get_config(self, mock_lock): """Tests that the get_config() method is properly called in attach_interface(). """ mock_lock.return_value = threading.Semaphore() self._test_attach_detach_interface_get_config("attach_interface") def test_detach_interface_get_config(self): """Tests that the get_config() method is properly called in detach_interface(). 
""" self._test_attach_detach_interface_get_config("detach_interface") def test_default_root_device_name(self): instance = {'uuid': 'fake_instance'} image_meta = objects.ImageMeta.from_dict({'id': 'fake'}) root_bdm = {'source_type': 'image', 'detination_type': 'volume', 'image_id': 'fake_id'} self.flags(virt_type='fake_libvirt_type', group='libvirt') self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type') self.mox.StubOutWithMock(blockinfo, 'get_root_info') blockinfo.get_disk_bus_for_device_type(instance, 'fake_libvirt_type', image_meta, 'disk').InAnyOrder().\ AndReturn('virtio') blockinfo.get_disk_bus_for_device_type(instance, 'fake_libvirt_type', image_meta, 'cdrom').InAnyOrder().\ AndReturn('ide') blockinfo.get_root_info(instance, 'fake_libvirt_type', image_meta, root_bdm, 'virtio', 'ide').AndReturn({'dev': 'vda'}) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(drvr.default_root_device_name(instance, image_meta, root_bdm), '/dev/vda') @mock.patch.object(objects.BlockDeviceMapping, "save") def test_default_device_names_for_instance(self, save_mock): instance = objects.Instance(**self.test_instance) instance.root_device_name = '/dev/vda' ephemerals = [objects.BlockDeviceMapping( **fake_block_device.AnonFakeDbBlockDeviceDict( {'device_name': 'vdb', 'source_type': 'blank', 'volume_size': 2, 'destination_type': 'local'}))] swap = [objects.BlockDeviceMapping( **fake_block_device.AnonFakeDbBlockDeviceDict( {'device_name': 'vdg', 'source_type': 'blank', 'volume_size': 512, 'guest_format': 'swap', 'destination_type': 'local'}))] block_device_mapping = [ objects.BlockDeviceMapping( **fake_block_device.AnonFakeDbBlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-image-id', 'device_name': '/dev/vdxx', 'disk_bus': 'scsi'}))] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.default_device_names_for_instance(instance, instance.root_device_name, 
ephemerals, swap, block_device_mapping) # Ephemeral device name was correct so no changes self.assertEqual('/dev/vdb', ephemerals[0].device_name) # Swap device name was incorrect so it was changed self.assertEqual('/dev/vdc', swap[0].device_name) # Volume device name was changed too, taking the bus into account self.assertEqual('/dev/sda', block_device_mapping[0].device_name) self.assertEqual(3, save_mock.call_count) def _test_get_device_name_for_instance(self, new_bdm, expected_dev): instance = objects.Instance(**self.test_instance) instance.root_device_name = '/dev/vda' instance.ephemeral_gb = 0 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) got_dev = drvr.get_device_name_for_instance( instance, [], new_bdm) self.assertEqual(expected_dev, got_dev) def test_get_device_name_for_instance_simple(self): new_bdm = objects.BlockDeviceMapping( context=context, source_type='volume', destination_type='volume', boot_index=-1, volume_id='fake-id', device_name=None, guest_format=None, disk_bus=None, device_type=None) self._test_get_device_name_for_instance(new_bdm, '/dev/vdb') def test_get_device_name_for_instance_suggested(self): new_bdm = objects.BlockDeviceMapping( context=context, source_type='volume', destination_type='volume', boot_index=-1, volume_id='fake-id', device_name='/dev/vdg', guest_format=None, disk_bus=None, device_type=None) self._test_get_device_name_for_instance(new_bdm, '/dev/vdb') def test_get_device_name_for_instance_bus(self): new_bdm = objects.BlockDeviceMapping( context=context, source_type='volume', destination_type='volume', boot_index=-1, volume_id='fake-id', device_name=None, guest_format=None, disk_bus='scsi', device_type=None) self._test_get_device_name_for_instance(new_bdm, '/dev/sda') def test_get_device_name_for_instance_device_type(self): new_bdm = objects.BlockDeviceMapping( context=context, source_type='volume', destination_type='volume', boot_index=-1, volume_id='fake-id', device_name=None, guest_format=None, 
        disk_bus=None, device_type='floppy')
    self._test_get_device_name_for_instance(new_bdm, '/dev/fda')

def test_is_supported_fs_format(self):
    # All ext2/3/4 and xfs formats are supported; empty or unknown
    # format strings are not.
    supported_fs = [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
                    disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for fs in supported_fs:
        self.assertTrue(drvr.is_supported_fs_format(fs))

    supported_fs = ['', 'dummy']
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for fs in supported_fs:
        self.assertFalse(drvr.is_supported_fs_format(fs))

def test_post_live_migration_at_destination_with_block_device_info(self):
    # Preparing mocks
    mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
    self.resultXML = None

    def fake_getLibVersion():
        return fakelibvirt.FAKE_LIBVIRT_VERSION

    def fake_getCapabilities():
        # NOTE(review): the capabilities XML body was stripped by
        # extraction; only the host uuid/arch/cpu tokens remain.
        return """ cef19ce0-0ca2-11df-855d-b19fbce37686 x86_64 Penryn Intel """

    def fake_to_xml(context, instance, network_info, disk_info,
                    image_meta=None, rescue=None,
                    block_device_info=None, write_to_disk=False):
        # Capture the generated guest XML so the test can inspect it.
        if image_meta is None:
            image_meta = objects.ImageMeta.from_dict({})
        conf = drvr._get_guest_config(instance, network_info, image_meta,
                                      disk_info, rescue,
                                      block_device_info)
        self.resultXML = conf.to_xml()
        return self.resultXML

    def fake_get_domain(instance):
        return mock_domain

    def fake_baselineCPU(cpu, flag):
        return """ Westmere Intel """

    network_info = _fake_network_info(self, 1)
    self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                  getCapabilities=fake_getCapabilities,
                                  getVersion=lambda: 1005001,
                                  listDefinedDomains=lambda: [],
                                  numOfDomains=lambda: 0,
                                  baselineCPU=fake_baselineCPU)
    instance_ref = self.test_instance
    instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
    instance = objects.Instance(**instance_ref)

    self.mox.ReplayAll()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, '_get_guest_xml', fake_to_xml)
    self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
    bdm = objects.BlockDeviceMapping(
        self.context,
        **fake_block_device.FakeDbBlockDeviceDict(
            {'id': 1, 'guest_format': None,
             'boot_index': 0,
             'source_type': 'volume',
             'destination_type': 'volume',
             'device_name': '/dev/vda',
             'disk_bus': 'virtio',
             'device_type': 'disk',
             'delete_on_termination': False}))
    block_device_info = {'block_device_mapping':
                         driver_block_device.convert_volumes([bdm])}
    block_device_info['block_device_mapping'][0]['connection_info'] = (
        {'driver_volume_type': 'iscsi'})
    with test.nested(
            mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'),
            mock.patch.object(objects.Instance, 'save')
    ) as (mock_volume_save, mock_instance_save):
        drvr.post_live_migration_at_destination(
            self.context, instance, network_info, True,
            block_device_info=block_device_info)
        self.assertIn('fake', self.resultXML)
        mock_volume_save.assert_called_once_with()

def test_create_propagates_exceptions(self):
    # A failure in _create_domain must bubble out of
    # _create_domain_and_network unchanged.
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(id=1, uuid='fake-uuid',
                                image_ref='my_fake_image')

    with test.nested(
            mock.patch.object(drvr, '_create_domain_setup_lxc'),
            mock.patch.object(drvr, '_create_domain_cleanup_lxc'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr, 'firewall_driver'),
            mock.patch.object(drvr, '_create_domain',
                              side_effect=exception.NovaException),
            mock.patch.object(drvr, 'cleanup')):
        self.assertRaises(exception.NovaException,
                          drvr._create_domain_and_network,
                          self.context,
                          'xml',
                          instance, None, None)

def test_create_without_pause(self):
    # Without network events the domain is created unpaused and
    # resume() is never called.
    self.flags(virt_type='lxc', group='libvirt')

    @contextlib.contextmanager
    def fake_lxc_disk_handler(*args, **kwargs):
        yield

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)

    with test.nested(
            mock.patch.object(drvr, '_lxc_disk_handler',
                              side_effect=fake_lxc_disk_handler),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr, 'firewall_driver'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'cleanup')) as (
            _handler, cleanup, firewall_driver, create, plug_vifs):
        domain = drvr._create_domain_and_network(self.context, 'xml',
                                                 instance, None, None)
        self.assertEqual(0, create.call_args_list[0][1]['pause'])
        self.assertEqual(0, domain.resume.call_count)

def _test_create_with_network_events(self, neutron_failure=None,
                                     power_on=True):
    # Shared driver for the vif-plugging event tests: simulates neutron
    # success, an error event, or a timeout while waiting.
    generated_events = []

    def wait_timeout():
        event = mock.MagicMock()
        if neutron_failure == 'timeout':
            raise eventlet.timeout.Timeout()
        elif neutron_failure == 'error':
            event.status = 'failed'
        else:
            event.status = 'completed'
        return event

    def fake_prepare(instance, event_name):
        m = mock.MagicMock()
        m.instance = instance
        m.event_name = event_name
        m.wait.side_effect = wait_timeout
        generated_events.append(m)
        return m

    virtapi = manager.ComputeVirtAPI(mock.MagicMock())
    prepare = virtapi._compute.instance_events.prepare_for_instance_event
    prepare.side_effect = fake_prepare
    drvr = libvirt_driver.LibvirtDriver(virtapi, False)

    instance = objects.Instance(**self.test_instance)
    vifs = [{'id': 'vif1', 'active': False},
            {'id': 'vif2', 'active': False}]

    @mock.patch.object(drvr, 'plug_vifs')
    @mock.patch.object(drvr, 'firewall_driver')
    @mock.patch.object(drvr, '_create_domain')
    @mock.patch.object(drvr, 'cleanup')
    def test_create(cleanup, create, fw_driver, plug_vifs):
        domain = drvr._create_domain_and_network(self.context, 'xml',
                                                 instance, vifs, None,
                                                 power_on=power_on)
        plug_vifs.assert_called_with(instance, vifs)

        pause = self._get_pause_flag(drvr, vifs, power_on=power_on)
        self.assertEqual(pause,
                         create.call_args_list[0][1]['pause'])
        if pause:
            domain.resume.assert_called_once_with()
        if neutron_failure and CONF.vif_plugging_is_fatal:
            cleanup.assert_called_once_with(self.context,
                                            instance, network_info=vifs,
                                            block_device_info=None)

    test_create()

    if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
        prepare.assert_has_calls([
mock.call(instance, 'network-vif-plugged-vif1'), mock.call(instance, 'network-vif-plugged-vif2')]) for event in generated_events: if neutron_failure and generated_events.index(event) != 0: self.assertEqual(0, event.call_count) elif (neutron_failure == 'error' and not CONF.vif_plugging_is_fatal): event.wait.assert_called_once_with() else: self.assertEqual(0, prepare.call_count) @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron(self, is_neutron): self._test_create_with_network_events() @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron_power_off(self, is_neutron): # Tests that we don't wait for events if we don't start the instance. self._test_create_with_network_events(power_on=False) @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron_nowait(self, is_neutron): self.flags(vif_plugging_timeout=0) self._test_create_with_network_events() @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron_failed_nonfatal_timeout( self, is_neutron): self.flags(vif_plugging_is_fatal=False) self._test_create_with_network_events(neutron_failure='timeout') @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron_failed_fatal_timeout( self, is_neutron): self.assertRaises(exception.VirtualInterfaceCreateException, self._test_create_with_network_events, neutron_failure='timeout') @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron_failed_nonfatal_error( self, is_neutron): self.flags(vif_plugging_is_fatal=False) self._test_create_with_network_events(neutron_failure='error') @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron_failed_fatal_error( self, is_neutron): self.assertRaises(exception.VirtualInterfaceCreateException, self._test_create_with_network_events, 
neutron_failure='error') @mock.patch('nova.utils.is_neutron', return_value=False) def test_create_with_network_events_non_neutron(self, is_neutron): self._test_create_with_network_events() @mock.patch('nova.volume.encryptors.get_encryption_metadata') @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm') def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) mock_dom = mock.MagicMock() mock_encryption_meta = mock.MagicMock() get_encryption_metadata.return_value = mock_encryption_meta fake_xml = """ instance-00000001 1048576 1 """ fake_volume_id = "fake-volume-id" connection_info = {"driver_volume_type": "fake", "data": {"access_mode": "rw", "volume_id": fake_volume_id}} def fake_getitem(*args, **kwargs): fake_bdm = {'connection_info': connection_info, 'mount_device': '/dev/vda'} return fake_bdm.get(args[0]) mock_volume = mock.MagicMock() mock_volume.__getitem__.side_effect = fake_getitem block_device_info = {'block_device_mapping': [mock_volume]} network_info = [network_model.VIF(id='1'), network_model.VIF(id='2', active=True)] with test.nested( mock.patch.object(drvr, '_get_volume_encryptor'), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'), mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'), mock.patch.object(drvr, '_create_domain'), mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'), ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering, prepare_instance_filter, create_domain, apply_instance_filter): create_domain.return_value = libvirt_guest.Guest(mock_dom) guest = drvr._create_domain_and_network( self.context, fake_xml, instance, network_info, None, block_device_info=block_device_info) get_encryption_metadata.assert_called_once_with(self.context, drvr._volume_api, fake_volume_id, connection_info) 
get_volume_encryptor.assert_called_once_with(connection_info, mock_encryption_meta) plug_vifs.assert_called_once_with(instance, network_info) setup_basic_filtering.assert_called_once_with(instance, network_info) prepare_instance_filter.assert_called_once_with(instance, network_info) pause = self._get_pause_flag(drvr, network_info) create_domain.assert_called_once_with( fake_xml, pause=pause, power_on=True) self.assertEqual(mock_dom, guest._domain) def test_get_guest_storage_config(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) test_instance = copy.deepcopy(self.test_instance) test_instance["default_swap_device"] = None instance = objects.Instance(**test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = instance.get_flavor() conn_info = {'driver_volume_type': 'fake', 'data': {}} bdm = objects.BlockDeviceMapping( self.context, **fake_block_device.FakeDbBlockDeviceDict({ 'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vdc'})) bdi = {'block_device_mapping': driver_block_device.convert_volumes([bdm])} bdm = bdi['block_device_mapping'][0] bdm['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta, bdi) mock_conf = mock.MagicMock(source_path='fake') with test.nested( mock.patch.object(driver_block_device.DriverVolumeBlockDevice, 'save'), mock.patch.object(drvr, '_connect_volume'), mock.patch.object(drvr, '_get_volume_config', return_value=mock_conf), mock.patch.object(drvr, '_set_cache_mode') ) as (volume_save, connect_volume, get_volume_config, set_cache_mode): devices = drvr._get_guest_storage_config(instance, image_meta, disk_info, False, bdi, flavor, "hvm") self.assertEqual(3, len(devices)) self.assertEqual('/dev/vdb', instance.default_ephemeral_device) self.assertIsNone(instance.default_swap_device) connect_volume.assert_called_with(bdm['connection_info'], {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'}) 
get_volume_config.assert_called_with(bdm['connection_info'], {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'}) volume_save.assert_called_once_with() self.assertEqual(3, set_cache_mode.call_count) def test_get_neutron_events(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) network_info = [network_model.VIF(id='1'), network_model.VIF(id='2', active=True)] events = drvr._get_neutron_events(network_info) self.assertEqual([('network-vif-plugged', '1')], events) def test_unplug_vifs_ignores_errors(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) with mock.patch.object(drvr, 'vif_driver') as vif_driver: vif_driver.unplug.side_effect = exception.AgentError( method='unplug') drvr._unplug_vifs('inst', [1], ignore_errors=True) vif_driver.unplug.assert_called_once_with('inst', 1) def test_unplug_vifs_reports_errors(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) with mock.patch.object(drvr, 'vif_driver') as vif_driver: vif_driver.unplug.side_effect = exception.AgentError( method='unplug') self.assertRaises(exception.AgentError, drvr.unplug_vifs, 'inst', [1]) vif_driver.unplug.assert_called_once_with('inst', 1) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') def test_cleanup_pass_with_no_mount_device(self, undefine, unplug): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) drvr.firewall_driver = mock.Mock() drvr._disconnect_volume = mock.Mock() fake_inst = {'name': 'foo'} fake_bdms = [{'connection_info': 'foo', 'mount_device': None}] with mock.patch('nova.virt.driver' '.block_device_info_get_mapping', return_value=fake_bdms): drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False) self.assertTrue(drvr._disconnect_volume.called) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') def test_cleanup_wants_vif_errors_ignored(self, undefine, 
unplug): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) fake_inst = {'name': 'foo'} with mock.patch.object(drvr._conn, 'lookupByName') as lookup: lookup.return_value = fake_inst # NOTE(danms): Make unplug cause us to bail early, since # we only care about how it was called unplug.side_effect = test.TestingException self.assertRaises(test.TestingException, drvr.cleanup, 'ctxt', fake_inst, 'netinfo') unplug.assert_called_once_with(fake_inst, 'netinfo', True) @mock.patch.object(driver, 'block_device_info_get_mapping') @mock.patch.object(host.Host, "get_guest") @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_serial_ports_from_guest') @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain') def test_cleanup_serial_console_enabled( self, undefine, get_ports, get_guest, block_device_info_get_mapping): self.flags(enabled="True", group='serial_console') instance = 'i1' network_info = {} bdm_info = {} firewall_driver = mock.MagicMock() guest = mock.Mock(spec=libvirt_guest.Guest) get_guest.return_value = guest get_ports.return_value = iter([('127.0.0.1', 10000)]) block_device_info_get_mapping.return_value = () # We want to ensure undefine_domain is called after # lookup_domain. 
def undefine_domain(instance): get_ports.side_effect = Exception("domain undefined") undefine.side_effect = undefine_domain drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) drvr.firewall_driver = firewall_driver drvr.cleanup( 'ctx', instance, network_info, block_device_info=bdm_info, destroy_disks=False, destroy_vifs=False) get_ports.assert_called_once_with(guest) undefine.assert_called_once_with(instance) firewall_driver.unfilter_instance.assert_called_once_with( instance, network_info=network_info) block_device_info_get_mapping.assert_called_once_with(bdm_info) @mock.patch.object(driver, 'block_device_info_get_mapping') @mock.patch.object(host.Host, "get_guest") @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain') def test_cleanup_serial_console_domain_gone( self, undefine, get_guest, block_device_info_get_mapping): self.flags(enabled="True", group='serial_console') instance = {'name': 'i1'} network_info = {} bdm_info = {} firewall_driver = mock.MagicMock() block_device_info_get_mapping.return_value = () # Ensure get_guest raises same exception that would have occurred # if domain was gone. 
get_guest.side_effect = exception.InstanceNotFound("domain undefined") drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) drvr.firewall_driver = firewall_driver drvr.cleanup( 'ctx', instance, network_info, block_device_info=bdm_info, destroy_disks=False, destroy_vifs=False) get_guest.assert_called_once_with(instance) undefine.assert_called_once_with(instance) firewall_driver.unfilter_instance.assert_called_once_with( instance, network_info=network_info) block_device_info_get_mapping.assert_called_once_with(bdm_info) @mock.patch.object(libvirt_driver.LibvirtDriver, 'unfilter_instance') @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files', return_value=True) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain') def test_cleanup_migrate_data_shared_block_storage(self, _undefine_domain, save, delete_instance_files, unfilter_instance): # Tests the cleanup method when migrate_data has # is_shared_block_storage=True and destroy_disks=False. 
instance = objects.Instance(self.context, **self.test_instance) migrate_data = objects.LibvirtLiveMigrateData( is_shared_block_storage=True) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) drvr.cleanup( self.context, instance, network_info={}, destroy_disks=False, migrate_data=migrate_data, destroy_vifs=False) delete_instance_files.assert_called_once_with(instance) self.assertEqual(1, int(instance.system_metadata['clean_attempts'])) self.assertTrue(instance.cleaned) save.assert_called_once_with() def test_swap_volume(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) mock_dom = mock.MagicMock() guest = libvirt_guest.Guest(mock_dom) with mock.patch.object(drvr._conn, 'defineXML', create=True) as mock_define: xmldoc = "" srcfile = "/first/path" dstfile = "/second/path" mock_dom.XMLDesc.return_value = xmldoc mock_dom.isPersistent.return_value = True mock_dom.blockJobInfo.return_value = {} drvr._swap_volume(guest, srcfile, dstfile, 1) mock_dom.XMLDesc.assert_called_once_with( flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE | fakelibvirt.VIR_DOMAIN_XML_SECURE)) mock_dom.blockRebase.assert_called_once_with( srcfile, dstfile, 0, flags=( fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)) mock_dom.blockResize.assert_called_once_with( srcfile, 1 * units.Gi / units.Ki) mock_define.assert_called_once_with(xmldoc) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume') @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save') @mock.patch('nova.objects.block_device.BlockDeviceMapping.' 
'get_by_volume_and_instance') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume') @mock.patch('nova.virt.libvirt.host.Host.get_guest') def test_swap_volume_driver_bdm_save(self, get_guest, connect_volume, get_volume_config, get_by_volume_and_instance, volume_save, swap_volume, disconnect_volume): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) instance = objects.Instance(**self.test_instance) old_connection_info = {'driver_volume_type': 'fake', 'serial': 'old-volume-id', 'data': {'device_path': '/fake-old-volume', 'access_mode': 'rw'}} new_connection_info = {'driver_volume_type': 'fake', 'serial': 'new-volume-id', 'data': {'device_path': '/fake-new-volume', 'access_mode': 'rw'}} mock_dom = mock.MagicMock() guest = libvirt_guest.Guest(mock_dom) mock_dom.XMLDesc.return_value = """ """ mock_dom.name.return_value = 'inst' mock_dom.UUIDString.return_value = 'uuid' get_guest.return_value = guest disk_info = {'bus': 'virtio', 'type': 'disk', 'dev': 'vdb'} get_volume_config.return_value = mock.MagicMock( source_path='/fake-new-volume') bdm = objects.BlockDeviceMapping(self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/vdb', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-2', 'boot_index': 0})) get_by_volume_and_instance.return_value = bdm conn.swap_volume(old_connection_info, new_connection_info, instance, '/dev/vdb', 1) get_guest.assert_called_once_with(instance) connect_volume.assert_called_once_with(new_connection_info, disk_info) swap_volume.assert_called_once_with(guest, 'vdb', '/fake-new-volume', 1) disconnect_volume.assert_called_once_with(old_connection_info, 'vdb') volume_save.assert_called_once_with() def _test_live_snapshot(self, can_quiesce=False, require_quiesce=False): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) mock_dom = mock.MagicMock() 
test_image_meta = self.test_image_meta.copy() if require_quiesce: test_image_meta = {'properties': {'os_require_quiesce': 'yes'}} with test.nested( mock.patch.object(drvr._conn, 'defineXML', create=True), mock.patch.object(fake_libvirt_utils, 'get_disk_size'), mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'), mock.patch.object(fake_libvirt_utils, 'create_cow_image'), mock.patch.object(fake_libvirt_utils, 'chown'), mock.patch.object(fake_libvirt_utils, 'extract_snapshot'), mock.patch.object(drvr, '_set_quiesced') ) as (mock_define, mock_size, mock_backing, mock_create_cow, mock_chown, mock_snapshot, mock_quiesce): xmldoc = "" srcfile = "/first/path" dstfile = "/second/path" bckfile = "/other/path" dltfile = dstfile + ".delta" mock_dom.XMLDesc.return_value = xmldoc mock_dom.isPersistent.return_value = True mock_size.return_value = 1004009 mock_backing.return_value = bckfile guest = libvirt_guest.Guest(mock_dom) if not can_quiesce: mock_quiesce.side_effect = ( exception.InstanceQuiesceNotSupported( instance_id=self.test_instance['id'], reason='test')) image_meta = objects.ImageMeta.from_dict(test_image_meta) drvr._live_snapshot(self.context, self.test_instance, guest, srcfile, dstfile, "qcow2", "qcow2", image_meta) mock_dom.XMLDesc.assert_called_once_with(flags=( fakelibvirt.VIR_DOMAIN_XML_INACTIVE | fakelibvirt.VIR_DOMAIN_XML_SECURE)) mock_dom.blockRebase.assert_called_once_with( srcfile, dltfile, 0, flags=( fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT | fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)) mock_size.assert_called_once_with(srcfile, format="qcow2") mock_backing.assert_called_once_with(srcfile, basename=False, format="qcow2") mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009) mock_chown.assert_called_once_with(dltfile, os.getuid()) mock_snapshot.assert_called_once_with(dltfile, "qcow2", dstfile, "qcow2") mock_define.assert_called_once_with(xmldoc) mock_quiesce.assert_any_call(mock.ANY, 
self.test_instance, mock.ANY, True) if can_quiesce: mock_quiesce.assert_any_call(mock.ANY, self.test_instance, mock.ANY, False) def test_live_snapshot(self): self._test_live_snapshot() def test_live_snapshot_with_quiesce(self): self._test_live_snapshot(can_quiesce=True) def test_live_snapshot_with_require_quiesce(self): self._test_live_snapshot(can_quiesce=True, require_quiesce=True) def test_live_snapshot_with_require_quiesce_fails(self): self.assertRaises(exception.InstanceQuiesceNotSupported, self._test_live_snapshot, can_quiesce=False, require_quiesce=True) @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration") def test_live_migration_hostname_valid(self, mock_lm): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.live_migration(self.context, self.test_instance, "host1.example.com", lambda x: x, lambda x: x) self.assertEqual(1, mock_lm.call_count) @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration") @mock.patch.object(fake_libvirt_utils, "is_valid_hostname") def test_live_migration_hostname_invalid(self, mock_hostname, mock_lm): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_hostname.return_value = False self.assertRaises(exception.InvalidHostname, drvr.live_migration, self.context, self.test_instance, "foo/?com=/bin/sh", lambda x: x, lambda x: x) @mock.patch.object(libvirt_driver.LibvirtDriver, "pause") def test_live_migration_force_complete(self, pause): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.live_migration_force_complete(self.test_instance) pause.assert_called_once_with(self.test_instance) @mock.patch.object(fakelibvirt.virDomain, "abortJob") def test_live_migration_abort(self, mock_abort): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dom = fakelibvirt.Domain(drvr._get_connection(), "", False) guest = libvirt_guest.Guest(dom) with mock.patch.object(nova.virt.libvirt.host.Host, 'get_guest', return_value=guest): 
drvr.live_migration_abort(self.test_instance) self.assertTrue(mock_abort.called) @mock.patch('os.path.exists', return_value=True) @mock.patch('tempfile.mkstemp') @mock.patch('os.close', return_value=None) def test_check_instance_shared_storage_local_raw(self, mock_close, mock_mkstemp, mock_exists): instance_uuid = str(uuid.uuid4()) self.flags(images_type='raw', group='libvirt') self.flags(instances_path='/tmp') mock_mkstemp.return_value = (-1, '/tmp/{0}/file'.format(instance_uuid)) driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) temp_file = driver.check_instance_shared_storage_local(self.context, instance) self.assertEqual('/tmp/{0}/file'.format(instance_uuid), temp_file['filename']) def test_check_instance_shared_storage_local_rbd(self): self.flags(images_type='rbd', group='libvirt') driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) self.assertIsNone(driver. 
check_instance_shared_storage_local(self.context, instance)) def test_version_to_string(self): driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) string_ver = driver._version_to_string((4, 33, 173)) self.assertEqual("4.33.173", string_ver) def test_parallels_min_version_fail(self): self.flags(virt_type='parallels', group='libvirt') driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(driver._conn, 'getLibVersion', return_value=1002011): self.assertRaises(exception.NovaException, driver.init_host, 'wibble') def test_parallels_min_version_ok(self): self.flags(virt_type='parallels', group='libvirt') driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(driver._conn, 'getLibVersion', return_value=1002012): driver.init_host('wibble') def test_get_guest_config_parallels_vm(self): self.flags(virt_type='parallels', group='libvirt') self.flags(images_type='ploop', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual("parallels", cfg.virt_type) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vm_mode.HVM, cfg.os_type) self.assertIsNone(cfg.os_root) self.assertEqual(6, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[0].driver_format, "ploop") self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[4], 
vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) def test_get_guest_config_parallels_ct(self): self.flags(virt_type='parallels', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) ct_instance = self.test_instance.copy() ct_instance["vm_mode"] = vm_mode.EXE instance_ref = objects.Instance(**ct_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, {'mapping': {'disk': {}}}) self.assertEqual("parallels", cfg.virt_type) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertIsNone(cfg.os_root) self.assertEqual(4, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) fs = cfg.devices[0] self.assertEqual(fs.source_type, "file") self.assertEqual(fs.driver_type, "ploop") self.assertEqual(fs.target_dir, "/") self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestVideo) def _test_get_guest_config_parallels_volume(self, vmmode, devices): self.flags(virt_type='parallels', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) ct_instance = self.test_instance.copy() ct_instance["vm_mode"] = vmmode instance_ref = objects.Instance(**ct_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) conn_info = {'driver_volume_type': 'fake'} bdm = objects.BlockDeviceMapping( self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 0, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sda'})) info = {'block_device_mapping': driver_block_device.convert_volumes( [bdm])} 
info['block_device_mapping'][0]['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, info) with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save' ) as mock_save: cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info, None, info) mock_save.assert_called_once_with() self.assertEqual("parallels", cfg.virt_type) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vmmode, cfg.os_type) self.assertIsNone(cfg.os_root) self.assertEqual(devices, len(cfg.devices)) disk_found = False for dev in cfg.devices: result = isinstance(dev, vconfig.LibvirtConfigGuestFilesys) self.assertFalse(result) if (isinstance(dev, vconfig.LibvirtConfigGuestDisk) and (dev.source_path is None or 'disk.local' not in dev.source_path)): self.assertEqual("disk", dev.source_device) self.assertEqual("sda", dev.target_dev) disk_found = True self.assertTrue(disk_found) def test_get_guest_config_parallels_volume(self): self._test_get_guest_config_parallels_volume(vm_mode.EXE, 4) self._test_get_guest_config_parallels_volume(vm_mode.HVM, 6) def _test_prepare_domain_for_snapshot(self, live_snapshot, state): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance_ref = objects.Instance(**self.test_instance) with mock.patch.object(drvr, "suspend") as mock_suspend: drvr._prepare_domain_for_snapshot( self.context, live_snapshot, state, instance_ref) return mock_suspend.called def test_prepare_domain_for_snapshot(self): # Ensure that suspend() is only called on RUNNING or PAUSED instances for test_power_state in power_state.STATE_MAP.keys(): if test_power_state in (power_state.RUNNING, power_state.PAUSED): self.assertTrue(self._test_prepare_domain_for_snapshot( False, test_power_state)) else: self.assertFalse(self._test_prepare_domain_for_snapshot( False, test_power_state)) def 
test_prepare_domain_for_snapshot_lxc(self):
        self.flags(virt_type='lxc', group='libvirt')
        # Ensure that suspend() is never called with LXC
        for test_power_state in power_state.STATE_MAP.keys():
            self.assertFalse(self._test_prepare_domain_for_snapshot(
                False, test_power_state))

    def test_prepare_domain_for_snapshot_live_snapshots(self):
        # Ensure that suspend() is never called for live snapshots
        for test_power_state in power_state.STATE_MAP.keys():
            self.assertFalse(self._test_prepare_domain_for_snapshot(
                True, test_power_state))


class HostStateTestCase(test.NoDBTestCase):
    """Tests the host resource stats reported by
    LibvirtDriver.get_available_resource(), using a FakeConnection whose
    private getters return the fixed values below.
    """

    # Fixed host CPU description returned by FakeConnection._get_cpu_info()
    cpu_info = {"vendor": "Intel", "model": "pentium",
                "arch": "i686",
                "features": ["ssse3", "monitor", "pni", "sse2", "sse", "fxsr",
                             "clflush", "pse36", "pat", "cmov", "mca", "pge",
                             "mtrr", "sep", "apic"],
                "topology": {"cores": "1", "threads": "1", "sockets": "1"}}
    instance_caps = [(arch.X86_64, "kvm", "hvm"),
                     (arch.I686, "kvm", "hvm")]
    pci_devices = [{
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:10.3",
        "product_id": '1521',
        "vendor_id": '8086',
        "dev_type": fields.PciDeviceType.SRIOV_PF,
        "phys_function": None}]
    numa_topology = objects.NUMATopology(cells=[
        objects.NUMACell(
            id=1, cpuset=set([1, 2]), memory=1024, cpu_usage=0,
            memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])),
        objects.NUMACell(
            id=2, cpuset=set([3, 4]), memory=1024, cpu_usage=0,
            memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([]))])

    class FakeConnection(libvirt_driver.LibvirtDriver):
        """Fake connection object.

        Overrides the private resource getters so get_available_resource()
        returns deterministic values without touching a real hypervisor.
        """
        def __init__(self):
            super(HostStateTestCase.FakeConnection,
                  self).__init__(fake.FakeVirtAPI(), True)

            self._host = host.Host("qemu:///system")

            def _get_memory_mb_total():
                return 497

            def _get_memory_mb_used():
                return 88

            # Patch the Host object rather than subclassing it
            self._host.get_memory_mb_total = _get_memory_mb_total
            self._host.get_memory_mb_used = _get_memory_mb_used

        def _get_vcpu_total(self):
            return 1

        def _get_vcpu_used(self):
            return 0

        def _get_cpu_info(self):
            return HostStateTestCase.cpu_info

        def _get_disk_over_committed_size_total(self):
            return 0

        def _get_local_gb_info(self):
            return {'total': 100, 'used': 20, 'free': 80}

        def get_host_uptime(self):
            return ('10:01:16 up 1:36, 6 users, '
                    'load average: 0.21, 0.16, 0.19')

        def _get_disk_available_least(self):
            return 13091

        def _get_instance_capabilities(self):
            return HostStateTestCase.instance_caps

        def _get_pci_passthrough_devices(self):
            return jsonutils.dumps(HostStateTestCase.pci_devices)

        def _get_host_numa_topology(self):
            return HostStateTestCase.numa_topology

    @mock.patch.object(fakelibvirt, "openAuth")
    def test_update_status(self, mock_open):
        # Verify that every stat surfaced by get_available_resource()
        # matches the fixed values wired into FakeConnection above.
        mock_open.return_value = fakelibvirt.Connection("qemu:///system")

        drvr = HostStateTestCase.FakeConnection()

        stats = drvr.get_available_resource("compute1")
        self.assertEqual(stats["vcpus"], 1)
        self.assertEqual(stats["memory_mb"], 497)
        self.assertEqual(stats["local_gb"], 100)
        self.assertEqual(stats["vcpus_used"], 0)
        self.assertEqual(stats["memory_mb_used"], 88)
        self.assertEqual(stats["local_gb_used"], 20)
        self.assertEqual(stats["hypervisor_type"], 'QEMU')
        self.assertEqual(stats["hypervisor_version"], 1001000)
        self.assertEqual(stats["hypervisor_hostname"], 'compute1')
        cpu_info = jsonutils.loads(stats["cpu_info"])
        self.assertEqual(cpu_info,
                {"vendor": "Intel",
                 "model": "pentium",
                 "arch": arch.I686,
                 "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                              "fxsr", "clflush", "pse36", "pat", "cmov",
                              "mca", "pge", "mtrr", "sep", "apic"],
                 "topology": {"cores": "1", "threads": "1", "sockets": "1"}
                })
        self.assertEqual(stats["disk_available_least"], 80)
        self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
                         HostStateTestCase.pci_devices)
        self.assertThat(objects.NUMATopology.obj_from_db_obj(
                            stats['numa_topology'])._to_dict(),
                        matchers.DictMatches(
                            HostStateTestCase.numa_topology._to_dict()))


class LibvirtDriverTestCase(test.NoDBTestCase):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
    def setUp(self):
        super(LibvirtDriverTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(
            fake.FakeVirtAPI(), read_only=True)
        self.context = context.get_admin_context()
        self.test_image_meta = {
            "disk_format": "raw",
        }

    def _create_instance(self, params=None):
        """Create a test instance.

        :param params: optional dict of instance fields that override the
                       defaults below (applied via inst.update(params)).
        :returns: an objects.Instance with a fixed m1.tiny-style flavor
                  (root_gb=10, ephemeral_gb=20, swap=0).
        """
        if not params:
            params = {}

        flavor = objects.Flavor(memory_mb=512,
                                swap=0,
                                vcpu_weight=None,
                                root_gb=10,
                                id=2,
                                name=u'm1.tiny',
                                ephemeral_gb=20,
                                rxtx_factor=1.0,
                                flavorid=u'1',
                                vcpus=1)

        inst = {}
        inst['id'] = 1
        inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b'
        inst['os_type'] = 'linux'
        inst['image_ref'] = '1'
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = 'fake'
        inst['project_id'] = 'fake'
        inst['instance_type_id'] = 2
        inst['ami_launch_index'] = 0
        inst['host'] = 'host1'
        inst['root_gb'] = flavor.root_gb
        inst['ephemeral_gb'] = flavor.ephemeral_gb
        inst['config_drive'] = True
        inst['kernel_id'] = 2
        inst['ramdisk_id'] = 3
        inst['key_data'] = 'ABCDEFG'
        inst['system_metadata'] = {}
        inst['metadata'] = {}
        inst['task_state'] = None

        inst.update(params)

        return objects.Instance(flavor=flavor,
                                old_flavor=None, new_flavor=None,
                                **inst)

    @staticmethod
    def _disk_info(type='qcow2', config_disk=False):
        """Return a JSON disk-info blob: 10G root and 512M swap disk,
        plus an optional raw config disk when config_disk=True.
        """
        disk_info = [{'disk_size': 1, 'type': type,
                      'virt_disk_size': 10737418240, 'path': '/test/disk',
                      'backing_file': '/base/disk'},
                     {'disk_size': 1, 'type': type,
                      'virt_disk_size': 536870912, 'path': '/test/disk.swap',
                      'backing_file': '/base/swap_512'}]
        if config_disk:
            disk_info.append({'disk_size': 1, 'type': 'raw',
                              'virt_disk_size': 1024,
                              'path': '/test/disk.config'})
        return jsonutils.dumps(disk_info)

    def test_migrate_disk_and_power_off_exception(self):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        The first (and only the first) utils.execute() call raises, and
        the AssertionError must propagate out of the driver.
        """

        self.counter = 0
        self.checked_shared_storage = False

        def fake_get_instance_disk_info(instance,
                                        block_device_info=None):
            return '[]'

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        def fake_execute(*args, **kwargs):
            self.counter += 1
            if self.counter == 1:
                assert False, "intentional failure"

        def fake_os_path_exists(path):
            return True

        def fake_is_storage_shared(dest, inst_base):
            self.checked_shared_storage = True
            return False

        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(self.drvr, '_is_storage_shared_with',
                       fake_is_storage_shared)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stub_out('os.path.exists', fake_os_path_exists)

        ins_ref = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)

        self.assertRaises(AssertionError,
                          self.drvr.migrate_disk_and_power_off,
                          context.get_admin_context(), ins_ref, '10.0.0.2',
                          flavor_obj, None)

    def _test_migrate_disk_and_power_off(self, flavor_obj,
                                         block_device_info=None,
                                         params_for_instance=None):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        Exercises both the remote-destination (10.0.0.2) and
        same-host (10.0.0.1) paths; each must return the disk info blob.
        """

        disk_info = self._disk_info()

        def fake_get_instance_disk_info(instance,
                                        block_device_info=None):
            return disk_info

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        def fake_execute(*args, **kwargs):
            pass

        def fake_copy_image(src, dest, host=None, receive=False,
                            on_execute=None, on_completion=None,
                            compression=True):
            # The driver must always provide process-tracking callbacks
            self.assertIsNotNone(on_execute)
            self.assertIsNotNone(on_completion)

        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(libvirt_utils, 'copy_image', fake_copy_image)

        ins_ref = self._create_instance(params=params_for_instance)
        # dest is different host case
        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), ins_ref, '10.0.0.2',
               flavor_obj, None, block_device_info=block_device_info)
        self.assertEqual(out, disk_info)

        # dest is same host case
        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), ins_ref, '10.0.0.1',
               flavor_obj, None, block_device_info=block_device_info)
        self.assertEqual(out, disk_info)

    def test_migrate_disk_and_power_off(self):
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)

        self._test_migrate_disk_and_power_off(flavor_obj)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    def test_migrate_disk_and_power_off_boot_from_volume(self,
                                                         disconnect_volume):
        # The boot volume (boot_index 0, /dev/vda) must be disconnected
        # during the migration.
        info = {'block_device_mapping': [{'boot_index': None,
                                          'mount_device': '/dev/vdd',
                                          'connection_info': None},
                                         {'boot_index': 0,
                                          'mount_device': '/dev/vda',
                                          'connection_info': None}]}
        flavor = {'root_gb': 1, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        # Note(Mike_D): The size of instance's ephemeral_gb is 0 gb.
        self._test_migrate_disk_and_power_off(
            flavor_obj, block_device_info=info,
            params_for_instance={'image_ref': None, 'ephemeral_gb': 0})
        disconnect_volume.assert_called_with(
            info['block_device_mapping'][1]['connection_info'], 'vda')

    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
                                             get_host_ip_addr,
                                             mock_destroy,
                                             mock_copy_image,
                                             mock_execute):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        Verifies that the swap disk is never copied or moved during a
        resize (it is recreated on the destination instead).
        """
        self.copy_or_move_swap_called = False

        disk_info = self._disk_info()
        mock_get_disk_info.return_value = disk_info
        get_host_ip_addr.return_value = '10.0.0.1'

        def fake_copy_image(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if '/test/disk.swap' in list(args):
                self.copy_or_move_swap_called = True

        def fake_execute(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if set(['mv', '/test/disk.swap']).issubset(list(args)):
                self.copy_or_move_swap_called = True

        mock_copy_image.side_effect = fake_copy_image
        mock_execute.side_effect = fake_execute

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Original instance config
        instance = self._create_instance({'root_gb': 10,
                                          'ephemeral_gb': 0})

        # Re-size fake instance to 20G root and 1024M swap disk
        flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
        flavor_obj = objects.Flavor(**flavor)

        # Destination is same host
        out = drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                              instance, '10.0.0.1',
                                              flavor_obj, None)

        mock_get_disk_info.assert_called_once_with(instance,
                                                   block_device_info=None)
        self.assertTrue(get_host_ip_addr.called)
        mock_destroy.assert_called_once_with(instance)
        self.assertFalse(self.copy_or_move_swap_called)
        self.assertEqual(disk_info, out)

    def _test_migrate_disk_and_power_off_resize_check(self, expected_exc):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        :param expected_exc: exception class the driver is expected to
                             raise when the resize pre-check fails.
        """

        def fake_get_instance_disk_info(instance, xml=None,
                                        block_device_info=None):
            return self._disk_info()

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)

        ins_ref = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)

        # Migration is not implemented for LVM backed instances
        self.assertRaises(expected_exc,
                          self.drvr.migrate_disk_and_power_off,
                          None, ins_ref, '10.0.0.1', flavor_obj, None)

    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._is_storage_shared_with')
    def _test_migrate_disk_and_power_off_backing_file(self,
                                                      shared_storage,
                                                      mock_is_shared_storage,
                                                      mock_get_disk_info,
                                                      mock_destroy,
                                                      mock_execute):
        # A disk with a backing file must never be flattened with
        # 'qemu-img convert' during migration, shared storage or not.
        self.convert_file_called = False
        flavor = {'root_gb': 20, 'ephemeral_gb': 30, 'swap': 0}
        flavor_obj = objects.Flavor(**flavor)
        disk_info = [{'type': 'qcow2', 'path': '/test/disk',
                      'virt_disk_size': '10737418240',
                      'backing_file': '/base/disk',
                      'disk_size': '83886080'}]
        disk_info_text = jsonutils.dumps(disk_info)
        mock_get_disk_info.return_value = disk_info_text
        mock_is_shared_storage.return_value = shared_storage

        def fake_execute(*args, **kwargs):
            self.assertNotEqual(args[0:2], ['qemu-img', 'convert'])

        mock_execute.side_effect = fake_execute

        instance = self._create_instance()

        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), instance, '10.0.0.2',
               flavor_obj, None)
        self.assertTrue(mock_is_shared_storage.called)
        mock_destroy.assert_called_once_with(instance)
        self.assertEqual(out, disk_info_text)

    def test_migrate_disk_and_power_off_shared_storage(self):
        self._test_migrate_disk_and_power_off_backing_file(True)

    def test_migrate_disk_and_power_off_non_shared_storage(self):
        self._test_migrate_disk_and_power_off_backing_file(False)

    def test_migrate_disk_and_power_off_lvm(self):
        # Resize of LVM-backed instances rolls back with
        # InstanceFaultRollback rather than migrating.
        self.flags(images_type='lvm', group='libvirt')

        def fake_execute(*args, **kwargs):
            pass

        self.stubs.Set(utils, 'execute', fake_execute)

        expected_exc = exception.InstanceFaultRollback
        self._test_migrate_disk_and_power_off_resize_check(expected_exc)

    def test_migrate_disk_and_power_off_resize_cannot_ssh(self):
        # A failed ssh to the destination (ProcessExecutionError from
        # execute) must surface as InstanceFaultRollback.
        def fake_execute(*args, **kwargs):
            raise processutils.ProcessExecutionError()

        def fake_is_storage_shared(dest, inst_base):
            self.checked_shared_storage = True
            return False

        self.stubs.Set(self.drvr, '_is_storage_shared_with',
                       fake_is_storage_shared)

        self.stubs.Set(utils, 'execute', fake_execute)

        expected_exc = exception.InstanceFaultRollback
        self._test_migrate_disk_and_power_off_resize_check(expected_exc)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error(self,
                                                     mock_get_disk_info):
        # Shrinking the root disk (10G -> 5G flavor) must be rejected.
        instance = self._create_instance()
        flavor = {'root_gb': 5, 'ephemeral_gb': 10}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()

        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error_default_ephemeral(
            self, mock_get_disk_info):
        # Note(Mike_D): The size of this instance's ephemeral_gb is 20 gb.
        instance = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()

        self.assertRaises(exception.InstanceFaultRollback,
                          self.drvr.migrate_disk_and_power_off,
                          'ctx', instance, '10.0.0.1', flavor_obj, None)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    @mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
    def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
                                                         mock_get_disk_info):
        # Block-device-mapped ephemeral disks: resizing below the real
        # ephemeral usage fails, resizing above it succeeds.
        mappings = [
            {
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1
            },
            {
                'device_name': '/dev/sda1',
                'source_type': 'volume',
                'destination_type': 'volume',
                'device_type': 'disk',
                'volume_id': 1,
                'guest_format': None,
                'boot_index': 1,
                'volume_size': 6
            },
            {
                'device_name': '/dev/sda2',
                'source_type': 'snapshot',
                'destination_type': 'volume',
                'snapshot_id': 1,
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': 0,
                'volume_size': 4
            },
            {
                'device_name': '/dev/sda3',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': -1,
                'volume_size': 3
            }
        ]
        mock_get.return_value = mappings
        instance = self._create_instance()

        # Old flavor, eph is 20, real disk is 3, target is 2, fail
        flavor = {'root_gb': 10, 'ephemeral_gb': 2}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()

        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)

        # Old flavor, eph is 20, real disk is 3, target is 4
        flavor = {'root_gb': 10, 'ephemeral_gb': 4}
        flavor_obj = objects.Flavor(**flavor)
        self._test_migrate_disk_and_power_off(flavor_obj)

    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._is_storage_shared_with')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_copy_disk_info(self,
                                                              mock_disk_info,
                                                              mock_shared,
                                                              mock_path,
                                                              mock_destroy,
                                                              mock_copy,
                                                              mock_execuate):
        # The source's <base>_resize/disk.info file must be copied to the
        # destination instance directory during the migration.
        instance = self._create_instance()
        disk_info = self._disk_info()
        disk_info_text = jsonutils.loads(disk_info)
        instance_base = os.path.dirname(disk_info_text[0]['path'])
        flavor = {'root_gb': 10, 'ephemeral_gb': 25}
        flavor_obj = objects.Flavor(**flavor)

        mock_disk_info.return_value = disk_info
        mock_path.return_value = instance_base
        mock_shared.return_value = False

        src_disk_info_path = os.path.join(instance_base + '_resize',
                                          'disk.info')

        with mock.patch.object(os.path, 'exists', autospec=True) \
                as mock_exists:
            # disk.info exists on the source
            mock_exists.side_effect = \
                lambda path: path == src_disk_info_path
            self.drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                                 instance, mock.sentinel,
                                                 flavor_obj, None)
            self.assertTrue(mock_exists.called)

        dst_disk_info_path = os.path.join(instance_base, 'disk.info')
        mock_copy.assert_any_call(src_disk_info_path, dst_disk_info_path,
                                  host=mock.sentinel, on_execute=mock.ANY,
                                  on_completion=mock.ANY)

    def test_wait_for_running(self):
        def fake_get_info(instance):
            if instance['name'] == "not_found":
                raise exception.InstanceNotFound(instance_id=instance['uuid'])
            elif instance['name'] == "running":
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)

        # instance not found case
        self.assertRaises(exception.InstanceNotFound,
                self.drvr._wait_for_running,
                    {'name': 'not_found',
                     'uuid': 'not_found_uuid'})

        # instance is running case
        self.assertRaises(loopingcall.LoopingCallDone,
                self.drvr._wait_for_running,
                    {'name': 'running',
                     'uuid': 'running_uuid'})

        # else case
        self.drvr._wait_for_running({'name': 'else',
                                     'uuid': 'other_uuid'})

    def test_disk_size_from_instance_disk_info(self):
        # 'disk' maps to root_gb, 'disk.local' to ephemeral_gb; the swap
        # disk reports 0 (it is not sized from the instance).
        instance_data = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30}
        inst = objects.Instance(**instance_data)
        self.assertEqual(10 * units.Gi,
                         self.drvr._disk_size_from_instance(inst, 'disk'))

        self.assertEqual(20 * units.Gi,
                         self.drvr._disk_size_from_instance(inst,
                                                            'disk.local'))

        self.assertEqual(0,
                         self.drvr._disk_size_from_instance(inst,
                                                            'disk.swap'))

    @mock.patch('nova.utils.execute')
    def test_disk_raw_to_qcow2(self, mock_execute):
        # Conversion goes through a temp '<path>_qcow' file then 'mv's it
        # over the original path.
        path = '/test/disk'
        _path_qcow = path + '_qcow'

        self.drvr._disk_raw_to_qcow2(path)
        mock_execute.assert_has_calls([
            mock.call('qemu-img', 'convert', '-f', 'raw',
                      '-O', 'qcow2', path, _path_qcow),
            mock.call('mv', _path_qcow, path)])

    @mock.patch('nova.utils.execute')
    def test_disk_qcow2_to_raw(self, mock_execute):
        path = '/test/disk'
        _path_raw = path + '_raw'

        self.drvr._disk_qcow2_to_raw(path)
        mock_execute.assert_has_calls([
            mock.call('qemu-img', 'convert', '-f', 'qcow2',
                      '-O', 'raw', path, _path_raw),
            mock.call('mv', _path_raw, path)])

    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_raw(self, mock_extend):
        image = imgmodel.LocalFileImage("/test/disk",
                                        imgmodel.FORMAT_RAW)

        self.drvr._disk_resize(image, 50)
        mock_extend.assert_called_once_with(image, 50)

    @mock.patch('nova.virt.disk.api.can_resize_image')
    @mock.patch('nova.virt.disk.api.is_image_extendable')
    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_qcow2(
            self, mock_extend, mock_can_resize, mock_is_image_extendable):
        # qcow2 resize is convert-to-raw, extend, convert-back.
        with test.nested(
                mock.patch.object(
                    self.drvr, '_disk_qcow2_to_raw'),
                mock.patch.object(
                    self.drvr, '_disk_raw_to_qcow2'))\
        as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):

            mock_can_resize.return_value = True
            mock_is_image_extendable.return_value = True

            imageqcow2 = imgmodel.LocalFileImage("/test/disk",
                                                 imgmodel.FORMAT_QCOW2)
            imageraw = imgmodel.LocalFileImage("/test/disk",
                                               imgmodel.FORMAT_RAW)
            self.drvr._disk_resize(imageqcow2, 50)

            mock_disk_qcow2_to_raw.assert_called_once_with(imageqcow2.path)
            mock_extend.assert_called_once_with(imageraw, 50)
            mock_disk_raw_to_qcow2.assert_called_once_with(imageqcow2.path)

    def _test_finish_migration(self, power_on, resize_instance=False):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .finish_migration.

        :param power_on: expected power_on flag propagated down to
                         _create_domain_and_network.
        :param resize_instance: whether _disk_resize is expected to run.
        """
        powered_on = power_on

        self.fake_create_domain_called = False
        self.fake_disk_resize_called = False
        create_image_called = [False]

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            return ""

        def fake_plug_vifs(instance, network_info):
            pass

        def fake_create_image(context, inst, disk_mapping, suffix='',
                              disk_images=None, network_info=None,
                              block_device_info=None, inject_files=True,
                              fallback_from_host=None):
            self.assertFalse(inject_files)
            create_image_called[0] = True

        def fake_create_domain_and_network(
            context, xml, instance, network_info, disk_info,
            block_device_info=None, power_on=True, reboot=False,
            vifs_already_plugged=False):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            self.assertTrue(vifs_already_plugged)

        def fake_enable_hairpin():
            pass

        def fake_execute(*args, **kwargs):
            pass

        def fake_get_info(instance):
            if powered_on:
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        def fake_disk_resize(image, size):
            # Assert that _create_image is called before disk resize,
            # otherwise we might be trying to resize a disk whose backing
            # file hasn't been fetched, yet.
            self.assertTrue(create_image_called[0])
            self.fake_disk_resize_called = True

        self.flags(use_cow_images=True)
        self.stubs.Set(self.drvr, '_disk_resize', fake_disk_resize)
        self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(self.drvr, '_create_image', fake_create_image)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       fake_create_domain_and_network)
        self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.drvr, 'firewall_driver', fw)
        self.stubs.Set(self.drvr, 'get_info', fake_get_info)

        ins_ref = self._create_instance()

        migration = objects.Migration()
        migration.source_compute = 'fake-source-compute'
        migration.dest_compute = 'fake-dest-compute'
        migration.source_node = 'fake-source-node'
        migration.dest_node = 'fake-dest-node'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        # Source disks are raw to test conversion
        disk_info = self._disk_info(type='raw', config_disk=True)

        with mock.patch.object(self.drvr, '_disk_raw_to_qcow2',
                               autospec=True) as mock_raw_to_qcow2:
            self.drvr.finish_migration(
                          context.get_admin_context(), migration, ins_ref,
                          disk_info, [], image_meta,
                          resize_instance, None, power_on)

            # Assert that we converted the root and swap disks
            convert_calls = [mock.call('/test/disk'),
                             mock.call('/test/disk.swap')]
            mock_raw_to_qcow2.assert_has_calls(convert_calls, any_order=True)

            # Implicitly assert that we did not convert the config disk
            self.assertEqual(len(convert_calls), mock_raw_to_qcow2.call_count)
        self.assertTrue(self.fake_create_domain_called)
        self.assertEqual(
            resize_instance, self.fake_disk_resize_called)

    def test_finish_migration_resize(self):
        self._test_finish_migration(True, resize_instance=True)

    def test_finish_migration_power_on(self):
        self._test_finish_migration(True)

    def test_finish_migration_power_off(self):
        self._test_finish_migration(False)

    def _test_finish_revert_migration(self, power_on):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .finish_revert_migration.
        """
        powered_on = power_on

        self.fake_create_domain_called = False

        def fake_execute(*args, **kwargs):
            pass

        def fake_plug_vifs(instance, network_info):
            pass

        def fake_create_domain(context, xml, instance, network_info,
                               disk_info, block_device_info=None,
                               power_on=None,
                               vifs_already_plugged=None):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            self.assertTrue(vifs_already_plugged)
            return mock.MagicMock()

        def fake_enable_hairpin():
            pass

        def fake_get_info(instance):
            if powered_on:
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None):
            return ""

        self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.drvr, 'firewall_driver', fw)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       fake_create_domain)
        self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)
        self.stubs.Set(utils, 'get_image_from_system_metadata',
                       lambda *a: self.test_image_meta)

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            ins_ref = self._create_instance()
            os.mkdir(os.path.join(tmpdir, ins_ref['name']))
            libvirt_xml_path = os.path.join(tmpdir,
                                            ins_ref['name'],
                                            'libvirt.xml')
            # The driver expects the domain XML file to already exist
            f = open(libvirt_xml_path, 'w')
            f.close()

            self.drvr.finish_revert_migration(
                                       context.get_admin_context(), ins_ref,
                                       [], None, power_on)
            self.assertTrue(self.fake_create_domain_called)

    def test_finish_revert_migration_power_on(self):
        self._test_finish_revert_migration(True)

    def test_finish_revert_migration_power_off(self):
        self._test_finish_revert_migration(False)

    def _test_finish_revert_migration_after_crash(self, backup_made=True,
                                                  del_inst_failed=False):
        """Revert after a crash: the '<path>_resize' backup (if made) is
        moved back over the instance dir; an ENOENT from rmtree must not
        break the revert.
        """
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self

            def wait(self):
                return None
        context = 'fake_context'

        instance = self._create_instance()
        self.mox.StubOutWithMock(imagebackend.Backend, 'image')
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(shutil, 'rmtree')
        self.mox.StubOutWithMock(utils, 'execute')

        self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
        self.stubs.Set(self.drvr, '_get_guest_xml', lambda *a, **k: None)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       lambda *a, **kw: None)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())

        libvirt_utils.get_instance_path(instance).AndReturn('/fake/foo')
        os.path.exists('/fake/foo_resize').AndReturn(backup_made)

        if backup_made:
            if del_inst_failed:
                os_error = OSError(errno.ENOENT, 'No such file or directory')
                shutil.rmtree('/fake/foo').AndRaise(os_error)
            else:
                shutil.rmtree('/fake/foo')
            utils.execute('mv', '/fake/foo_resize', '/fake/foo')

        imagebackend.Backend.image(mox.IgnoreArg(), 'disk').AndReturn(
                fake_imagebackend.Raw())

        self.mox.ReplayAll()

        self.drvr.finish_revert_migration(context, instance, [])

    def test_finish_revert_migration_after_crash(self):
        self._test_finish_revert_migration_after_crash(backup_made=True)

    def test_finish_revert_migration_after_crash_before_new(self):
        self._test_finish_revert_migration_after_crash(backup_made=True)

    def test_finish_revert_migration_after_crash_before_backup(self):
        self._test_finish_revert_migration_after_crash(backup_made=False)

    def test_finish_revert_migration_after_crash_delete_failed(self):
        self._test_finish_revert_migration_after_crash(backup_made=True,
                                                       del_inst_failed=True)

    def test_finish_revert_migration_preserves_disk_bus(self):

        def fake_get_guest_xml(context, instance, network_info, disk_info,
                               image_meta, block_device_info=None):
            self.assertEqual('ide', disk_info['disk_bus'])

        image_meta = {"disk_format": "raw",
                      "properties": {"hw_disk_bus": "ide"}}
        instance = self._create_instance()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with test.nested(
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(utils, 'get_image_from_system_metadata',
                                  return_value=image_meta),
                mock.patch.object(drvr, '_get_guest_xml',
                                  side_effect=fake_get_guest_xml)):
            drvr.finish_revert_migration('', instance, None, power_on=False)

    def test_finish_revert_migration_snap_backend(self):
        # A snapshot-capable image backend rolls back to and then removes
        # the pre-resize snapshot.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend
        ins_ref = self._create_instance()

        with test.nested(
                mock.patch.object(utils, 'get_image_from_system_metadata'),
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(drvr, '_get_guest_xml')) as (
                mock_image, mock_cdn, mock_ggx):
            mock_image.return_value = {'disk_format': 'raw'}
            drvr.finish_revert_migration('', ins_ref, None, power_on=False)

            drvr.image_backend.rollback_to_snap.assert_called_once_with(
                    libvirt_utils.RESIZE_SNAPSHOT_NAME)
            drvr.image_backend.remove_snap.assert_called_once_with(
                    libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)

    def test_finish_revert_migration_snap_backend_snapshot_not_found(self):
        # A missing resize snapshot must not abort the revert; the
        # cleanup remove_snap still runs.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend
        ins_ref = self._create_instance()

        with test.nested(
                mock.patch.object(rbd_utils, 'RBDDriver'),
                mock.patch.object(utils, 'get_image_from_system_metadata'),
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(drvr, '_get_guest_xml')) as (
                mock_rbd, mock_image, mock_cdn, mock_ggx):
            mock_image.return_value = {'disk_format': 'raw'}
            mock_rbd.rollback_to_snap.side_effect = exception.SnapshotNotFound(
                    snapshot_id='testing')
            drvr.finish_revert_migration('', ins_ref, None, power_on=False)

            drvr.image_backend.remove_snap.assert_called_once_with(
                    libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)

    def test_cleanup_failed_migration(self):
        self.mox.StubOutWithMock(shutil, 'rmtree')
        shutil.rmtree('/fake/inst')
        self.mox.ReplayAll()
        self.drvr._cleanup_failed_migration('/fake/inst')

    def test_confirm_migration(self):
        ins_ref = self._create_instance()

        self.mox.StubOutWithMock(self.drvr, "_cleanup_resize")
        self.drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1))

        self.mox.ReplayAll()
        self.drvr.confirm_migration("migration_ref", ins_ref,
                                    _fake_network_info(self, 1))

    def test_cleanup_resize_same_host(self):
        CONF.set_override('policy_dirs', [], group='oslo_policy')
        ins_ref = self._create_instance({'host': CONF.host})

        def fake_os_path_exists(path):
            return True

        self.stub_out('os.path.exists', fake_os_path_exists)
        self.mox.StubOutWithMock(imagebackend.Backend, 'image')
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(utils, 'execute')

        libvirt_utils.get_instance_path(ins_ref,
                forceold=True).AndReturn('/fake/inst')
        utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                      attempts=5)
        imagebackend.Backend.image(ins_ref, 'disk').AndReturn(
            fake_imagebackend.Raw())

        self.mox.ReplayAll()
        self.drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1))

    def test_cleanup_resize_not_same_host(self):
        # When the instance no longer lives on this host, cleanup also
        # undefines the domain, unplugs VIFs and unfilters the instance.
        CONF.set_override('policy_dirs', [], group='oslo_policy')
        host = 'not' + CONF.host
        ins_ref = self._create_instance({'host': host})

        def fake_os_path_exists(path):
            return True

        def fake_undefine_domain(instance):
            pass

        def fake_unplug_vifs(instance, network_info, ignore_errors=False):
            pass

        def fake_unfilter_instance(instance, network_info):
            pass

        self.stub_out('os.path.exists', fake_os_path_exists)
        self.stubs.Set(self.drvr, '_undefine_domain',
                       fake_undefine_domain)
        self.stubs.Set(self.drvr, 'unplug_vifs',
                       fake_unplug_vifs)
        self.stubs.Set(self.drvr.firewall_driver,
                       'unfilter_instance', fake_unfilter_instance)
        self.mox.StubOutWithMock(imagebackend.Backend, 'image')
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(utils, 'execute')

        libvirt_utils.get_instance_path(ins_ref,
                forceold=True).AndReturn('/fake/inst')
        utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                      attempts=5)
        imagebackend.Backend.image(ins_ref, 'disk').AndReturn(
            fake_imagebackend.Raw())

        self.mox.ReplayAll()
        self.drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1))

    def test_cleanup_resize_snap_backend(self):
        CONF.set_override('policy_dirs', [], group='oslo_policy')
        ins_ref = self._create_instance({'host': CONF.host})
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend

        with test.nested(
                mock.patch.object(os.path, 'exists'),
                mock.patch.object(libvirt_utils, 'get_instance_path'),
                mock.patch.object(utils, 'execute'),
                mock.patch.object(drvr.image_backend, 'remove_snap')) as (
                mock_exists, mock_get_path, mock_exec, mock_remove):
            mock_exists.return_value = True
            mock_get_path.return_value = '/fake/inst'

            drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1))
            mock_get_path.assert_called_once_with(ins_ref, forceold=True)
            mock_exec.assert_called_once_with('rm', '-rf',
                                              '/fake/inst_resize',
                                              delay_on_retry=True, attempts=5)
            mock_remove.assert_called_once_with(
                libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)

    def test_get_instance_disk_info_exception(self):
        # A libvirtError from XMLDesc surfaces as InstanceNotFound.
        instance = self._create_instance()

        class FakeExceptionDomain(FakeVirtDomain):
            def __init__(self):
                super(FakeExceptionDomain, self).__init__()

            def XMLDesc(self, flags):
                raise fakelibvirt.libvirtError("Libvirt error")

        def fake_get_domain(self, instance):
            return FakeExceptionDomain()

        self.stubs.Set(host.Host, 'get_domain',
                       fake_get_domain)
        self.assertRaises(exception.InstanceNotFound,
            self.drvr.get_instance_disk_info,
            instance)

    @mock.patch('os.path.exists')
    @mock.patch.object(lvm, 'list_volumes')
    def test_lvm_disks(self, listlvs, exists):
        # Only LVs prefixed with this instance's uuid are reported.
        instance = objects.Instance(uuid='fake-uuid', id=1)
        self.flags(images_volume_group='vols', group='libvirt')
        exists.return_value = True
        listlvs.return_value = ['fake-uuid_foo',
                                'other-uuid_foo']
        disks = self.drvr._lvm_disks(instance)
        self.assertEqual(['/dev/vols/fake-uuid_foo'], disks)

    def test_is_booted_from_volume(self):
        func = libvirt_driver.LibvirtDriver._is_booted_from_volume
        instance, disk_mapping = {}, {}

        self.assertTrue(func(instance, disk_mapping))
        disk_mapping['disk'] = 'map'
        self.assertTrue(func(instance, disk_mapping))

        instance['image_ref'] = 'uuid'
        self.assertFalse(func(instance, disk_mapping))

    @mock.patch('nova.virt.netutils.get_injected_network_template')
    @mock.patch('nova.virt.disk.api.inject_data')
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_conn")
    def _test_inject_data(self, driver_params, path, disk_params,
                          mock_conn, disk_inject_data, inj_network,
                          called=True):
        """Drive _inject_data and check disk.api.inject_data usage.

        :param driver_params: kwargs passed to _inject_data.
        :param path: backing image path for the fake image backend.
        :param disk_params: positional args expected on inject_data
                            (key, net, metadata, admin_pass, files).
        :param called: whether inject_data is expected to be invoked.
        """
        class ImageBackend(object):
            path = '/path'

            def check_image_exists(self):
                if self.path == '/fail/path':
                    return False
                return True

            def get_model(self, connection):
                return imgmodel.LocalFileImage(self.path,
                                               imgmodel.FORMAT_RAW)

        def fake_inj_network(*args, **kwds):
            return args[0] or None
        inj_network.side_effect = fake_inj_network

        image_backend = ImageBackend()
        image_backend.path = path

        with mock.patch.object(
                self.drvr.image_backend, 'image',
                return_value=image_backend):
            self.flags(inject_partition=0, group='libvirt')

            self.drvr._inject_data(**driver_params)

            if called:
                disk_inject_data.assert_called_once_with(
                    mock.ANY, *disk_params,
                    partition=None, mandatory=('files',))

            self.assertEqual(disk_inject_data.called, called)

    def _test_inject_data_default_driver_params(self, **params):
        return {
            'instance': self._create_instance(params=params),
            'network_info': None,
            'admin_pass': None,
            'files': None,
            'suffix': ''
        }

    def test_inject_data_adminpass(self):
        self.flags(inject_password=True, group='libvirt')
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['admin_pass'] = 'foobar'
        disk_params = [
            None,  # key
            None,  # net
            {},  # metadata
            'foobar',  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

        # Test with the configuration set to false.
        self.flags(inject_password=False, group='libvirt')
        self._test_inject_data(driver_params, "/path", disk_params,
                               called=False)

    def test_inject_data_key(self):
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['instance']['key_data'] = 'key-content'

        self.flags(inject_key=True, group='libvirt')
        disk_params = [
            'key-content',  # key
            None,  # net
            {},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

        # Test with the configuration set to false.
        self.flags(inject_key=False, group='libvirt')
        self._test_inject_data(driver_params, "/path", disk_params,
                               called=False)

    def test_inject_data_metadata(self):
        instance_metadata = {'metadata': {'data': 'foo'}}
        driver_params = self._test_inject_data_default_driver_params(
            **instance_metadata
        )
        disk_params = [
            None,  # key
            None,  # net
            {'data': 'foo'},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

    def test_inject_data_files(self):
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['files'] = ['file1', 'file2']
        disk_params = [
            None,  # key
            None,  # net
            {},  # metadata
            None,  # admin_pass
            ['file1', 'file2'],  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

    def test_inject_data_net(self):
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['network_info'] = {'net': 'eno1'}
        disk_params = [
            None,  # key
            {'net': 'eno1'},  # net
            {},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)
def test_inject_not_exist_image(self): driver_params = self._test_inject_data_default_driver_params() disk_params = [ 'key-content', # key None, # net None, # metadata None, # admin_pass None, # files ] self._test_inject_data(driver_params, "/fail/path", disk_params, called=False) def _test_attach_detach_interface(self, method, power_state, expected_flags): instance = self._create_instance() network_info = _fake_network_info(self, 1) domain = FakeVirtDomain() self.mox.StubOutWithMock(host.Host, 'get_domain') self.mox.StubOutWithMock(self.drvr.firewall_driver, 'setup_basic_filtering') self.mox.StubOutWithMock(domain, 'attachDeviceFlags') self.mox.StubOutWithMock(domain, 'info') host.Host.get_domain(instance).AndReturn(domain) if method == 'attach_interface': self.drvr.firewall_driver.setup_basic_filtering( instance, [network_info[0]]) fake_image_meta = objects.ImageMeta.from_dict( {'id': instance.image_ref}) expected = self.drvr.vif_driver.get_config( instance, network_info[0], fake_image_meta, instance.flavor, CONF.libvirt.virt_type, self.drvr._host) self.mox.StubOutWithMock(self.drvr.vif_driver, 'get_config') self.drvr.vif_driver.get_config( instance, network_info[0], mox.IsA(objects.ImageMeta), mox.IsA(objects.Flavor), CONF.libvirt.virt_type, self.drvr._host).AndReturn(expected) domain.info().AndReturn([power_state, 1, 2, 3, 4]) if method == 'attach_interface': domain.attachDeviceFlags(expected.to_xml(), flags=expected_flags) elif method == 'detach_interface': domain.detachDeviceFlags(expected.to_xml(), expected_flags) self.mox.ReplayAll() if method == 'attach_interface': self.drvr.attach_interface( instance, fake_image_meta, network_info[0]) elif method == 'detach_interface': self.drvr.detach_interface( instance, network_info[0]) self.mox.VerifyAll() def test_attach_interface_with_running_instance(self): self._test_attach_detach_interface( 'attach_interface', power_state.RUNNING, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | 
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_attach_interface_with_pause_instance(self): self._test_attach_detach_interface( 'attach_interface', power_state.PAUSED, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_attach_interface_with_shutdown_instance(self): self._test_attach_detach_interface( 'attach_interface', power_state.SHUTDOWN, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)) def test_detach_interface_with_running_instance(self): self._test_attach_detach_interface( 'detach_interface', power_state.RUNNING, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_detach_interface_with_pause_instance(self): self._test_attach_detach_interface( 'detach_interface', power_state.PAUSED, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_detach_interface_with_shutdown_instance(self): self._test_attach_detach_interface( 'detach_interface', power_state.SHUTDOWN, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)) @mock.patch('nova.virt.libvirt.driver.LOG') def test_detach_interface_device_not_found(self, mock_log): # Asserts that we don't log an error when the interface device is not # found on the guest after a libvirt error during detach. 
instance = self._create_instance() vif = _fake_network_info(self, 1)[0] guest = mock.Mock(spec='nova.virt.libvirt.guest.Guest') guest.get_power_state = mock.Mock() self.drvr._host.get_guest = mock.Mock(return_value=guest) self.drvr.vif_driver = mock.Mock() error = fakelibvirt.libvirtError( 'no matching network device was found') error.err = (fakelibvirt.VIR_ERR_OPERATION_FAILED,) guest.detach_device = mock.Mock(side_effect=error) # mock out that get_interface_by_mac doesn't find the interface guest.get_interface_by_mac = mock.Mock(return_value=None) self.drvr.detach_interface(instance, vif) guest.get_interface_by_mac.assert_called_once_with(vif['address']) # an error shouldn't be logged, but a warning should be logged self.assertFalse(mock_log.error.called) self.assertEqual(1, mock_log.warning.call_count) self.assertIn('the device is no longer found on the guest', six.text_type(mock_log.warning.call_args[0])) def test_rescue(self): instance = self._create_instance({'config_drive': None}) dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "") network_info = _fake_network_info(self, 1) self.mox.StubOutWithMock(self.drvr, '_get_existing_domain_xml') self.mox.StubOutWithMock(libvirt_utils, 'write_to_file') self.mox.StubOutWithMock(imagebackend.Backend, 'image') self.mox.StubOutWithMock(imagebackend.Image, 'cache') self.mox.StubOutWithMock(self.drvr, '_get_guest_xml') self.mox.StubOutWithMock(self.drvr, '_destroy') self.mox.StubOutWithMock(self.drvr, '_create_domain') self.drvr._get_existing_domain_xml(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml) libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) imagebackend.Backend.image(instance, 'kernel.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'disk.rescue', 'default' 
).AndReturn(fake_imagebackend.Raw()) imagebackend.Image.cache(context=mox.IgnoreArg(), fetch_func=mox.IgnoreArg(), filename=mox.IgnoreArg(), image_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(), user_id=mox.IgnoreArg()).MultipleTimes() imagebackend.Image.cache(context=mox.IgnoreArg(), fetch_func=mox.IgnoreArg(), filename=mox.IgnoreArg(), image_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(), size=None, user_id=mox.IgnoreArg()) image_meta = objects.ImageMeta.from_dict( {'id': 'fake', 'name': 'fake'}) self.drvr._get_guest_xml(mox.IgnoreArg(), instance, network_info, mox.IgnoreArg(), image_meta, rescue=mox.IgnoreArg(), write_to_disk=mox.IgnoreArg() ).AndReturn(dummyxml) self.drvr._destroy(instance) self.drvr._create_domain(mox.IgnoreArg()) self.mox.ReplayAll() rescue_password = 'fake_password' self.drvr.rescue(self.context, instance, network_info, image_meta, rescue_password) self.mox.VerifyAll() @mock.patch.object(libvirt_utils, 'get_instance_path') @mock.patch.object(libvirt_utils, 'load_file') @mock.patch.object(host.Host, "get_domain") def test_unrescue(self, mock_get_domain, mock_load_file, mock_get_instance_path): dummyxml = ("instance-0000000a" "" "" "" "" "") mock_get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake=uuid', id=1) fake_dom = FakeVirtDomain(fake_xml=dummyxml) mock_get_domain.return_value = fake_dom mock_load_file.return_value = "fake_unrescue_xml" unrescue_xml_path = os.path.join('/path', 'unrescue.xml') rescue_file = os.path.join('/path', 'rescue.file') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with test.nested( mock.patch.object(drvr, '_destroy'), mock.patch.object(drvr, '_create_domain'), mock.patch.object(libvirt_utils, 'file_delete'), mock.patch.object(drvr, '_lvm_disks', return_value=['lvm.rescue']), mock.patch.object(lvm, 'remove_volumes'), mock.patch.object(glob, 'iglob', return_value=[rescue_file]) ) as (mock_destroy, mock_create, mock_del, mock_lvm_disks, mock_remove_volumes, mock_glob): 
drvr.unrescue(instance, None) mock_destroy.assert_called_once_with(instance) mock_create.assert_called_once_with("fake_unrescue_xml", fake_dom) self.assertEqual(2, mock_del.call_count) self.assertEqual(unrescue_xml_path, mock_del.call_args_list[0][0][0]) self.assertEqual(rescue_file, mock_del.call_args_list[1][0][0]) mock_remove_volumes.assert_called_once_with(['lvm.rescue']) @mock.patch( 'nova.virt.configdrive.ConfigDriveBuilder.add_instance_metadata') @mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive') def test_rescue_config_drive(self, mock_make, mock_add): instance = self._create_instance() uuid = instance.uuid configdrive_path = uuid + '/disk.config.rescue' dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "") network_info = _fake_network_info(self, 1) self.mox.StubOutWithMock(self.drvr, '_get_existing_domain_xml') self.mox.StubOutWithMock(libvirt_utils, 'write_to_file') self.mox.StubOutWithMock(imagebackend.Backend, 'image') self.mox.StubOutWithMock(imagebackend.Image, 'cache') self.mox.StubOutWithMock(instance_metadata.InstanceMetadata, '__init__') self.mox.StubOutWithMock(self.drvr, '_get_guest_xml') self.mox.StubOutWithMock(self.drvr, '_destroy') self.mox.StubOutWithMock(self.drvr, '_create_domain') self.drvr._get_existing_domain_xml(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml) libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) imagebackend.Backend.image(instance, 'kernel.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'disk.rescue', 'default' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'disk.config.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Image.cache(context=mox.IgnoreArg(), fetch_func=mox.IgnoreArg(), filename=mox.IgnoreArg(), 
image_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(), user_id=mox.IgnoreArg()).MultipleTimes() imagebackend.Image.cache(context=mox.IgnoreArg(), fetch_func=mox.IgnoreArg(), filename=mox.IgnoreArg(), image_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(), size=None, user_id=mox.IgnoreArg()) instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(), content=mox.IgnoreArg(), extra_md=mox.IgnoreArg(), network_info=mox.IgnoreArg()) image_meta = objects.ImageMeta.from_dict( {'id': 'fake', 'name': 'fake'}) self.drvr._get_guest_xml(mox.IgnoreArg(), instance, network_info, mox.IgnoreArg(), image_meta, rescue=mox.IgnoreArg(), write_to_disk=mox.IgnoreArg() ).AndReturn(dummyxml) self.drvr._destroy(instance) self.drvr._create_domain(mox.IgnoreArg()) self.mox.ReplayAll() rescue_password = 'fake_password' self.drvr.rescue(self.context, instance, network_info, image_meta, rescue_password) self.mox.VerifyAll() mock_add.assert_any_call(mock.ANY) expected_call = [mock.call(os.path.join(CONF.instances_path, configdrive_path))] mock_make.assert_has_calls(expected_call) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) exe.assert_called_with('mv', '/path', '/path_del') shutil.assert_called_with('/path_del') self.assertTrue(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('os.kill') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_kill_running( self, get_instance_path, kill, exists, exe, shutil): get_instance_path.return_value = '/path' instance = 
objects.Instance(uuid='fake-uuid', id=1) self.drvr.job_tracker.jobs[instance.uuid] = [3, 4] exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) exe.assert_called_with('mv', '/path', '/path_del') kill.assert_has_calls([mock.call(3, signal.SIGKILL), mock.call(3, 0), mock.call(4, signal.SIGKILL), mock.call(4, 0)]) shutil.assert_called_with('/path_del') self.assertTrue(result) self.assertNotIn(instance.uuid, self.drvr.job_tracker.jobs) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_resize(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = [Exception(), None] exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] self.assertEqual(expected, exe.mock_calls) shutil.assert_called_with('/path_del') self.assertTrue(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_failed(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) exists.side_effect = [False, False, True, True] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) exe.assert_called_with('mv', '/path', '/path_del') shutil.assert_called_with('/path_del') self.assertFalse(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') 
@mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_mv_failed(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = Exception() exists.side_effect = [True, True] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] * 2 self.assertEqual(expected, exe.mock_calls) self.assertFalse(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_resume(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = Exception() exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] * 2 self.assertEqual(expected, exe.mock_calls) self.assertTrue(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_none(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = Exception() exists.side_effect = [False, False, False, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] * 2 self.assertEqual(expected, exe.mock_calls) self.assertEqual(0, len(shutil.mock_calls)) 
self.assertTrue(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_concurrent(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = [Exception(), Exception(), None] exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] expected.append(expected[0]) self.assertEqual(expected, exe.mock_calls) shutil.assert_called_with('/path_del') self.assertTrue(result) def _assert_on_id_map(self, idmap, klass, start, target, count): self.assertIsInstance(idmap, klass) self.assertEqual(start, idmap.start) self.assertEqual(target, idmap.target) self.assertEqual(count, idmap.count) def test_get_id_maps(self): self.flags(virt_type="lxc", group="libvirt") CONF.libvirt.virt_type = "lxc" CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"] CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"] idmaps = self.drvr._get_guest_idmaps() self.assertEqual(len(idmaps), 4) self._assert_on_id_map(idmaps[0], vconfig.LibvirtConfigGuestUIDMap, 0, 10000, 1) self._assert_on_id_map(idmaps[1], vconfig.LibvirtConfigGuestUIDMap, 1, 20000, 10) self._assert_on_id_map(idmaps[2], vconfig.LibvirtConfigGuestGIDMap, 0, 10000, 1) self._assert_on_id_map(idmaps[3], vconfig.LibvirtConfigGuestGIDMap, 1, 20000, 10) def test_get_id_maps_not_lxc(self): CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"] CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"] idmaps = self.drvr._get_guest_idmaps() self.assertEqual(0, len(idmaps)) def test_get_id_maps_only_uid(self): self.flags(virt_type="lxc", group="libvirt") CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"] CONF.libvirt.gid_maps = [] 
idmaps = self.drvr._get_guest_idmaps() self.assertEqual(2, len(idmaps)) self._assert_on_id_map(idmaps[0], vconfig.LibvirtConfigGuestUIDMap, 0, 10000, 1) self._assert_on_id_map(idmaps[1], vconfig.LibvirtConfigGuestUIDMap, 1, 20000, 10) def test_get_id_maps_only_gid(self): self.flags(virt_type="lxc", group="libvirt") CONF.libvirt.uid_maps = [] CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"] idmaps = self.drvr._get_guest_idmaps() self.assertEqual(2, len(idmaps)) self._assert_on_id_map(idmaps[0], vconfig.LibvirtConfigGuestGIDMap, 0, 10000, 1) self._assert_on_id_map(idmaps[1], vconfig.LibvirtConfigGuestGIDMap, 1, 20000, 10) def test_instance_on_disk(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(uuid='fake-uuid', id=1) self.assertFalse(drvr.instance_on_disk(instance)) def test_instance_on_disk_rbd(self): self.flags(images_type='rbd', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(uuid='fake-uuid', id=1) self.assertTrue(drvr.instance_on_disk(instance)) def test_get_disk_xml(self): dom_xml = """ 0e38683e-f0af-418f-a3f1-6b67ea0f919d """ diska_xml = """ 0e38683e-f0af-418f-a3f1-6b67ea0f919d """ diskb_xml = """ """ dom = mock.MagicMock() dom.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(dom) # NOTE(gcb): etree.tostring(node) returns an extra line with # some white spaces, need to strip it. 
actual_diska_xml = guest.get_disk('vda').to_xml() self.assertEqual(diska_xml.strip(), actual_diska_xml.strip()) actual_diskb_xml = guest.get_disk('vdb').to_xml() self.assertEqual(diskb_xml.strip(), actual_diskb_xml.strip()) self.assertIsNone(guest.get_disk('vdc')) def test_vcpu_model_from_config(self): drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) vcpu_model = drv._cpu_config_to_vcpu_model(None, None) self.assertIsNone(vcpu_model) cpu = vconfig.LibvirtConfigGuestCPU() feature1 = vconfig.LibvirtConfigGuestCPUFeature() feature2 = vconfig.LibvirtConfigGuestCPUFeature() feature1.name = 'sse' feature1.policy = cpumodel.POLICY_REQUIRE feature2.name = 'aes' feature2.policy = cpumodel.POLICY_REQUIRE cpu.features = set([feature1, feature2]) cpu.mode = cpumodel.MODE_CUSTOM cpu.sockets = 1 cpu.cores = 2 cpu.threads = 4 vcpu_model = drv._cpu_config_to_vcpu_model(cpu, None) self.assertEqual(cpumodel.MATCH_EXACT, vcpu_model.match) self.assertEqual(cpumodel.MODE_CUSTOM, vcpu_model.mode) self.assertEqual(4, vcpu_model.topology.threads) self.assertEqual(set(['sse', 'aes']), set([f.name for f in vcpu_model.features])) cpu.mode = cpumodel.MODE_HOST_MODEL vcpu_model_1 = drv._cpu_config_to_vcpu_model(cpu, vcpu_model) self.assertEqual(cpumodel.MODE_HOST_MODEL, vcpu_model.mode) self.assertEqual(vcpu_model, vcpu_model_1) @mock.patch.object(lvm, 'get_volume_size', return_value=10) @mock.patch.object(host.Host, "get_guest") @mock.patch.object(dmcrypt, 'delete_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.unfilter_instance') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') @mock.patch.object(objects.Instance, 'save') def test_cleanup_lvm_encrypted(self, mock_save, mock_undefine_domain, mock_unfilter, mock_delete_volume, mock_get_guest, mock_get_size): drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance = objects.Instance(uuid='fake-uuid', id=1, ephemeral_key_uuid='000-000-000') instance.system_metadata = {} 
block_device_info = {'root_device_name': '/dev/vda', 'ephemerals': [], 'block_device_mapping': []} self.flags(images_type="lvm", group='libvirt') dom_xml = """ """ dom = mock.MagicMock() dom.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(dom) mock_get_guest.return_value = guest drv.cleanup(self.context, instance, 'fake_network', destroy_vifs=False, block_device_info=block_device_info) mock_delete_volume.assert_called_once_with('/dev/mapper/fake-dmcrypt') @mock.patch.object(lvm, 'get_volume_size', return_value=10) @mock.patch.object(host.Host, "get_guest") @mock.patch.object(dmcrypt, 'delete_volume') def _test_cleanup_lvm(self, mock_delete_volume, mock_get_guest, mock_size, encrypted=False): drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance = objects.Instance(uuid='fake-uuid', id=1, ephemeral_key_uuid='000-000-000') block_device_info = {'root_device_name': '/dev/vda', 'ephemerals': [], 'block_device_mapping': []} dev_name = 'fake-dmcrypt' if encrypted else 'fake' dom_xml = """ """ % dev_name dom = mock.MagicMock() dom.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(dom) mock_get_guest.return_value = guest drv._cleanup_lvm(instance, block_device_info) if encrypted: mock_delete_volume.assert_called_once_with( '/dev/mapper/fake-dmcrypt') else: self.assertFalse(mock_delete_volume.called) def test_cleanup_lvm(self): self._test_cleanup_lvm() def test_cleanup_encrypted_lvm(self): self._test_cleanup_lvm(encrypted=True) def test_vcpu_model_to_config(self): drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) feature = objects.VirtCPUFeature(policy=cpumodel.POLICY_REQUIRE, name='sse') feature_1 = objects.VirtCPUFeature(policy=cpumodel.POLICY_FORBID, name='aes') topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4) vcpu_model = objects.VirtCPUModel(mode=cpumodel.MODE_HOST_MODEL, features=[feature, feature_1], topology=topo) cpu = drv._vcpu_model_to_cpu_config(vcpu_model) self.assertEqual(cpumodel.MODE_HOST_MODEL, 
cpu.mode) self.assertEqual(1, cpu.sockets) self.assertEqual(4, cpu.threads) self.assertEqual(2, len(cpu.features)) self.assertEqual(set(['sse', 'aes']), set([f.name for f in cpu.features])) self.assertEqual(set([cpumodel.POLICY_REQUIRE, cpumodel.POLICY_FORBID]), set([f.policy for f in cpu.features])) def test_trigger_crash_dump(self): mock_guest = mock.Mock(libvirt_guest.Guest, id=1) instance = objects.Instance(uuid='fake-uuid', id=1) with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): self.drvr.trigger_crash_dump(instance) def test_trigger_crash_dump_not_running(self): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'Requested operation is not valid: domain is not running', error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID) mock_guest = mock.Mock(libvirt_guest.Guest, id=1) mock_guest.inject_nmi = mock.Mock(side_effect=ex) instance = objects.Instance(uuid='fake-uuid', id=1) with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): self.assertRaises(exception.InstanceNotRunning, self.drvr.trigger_crash_dump, instance) def test_trigger_crash_dump_not_supported(self): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, '', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) mock_guest = mock.Mock(libvirt_guest.Guest, id=1) mock_guest.inject_nmi = mock.Mock(side_effect=ex) instance = objects.Instance(uuid='fake-uuid', id=1) with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): self.assertRaises(exception.TriggerCrashDumpNotSupported, self.drvr.trigger_crash_dump, instance) def test_trigger_crash_dump_unexpected_error(self): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'UnexpectedError', error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR) mock_guest = mock.Mock(libvirt_guest.Guest, id=1) mock_guest.inject_nmi = mock.Mock(side_effect=ex) instance = objects.Instance(uuid='fake-uuid', id=1) with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): 
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
    """Test for LibvirtDriver.get_all_volume_usage."""

    def setUp(self):
        super(LibvirtVolumeUsageTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()
        self.ins_ref = objects.Instance(
            id=1729,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
        )
        # One bootable volume (bare 'vda') so its device path variant is
        # exercised as well.
        self.bdms = [{'volume_id': 1, 'device_name': '/dev/vde'},
                     {'volume_id': 2, 'device_name': 'vda'}]

    def test_get_all_volume_usage(self):
        """Every attached volume gets the stats reported by block_stats."""
        def fake_block_stats(instance_name, disk):
            # (rd_req, rd_bytes, wr_req, wr_bytes, errs)
            return (169, 688640, 0, 0, -1)

        self.stubs.Set(self.drvr, 'block_stats', fake_block_stats)
        vol_usage = self.drvr.get_all_volume_usage(
            self.c, [dict(instance=self.ins_ref, instance_bdms=self.bdms)])

        stats = {'rd_req': 169, 'rd_bytes': 688640,
                 'wr_req': 0, 'wr_bytes': 0}
        expected_usage = [dict(volume=vol_id, instance=self.ins_ref, **stats)
                          for vol_id in (1, 2)]
        self.assertEqual(vol_usage, expected_usage)

    def test_get_all_volume_usage_device_not_found(self):
        """A vanished domain yields an empty usage list, not an error."""
        def fake_get_domain(self, instance):
            raise exception.InstanceNotFound(instance_id="fakedom")

        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        vol_usage = self.drvr.get_all_volume_usage(
            self.c, [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        self.assertEqual(vol_usage, [])
import nova.virt.libvirt.driver as libvirt_driver drvr = libvirt_driver.LibvirtDriver('') drvr.set_host_enabled = mock.Mock() jsonutils.to_primitive(drvr._conn, convert_instances=True) def test_tpool_execute_calls_libvirt(self): conn = fakelibvirt.virConnect() conn.is_expected = True self.mox.StubOutWithMock(eventlet.tpool, 'execute') eventlet.tpool.execute( fakelibvirt.openAuth, 'test:///default', mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(conn) eventlet.tpool.execute( conn.domainEventRegisterAny, None, fakelibvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, mox.IgnoreArg(), mox.IgnoreArg()) if hasattr(fakelibvirt.virConnect, 'registerCloseCallback'): eventlet.tpool.execute( conn.registerCloseCallback, mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) c = driver._get_connection() self.assertTrue(c.is_expected) class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase): """Tests for libvirtDriver.volume_snapshot_create/delete.""" def setUp(self): super(LibvirtVolumeSnapshotTestCase, self).setUp() self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.c = context.get_admin_context() self.flags(instance_name_template='instance-%s') self.flags(qemu_allowed_storage_drivers=[], group='libvirt') # creating instance self.inst = {} self.inst['uuid'] = uuidutils.generate_uuid() self.inst['id'] = '1' # create domain info self.dom_xml = """ 0e38683e-f0af-418f-a3f1-6b67ea0f919d """ # alternate domain info with network-backed snapshot chain self.dom_netdisk_xml = """ 0e38683e-f0af-418f-a3f1-6b67eaffffff 0e38683e-f0af-418f-a3f1-6b67ea0f919d """ # XML with netdisk attached, and 1 snapshot taken self.dom_netdisk_xml_2 = """ 0e38683e-f0af-418f-a3f1-6b67eaffffff 0e38683e-f0af-418f-a3f1-6b67ea0f919d """ self.create_info = {'type': 'qcow2', 'snapshot_id': '1234-5678', 'new_file': 'new-file'} self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d' self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162' 
self.delete_info_1 = {'type': 'qcow2', 'file_to_merge': 'snap.img', 'merge_target_file': None} self.delete_info_2 = {'type': 'qcow2', 'file_to_merge': 'snap.img', 'merge_target_file': 'other-snap.img'} self.delete_info_3 = {'type': 'qcow2', 'file_to_merge': None, 'merge_target_file': None} self.delete_info_netdisk = {'type': 'qcow2', 'file_to_merge': 'snap.img', 'merge_target_file': 'root.img'} self.delete_info_invalid_type = {'type': 'made_up_type', 'file_to_merge': 'some_file', 'merge_target_file': 'some_other_file'} def tearDown(self): super(LibvirtVolumeSnapshotTestCase, self).tearDown() @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.' 'refresh_connection_info') @mock.patch('nova.objects.block_device.BlockDeviceMapping.' 'get_by_volume_and_instance') def test_volume_refresh_connection_info(self, mock_get_by_volume_and_instance, mock_refresh_connection_info): instance = objects.Instance(**self.inst) fake_bdm = fake_block_device.FakeDbBlockDeviceDict({ 'id': 123, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'connection_info': '{"fake": "connection_info"}'}) fake_bdm = objects.BlockDeviceMapping(self.c, **fake_bdm) mock_get_by_volume_and_instance.return_value = fake_bdm self.drvr._volume_refresh_connection_info(self.c, instance, self.volume_uuid) mock_get_by_volume_and_instance.assert_called_once_with( self.c, self.volume_uuid, instance.uuid) mock_refresh_connection_info.assert_called_once_with(self.c, instance, self.drvr._volume_api, self.drvr) def test_volume_snapshot_create(self, quiesce=True): """Test snapshot creation with file-based disk.""" self.flags(instance_name_template='instance-%s') self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') instance = objects.Instance(**self.inst) new_file = 'new-file' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 
'XMLDesc') self.mox.StubOutWithMock(domain, 'snapshotCreateXML') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) snap_xml_src = ( '\n' ' \n' ' \n' ' \n' ' \n' ' \n' ' \n' '\n') # Older versions of libvirt may be missing these. fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) snap_flags_q = (snap_flags | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) if quiesce: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q) else: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q).\ AndRaise(fakelibvirt.libvirtError( 'quiescing failed, no qemu-ga')) domain.snapshotCreateXML(snap_xml_src, flags=snap_flags) self.mox.ReplayAll() guest = libvirt_guest.Guest(domain) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, new_file) self.mox.VerifyAll() def test_volume_snapshot_create_libgfapi(self, quiesce=True): """Test snapshot creation with libgfapi network disk.""" self.flags(instance_name_template = 'instance-%s') self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt') self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.dom_xml = """ 0e38683e-f0af-418f-a3f1-6b67ea0f919d """ instance = objects.Instance(**self.inst) new_file = 'new-file' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') self.mox.StubOutWithMock(domain, 'snapshotCreateXML') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) snap_xml_src = ( '\n' ' \n' ' \n' ' \n' ' \n' ' \n' ' \n' '\n') # Older versions of libvirt may be missing these. 
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) snap_flags_q = (snap_flags | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) if quiesce: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q) else: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q).\ AndRaise(fakelibvirt.libvirtError( 'quiescing failed, no qemu-ga')) domain.snapshotCreateXML(snap_xml_src, flags=snap_flags) self.mox.ReplayAll() guest = libvirt_guest.Guest(domain) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, new_file) self.mox.VerifyAll() def test_volume_snapshot_create_noquiesce(self): self.test_volume_snapshot_create(quiesce=False) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_can_quiesce(self, ver): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.inst) image_meta = objects.ImageMeta.from_dict( {"properties": { "hw_qemu_guest_agent": "yes"}}) self.assertIsNone(self.drvr._can_quiesce(instance, image_meta)) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_can_quiesce_bad_hyp(self, ver): self.flags(virt_type='xxx', group='libvirt') instance = objects.Instance(**self.inst) image_meta = objects.ImageMeta.from_dict( {"properties": { "hw_qemu_guest_agent": "yes"}}) self.assertRaises(exception.InstanceQuiesceNotSupported, self.drvr._can_quiesce, instance, image_meta) @mock.patch.object(host.Host, 'has_min_version', return_value=False) def test_can_quiesce_bad_ver(self, ver): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.inst) image_meta = {"properties": { "hw_qemu_guest_agent": "yes"}} self.assertRaises(exception.InstanceQuiesceNotSupported, self.drvr._can_quiesce, instance, image_meta) @mock.patch.object(host.Host, 
'has_min_version', return_value=True) def test_can_quiesce_agent_not_enable(self, ver): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.inst) image_meta = objects.ImageMeta.from_dict({}) self.assertRaises(exception.QemuGuestAgentNotEnabled, self.drvr._can_quiesce, instance, image_meta) def test_volume_snapshot_create_outer_success(self): instance = objects.Instance(**self.inst) domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1) guest = libvirt_guest.Guest(domain) self.mox.StubOutWithMock(self.drvr._host, 'get_guest') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create') self.drvr._host.get_guest(instance).AndReturn(guest) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, self.create_info['new_file']) self.drvr._volume_api.update_snapshot_status( self.c, self.create_info['snapshot_id'], 'creating') self.mox.StubOutWithMock(self.drvr._volume_api, 'get_snapshot') self.drvr._volume_api.get_snapshot(self.c, self.create_info['snapshot_id']).AndReturn({'status': 'available'}) self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info') self.drvr._volume_refresh_connection_info(self.c, instance, self.volume_uuid) self.mox.ReplayAll() self.drvr.volume_snapshot_create(self.c, instance, self.volume_uuid, self.create_info) def test_volume_snapshot_create_outer_failure(self): instance = objects.Instance(**self.inst) domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1) guest = libvirt_guest.Guest(domain) self.mox.StubOutWithMock(self.drvr._host, 'get_guest') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create') self.drvr._host.get_guest(instance).AndReturn(guest) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, self.create_info['new_file']).\ AndRaise(exception.NovaException('oops')) self.drvr._volume_api.update_snapshot_status( self.c, 
self.create_info['snapshot_id'], 'error') self.mox.ReplayAll() self.assertRaises(exception.NovaException, self.drvr.volume_snapshot_create, self.c, instance, self.volume_uuid, self.create_info) def test_volume_snapshot_delete_1(self): """Deleting newest snapshot -- blockRebase.""" # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockRebase('vda', 'snap.img', 0, flags=0) domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8}) def test_volume_snapshot_delete_relative_1(self): """Deleting newest snapshot -- blockRebase using relative flag""" self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) guest = libvirt_guest.Guest(domain) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_guest') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') 
self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_guest(instance).AndReturn(guest) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockRebase('vda', 'snap.img', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE) domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def _setup_block_rebase_domain_and_guest_mocks(self, dom_xml): mock_domain = mock.Mock(spec=fakelibvirt.virDomain) mock_domain.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(mock_domain) exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'virDomainBlockRebase() failed', error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID) mock_domain.blockRebase.side_effect = exc return mock_domain, guest @mock.patch.object(host.Host, "has_min_version", mock.Mock(return_value=True)) @mock.patch("nova.virt.libvirt.guest.Guest.is_active", mock.Mock(return_value=False)) @mock.patch('nova.virt.images.qemu_img_info', return_value=mock.Mock(file_format="fake_fmt")) @mock.patch('nova.utils.execute') def test_volume_snapshot_delete_when_dom_not_running(self, mock_execute, mock_qemu_img_info): """Deleting newest snapshot of a file-based image when the domain is not running should trigger a blockRebase using qemu-img not libvirt. In this test, we rebase the image with another image as backing file. 
""" mock_domain, guest = self._setup_block_rebase_domain_and_guest_mocks( self.dom_xml) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' with mock.patch.object(self.drvr._host, 'get_guest', return_value=guest): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) mock_qemu_img_info.assert_called_once_with("snap.img") mock_execute.assert_called_once_with('qemu-img', 'rebase', '-b', 'snap.img', '-F', 'fake_fmt', 'disk1_file') @mock.patch.object(host.Host, "has_min_version", mock.Mock(return_value=True)) @mock.patch("nova.virt.libvirt.guest.Guest.is_active", mock.Mock(return_value=False)) @mock.patch('nova.virt.images.qemu_img_info', return_value=mock.Mock(file_format="fake_fmt")) @mock.patch('nova.utils.execute') def test_volume_snapshot_delete_when_dom_not_running_and_no_rebase_base( self, mock_execute, mock_qemu_img_info): """Deleting newest snapshot of a file-based image when the domain is not running should trigger a blockRebase using qemu-img not libvirt. In this test, the image is rebased onto no backing file (i.e. it will exist independently of any backing file) """ mock_domain, mock_guest = ( self._setup_block_rebase_domain_and_guest_mocks(self.dom_xml)) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_3) self.assertEqual(0, mock_qemu_img_info.call_count) mock_execute.assert_called_once_with('qemu-img', 'rebase', '-b', '', 'disk1_file') @mock.patch.object(host.Host, "has_min_version", mock.Mock(return_value=True)) @mock.patch("nova.virt.libvirt.guest.Guest.is_active", mock.Mock(return_value=False)) def test_volume_snapshot_delete_when_dom_with_nw_disk_not_running(self): """Deleting newest snapshot of a network disk when the domain is not running should raise a NovaException. 
""" mock_domain, mock_guest = ( self._setup_block_rebase_domain_and_guest_mocks( self.dom_netdisk_xml)) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): ex = self.assertRaises(exception.NovaException, self.drvr._volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.assertIn('has not been fully tested', six.text_type(ex)) def test_volume_snapshot_delete_2(self): """Deleting older snapshot -- blockCommit.""" # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) self.mox.ReplayAll() self.assertRaises(exception.Invalid, self.drvr._volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_2) fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4}) def test_volume_snapshot_delete_relative_2(self): """Deleting older snapshot -- blockCommit using relative flag""" self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') 
self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE) domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn({}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_2) self.mox.VerifyAll() def test_volume_snapshot_delete_nonrelative_null_base(self): # Deleting newest and last snapshot of a volume # with blockRebase. So base of the new image will be null. instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) guest = libvirt_guest.Guest(domain) with test.nested( mock.patch.object(domain, 'XMLDesc', return_value=self.dom_xml), mock.patch.object(self.drvr._host, 'get_guest', return_value=guest), mock.patch.object(self.drvr._host, 'has_min_version', return_value=True), mock.patch.object(domain, 'blockRebase'), mock.patch.object(domain, 'blockJobInfo', return_value={'cur': 1000, 'end': 1000}) ) as (mock_xmldesc, mock_get_guest, mock_has_min_version, mock_rebase, mock_job_info): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_3) mock_xmldesc.assert_called_once_with(flags=0) mock_get_guest.assert_called_once_with(instance) mock_has_min_version.assert_called_once_with((1, 1, 1,)) mock_rebase.assert_called_once_with('vda', None, 0, flags=0) mock_job_info.assert_called_once_with('vda', flags=0) def test_volume_snapshot_delete_netdisk_nonrelative_null_base(self): # Deleting newest and last snapshot of a network attached volume # with blockRebase. 
So base of the new image will be null. instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_netdisk_xml_2) guest = libvirt_guest.Guest(domain) with test.nested( mock.patch.object(domain, 'XMLDesc', return_value=self.dom_netdisk_xml_2), mock.patch.object(self.drvr._host, 'get_guest', return_value=guest), mock.patch.object(self.drvr._host, 'has_min_version', return_value=True), mock.patch.object(domain, 'blockRebase'), mock.patch.object(domain, 'blockJobInfo', return_value={'cur': 1000, 'end': 1000}) ) as (mock_xmldesc, mock_get_guest, mock_has_min_version, mock_rebase, mock_job_info): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_3) mock_xmldesc.assert_called_once_with(flags=0) mock_get_guest.assert_called_once_with(instance) mock_has_min_version.assert_called_once_with((1, 1, 1,)) mock_rebase.assert_called_once_with('vdb', None, 0, flags=0) mock_job_info.assert_called_once_with('vdb', flags=0) def test_volume_snapshot_delete_outer_success(self): instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete') self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, delete_info=self.delete_info_1) self.drvr._volume_api.update_snapshot_status( self.c, snapshot_id, 'deleting') self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info') self.drvr._volume_refresh_connection_info(self.c, instance, self.volume_uuid) self.mox.ReplayAll() self.drvr.volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def test_volume_snapshot_delete_outer_failure(self): instance = objects.Instance(**self.inst) snapshot_id = '1234-9876' FakeVirtDomain(fake_xml=self.dom_xml) 
self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete') self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, delete_info=self.delete_info_1).\ AndRaise(exception.NovaException('oops')) self.drvr._volume_api.update_snapshot_status( self.c, snapshot_id, 'error_deleting') self.mox.ReplayAll() self.assertRaises(exception.NovaException, self.drvr.volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def test_volume_snapshot_delete_invalid_type(self): instance = objects.Instance(**self.inst) FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) self.drvr._volume_api.update_snapshot_status( self.c, self.snapshot_id, 'error_deleting') self.mox.ReplayAll() self.assertRaises(exception.NovaException, self.drvr.volume_snapshot_delete, self.c, instance, self.volume_uuid, self.snapshot_id, self.delete_info_invalid_type) def test_volume_snapshot_delete_netdisk_1(self): """Delete newest snapshot -- blockRebase for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 
'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockRebase('vdb', 'vdb[1]', 0, flags=0) domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vdb', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8}) def test_volume_snapshot_delete_netdisk_relative_1(self): """Delete newest snapshot -- blockRebase for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockRebase('vdb', 'vdb[1]', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE) domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vdb', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() 
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def test_volume_snapshot_delete_netdisk_2(self): """Delete older snapshot -- blockCommit for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) self.mox.ReplayAll() self.assertRaises(exception.Invalid, self.drvr._volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_netdisk) fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4}) def test_volume_snapshot_delete_netdisk_relative_2(self): """Delete older snapshot -- blockCommit for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 
'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE) domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vdb', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_netdisk) self.mox.VerifyAll() def _fake_convert_image(source, dest, in_format, out_format, run_as_root=True): libvirt_driver.libvirt_utils.files[dest] = '' class _BaseSnapshotTests(test.NoDBTestCase): def setUp(self): super(_BaseSnapshotTests, self).setUp() self.flags(snapshots_directory='./', group='libvirt') self.context = context.get_admin_context() self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) self.image_service = nova.tests.unit.image.fake.stub_out_image_service( self) self.mock_update_task_state = mock.Mock() test_instance = _create_test_instance() self.instance_ref = objects.Instance(**test_instance) self.instance_ref.info_cache = objects.InstanceInfoCache( network_info=None) def _assert_snapshot(self, snapshot, disk_format, expected_properties=None): self.mock_update_task_state.assert_has_calls([ mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD), mock.call(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD)]) props = snapshot['properties'] 
        # Glance-side metadata written by the snapshot must match what the
        # driver is expected to upload.
        self.assertEqual(props['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['disk_format'], disk_format)
        self.assertEqual(snapshot['name'], 'test-snap')

        if expected_properties:
            for expected_key, expected_value in \
                    six.iteritems(expected_properties):
                self.assertEqual(expected_value, props[expected_key])

    def _create_image(self, extra_properties=None):
        """Register a placeholder image with the fake image service.

        Returns the metadata of the created image; the snapshot code under
        test later updates this same image record.
        """
        properties = {'instance_id': self.instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        if extra_properties:
            properties.update(extra_properties)

        sent_meta = {'name': 'test-snap',
                     'is_public': False,
                     'status': 'creating',
                     'properties': properties}

        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = self.image_service.create(self.context, sent_meta)
        return recv_meta

    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'get_domain')
    def _snapshot(self, image_id, mock_get_domain, mock_resolve):
        """Run driver.snapshot against a fake domain and return the
        resulting image metadata from the fake image service.
        """
        mock_get_domain.return_value = FakeVirtDomain()
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        driver.snapshot(self.context, self.instance_ref, image_id,
                        self.mock_update_task_state)
        snapshot = self.image_service.show(self.context, image_id)
        return snapshot

    def _test_snapshot(self, disk_format, extra_properties=None):
        # End-to-end helper: create image, snapshot into it, verify result.
        recv_meta = self._create_image(extra_properties=extra_properties)
        snapshot = self._snapshot(recv_meta['id'])
        self._assert_snapshot(snapshot, disk_format=disk_format,
                              expected_properties=extra_properties)


class LibvirtSnapshotTests(_BaseSnapshotTests):
    """Snapshot tests for the default (qemu/kvm) libvirt virt type."""

    def test_ami(self):
        # Assign different image_ref from nova/images/fakes for testing ami
        self.instance_ref.image_ref = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        self.instance_ref.system_metadata = \
            utils.get_system_metadata_from_image(
                {'disk_format': 'ami'})
        self._test_snapshot(disk_format='ami')

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='raw')
    @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image',
                       side_effect=_fake_convert_image)
    def test_raw(self, mock_convert_image):
        self._test_snapshot(disk_format='raw')

    def test_qcow2(self):
        self._test_snapshot(disk_format='qcow2')

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='ploop')
    @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image',
                       side_effect=_fake_convert_image)
    def test_ploop(self, mock_convert_image):
        self._test_snapshot(disk_format='ploop')

    def test_no_image_architecture(self):
        self.instance_ref.image_ref = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        self._test_snapshot(disk_format='qcow2')

    def test_no_original_image(self):
        # Image ref that does not exist in the fake image service; snapshot
        # should still succeed without a source image record.
        self.instance_ref.image_ref = '661122aa-1234-dede-fefe-babababababa'
        self._test_snapshot(disk_format='qcow2')

    def test_snapshot_metadata_image(self):
        # Assign an image with an architecture defined (x86_64)
        self.instance_ref.image_ref = 'a440c04b-79fa-479c-bed1-0b816eaec379'

        extra_properties = {'architecture': 'fake_arch',
                            'key_a': 'value_a',
                            'key_b': 'value_b',
                            'os_type': 'linux'}

        self._test_snapshot(disk_format='qcow2',
                            extra_properties=extra_properties)

    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone(self, mock_rbd, mock_driver):
        # Happy path for the rbd direct-snapshot (clone + flatten) fast path.
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
        rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
        with mock.patch.object(fake_libvirt_utils, 'find_disk',
                               return_value=('rbd://some/fake/rbd/image',
                                             'raw')):
            with mock.patch.object(fake_libvirt_utils, 'disk_type',
                                   new='rbd'):
                self._test_snapshot(disk_format='raw')
        rbd.clone.assert_called_with(mock.ANY, mock.ANY,
                                     dest_pool='test-pool')
        rbd.flatten.assert_called_with(mock.ANY, pool='test-pool')

    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_graceful_fallback(self, mock_rbd,
                                                  mock_driver):
self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable( image_id='fake_id', reason='rbd testing')) with test.nested( mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image), mock.patch.object(fake_libvirt_utils, 'find_disk', return_value=('rbd://some/fake/rbd/image', 'raw')), mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')): self._test_snapshot(disk_format='raw') self.assertFalse(rbd.clone.called) @mock.patch.object(rbd_utils, 'RBDDriver') @mock.patch.object(rbd_utils, 'rbd') def test_raw_with_rbd_clone_eperm(self, mock_rbd, mock_driver): self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(return_value=['test-pool', '', '']) rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd']) rbd.clone = mock.Mock(side_effect=exception.Forbidden( image_id='fake_id', reason='rbd testing')) with test.nested( mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image), mock.patch.object(fake_libvirt_utils, 'find_disk', return_value=('rbd://some/fake/rbd/image', 'raw')), mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')): self._test_snapshot(disk_format='raw') # Ensure that the direct_snapshot attempt was cleaned up rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=False, pool='b', force=True) @mock.patch.object(rbd_utils, 'RBDDriver') @mock.patch.object(rbd_utils, 'rbd') def test_raw_with_rbd_clone_post_process_fails(self, mock_rbd, mock_driver): self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(return_value=['test-pool', '', '']) rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd']) with test.nested( mock.patch.object(fake_libvirt_utils, 'find_disk', return_value=('rbd://some/fake/rbd/image', 'raw')), mock.patch.object(fake_libvirt_utils, 
'disk_type', new='rbd'), mock.patch.object(self.image_service, 'update', side_effect=test.TestingException)): self.assertRaises(test.TestingException, self._test_snapshot, disk_format='raw') rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool') rbd.flatten.assert_called_with(mock.ANY, pool='test-pool') # Ensure that the direct_snapshot attempt was cleaned up rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=True, pool='b', force=True) class LXCSnapshotTests(LibvirtSnapshotTests): """Repeat all of the Libvirt snapshot tests, but with LXC enabled""" def setUp(self): super(LXCSnapshotTests, self).setUp() self.flags(virt_type='lxc', group='libvirt') class LVMSnapshotTests(_BaseSnapshotTests): @mock.patch.object(fake_libvirt_utils, 'disk_type', new='lvm') @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image) @mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info') def _test_lvm_snapshot(self, disk_format, mock_volume_info, mock_convert_image): self.flags(images_type='lvm', images_volume_group='nova-vg', group='libvirt') self._test_snapshot(disk_format=disk_format) mock_volume_info.assert_has_calls([mock.call('/dev/nova-vg/lv')]) mock_convert_image.assert_called_once_with( '/dev/nova-vg/lv', mock.ANY, 'raw', disk_format, run_as_root=True) def test_raw(self): self._test_lvm_snapshot('raw') def test_qcow2(self): self.flags(snapshot_image_format='qcow2', group='libvirt') self._test_lvm_snapshot('qcow2') nova-13.0.0/nova/tests/unit/virt/libvirt/test_imagecache.py0000664000567000056710000012313012701410011025064 0ustar jenkinsjenkins00000000000000# Copyright 2012 Michael Still and Canonical Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import hashlib import os import time import mock from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import formatters from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import importutils from six.moves import cStringIO from nova import conductor from nova import context from nova import objects from nova import test from nova.tests.unit import fake_instance from nova import utils from nova.virt.libvirt import imagecache from nova.virt.libvirt import utils as libvirt_utils CONF = cfg.CONF CONF.import_opt('compute_manager', 'nova.service') CONF.import_opt('host', 'nova.netconf') @contextlib.contextmanager def intercept_log_messages(): try: mylog = logging.getLogger('nova') stream = cStringIO() handler = logging.logging.StreamHandler(stream) handler.setFormatter(formatters.ContextFormatter()) mylog.logger.addHandler(handler) yield stream finally: mylog.logger.removeHandler(handler) class ImageCacheManagerTestCase(test.NoDBTestCase): def setUp(self): super(ImageCacheManagerTestCase, self).setUp() self.stock_instance_names = set(['instance-00000001', 'instance-00000002', 'instance-00000003', 'banana-42-hamster']) def test_read_stored_checksum_missing(self): self.stub_out('os.path.exists', lambda x: False) csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False) self.assertIsNone(csum) @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(time, 'time', return_value=2000000) @mock.patch.object(os.path, 
'getmtime', return_value=1000000) def test_get_age_of_file(self, mock_getmtime, mock_time, mock_exists): image_cache_manager = imagecache.ImageCacheManager() exists, age = image_cache_manager._get_age_of_file('/tmp') self.assertTrue(exists) self.assertEqual(1000000, age) @mock.patch.object(os.path, 'exists', return_value=False) def test_get_age_of_file_not_exists(self, mock_exists): image_cache_manager = imagecache.ImageCacheManager() exists, age = image_cache_manager._get_age_of_file('/tmp') self.assertFalse(exists) self.assertEqual(0, age) def test_read_stored_checksum(self): with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) self.flags(image_info_filename_pattern=('$instances_path/' '%(image)s.info'), group='libvirt') csum_input = '{"sha1": "fdghkfhkgjjksfdgjksjkghsdf"}\n' fname = os.path.join(tmpdir, 'aaa') info_fname = imagecache.get_info_filename(fname) f = open(info_fname, 'w') f.write(csum_input) f.close() csum_output = imagecache.read_stored_checksum(fname, timestamped=False) self.assertEqual(csum_input.rstrip(), '{"sha1": "%s"}' % csum_output) def test_read_stored_checksum_legacy_essex(self): with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) self.flags(image_info_filename_pattern=('$instances_path/' '%(image)s.info'), group='libvirt') fname = os.path.join(tmpdir, 'aaa') old_fname = fname + '.sha1' f = open(old_fname, 'w') f.write('fdghkfhkgjjksfdgjksjkghsdf') f.close() csum_output = imagecache.read_stored_checksum(fname, timestamped=False) self.assertEqual(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf') self.assertFalse(os.path.exists(old_fname)) info_fname = imagecache.get_info_filename(fname) self.assertTrue(os.path.exists(info_fname)) def test_list_base_images(self): listing = ['00000001', 'ephemeral_0_20_None', '17d1b00b81642842e514494a78e804e9a511637c_5368709120.info', '00000004', 'swap_1000'] images = ['e97222e91fc4241f49a7f520d1dcf446751129b3_sm', 'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm', 
'e97222e91fc4241f49a7f520d1dcf446751129b3', '17d1b00b81642842e514494a78e804e9a511637c', '17d1b00b81642842e514494a78e804e9a511637c_5368709120', '17d1b00b81642842e514494a78e804e9a511637c_10737418240'] listing.extend(images) self.stub_out('os.listdir', lambda x: listing) self.stub_out('os.path.isfile', lambda x: True) base_dir = '/var/lib/nova/instances/_base' self.flags(instances_path='/var/lib/nova/instances') image_cache_manager = imagecache.ImageCacheManager() image_cache_manager._list_base_images(base_dir) sanitized = [] for ent in image_cache_manager.unexplained_images: sanitized.append(ent.replace(base_dir + '/', '')) self.assertEqual(sorted(sanitized), sorted(images)) expected = os.path.join(base_dir, 'e97222e91fc4241f49a7f520d1dcf446751129b3') self.assertIn(expected, image_cache_manager.unexplained_images) expected = os.path.join(base_dir, '17d1b00b81642842e514494a78e804e9a511637c_' '10737418240') self.assertIn(expected, image_cache_manager.unexplained_images) unexpected = os.path.join(base_dir, '00000004') self.assertNotIn(unexpected, image_cache_manager.unexplained_images) for ent in image_cache_manager.unexplained_images: self.assertTrue(ent.startswith(base_dir)) self.assertEqual(len(image_cache_manager.originals), 2) expected = os.path.join(base_dir, '17d1b00b81642842e514494a78e804e9a511637c') self.assertIn(expected, image_cache_manager.originals) unexpected = os.path.join(base_dir, '17d1b00b81642842e514494a78e804e9a511637c_' '10737418240') self.assertNotIn(unexpected, image_cache_manager.originals) self.assertEqual(1, len(image_cache_manager.back_swap_images)) self.assertIn('swap_1000', image_cache_manager.back_swap_images) def test_list_backing_images_small(self): self.stub_out('os.listdir', lambda x: ['_base', 'instance-00000001', 'instance-00000002', 'instance-00000003']) self.stub_out('os.path.exists', lambda x: x.find('instance-') != -1) self.stubs.Set(libvirt_utils, 'get_disk_backing_file', lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm') 
        found = os.path.join(CONF.instances_path,
                             CONF.image_cache_subdirectory_name,
                             'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')

        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.unexplained_images = [found]
        image_cache_manager.instance_names = self.stock_instance_names

        inuse_images = image_cache_manager._list_backing_images()

        # The backing file is reported in use and no longer "unexplained".
        self.assertEqual(inuse_images, [found])
        self.assertEqual(len(image_cache_manager.unexplained_images), 0)

    def test_list_backing_images_resized(self):
        # As above, but the instances' disks point at a resized base image
        # (fingerprint suffixed with the flavor's disk size in bytes).
        self.stub_out('os.listdir',
                      lambda x: ['_base', 'instance-00000001',
                                 'instance-00000002', 'instance-00000003'])
        self.stub_out('os.path.exists', lambda x: x.find('instance-') != -1)
        self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
                       lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
                                  '10737418240'))

        found = os.path.join(CONF.instances_path,
                             CONF.image_cache_subdirectory_name,
                             'e97222e91fc4241f49a7f520d1dcf446751129b3_'
                             '10737418240')

        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.unexplained_images = [found]
        image_cache_manager.instance_names = self.stock_instance_names

        inuse_images = image_cache_manager._list_backing_images()

        self.assertEqual(inuse_images, [found])
        self.assertEqual(len(image_cache_manager.unexplained_images), 0)

    def test_list_backing_images_instancename(self):
        # Instances with non-template names (e.g. 'banana-42-hamster') must
        # still have their backing files detected.
        self.stub_out('os.listdir',
                      lambda x: ['_base', 'banana-42-hamster'])
        self.stub_out('os.path.exists',
                      lambda x: x.find('banana-42-hamster') != -1)
        self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
                       lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')

        found = os.path.join(CONF.instances_path,
                             CONF.image_cache_subdirectory_name,
                             'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')

        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.unexplained_images = [found]
        image_cache_manager.instance_names = self.stock_instance_names

        inuse_images = image_cache_manager._list_backing_images()

        self.assertEqual(inuse_images, [found])
        self.assertEqual(len(image_cache_manager.unexplained_images), 0)

    def test_list_backing_images_disk_notexist(self):
        # Errors from the backing-file probe must propagate, not be
        # swallowed.
        self.stub_out('os.listdir',
                      lambda x: ['_base', 'banana-42-hamster'])
        self.stub_out('os.path.exists',
                      lambda x: x.find('banana-42-hamster') != -1)

        def fake_get_disk(disk_path):
            raise processutils.ProcessExecutionError()

        self.stubs.Set(libvirt_utils, 'get_disk_backing_file', fake_get_disk)

        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.unexplained_images = []
        image_cache_manager.instance_names = self.stock_instance_names

        self.assertRaises(processutils.ProcessExecutionError,
                          image_cache_manager._list_backing_images)

    def test_find_base_file_nothing(self):
        # No file variant exists on disk -> _find_base_file yields nothing.
        self.stub_out('os.path.exists', lambda x: False)

        base_dir = '/var/lib/nova/instances/_base'
        fingerprint = '549867354867'
        image_cache_manager = imagecache.ImageCacheManager()
        res = list(image_cache_manager._find_base_file(base_dir, fingerprint))

        self.assertEqual(0, len(res))

    def test_find_base_file_small(self):
        # Only the '_sm' (small) variant exists; tuple flags are
        # (path, small, resized).
        fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
        self.stub_out('os.path.exists',
                      lambda x: x.endswith('%s_sm' % fingerprint))

        base_dir = '/var/lib/nova/instances/_base'
        image_cache_manager = imagecache.ImageCacheManager()
        res = list(image_cache_manager._find_base_file(base_dir, fingerprint))

        base_file = os.path.join(base_dir, fingerprint + '_sm')
        self.assertEqual(res, [(base_file, True, False)])

    def test_find_base_file_resized(self):
        # Only the size-suffixed (resized) variant exists.
        fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
        listing = ['00000001',
                   'ephemeral_0_20_None',
                   '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
                   '00000004']

        self.stub_out('os.listdir', lambda x: listing)
        self.stub_out('os.path.exists',
                      lambda x: x.endswith('%s_10737418240' % fingerprint))
        self.stub_out('os.path.isfile', lambda x: True)

        base_dir = '/var/lib/nova/instances/_base'
        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager._list_base_images(base_dir)
        res = list(image_cache_manager._find_base_file(base_dir, fingerprint))

        base_file = os.path.join(base_dir, fingerprint + '_10737418240')
        self.assertEqual(res, [(base_file, False, True)])

    def test_find_base_file_all(self):
        # All three variants (original, small, resized) exist and are
        # reported in that order.
        fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
        listing = ['00000001',
                   'ephemeral_0_20_None',
                   '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_sm',
                   '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
                   '00000004']

        self.stub_out('os.listdir', lambda x: listing)
        self.stub_out('os.path.exists', lambda x: True)
        self.stub_out('os.path.isfile', lambda x: True)

        base_dir = '/var/lib/nova/instances/_base'
        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager._list_base_images(base_dir)
        res = list(image_cache_manager._find_base_file(base_dir, fingerprint))

        base_file1 = os.path.join(base_dir, fingerprint)
        base_file2 = os.path.join(base_dir, fingerprint + '_sm')
        base_file3 = os.path.join(base_dir, fingerprint + '_10737418240')
        self.assertEqual(res, [(base_file1, False, False),
                               (base_file2, True, False),
                               (base_file3, False, True)])

    @contextlib.contextmanager
    def _make_base_file(self, checksum=True, lock=True):
        """Make a base file for testing.

        Creates a temporary instances_path containing a base file 'aaa',
        optionally its lock file and stored checksum, and yields the base
        file's path.
        """
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'),
                       group='libvirt')
            fname = os.path.join(tmpdir, 'aaa')

            base_file = open(fname, 'w')
            base_file.write('data')
            base_file.close()

            if lock:
                lockdir = os.path.join(tmpdir, 'locks')
                lockname = os.path.join(lockdir, 'nova-aaa')
                os.mkdir(lockdir)
                lock_file = open(lockname, 'w')
                lock_file.write('data')
                lock_file.close()

            base_file = open(fname, 'r')
            if checksum:
                imagecache.write_stored_checksum(fname)
            base_file.close()
            yield fname

    def test_remove_base_file(self):
        with self._make_base_file() as fname:
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager._remove_base_file(fname)
            info_fname = imagecache.get_info_filename(fname)
            lock_name = 'nova-' + os.path.split(fname)[-1]
            lock_dir = os.path.join(CONF.instances_path, 'locks')
            lock_file = os.path.join(lock_dir, lock_name)

            # Files are initially too new to delete
            self.assertTrue(os.path.exists(fname))
            self.assertTrue(os.path.exists(info_fname))
            self.assertTrue(os.path.exists(lock_file))

            # Old files get cleaned up though
            os.utime(fname, (-1, time.time() - 3601))
            image_cache_manager._remove_base_file(fname)

            self.assertFalse(os.path.exists(fname))
            self.assertFalse(os.path.exists(info_fname))
            self.assertFalse(os.path.exists(lock_file))

    def test_remove_base_file_original(self):
        with self._make_base_file() as fname:
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.originals = [fname]
            image_cache_manager._remove_base_file(fname)
            info_fname = imagecache.get_info_filename(fname)

            # Files are initially too new to delete
            self.assertTrue(os.path.exists(fname))
            self.assertTrue(os.path.exists(info_fname))

            # This file should stay longer than a resized image
            os.utime(fname, (-1, time.time() - 3601))
            image_cache_manager._remove_base_file(fname)

            self.assertTrue(os.path.exists(fname))
            self.assertTrue(os.path.exists(info_fname))

            # Originals don't stay forever though
            os.utime(fname, (-1, time.time() - 3600 * 25))
            image_cache_manager._remove_base_file(fname)

            self.assertFalse(os.path.exists(fname))
            self.assertFalse(os.path.exists(info_fname))

    def test_remove_base_file_dne(self):
        # This test is solely to execute the "does not exist" code path. We
        # don't expect the method being tested to do anything in this case.
with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) self.flags(image_info_filename_pattern=('$instances_path/' '%(image)s.info'), group='libvirt') fname = os.path.join(tmpdir, 'aaa') image_cache_manager = imagecache.ImageCacheManager() image_cache_manager._remove_base_file(fname) def test_remove_base_file_oserror(self): with intercept_log_messages() as stream: with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) self.flags(image_info_filename_pattern=('$instances_path/' '%(image)s.info'), group='libvirt') fname = os.path.join(tmpdir, 'aaa') os.mkdir(fname) os.utime(fname, (-1, time.time() - 3601)) # This will raise an OSError because of file permissions image_cache_manager = imagecache.ImageCacheManager() image_cache_manager._remove_base_file(fname) self.assertTrue(os.path.exists(fname)) self.assertNotEqual(stream.getvalue().find('Failed to remove'), -1) def test_handle_base_image_unused(self): img = '123' with self._make_base_file() as fname: os.utime(fname, (-1, time.time() - 3601)) image_cache_manager = imagecache.ImageCacheManager() image_cache_manager.unexplained_images = [fname] image_cache_manager._handle_base_image(img, fname) self.assertEqual(image_cache_manager.unexplained_images, []) self.assertEqual(image_cache_manager.removable_base_files, [fname]) self.assertEqual(image_cache_manager.corrupt_base_files, []) @mock.patch.object(libvirt_utils, 'update_mtime') def test_handle_base_image_used(self, mock_mtime): img = '123' with self._make_base_file() as fname: image_cache_manager = imagecache.ImageCacheManager() image_cache_manager.unexplained_images = [fname] image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])} image_cache_manager._handle_base_image(img, fname) mock_mtime.assert_called_once_with(fname) self.assertEqual(image_cache_manager.unexplained_images, []) self.assertEqual(image_cache_manager.removable_base_files, []) self.assertEqual(image_cache_manager.corrupt_base_files, []) @mock.patch.object(libvirt_utils, 
'update_mtime') def test_handle_base_image_used_remotely(self, mock_mtime): img = '123' with self._make_base_file() as fname: image_cache_manager = imagecache.ImageCacheManager() image_cache_manager.unexplained_images = [fname] image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])} image_cache_manager._handle_base_image(img, fname) mock_mtime.assert_called_once_with(fname) self.assertEqual(image_cache_manager.unexplained_images, []) self.assertEqual(image_cache_manager.removable_base_files, []) self.assertEqual(image_cache_manager.corrupt_base_files, []) def test_handle_base_image_absent(self): img = '123' with intercept_log_messages() as stream: image_cache_manager = imagecache.ImageCacheManager() image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])} image_cache_manager._handle_base_image(img, None) self.assertEqual(image_cache_manager.unexplained_images, []) self.assertEqual(image_cache_manager.removable_base_files, []) self.assertEqual(image_cache_manager.corrupt_base_files, []) self.assertNotEqual(stream.getvalue().find('an absent base file'), -1) def test_handle_base_image_used_missing(self): img = '123' with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) self.flags(image_info_filename_pattern=('$instances_path/' '%(image)s.info'), group='libvirt') fname = os.path.join(tmpdir, 'aaa') image_cache_manager = imagecache.ImageCacheManager() image_cache_manager.unexplained_images = [fname] image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])} image_cache_manager._handle_base_image(img, fname) self.assertEqual(image_cache_manager.unexplained_images, []) self.assertEqual(image_cache_manager.removable_base_files, []) self.assertEqual(image_cache_manager.corrupt_base_files, []) @mock.patch.object(libvirt_utils, 'update_mtime') def test_handle_base_image_checksum_fails(self, mock_mtime): self.flags(checksum_base_images=True, group='libvirt') img = '123' with self._make_base_file() as fname: with open(fname, 'w') as f: 
                f.write('banana')
            # Store a deliberately wrong checksum so verification fails.
            d = {'sha1': '21323454'}
            with open('%s.info' % fname, 'w') as f:
                f.write(jsonutils.dumps(d))

            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
            image_cache_manager._handle_base_image(img, fname)

            mock_mtime.assert_called_once_with(fname)
            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files, [])
            # A checksum mismatch marks the base file as corrupt.
            self.assertEqual(image_cache_manager.corrupt_base_files,
                             [fname])

    @mock.patch.object(libvirt_utils, 'update_mtime')
    @mock.patch.object(lockutils, 'external_lock')
    def test_verify_base_images(self, mock_lock, mock_mtime):
        # End-to-end test of ImageCacheManager.update() over a fully faked
        # filesystem. The hashes are sha1 digests of the image refs used
        # below (e.g. hashed_1 is sha1('1')).
        hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
        hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
        hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
        hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'

        self.flags(instances_path='/instance_path',
                   image_cache_subdirectory_name='_base')

        base_file_list = ['00000001',
                          'ephemeral_0_20_None',
                          'e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
                          'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
                          hashed_42,
                          hashed_1,
                          hashed_21,
                          hashed_22,
                          '%s_5368709120' % hashed_1,
                          '%s_10737418240' % hashed_1,
                          '00000004']

        def fq_path(path):
            # Fully-qualified path inside the fake base image directory.
            return os.path.join('/instance_path/_base/', path)

        # Fake base directory existence
        orig_exists = os.path.exists

        def exists(path):
            # The python coverage tool got angry with my overly broad mocks
            if not path.startswith('/instance_path'):
                return orig_exists(path)

            if path in ['/instance_path',
                        '/instance_path/_base',
                        '/instance_path/instance-1/disk',
                        '/instance_path/instance-2/disk',
                        '/instance_path/instance-3/disk',
                        '/instance_path/_base/%s.info' % hashed_42]:
                return True

            for p in base_file_list:
                if path == fq_path(p):
                    return True
                if path == fq_path(p) + '.info':
                    return False

            if path in ['/instance_path/_base/%s_sm' % i
                        for i in [hashed_1,
                                  hashed_21,
                                  hashed_22,
                                  hashed_42]]:
                return False
self.fail('Unexpected path existence check: %s' % path) self.stub_out('os.path.exists', lambda x: exists(x)) # Fake up some instances in the instances directory orig_listdir = os.listdir def listdir(path): # The python coverage tool got angry with my overly broad mocks if not path.startswith('/instance_path'): return orig_listdir(path) if path == '/instance_path': return ['instance-1', 'instance-2', 'instance-3', '_base'] if path == '/instance_path/_base': return base_file_list self.fail('Unexpected directory listed: %s' % path) self.stub_out('os.listdir', lambda x: listdir(x)) # Fake isfile for these faked images in _base orig_isfile = os.path.isfile def isfile(path): # The python coverage tool got angry with my overly broad mocks if not path.startswith('/instance_path'): return orig_isfile(path) for p in base_file_list: if path == fq_path(p): return True self.fail('Unexpected isfile call: %s' % path) self.stub_out('os.path.isfile', lambda x: isfile(x)) # Fake the database call which lists running instances instances = [{'image_ref': '1', 'host': CONF.host, 'name': 'instance-1', 'uuid': '123', 'vm_state': '', 'task_state': ''}, {'image_ref': '1', 'kernel_id': '21', 'ramdisk_id': '22', 'host': CONF.host, 'name': 'instance-2', 'uuid': '456', 'vm_state': '', 'task_state': ''}] all_instances = [fake_instance.fake_instance_obj(None, **instance) for instance in instances] image_cache_manager = imagecache.ImageCacheManager() # Fake the utils call which finds the backing image def get_disk_backing_file(path): if path in ['/instance_path/instance-1/disk', '/instance_path/instance-2/disk']: return fq_path('%s_5368709120' % hashed_1) self.fail('Unexpected backing file lookup: %s' % path) self.stubs.Set(libvirt_utils, 'get_disk_backing_file', lambda x: get_disk_backing_file(x)) # Fake out verifying checksums, as that is tested elsewhere self.stubs.Set(image_cache_manager, '_verify_checksum', lambda x, y: True) # Fake getmtime as well orig_getmtime = os.path.getmtime def 
getmtime(path): if not path.startswith('/instance_path'): return orig_getmtime(path) return 1000000 self.stub_out('os.path.getmtime', lambda x: getmtime(x)) # Make sure we don't accidentally remove a real file orig_remove = os.remove def remove(path): if not path.startswith('/instance_path'): return orig_remove(path) # Don't try to remove fake files return self.stub_out('os.remove', lambda x: remove(x)) self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList, 'get_by_instance_uuid') ctxt = context.get_admin_context() objects.block_device.BlockDeviceMappingList.get_by_instance_uuid( ctxt, '123').AndReturn(None) objects.block_device.BlockDeviceMappingList.get_by_instance_uuid( ctxt, '456').AndReturn(None) self.mox.ReplayAll() # And finally we can make the call we're actually testing... # The argument here should be a context, but it is mocked out image_cache_manager.update(ctxt, all_instances) # Verify active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1), fq_path(hashed_21), fq_path(hashed_22)] for act in active: self.assertIn(act, image_cache_manager.active_base_files) self.assertEqual(len(image_cache_manager.active_base_files), len(active)) for rem in [fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'), fq_path('e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm'), fq_path(hashed_42), fq_path('%s_10737418240' % hashed_1)]: self.assertIn(rem, image_cache_manager.removable_base_files) # Ensure there are no "corrupt" images as well self.assertEqual(len(image_cache_manager.corrupt_base_files), 0) def test_verify_base_images_no_base(self): self.flags(instances_path='/tmp/no/such/dir/name/please') image_cache_manager = imagecache.ImageCacheManager() image_cache_manager.update(None, []) def test_is_valid_info_file(self): hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3' self.flags(instances_path='/tmp/no/such/dir/name/please') self.flags(image_info_filename_pattern=('$instances_path/_base/' '%(image)s.info'), group='libvirt') base_filename = 
os.path.join(CONF.instances_path, '_base', hashed) is_valid_info_file = imagecache.is_valid_info_file self.assertFalse(is_valid_info_file('banana')) self.assertFalse(is_valid_info_file( os.path.join(CONF.instances_path, '_base', '00000001'))) self.assertFalse(is_valid_info_file(base_filename)) self.assertFalse(is_valid_info_file(base_filename + '.sha1')) self.assertTrue(is_valid_info_file(base_filename + '.info')) def test_configured_checksum_path(self): with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) self.flags(image_info_filename_pattern=('$instances_path/' '%(image)s.info'), group='libvirt') # Ensure there is a base directory os.mkdir(os.path.join(tmpdir, '_base')) # Fake the database call which lists running instances instances = [{'image_ref': '1', 'host': CONF.host, 'name': 'instance-1', 'uuid': '123', 'vm_state': '', 'task_state': ''}, {'image_ref': '1', 'host': CONF.host, 'name': 'instance-2', 'uuid': '456', 'vm_state': '', 'task_state': ''}] all_instances = [] for instance in instances: all_instances.append(fake_instance.fake_instance_obj( None, **instance)) def touch(filename): f = open(filename, 'w') f.write('Touched') f.close() old = time.time() - (25 * 3600) hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3' base_filename = os.path.join(tmpdir, hashed) touch(base_filename) touch(base_filename + '.info') os.utime(base_filename + '.info', (old, old)) touch(base_filename + '.info') os.utime(base_filename + '.info', (old, old)) self.mox.StubOutWithMock( objects.block_device.BlockDeviceMappingList, 'get_by_instance_uuid') ctxt = context.get_admin_context() objects.block_device.BlockDeviceMappingList.get_by_instance_uuid( ctxt, '123').AndReturn(None) objects.block_device.BlockDeviceMappingList.get_by_instance_uuid( ctxt, '456').AndReturn(None) self.mox.ReplayAll() image_cache_manager = imagecache.ImageCacheManager() image_cache_manager.update(ctxt, all_instances) self.assertTrue(os.path.exists(base_filename)) 
self.assertTrue(os.path.exists(base_filename + '.info')) def test_run_image_cache_manager_pass(self): was = {'called': False} def fake_get_all_by_filters(context, *args, **kwargs): was['called'] = True instances = [] for x in range(2): instances.append(fake_instance.fake_db_instance( image_ref='1', uuid=x, name=x, vm_state='', task_state='')) return instances with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) self.stub_out('nova.db.instance_get_all_by_filters', fake_get_all_by_filters) compute = importutils.import_object(CONF.compute_manager) self.flags(use_local=True, group='conductor') compute.conductor_api = conductor.API() ctxt = context.get_admin_context() compute._run_image_cache_manager_pass(ctxt) self.assertTrue(was['called']) def test_store_swap_image(self): image_cache_manager = imagecache.ImageCacheManager() image_cache_manager._store_swap_image('swap_') image_cache_manager._store_swap_image('swap_123') image_cache_manager._store_swap_image('swap_456') image_cache_manager._store_swap_image('swap_abc') image_cache_manager._store_swap_image('123_swap') image_cache_manager._store_swap_image('swap_129_') self.assertEqual(len(image_cache_manager.back_swap_images), 2) expect_set = set(['swap_123', 'swap_456']) self.assertEqual(image_cache_manager.back_swap_images, expect_set) @mock.patch.object(lockutils, 'external_lock') @mock.patch.object(libvirt_utils, 'update_mtime') @mock.patch('os.path.exists', return_value=True) @mock.patch('os.path.getmtime') @mock.patch('os.remove') def test_age_and_verify_swap_images(self, mock_remove, mock_getmtime, mock_exist, mock_mtime, mock_lock): image_cache_manager = imagecache.ImageCacheManager() expected_remove = set() expected_exist = set(['swap_128', 'swap_256']) image_cache_manager.back_swap_images.add('swap_128') image_cache_manager.back_swap_images.add('swap_256') image_cache_manager.used_swap_images.add('swap_128') def getmtime(path): return time.time() - 1000000 mock_getmtime.side_effect = getmtime def 
removefile(path): if not path.startswith('/tmp_age_test'): return os.remove(path) fn = os.path.split(path)[-1] expected_remove.add(fn) expected_exist.remove(fn) mock_remove.side_effect = removefile image_cache_manager._age_and_verify_swap_images(None, '/tmp_age_test') self.assertEqual(1, len(expected_exist)) self.assertEqual(1, len(expected_remove)) self.assertIn('swap_128', expected_exist) self.assertIn('swap_256', expected_remove) @mock.patch.object(utils, 'synchronized') @mock.patch.object(imagecache.ImageCacheManager, '_get_age_of_file', return_value=(True, 100)) def test_lock_acquired_on_removing_old_enough_files(self, mock_get_age, mock_synchronized): base_file = '/tmp_age_test' lock_path = os.path.join(CONF.instances_path, 'locks') lock_file = os.path.split(base_file)[-1] image_cache_manager = imagecache.ImageCacheManager() image_cache_manager._remove_old_enough_file( base_file, 60, remove_sig=False, remove_lock=False) mock_synchronized.assert_called_once_with(lock_file, external=True, lock_path=lock_path) class VerifyChecksumTestCase(test.NoDBTestCase): def setUp(self): super(VerifyChecksumTestCase, self).setUp() self.img = {'container_format': 'ami', 'id': '42'} self.flags(checksum_base_images=True, group='libvirt') def _make_checksum(self, tmpdir): testdata = ('OpenStack Software delivers a massively scalable cloud ' 'operating system.') fname = os.path.join(tmpdir, 'aaa') info_fname = imagecache.get_info_filename(fname) with open(fname, 'w') as f: f.write(testdata) return fname, info_fname, testdata def _write_file(self, info_fname, info_attr, testdata): f = open(info_fname, 'w') if info_attr == "csum valid": csum = hashlib.sha1() csum.update(testdata) f.write('{"sha1": "%s"}\n' % csum.hexdigest()) elif info_attr == "csum invalid, not json": f.write('banana') else: f.write('{"sha1": "banana"}') f.close() def _check_body(self, tmpdir, info_attr): self.flags(instances_path=tmpdir) self.flags(image_info_filename_pattern=('$instances_path/' 
'%(image)s.info'), group='libvirt') fname, info_fname, testdata = self._make_checksum(tmpdir) self._write_file(info_fname, info_attr, testdata) image_cache_manager = imagecache.ImageCacheManager() return image_cache_manager, fname def test_verify_checksum(self): with utils.tempdir() as tmpdir: image_cache_manager, fname = self._check_body(tmpdir, "csum valid") res = image_cache_manager._verify_checksum(self.img, fname) self.assertTrue(res) def test_verify_checksum_disabled(self): self.flags(checksum_base_images=False, group='libvirt') with utils.tempdir() as tmpdir: image_cache_manager, fname = self._check_body(tmpdir, "csum valid") res = image_cache_manager._verify_checksum(self.img, fname) self.assertIsNone(res) def test_verify_checksum_invalid_json(self): with intercept_log_messages() as stream: with utils.tempdir() as tmpdir: image_cache_manager, fname = ( self._check_body(tmpdir, "csum invalid, not json")) res = image_cache_manager._verify_checksum( self.img, fname, create_if_missing=False) self.assertFalse(res) log = stream.getvalue() # NOTE(mikal): this is a skip not a fail because the file is # present, but is not in valid JSON format and therefore is # skipped. 
self.assertNotEqual(log.find('image verification skipped'), -1) def test_verify_checksum_invalid_repaired(self): with utils.tempdir() as tmpdir: image_cache_manager, fname = ( self._check_body(tmpdir, "csum invalid, not json")) res = image_cache_manager._verify_checksum( self.img, fname, create_if_missing=True) self.assertIsNone(res) def test_verify_checksum_invalid(self): with intercept_log_messages() as stream: with utils.tempdir() as tmpdir: image_cache_manager, fname = ( self._check_body(tmpdir, "csum invalid, valid json")) res = image_cache_manager._verify_checksum(self.img, fname) self.assertFalse(res) log = stream.getvalue() self.assertNotEqual(log.find('image verification failed'), -1) def test_verify_checksum_file_missing(self): with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) self.flags(image_info_filename_pattern=('$instances_path/' '%(image)s.info'), group='libvirt') fname, info_fname, testdata = self._make_checksum(tmpdir) image_cache_manager = imagecache.ImageCacheManager() res = image_cache_manager._verify_checksum('aaa', fname) self.assertIsNone(res) # Checksum requests for a file with no checksum now have the # side effect of creating the checksum self.assertTrue(os.path.exists(info_fname)) nova-13.0.0/nova/tests/unit/virt/libvirt/volume/0000775000567000056710000000000012701410205022721 5ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_quobyte.py0000664000567000056710000003737612701407773026062 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Quobyte Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Quobyte volume driver module.""" import mock import os from oslo_concurrency import processutils from oslo_utils import fileutils from nova import exception from nova import test from nova.tests.unit.virt.libvirt.volume import test_volume from nova import utils from nova.virt.libvirt import utils as libvirt_utils from nova.virt.libvirt.volume import quobyte class QuobyteTestCase(test.NoDBTestCase): """Tests the nova.virt.libvirt.volume.quobyte module utilities.""" @mock.patch.object(fileutils, "ensure_tree") @mock.patch.object(utils, "execute") def test_quobyte_mount_volume(self, mock_execute, mock_ensure_tree): mnt_base = '/mnt' quobyte_volume = '192.168.1.1/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) quobyte.mount_volume(quobyte_volume, export_mnt_base) mock_ensure_tree.assert_called_once_with(export_mnt_base) expected_commands = [mock.call('mount.quobyte', quobyte_volume, export_mnt_base, check_exit_code=[0, 4]) ] mock_execute.assert_has_calls(expected_commands) @mock.patch.object(fileutils, "ensure_tree") @mock.patch.object(utils, "execute") def test_quobyte_mount_volume_with_config(self, mock_execute, mock_ensure_tree): mnt_base = '/mnt' quobyte_volume = '192.168.1.1/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) config_file_dummy = "/etc/quobyte/dummy.conf" quobyte.mount_volume(quobyte_volume, export_mnt_base, config_file_dummy) mock_ensure_tree.assert_called_once_with(export_mnt_base) expected_commands = [mock.call('mount.quobyte', quobyte_volume, 
export_mnt_base, '-c', config_file_dummy, check_exit_code=[0, 4]) ] mock_execute.assert_has_calls(expected_commands) @mock.patch.object(fileutils, "ensure_tree") @mock.patch.object(utils, "execute", side_effect=(processutils. ProcessExecutionError)) def test_quobyte_mount_volume_fails(self, mock_execute, mock_ensure_tree): mnt_base = '/mnt' quobyte_volume = '192.168.1.1/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) self.assertRaises(processutils.ProcessExecutionError, quobyte.mount_volume, quobyte_volume, export_mnt_base) @mock.patch.object(utils, "execute") def test_quobyte_umount_volume(self, mock_execute): mnt_base = '/mnt' quobyte_volume = '192.168.1.1/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) quobyte.umount_volume(export_mnt_base) mock_execute.assert_called_once_with('umount.quobyte', export_mnt_base) @mock.patch.object(quobyte.LOG, "error") @mock.patch.object(utils, "execute") def test_quobyte_umount_volume_warns(self, mock_execute, mock_debug): mnt_base = '/mnt' quobyte_volume = '192.168.1.1/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) def exec_side_effect(*cmd, **kwargs): exerror = processutils.ProcessExecutionError( "Device or resource busy") raise exerror mock_execute.side_effect = exec_side_effect quobyte.umount_volume(export_mnt_base) (mock_debug. assert_called_once_with("The Quobyte volume at %s is still in use.", export_mnt_base)) @mock.patch.object(quobyte.LOG, "exception") @mock.patch.object(utils, "execute", side_effect=(processutils.ProcessExecutionError)) def test_quobyte_umount_volume_fails(self, mock_execute, mock_exception): mnt_base = '/mnt' quobyte_volume = '192.168.1.1/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) quobyte.umount_volume(export_mnt_base) (mock_exception. 
assert_called_once_with("Couldn't unmount " "the Quobyte Volume at %s", export_mnt_base)) @mock.patch.object(os, "access", return_value=True) @mock.patch.object(utils, "execute") def test_quobyte_is_valid_volume(self, mock_execute, mock_access): mnt_base = '/mnt' quobyte_volume = '192.168.1.1/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) quobyte.validate_volume(export_mnt_base) mock_execute.assert_called_once_with('getfattr', '-n', 'quobyte.info', export_mnt_base) @mock.patch.object(utils, "execute", side_effect=(processutils. ProcessExecutionError)) def test_quobyte_is_valid_volume_vol_not_valid_volume(self, mock_execute): mnt_base = '/mnt' quobyte_volume = '192.168.1.1/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) self.assertRaises(exception.NovaException, quobyte.validate_volume, export_mnt_base) @mock.patch.object(os, "access", return_value=False) @mock.patch.object(utils, "execute", side_effect=(processutils. 
ProcessExecutionError)) def test_quobyte_is_valid_volume_vol_no_valid_access(self, mock_execute, mock_access): mnt_base = '/mnt' quobyte_volume = '192.168.1.1/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) self.assertRaises(exception.NovaException, quobyte.validate_volume, export_mnt_base) class LibvirtQuobyteVolumeDriverTestCase( test_volume.LibvirtVolumeBaseTestCase): """Tests the LibvirtQuobyteVolumeDriver class.""" @mock.patch.object(quobyte, 'validate_volume') @mock.patch.object(quobyte, 'mount_volume') @mock.patch.object(libvirt_utils, 'is_mounted', return_value=False) def test_libvirt_quobyte_driver_mount(self, mock_is_mounted, mock_mount_volume, mock_validate_volume ): mnt_base = '/mnt' self.flags(quobyte_mount_point_base=mnt_base, group='libvirt') libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn) export_string = 'quobyte://192.168.1.1/volume-00001' quobyte_volume = '192.168.1.1/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) file_path = os.path.join(export_mnt_base, self.name) connection_info = {'data': {'export': export_string, 'name': self.name}} libvirt_driver.connect_volume(connection_info, self.disk_info) conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertFileTypeEquals(tree, file_path) mock_mount_volume.assert_called_once_with(quobyte_volume, export_mnt_base, mock.ANY) mock_validate_volume.assert_called_with(export_mnt_base) @mock.patch.object(quobyte, 'validate_volume') @mock.patch.object(quobyte, 'umount_volume') @mock.patch.object(libvirt_utils, 'is_mounted', return_value=True) def test_libvirt_quobyte_driver_umount(self, mock_is_mounted, mock_umount_volume, mock_validate_volume): mnt_base = '/mnt' self.flags(quobyte_mount_point_base=mnt_base, group='libvirt') libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn) export_string = 'quobyte://192.168.1.1/volume-00001' quobyte_volume 
= '192.168.1.1/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) file_path = os.path.join(export_mnt_base, self.name) connection_info = {'data': {'export': export_string, 'name': self.name}} libvirt_driver.connect_volume(connection_info, self.disk_info) conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertFileTypeEquals(tree, file_path) libvirt_driver.disconnect_volume(connection_info, "vde") mock_validate_volume.assert_called_once_with(export_mnt_base) mock_umount_volume.assert_called_once_with(export_mnt_base) @mock.patch.object(quobyte, 'validate_volume') @mock.patch.object(quobyte, 'umount_volume') def test_libvirt_quobyte_driver_already_mounted(self, mock_umount_volume, mock_validate_volume ): mnt_base = '/mnt' self.flags(quobyte_mount_point_base=mnt_base, group='libvirt') libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn) export_string = 'quobyte://192.168.1.1/volume-00001' quobyte_volume = '192.168.1.1/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) file_path = os.path.join(export_mnt_base, self.name) connection_info = {'data': {'export': export_string, 'name': self.name}} libvirt_driver.connect_volume(connection_info, self.disk_info) conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertFileTypeEquals(tree, file_path) libvirt_driver.disconnect_volume(connection_info, "vde") expected_commands = [ ('findmnt', '--target', export_mnt_base, '--source', "quobyte@" + quobyte_volume), ('findmnt', '--target', export_mnt_base, '--source', "quobyte@" + quobyte_volume), ] self.assertEqual(expected_commands, self.executes) mock_umount_volume.assert_called_once_with(export_mnt_base) mock_validate_volume.assert_called_once_with(export_mnt_base) @mock.patch.object(quobyte, 'validate_volume') @mock.patch.object(quobyte, 'mount_volume') @mock.patch.object(libvirt_utils, 
'is_mounted', return_value=False) def test_libvirt_quobyte_driver_qcow2(self, mock_is_mounted, mock_mount_volume, mock_validate_volume ): mnt_base = '/mnt' self.flags(quobyte_mount_point_base=mnt_base, group='libvirt') libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn) export_string = 'quobyte://192.168.1.1/volume-00001' name = 'volume-00001' image_format = 'qcow2' quobyte_volume = '192.168.1.1/volume-00001' connection_info = {'data': {'export': export_string, 'name': name, 'format': image_format}} export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(quobyte_volume)) libvirt_driver.connect_volume(connection_info, self.disk_info) conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self.assertEqual('file', tree.get('type')) self.assertEqual('qcow2', tree.find('./driver').get('type')) (mock_mount_volume. assert_called_once_with('192.168.1.1/volume-00001', export_mnt_base, mock.ANY)) mock_validate_volume.assert_called_with(export_mnt_base) libvirt_driver.disconnect_volume(connection_info, "vde") def test_libvirt_quobyte_driver_mount_non_quobyte_volume(self): mnt_base = '/mnt' self.flags(quobyte_mount_point_base=mnt_base, group='libvirt') libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn) export_string = 'quobyte://192.168.1.1/volume-00001' connection_info = {'data': {'export': export_string, 'name': self.name}} def exe_side_effect(*cmd, **kwargs): if cmd == mock.ANY: raise exception.NovaException() with mock.patch.object(quobyte, 'validate_volume') as mock_execute: mock_execute.side_effect = exe_side_effect self.assertRaises(exception.NovaException, libvirt_driver.connect_volume, connection_info, self.disk_info) def test_libvirt_quobyte_driver_normalize_export_with_protocol(self): mnt_base = '/mnt' self.flags(quobyte_mount_point_base=mnt_base, group='libvirt') libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn) export_string = 'quobyte://192.168.1.1/volume-00001' 
self.assertEqual("192.168.1.1/volume-00001", libvirt_driver._normalize_export(export_string)) def test_libvirt_quobyte_driver_normalize_export_without_protocol(self): mnt_base = '/mnt' self.flags(quobyte_mount_point_base=mnt_base, group='libvirt') libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn) export_string = '192.168.1.1/volume-00001' self.assertEqual("192.168.1.1/volume-00001", libvirt_driver._normalize_export(export_string)) nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_disco.py0000664000567000056710000000554212701407773025461 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Industrial Technology Research Institute. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from os_brick.initiator import connector from nova.tests.unit.virt.libvirt.volume import test_volume from nova.virt.libvirt.volume import disco class LibvirtDISCOVolumeDriverTestCase( test_volume.LibvirtVolumeBaseTestCase): def test_libvirt_disco_driver(self): libvirt_driver = disco.LibvirtDISCOVolumeDriver( self.fake_conn) self.assertIsInstance(libvirt_driver.connector, connector.DISCOConnector) def test_libvirt_disco_driver_connect(self): dcon = disco.LibvirtDISCOVolumeDriver(self.fake_conn) conf = {'server_ip': '127.0.0.1', 'server_port': 9898} disk_info = {'disco_id': '1234567', 'name': 'aDiscoVolume', 'conf': conf} conn = {'data': disk_info} with mock.patch.object(dcon.connector, 'connect_volume', return_value={'path': '/dev/dms1234567'}): dcon.connect_volume(conn, None) self.assertEqual('/dev/dms1234567', conn['data']['device_path']) def test_libvirt_disco_driver_get_config(self): dcon = disco.LibvirtDISCOVolumeDriver(self.fake_conn) disk_info = {'path': '/dev/dms1234567', 'name': 'aDiscoVolume', 'type': 'raw', 'dev': 'vda1', 'bus': 'pci0', 'device_path': '/dev/dms1234567'} conn = {'data': disk_info} conf = dcon.get_config(conn, disk_info) self.assertEqual('file', conf.source_type) self.assertEqual('/dev/dms1234567', conf.source_path) self.assertEqual('disco', conf.source_protocol) def test_libvirt_disco_driver_disconnect(self): dcon = disco.LibvirtDISCOVolumeDriver(self.fake_conn) dcon.connector.disconnect_volume = mock.MagicMock() disk_info = {'path': '/dev/dms1234567', 'name': 'aDiscoVolume', 'type': 'raw', 'dev': 'vda1', 'bus': 'pci0', 'device_path': '/dev/dms123456'} conn = {'data': disk_info} dcon.disconnect_volume(conn, disk_info) dcon.connector.disconnect_volume.assert_called_once_with( disk_info, None) nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_net.py0000664000567000056710000002264412701407773025150 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file 
# except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_config import cfg

from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt import host
from nova.virt.libvirt.volume import net

CONF = cfg.CONF
CONF.import_opt('rbd_user',
                'nova.virt.libvirt.volume.net', group='libvirt')
CONF.import_opt('rbd_secret_uuid',
                'nova.virt.libvirt.volume.net', group='libvirt')


class LibvirtNetVolumeDriverTestCase(
        test_volume.LibvirtISCSIVolumeBaseTestCase):
    """Tests the libvirt network volume driver."""

    def _assertNetworkAndProtocolEquals(self, tree):
        # Common rbd-source assertions on the generated XML tree.
        self.assertEqual('network', tree.get('type'))
        self.assertEqual('rbd', tree.find('./source').get('protocol'))
        rbd_name = '%s/%s' % ('rbd', self.name)
        self.assertEqual(rbd_name, tree.find('./source').get('name'))

    def _assertISCSINetworkAndProtocolEquals(self, tree):
        # Common iscsi-source assertions on the generated XML tree.
        self.assertEqual('network', tree.get('type'))
        self.assertEqual('iscsi', tree.find('./source').get('protocol'))
        iscsi_name = '%s/%s' % (self.iqn, self.vol['id'])
        self.assertEqual(iscsi_name, tree.find('./source').get('name'))

    def sheepdog_connection(self, volume):
        return {
            'driver_volume_type': 'sheepdog',
            'data': {
                'name': volume['name']
            }
        }

    def test_libvirt_sheepdog_driver(self):
        libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn)
        connection_info = self.sheepdog_connection(self.vol)
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self.assertEqual('network', tree.get('type'))
        self.assertEqual('sheepdog', tree.find('./source').get('protocol'))
        self.assertEqual(self.name,
                         tree.find('./source').get('name'))
        libvirt_driver.disconnect_volume(connection_info, "vde")

    def rbd_connection(self, volume):
        return {
            'driver_volume_type': 'rbd',
            'data': {
                'name': '%s/%s' % ('rbd', volume['name']),
                'auth_enabled': CONF.libvirt.rbd_secret_uuid is not None,
                'auth_username': CONF.libvirt.rbd_user,
                'secret_type': 'ceph',
                'secret_uuid': CONF.libvirt.rbd_secret_uuid,
                'qos_specs': {
                    'total_bytes_sec': '1048576',
                    'read_iops_sec': '500',
                }
            }
        }

    def test_libvirt_rbd_driver(self):
        libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn)
        connection_info = self.rbd_connection(self.vol)
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertNetworkAndProtocolEquals(tree)
        self.assertIsNone(tree.find('./source/auth'))
        self.assertEqual('1048576',
                         tree.find('./iotune/total_bytes_sec').text)
        self.assertEqual('500', tree.find('./iotune/read_iops_sec').text)
        libvirt_driver.disconnect_volume(connection_info, "vde")

    def test_libvirt_rbd_driver_hosts(self):
        libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn)
        connection_info = self.rbd_connection(self.vol)
        hosts = ['example.com', '1.2.3.4', '::1']
        ports = [None, '6790', '6791']
        connection_info['data']['hosts'] = hosts
        connection_info['data']['ports'] = ports
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertNetworkAndProtocolEquals(tree)
        self.assertIsNone(tree.find('./source/auth'))
        found_hosts = tree.findall('./source/host')
        self.assertEqual(hosts, [host.get('name') for host in found_hosts])
        self.assertEqual(ports, [host.get('port') for host in found_hosts])
        libvirt_driver.disconnect_volume(connection_info, "vde")

    def test_libvirt_rbd_driver_auth_enabled(self):
        libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn)
        connection_info = self.rbd_connection(self.vol)
        secret_type = 'ceph'
        connection_info['data']['auth_enabled'] = True
        connection_info['data']['auth_username'] = self.user
connection_info['data']['secret_type'] = secret_type connection_info['data']['secret_uuid'] = self.uuid conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertNetworkAndProtocolEquals(tree) self.assertEqual(self.user, tree.find('./auth').get('username')) self.assertEqual(secret_type, tree.find('./auth/secret').get('type')) self.assertEqual(self.uuid, tree.find('./auth/secret').get('uuid')) libvirt_driver.disconnect_volume(connection_info, "vde") def test_libvirt_rbd_driver_auth_enabled_flags_override(self): libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn) connection_info = self.rbd_connection(self.vol) secret_type = 'ceph' connection_info['data']['auth_enabled'] = True connection_info['data']['auth_username'] = self.user connection_info['data']['secret_type'] = secret_type connection_info['data']['secret_uuid'] = self.uuid flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b' flags_user = 'bar' self.flags(rbd_user=flags_user, rbd_secret_uuid=flags_uuid, group='libvirt') conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertNetworkAndProtocolEquals(tree) self.assertEqual(flags_user, tree.find('./auth').get('username')) self.assertEqual(secret_type, tree.find('./auth/secret').get('type')) self.assertEqual(flags_uuid, tree.find('./auth/secret').get('uuid')) libvirt_driver.disconnect_volume(connection_info, "vde") def test_libvirt_rbd_driver_auth_disabled(self): libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn) connection_info = self.rbd_connection(self.vol) secret_type = 'ceph' connection_info['data']['auth_enabled'] = False connection_info['data']['auth_username'] = self.user connection_info['data']['secret_type'] = secret_type connection_info['data']['secret_uuid'] = self.uuid conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertNetworkAndProtocolEquals(tree) self.assertIsNone(tree.find('./auth')) 
libvirt_driver.disconnect_volume(connection_info, "vde") def test_libvirt_rbd_driver_auth_disabled_flags_override(self): libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn) connection_info = self.rbd_connection(self.vol) secret_type = 'ceph' connection_info['data']['auth_enabled'] = False connection_info['data']['auth_username'] = self.user connection_info['data']['secret_type'] = secret_type connection_info['data']['secret_uuid'] = self.uuid # NOTE: Supplying the rbd_secret_uuid will enable authentication # locally in nova-compute even if not enabled in nova-volume/cinder flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b' flags_user = 'bar' self.flags(rbd_user=flags_user, rbd_secret_uuid=flags_uuid, group='libvirt') conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertNetworkAndProtocolEquals(tree) self.assertEqual(flags_user, tree.find('./auth').get('username')) self.assertEqual(secret_type, tree.find('./auth/secret').get('type')) self.assertEqual(flags_uuid, tree.find('./auth/secret').get('uuid')) libvirt_driver.disconnect_volume(connection_info, "vde") @mock.patch.object(host.Host, 'find_secret') @mock.patch.object(host.Host, 'create_secret') @mock.patch.object(host.Host, 'delete_secret') def test_libvirt_iscsi_net_driver(self, mock_delete, mock_create, mock_find): mock_find.return_value = test_volume.FakeSecret() mock_create.return_value = test_volume.FakeSecret() libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn) connection_info = self.iscsi_connection(self.vol, self.location, self.iqn, auth=True) secret_type = 'iscsi' flags_user = connection_info['data']['auth_username'] conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertISCSINetworkAndProtocolEquals(tree) self.assertEqual(flags_user, tree.find('./auth').get('username')) self.assertEqual(secret_type, tree.find('./auth/secret').get('type')) self.assertEqual(test_volume.SECRET_UUID, 
tree.find('./auth/secret').get('uuid')) libvirt_driver.disconnect_volume(connection_info, 'vde') nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_volume.py0000664000567000056710000002616412701407773025672 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # Copyright 2012 University Of Minho # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from nova import exception from nova import test from nova.tests.unit.virt.libvirt import fakelibvirt from nova import utils from nova.virt.libvirt import host from nova.virt.libvirt.volume import volume SECRET_UUID = '2a0a0d6c-babf-454d-b93e-9ac9957b95e0' class FakeSecret(object): def __init__(self): self.uuid = SECRET_UUID def getUUIDString(self): return self.uuid def UUIDString(self): return self.uuid def setValue(self, value): self.value = value return 0 def getValue(self, value): return self.value def undefine(self): self.value = None return 0 class LibvirtVolumeBaseTestCase(test.NoDBTestCase): """Contains common setup and helper methods for libvirt volume tests.""" def setUp(self): super(LibvirtVolumeBaseTestCase, self).setUp() self.executes = [] def fake_execute(*cmd, **kwargs): self.executes.append(cmd) return None, None self.stubs.Set(utils, 'execute', fake_execute) self.useFixture(fakelibvirt.FakeLibvirtFixture()) class FakeLibvirtDriver(object): def __init__(self): self._host = host.Host("qemu:///system") def _get_all_block_devices(self): return [] self.fake_conn = FakeLibvirtDriver() self.connr = 
{ 'ip': '127.0.0.1', 'initiator': 'fake_initiator', 'host': 'fake_host' } self.disk_info = { "bus": "virtio", "dev": "vde", "type": "disk", } self.name = 'volume-00000001' self.location = '10.0.2.15:3260' self.iqn = 'iqn.2010-10.org.openstack:%s' % self.name self.vol = {'id': 1, 'name': self.name} self.uuid = '875a8070-d0b9-4949-8b31-104d125c9a64' self.user = 'foo' def _assertFileTypeEquals(self, tree, file_path): self.assertEqual('file', tree.get('type')) self.assertEqual(file_path, tree.find('./source').get('file')) class LibvirtISCSIVolumeBaseTestCase(LibvirtVolumeBaseTestCase): """Contains common setup and helper methods for iSCSI volume tests.""" def iscsi_connection(self, volume, location, iqn, auth=False, transport=None): dev_name = 'ip-%s-iscsi-%s-lun-1' % (location, iqn) if transport is not None: dev_name = 'pci-0000:00:00.0-' + dev_name dev_path = '/dev/disk/by-path/%s' % (dev_name) ret = { 'driver_volume_type': 'iscsi', 'data': { 'volume_id': volume['id'], 'target_portal': location, 'target_iqn': iqn, 'target_lun': 1, 'device_path': dev_path, 'qos_specs': { 'total_bytes_sec': '102400', 'read_iops_sec': '200', } } } if auth: ret['data']['auth_method'] = 'CHAP' ret['data']['auth_username'] = 'foo' ret['data']['auth_password'] = 'bar' return ret class LibvirtVolumeTestCase(LibvirtISCSIVolumeBaseTestCase): def _assertDiskInfoEquals(self, tree, disk_info): self.assertEqual(disk_info['type'], tree.get('device')) self.assertEqual(disk_info['bus'], tree.find('./target').get('bus')) self.assertEqual(disk_info['dev'], tree.find('./target').get('dev')) def _test_libvirt_volume_driver_disk_info(self): libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn) connection_info = { 'driver_volume_type': 'fake', 'data': { 'device_path': '/foo', }, 'serial': 'fake_serial', } conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertDiskInfoEquals(tree, self.disk_info) def test_libvirt_volume_disk_info_type(self): 
self.disk_info['type'] = 'cdrom' self._test_libvirt_volume_driver_disk_info() def test_libvirt_volume_disk_info_dev(self): self.disk_info['dev'] = 'hdc' self._test_libvirt_volume_driver_disk_info() def test_libvirt_volume_disk_info_bus(self): self.disk_info['bus'] = 'scsi' self._test_libvirt_volume_driver_disk_info() def test_libvirt_volume_driver_serial(self): libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn) connection_info = { 'driver_volume_type': 'fake', 'data': { 'device_path': '/foo', }, 'serial': 'fake_serial', } conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self.assertEqual('block', tree.get('type')) self.assertEqual('fake_serial', tree.find('./serial').text) self.assertIsNone(tree.find('./blockio')) self.assertIsNone(tree.find("driver[@discard]")) def test_libvirt_volume_driver_blockio(self): libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn) connection_info = { 'driver_volume_type': 'fake', 'data': { 'device_path': '/foo', 'logical_block_size': '4096', 'physical_block_size': '4096', }, 'serial': 'fake_serial', } disk_info = { "bus": "virtio", "dev": "vde", "type": "disk", } conf = libvirt_driver.get_config(connection_info, disk_info) tree = conf.format_dom() blockio = tree.find('./blockio') self.assertEqual('4096', blockio.get('logical_block_size')) self.assertEqual('4096', blockio.get('physical_block_size')) def test_libvirt_volume_driver_iotune(self): libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn) connection_info = { 'driver_volume_type': 'fake', 'data': { "device_path": "/foo", 'qos_specs': 'bar', }, } disk_info = { "bus": "virtio", "dev": "vde", "type": "disk", } conf = libvirt_driver.get_config(connection_info, disk_info) tree = conf.format_dom() iotune = tree.find('./iotune') # ensure invalid qos_specs is ignored self.assertIsNone(iotune) specs = { 'total_bytes_sec': '102400', 'read_bytes_sec': '51200', 'write_bytes_sec': '0', 'total_iops_sec': '0', 'read_iops_sec': '200', 
'write_iops_sec': '200', } del connection_info['data']['qos_specs'] connection_info['data'].update(dict(qos_specs=specs)) conf = libvirt_driver.get_config(connection_info, disk_info) tree = conf.format_dom() self.assertEqual('102400', tree.find('./iotune/total_bytes_sec').text) self.assertEqual('51200', tree.find('./iotune/read_bytes_sec').text) self.assertEqual('0', tree.find('./iotune/write_bytes_sec').text) self.assertEqual('0', tree.find('./iotune/total_iops_sec').text) self.assertEqual('200', tree.find('./iotune/read_iops_sec').text) self.assertEqual('200', tree.find('./iotune/write_iops_sec').text) def test_libvirt_volume_driver_readonly(self): libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn) connection_info = { 'driver_volume_type': 'fake', 'data': { "device_path": "/foo", 'access_mode': 'bar', }, } disk_info = { "bus": "virtio", "dev": "vde", "type": "disk", } self.assertRaises(exception.InvalidVolumeAccessMode, libvirt_driver.get_config, connection_info, self.disk_info) connection_info['data']['access_mode'] = 'rw' conf = libvirt_driver.get_config(connection_info, disk_info) tree = conf.format_dom() readonly = tree.find('./readonly') self.assertIsNone(readonly) connection_info['data']['access_mode'] = 'ro' conf = libvirt_driver.get_config(connection_info, disk_info) tree = conf.format_dom() readonly = tree.find('./readonly') self.assertIsNotNone(readonly) @mock.patch('nova.virt.libvirt.host.Host.has_min_version') def test_libvirt_volume_driver_discard_true(self, mock_has_min_version): # Check the discard attrib is present in driver section mock_has_min_version.return_value = True libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn) connection_info = { 'driver_volume_type': 'fake', 'data': { 'device_path': '/foo', 'discard': True, }, 'serial': 'fake_serial', } conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() driver_node = tree.find("driver[@discard]") self.assertIsNotNone(driver_node) 
self.assertEqual('unmap', driver_node.attrib['discard']) def test_libvirt_volume_driver_discard_false(self): # Check the discard attrib is not present in driver section libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn) connection_info = { 'driver_volume_type': 'fake', 'data': { 'device_path': '/foo', 'discard': False, }, 'serial': 'fake_serial', } conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self.assertIsNone(tree.find("driver[@discard]")) @mock.patch('nova.virt.libvirt.host.Host.has_min_version') def test_libvirt_volume_driver_discard_true_bad_version( self, mock_has_min_version): # Check the discard attrib is not present in driver section mock_has_min_version.return_value = False libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn) connection_info = { 'driver_volume_type': 'fake', 'data': { 'device_path': '/foo', 'discard': True, }, 'serial': 'fake_serial', } conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self.assertIsNone(tree.find("driver[@discard]")) nova-13.0.0/nova/tests/unit/virt/libvirt/volume/__init__.py0000664000567000056710000000000012701407773025040 0ustar jenkinsjenkins00000000000000nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_glusterfs.py0000664000567000056710000001656712701407773026407 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import mock from oslo_concurrency import processutils from nova.tests.unit.virt.libvirt.volume import test_volume from nova import utils from nova.virt.libvirt import utils as libvirt_utils from nova.virt.libvirt.volume import glusterfs class LibvirtGlusterfsVolumeDriverTestCase( test_volume.LibvirtVolumeBaseTestCase): def test_libvirt_glusterfs_driver(self): mnt_base = '/mnt' self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt') libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_conn) self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False) export_string = '192.168.1.1:/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(export_string)) connection_info = {'data': {'export': export_string, 'name': self.name}} libvirt_driver.connect_volume(connection_info, self.disk_info) libvirt_driver.disconnect_volume(connection_info, "vde") device_path = os.path.join(export_mnt_base, connection_info['data']['name']) self.assertEqual(connection_info['data']['device_path'], device_path) expected_commands = [ ('mkdir', '-p', export_mnt_base), ('mount', '-t', 'glusterfs', export_string, export_mnt_base), ('umount', export_mnt_base)] self.assertEqual(expected_commands, self.executes) def test_libvirt_glusterfs_driver_get_config(self): mnt_base = '/mnt' self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt') libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_conn) export_string = '192.168.1.1:/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(export_string)) file_path = os.path.join(export_mnt_base, self.name) # Test default format - raw connection_info = {'data': {'export': export_string, 'name': self.name, 'device_path': file_path}} conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertFileTypeEquals(tree, file_path) self.assertEqual('raw', tree.find('./driver').get('type')) # Test specified format - qcow2 connection_info = 
{'data': {'export': export_string, 'name': self.name, 'device_path': file_path, 'format': 'qcow2'}} conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertFileTypeEquals(tree, file_path) self.assertEqual('qcow2', tree.find('./driver').get('type')) def test_libvirt_glusterfs_driver_already_mounted(self): mnt_base = '/mnt' self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt') libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_conn) export_string = '192.168.1.1:/volume-00001' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(export_string)) connection_info = {'data': {'export': export_string, 'name': self.name}} libvirt_driver.connect_volume(connection_info, self.disk_info) libvirt_driver.disconnect_volume(connection_info, "vde") expected_commands = [ ('findmnt', '--target', export_mnt_base, '--source', export_string), ('umount', export_mnt_base)] self.assertEqual(expected_commands, self.executes) @mock.patch.object(glusterfs.utils, 'execute') @mock.patch.object(glusterfs.LOG, 'debug') @mock.patch.object(glusterfs.LOG, 'exception') def test_libvirt_glusterfs_driver_umount_error(self, mock_LOG_exception, mock_LOG_debug, mock_utils_exe): export_string = '192.168.1.1:/volume-00001' connection_info = {'data': {'export': export_string, 'name': self.name}} libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_conn) mock_utils_exe.side_effect = processutils.ProcessExecutionError( None, None, None, 'umount', 'umount: target is busy.') libvirt_driver.disconnect_volume(connection_info, "vde") self.assertTrue(mock_LOG_debug.called) def test_libvirt_glusterfs_driver_with_opts(self): mnt_base = '/mnt' self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt') libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_conn) self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False) export_string = '192.168.1.1:/volume-00001' options = '-o 
backupvolfile-server=192.168.1.2' export_mnt_base = os.path.join(mnt_base, utils.get_hash_str(export_string)) connection_info = {'data': {'export': export_string, 'name': self.name, 'options': options}} libvirt_driver.connect_volume(connection_info, self.disk_info) libvirt_driver.disconnect_volume(connection_info, "vde") expected_commands = [ ('mkdir', '-p', export_mnt_base), ('mount', '-t', 'glusterfs', '-o', 'backupvolfile-server=192.168.1.2', export_string, export_mnt_base), ('umount', export_mnt_base), ] self.assertEqual(expected_commands, self.executes) def test_libvirt_glusterfs_libgfapi(self): self.flags(qemu_allowed_storage_drivers=['gluster'], group='libvirt') libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_conn) self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False) export_string = '192.168.1.1:/volume-00001' name = 'volume-00001' connection_info = {'data': {'export': export_string, 'name': name}} disk_info = { "dev": "vde", "type": "disk", "bus": "virtio", } libvirt_driver.connect_volume(connection_info, disk_info) conf = libvirt_driver.get_config(connection_info, disk_info) tree = conf.format_dom() self.assertEqual('network', tree.get('type')) self.assertEqual('raw', tree.find('./driver').get('type')) source = tree.find('./source') self.assertEqual('gluster', source.get('protocol')) self.assertEqual('volume-00001/volume-00001', source.get('name')) self.assertEqual('192.168.1.1', source.find('./host').get('name')) self.assertEqual('24007', source.find('./host').get('port')) libvirt_driver.disconnect_volume(connection_info, "vde") nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_scality.py0000664000567000056710000001070212701407773026022 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock import nova.exception from nova.tests.unit.virt.libvirt.volume import test_volume from nova.virt.libvirt.volume import scality class LibvirtScalityVolumeDriverTestCase( test_volume.LibvirtVolumeBaseTestCase): def setUp(self): super(LibvirtScalityVolumeDriverTestCase, self).setUp() self.scality_sofs_config = 'fake.conf' self.scality_sofs_mount_point = '/fake' self.flags(scality_sofs_config=self.scality_sofs_config, scality_sofs_mount_point=self.scality_sofs_mount_point, group='libvirt') self.drv = scality.LibvirtScalityVolumeDriver(self.fake_conn) @mock.patch('six.moves.urllib.request.urlopen') def test_connect_volume(self, mock_urlopen): TEST_VOLDIR = 'volumes' TEST_VOLNAME = 'volume_name' TEST_CONN_INFO = { 'data': { 'sofs_path': os.path.join(TEST_VOLDIR, TEST_VOLNAME) } } TEST_VOLPATH = os.path.join(self.scality_sofs_mount_point, TEST_VOLDIR, TEST_VOLNAME) def _access_wrapper(path, flags): if path == '/sbin/mount.sofs': return True else: return os.access(path, flags) self.stub_out('os.access', _access_wrapper) with mock.patch.object(self.drv, '_mount_sofs'): self.drv.connect_volume(TEST_CONN_INFO, self.disk_info) device_path = os.path.join(self.scality_sofs_mount_point, TEST_CONN_INFO['data']['sofs_path']) self.assertEqual(TEST_CONN_INFO['data']['device_path'], device_path) conf = self.drv.get_config(TEST_CONN_INFO, self.disk_info) tree = conf.format_dom() self._assertFileTypeEquals(tree, TEST_VOLPATH) @mock.patch('nova.utils.execute') def test_mount_sofs_when_sofs_already_mounted(self, mock_execute): with mock.patch.object(self.drv, 
'_sofs_is_mounted') as m_is_mounted: m_is_mounted.return_value = True self.drv._mount_sofs() mock_execute.assert_called_once_with('mkdir', '-p', self.scality_sofs_mount_point) self.assertEqual(1, m_is_mounted.call_count) @mock.patch('nova.utils.execute', mock.Mock()) def test_mount_sofs_when_mount_fails(self): with mock.patch.object(self.drv, '_sofs_is_mounted') as m_is_mounted: m_is_mounted.side_effect = [False, False] self.assertRaises(nova.exception.NovaException, self.drv._mount_sofs) self.assertEqual(2, m_is_mounted.call_count) @mock.patch('nova.utils.execute') def test_mount_sofs_when_sofs_is_not_mounted(self, mock_execute): with mock.patch.object(self.drv, '_sofs_is_mounted') as m_is_mounted: m_is_mounted.side_effect = [False, True] self.drv._mount_sofs() self.assertEqual(2, m_is_mounted.call_count) self.assertEqual(2, mock_execute.call_count) expected_calls = [ mock.call('mkdir', '-p', self.scality_sofs_mount_point), mock.call('mount', '-t', 'sofs', self.scality_sofs_config, self.scality_sofs_mount_point, run_as_root=True) ] mock_execute.assert_has_calls(expected_calls) def test_sofs_is_mounted_when_sofs_is_not_mounted(self): mock_open = mock.mock_open(read_data='tmpfs /dev/shm\n') with mock.patch('io.open', mock_open) as mock_open: self.assertFalse(self.drv._sofs_is_mounted()) def test_sofs_is_mounted_when_sofs_is_mounted(self): proc_mount = '/dev/fuse ' + self.scality_sofs_mount_point + '\n' mock_open = mock.mock_open(read_data=proc_mount) with mock.patch('io.open', mock_open) as mock_open: self.assertTrue(self.drv._sofs_is_mounted()) nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_iscsi.py0000664000567000056710000001101712701407773025464 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from os_brick import exception as os_brick_exception from os_brick.initiator import connector from nova.tests.unit.virt.libvirt.volume import test_volume from nova.virt.libvirt.volume import iscsi class LibvirtISCSIVolumeDriverTestCase( test_volume.LibvirtISCSIVolumeBaseTestCase): # TODO(mriedem): move this to os-brick def test_iscsiadm_discover_parsing(self): # Ensure that parsing iscsiadm discover ignores cruft. targets = [ ["192.168.204.82:3260,1", ("iqn.2010-10.org.openstack:volume-" "f9b12623-6ce3-4dac-a71f-09ad4249bdd3")], ["192.168.204.82:3261,1", ("iqn.2010-10.org.openstack:volume-" "f9b12623-6ce3-4dac-a71f-09ad4249bdd4")]] # This slight wonkiness brought to you by pep8, as the actual # example output runs about 97 chars wide. sample_input = """Loading iscsi modules: done Starting iSCSI initiator service: done Setting up iSCSI targets: unused %s %s %s %s """ % (targets[0][0], targets[0][1], targets[1][0], targets[1][1]) driver = iscsi.LibvirtISCSIVolumeDriver("none") out = driver.connector._get_target_portals_from_iscsiadm_output( sample_input) self.assertEqual(targets, out) def test_libvirt_iscsi_driver(self, transport=None): libvirt_driver = iscsi.LibvirtISCSIVolumeDriver(self.fake_conn) self.assertIsInstance(libvirt_driver.connector, connector.ISCSIConnector) # TODO(mriedem): move this to os-brick def test_sanitize_log_run_iscsiadm(self): # Tests that the parameters to the os-brick connector's # _run_iscsiadm function are sanitized for passwords when logged. 
def fake_debug(*args, **kwargs): self.assertIn('node.session.auth.password', args[0]) self.assertNotIn('scrubme', args[0]) def fake_execute(*args, **kwargs): return (None, None) libvirt_driver = iscsi.LibvirtISCSIVolumeDriver(self.fake_conn) libvirt_driver.connector.set_execute(fake_execute) connection_info = self.iscsi_connection(self.vol, self.location, self.iqn) iscsi_properties = connection_info['data'] with mock.patch.object(connector.LOG, 'debug', side_effect=fake_debug) as debug_mock: libvirt_driver.connector._iscsiadm_update( iscsi_properties, 'node.session.auth.password', 'scrubme') # we don't care what the log message is, we just want to make sure # our stub method is called which asserts the password is scrubbed self.assertTrue(debug_mock.called) def test_libvirt_iscsi_driver_get_config(self): libvirt_driver = iscsi.LibvirtISCSIVolumeDriver(self.fake_conn) device_path = '/dev/fake-dev' connection_info = {'data': {'device_path': device_path}} conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self.assertEqual('block', tree.get('type')) self.assertEqual(device_path, tree.find('./source').get('dev')) self.assertEqual('raw', tree.find('./driver').get('type')) self.assertEqual('native', tree.find('./driver').get('io')) @mock.patch.object(iscsi.LOG, 'warning') def test_libvirt_iscsi_driver_disconnect_volume_with_devicenotfound(self, mock_LOG_warning): device_path = '/dev/fake-dev' connection_info = {'data': {'device_path': device_path}} libvirt_driver = iscsi.LibvirtISCSIVolumeDriver(self.fake_conn) libvirt_driver.connector.disconnect_volume = mock.MagicMock( side_effect=os_brick_exception.VolumeDeviceNotFound( device=device_path)) libvirt_driver.disconnect_volume(connection_info, device_path) msg = mock_LOG_warning.call_args_list[0] self.assertIn('Ignoring VolumeDeviceNotFound', msg[0][0]) nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_hgst.py0000664000567000056710000000501212701407773025315 0ustar 
jenkinsjenkins00000000000000# Copyright 2015 HGST # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from os_brick.initiator import connector from nova.tests.unit.virt.libvirt.volume import test_volume from nova.virt.libvirt.volume import hgst # Actual testing of the os_brick HGST driver done in the os_brick testcases # Here we're concerned only with the small API shim that connects Nova # so these will be pretty simple cases. class LibvirtHGSTVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase): def test_libvirt_hgst_driver_type(self): drvr = hgst.LibvirtHGSTVolumeDriver(self.fake_conn) self.assertIsInstance(drvr.connector, connector.HGSTConnector) def test_libvirt_hgst_driver_connect(self): def brick_conn_vol(data): return {'path': '/dev/space01'} drvr = hgst.LibvirtHGSTVolumeDriver(self.fake_conn) drvr.connector.connect_volume = brick_conn_vol di = {'path': '/dev/space01', 'name': 'space01'} ci = {'data': di} drvr.connect_volume(ci, None) self.assertEqual('/dev/space01', ci['data']['device_path']) def test_libvirt_hgst_driver_get_config(self): drvr = hgst.LibvirtHGSTVolumeDriver(self.fake_conn) di = {'path': '/dev/space01', 'name': 'space01', 'type': 'raw', 'dev': 'vda1', 'bus': 'pci0', 'device_path': '/dev/space01'} ci = {'data': di} conf = drvr.get_config(ci, di) self.assertEqual('block', conf.source_type) self.assertEqual('/dev/space01', conf.source_path) def test_libvirt_hgst_driver_disconnect(self): drvr = 
hgst.LibvirtHGSTVolumeDriver(self.fake_conn) drvr.connector.disconnect_volume = mock.MagicMock() di = {'path': '/dev/space01', 'name': 'space01', 'type': 'raw', 'dev': 'vda1', 'bus': 'pci0', 'device_path': '/dev/space01'} ci = {'data': di} drvr.disconnect_volume(ci, di) drvr.connector.disconnect_volume.assert_called_once_with( di, None) nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_fs.py0000664000567000056710000000363112701407773024765 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import mock from nova import test from nova import utils from nova.virt.libvirt.volume import fs FAKE_MOUNT_POINT = '/var/lib/nova/fake-mount' FAKE_SHARE = 'fake-share' NORMALIZED_SHARE = FAKE_SHARE + '-normalized' HASHED_SHARE = utils.get_hash_str(NORMALIZED_SHARE) FAKE_DEVICE_NAME = 'fake-device' class FakeFileSystemVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver): def _get_mount_point_base(self): return FAKE_MOUNT_POINT def _normalize_export(self, export): return NORMALIZED_SHARE class LibvirtBaseFileSystemVolumeDriverTestCase(test.NoDBTestCase): """Tests the basic behavior of the LibvirtBaseFileSystemVolumeDriver""" def setUp(self): super(LibvirtBaseFileSystemVolumeDriverTestCase, self).setUp() self.connection = mock.Mock() self.driver = FakeFileSystemVolumeDriver(self.connection) self.connection_info = { 'data': { 'export': FAKE_SHARE, 'name': FAKE_DEVICE_NAME, } } def test_get_device_path(self): path = self.driver._get_device_path(self.connection_info) expected_path = os.path.join(FAKE_MOUNT_POINT, HASHED_SHARE, FAKE_DEVICE_NAME) self.assertEqual(expected_path, path) nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_nfs.py0000664000567000056710000001415512701407773025146 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import mock from oslo_concurrency import processutils from nova.tests.unit.virt.libvirt.volume import test_volume from nova import utils from nova.virt.libvirt import utils as libvirt_utils from nova.virt.libvirt.volume import nfs class LibvirtNFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase): """Tests the libvirt NFS volume driver.""" def setUp(self): super(LibvirtNFSVolumeDriverTestCase, self).setUp() self.mnt_base = '/mnt' self.flags(nfs_mount_point_base=self.mnt_base, group='libvirt') def test_libvirt_nfs_driver(self): libvirt_driver = nfs.LibvirtNFSVolumeDriver(self.fake_conn) self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False) export_string = '192.168.1.1:/nfs/share1' export_mnt_base = os.path.join(self.mnt_base, utils.get_hash_str(export_string)) connection_info = {'data': {'export': export_string, 'name': self.name}} libvirt_driver.connect_volume(connection_info, self.disk_info) libvirt_driver.disconnect_volume(connection_info, "vde") device_path = os.path.join(export_mnt_base, connection_info['data']['name']) self.assertEqual(connection_info['data']['device_path'], device_path) expected_commands = [ ('mkdir', '-p', export_mnt_base), ('mount', '-t', 'nfs', export_string, export_mnt_base), ('umount', export_mnt_base)] self.assertEqual(expected_commands, self.executes) @mock.patch.object(nfs.utils, 'execute') @mock.patch.object(nfs.LOG, 'debug') @mock.patch.object(nfs.LOG, 'exception') def test_libvirt_nfs_driver_umount_error(self, mock_LOG_exception, mock_LOG_debug, mock_utils_exe): export_string = '192.168.1.1:/nfs/share1' connection_info = {'data': {'export': export_string, 'name': self.name}} libvirt_driver = nfs.LibvirtNFSVolumeDriver(self.fake_conn) mock_utils_exe.side_effect = processutils.ProcessExecutionError( None, None, None, 'umount', 'umount: device is busy.') libvirt_driver.disconnect_volume(connection_info, "vde") self.assertTrue(mock_LOG_debug.called) mock_utils_exe.side_effect = 
processutils.ProcessExecutionError( None, None, None, 'umount', 'umount: target is busy.') libvirt_driver.disconnect_volume(connection_info, "vde") self.assertTrue(mock_LOG_debug.called) mock_utils_exe.side_effect = processutils.ProcessExecutionError( None, None, None, 'umount', 'umount: not mounted.') libvirt_driver.disconnect_volume(connection_info, "vde") self.assertTrue(mock_LOG_debug.called) mock_utils_exe.side_effect = processutils.ProcessExecutionError( None, None, None, 'umount', 'umount: Other error.') libvirt_driver.disconnect_volume(connection_info, "vde") self.assertTrue(mock_LOG_exception.called) def test_libvirt_nfs_driver_get_config(self): libvirt_driver = nfs.LibvirtNFSVolumeDriver(self.fake_conn) export_string = '192.168.1.1:/nfs/share1' export_mnt_base = os.path.join(self.mnt_base, utils.get_hash_str(export_string)) file_path = os.path.join(export_mnt_base, self.name) connection_info = {'data': {'export': export_string, 'name': self.name, 'device_path': file_path}} conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertFileTypeEquals(tree, file_path) self.assertEqual('raw', tree.find('./driver').get('type')) self.assertEqual('native', tree.find('./driver').get('io')) def test_libvirt_nfs_driver_already_mounted(self): libvirt_driver = nfs.LibvirtNFSVolumeDriver(self.fake_conn) export_string = '192.168.1.1:/nfs/share1' export_mnt_base = os.path.join(self.mnt_base, utils.get_hash_str(export_string)) connection_info = {'data': {'export': export_string, 'name': self.name}} libvirt_driver.connect_volume(connection_info, self.disk_info) libvirt_driver.disconnect_volume(connection_info, "vde") expected_commands = [ ('findmnt', '--target', export_mnt_base, '--source', export_string), ('umount', export_mnt_base)] self.assertEqual(expected_commands, self.executes) def test_libvirt_nfs_driver_with_opts(self): libvirt_driver = nfs.LibvirtNFSVolumeDriver(self.fake_conn) self.stubs.Set(libvirt_utils, 'is_mounted', 
lambda x, d: False) export_string = '192.168.1.1:/nfs/share1' options = '-o intr,nfsvers=3' export_mnt_base = os.path.join(self.mnt_base, utils.get_hash_str(export_string)) connection_info = {'data': {'export': export_string, 'name': self.name, 'options': options}} libvirt_driver.connect_volume(connection_info, self.disk_info) libvirt_driver.disconnect_volume(connection_info, "vde") expected_commands = [ ('mkdir', '-p', export_mnt_base), ('mount', '-t', 'nfs', '-o', 'intr,nfsvers=3', export_string, export_mnt_base), ('umount', export_mnt_base), ] self.assertEqual(expected_commands, self.executes) nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_iser.py0000664000567000056710000000167612701407773025326 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.tests.unit.virt.libvirt.volume import test_volume from nova.virt.libvirt.volume import iser class LibvirtISERVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase): """Tests the libvirt iSER volume driver.""" def test_get_transport(self): driver = iser.LibvirtISERVolumeDriver(self.fake_conn) self.assertEqual('iser', driver._get_transport()) nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_smbfs.py0000664000567000056710000001101212701407773025457 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from nova.tests.unit.virt.libvirt.volume import test_volume from nova import utils from nova.virt.libvirt import utils as libvirt_utils from nova.virt.libvirt.volume import smbfs class LibvirtSMBFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase): """Tests the libvirt SMBFS volume driver.""" def setUp(self): super(LibvirtSMBFSVolumeDriverTestCase, self).setUp() self.mnt_base = '/mnt' self.flags(smbfs_mount_point_base=self.mnt_base, group='libvirt') @mock.patch.object(libvirt_utils, 'is_mounted') def test_libvirt_smbfs_driver(self, mock_is_mounted): mock_is_mounted.return_value = False libvirt_driver = smbfs.LibvirtSMBFSVolumeDriver(self.fake_conn) export_string = '//192.168.1.1/volumes' export_mnt_base = os.path.join(self.mnt_base, utils.get_hash_str(export_string)) connection_info = {'data': {'export': export_string, 'name': self.name, 'options': None}} libvirt_driver.connect_volume(connection_info, self.disk_info) libvirt_driver.disconnect_volume(connection_info, "vde") expected_commands = [ ('mkdir', '-p', export_mnt_base), ('mount', '-t', 'cifs', '-o', 'username=guest', export_string, export_mnt_base), ('umount', export_mnt_base)] self.assertEqual(expected_commands, self.executes) def test_libvirt_smbfs_driver_already_mounted(self): libvirt_driver = smbfs.LibvirtSMBFSVolumeDriver(self.fake_conn) export_string = '//192.168.1.1/volumes' export_mnt_base = os.path.join(self.mnt_base, utils.get_hash_str(export_string)) connection_info = {'data': {'export': export_string, 'name': self.name}} 
libvirt_driver.connect_volume(connection_info, self.disk_info) libvirt_driver.disconnect_volume(connection_info, "vde") expected_commands = [ ('findmnt', '--target', export_mnt_base, '--source', export_string), ('umount', export_mnt_base)] self.assertEqual(expected_commands, self.executes) def test_libvirt_smbfs_driver_get_config(self): libvirt_driver = smbfs.LibvirtSMBFSVolumeDriver(self.fake_conn) export_string = '//192.168.1.1/volumes' export_mnt_base = os.path.join(self.mnt_base, utils.get_hash_str(export_string)) file_path = os.path.join(export_mnt_base, self.name) connection_info = {'data': {'export': export_string, 'name': self.name, 'device_path': file_path}} conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertFileTypeEquals(tree, file_path) @mock.patch.object(libvirt_utils, 'is_mounted') def test_libvirt_smbfs_driver_with_opts(self, mock_is_mounted): mock_is_mounted.return_value = False libvirt_driver = smbfs.LibvirtSMBFSVolumeDriver(self.fake_conn) export_string = '//192.168.1.1/volumes' options = '-o user=guest,uid=107,gid=105' export_mnt_base = os.path.join(self.mnt_base, utils.get_hash_str(export_string)) connection_info = {'data': {'export': export_string, 'name': self.name, 'options': options}} libvirt_driver.connect_volume(connection_info, self.disk_info) libvirt_driver.disconnect_volume(connection_info, "vde") expected_commands = [ ('mkdir', '-p', export_mnt_base), ('mount', '-t', 'cifs', '-o', 'user=guest,uid=107,gid=105', export_string, export_mnt_base), ('umount', export_mnt_base)] self.assertEqual(expected_commands, self.executes) nova-13.0.0/nova/tests/unit/virt/libvirt/volume/test_scaleio.py0000664000567000056710000000503012701407773025767 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
class LibvirtScaleIOVolumeDriverTestCase(
        test_volume.LibvirtVolumeBaseTestCase):
    """Tests for the libvirt ScaleIO (EMC) volume driver."""

    def test_libvirt_scaleio_driver(self):
        # The driver must wire up an os-brick ScaleIO connector.
        drv = scaleio.LibvirtScaleIOVolumeDriver(self.fake_conn)
        self.assertIsInstance(drv.connector, connector.ScaleIOConnector)

    def test_libvirt_scaleio_driver_connect(self):
        # connect_volume() must copy the path reported by the brick
        # connector into connection_info['data']['device_path'].
        drv = scaleio.LibvirtScaleIOVolumeDriver(self.fake_conn)
        drv.connector.connect_volume = mock.Mock(
            return_value={'path': '/dev/vol01'})
        conn = {'data': {'path': '/dev/vol01', 'name': 'vol01'}}

        drv.connect_volume(conn, None)

        self.assertEqual('/dev/vol01', conn['data']['device_path'])

    def test_libvirt_scaleio_driver_get_config(self):
        # get_config() should describe the attached device as a block
        # disk backed by the connected device path.
        drv = scaleio.LibvirtScaleIOVolumeDriver(self.fake_conn)
        disk_info = {'path': '/dev/vol01',
                     'name': 'vol01',
                     'type': 'raw',
                     'dev': 'vda1',
                     'bus': 'pci0',
                     'device_path': '/dev/vol01'}

        conf = drv.get_config({'data': disk_info}, disk_info)

        self.assertEqual('block', conf.source_type)
        self.assertEqual('/dev/vol01', conf.source_path)

    def test_libvirt_scaleio_driver_disconnect(self):
        # disconnect_volume() must delegate to the brick connector with
        # the connection data and no device info.
        drv = scaleio.LibvirtScaleIOVolumeDriver(self.fake_conn)
        drv.connector.disconnect_volume = mock.MagicMock()
        disk_info = {'path': '/dev/vol01',
                     'name': 'vol01',
                     'type': 'raw',
                     'dev': 'vda1',
                     'bus': 'pci0',
                     'device_path': '/dev/vol01'}

        drv.disconnect_volume({'data': disk_info}, disk_info)

        drv.connector.disconnect_volume.assert_called_once_with(
            disk_info, None)
class LibvirtFibreChannelVolumeDriverTestCase(
        test_volume.LibvirtVolumeBaseTestCase):
    """Tests for the libvirt Fibre Channel volume driver."""

    def _make_driver(self):
        return fibrechannel.LibvirtFibreChannelVolumeDriver(self.fake_conn)

    def test_libvirt_fibrechan_driver(self):
        # On non-s390 hosts the plain FC connector is used.
        self.assertIsInstance(self._make_driver().connector,
                              connector.FibreChannelConnector)

    def _test_libvirt_fibrechan_driver_s390(self):
        # On s390/s390x the driver must pick the S390X-specific connector.
        self.assertIsInstance(self._make_driver().connector,
                              connector.FibreChannelConnectorS390X)

    @mock.patch.object(platform, 'machine', return_value=arch.S390)
    def test_libvirt_fibrechan_driver_s390(self, mock_machine):
        self._test_libvirt_fibrechan_driver_s390()

    @mock.patch.object(platform, 'machine', return_value=arch.S390X)
    def test_libvirt_fibrechan_driver_s390x(self, mock_machine):
        self._test_libvirt_fibrechan_driver_s390()

    def test_libvirt_fibrechan_driver_get_config(self):
        # get_config() should emit a raw block disk with native I/O,
        # sourced from the discovered device path.
        device_path = '/dev/fake-dev'
        conf = self._make_driver().get_config(
            {'data': {'device_path': device_path}}, self.disk_info)
        tree = conf.format_dom()

        self.assertEqual('block', tree.get('type'))
        self.assertEqual(device_path, tree.find('./source').get('dev'))
        self.assertEqual('raw', tree.find('./driver').get('type'))
        self.assertEqual('native', tree.find('./driver').get('io'))
class LibvirtGPFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
    """Tests for the libvirt GPFS volume driver."""

    def test_libvirt_gpfs_driver_get_config(self):
        # GPFS volumes are plain files on a shared filesystem, so the
        # generated disk must be file-typed and carry the volume serial.
        drv = gpfs.LibvirtGPFSVolumeDriver(self.fake_conn)
        connection_info = {
            'driver_volume_type': 'gpfs',
            'data': {'device_path': '/gpfs/foo'},
            'serial': 'fake_serial',
        }

        conf = drv.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()

        self.assertEqual('file', tree.get('type'))
        self.assertEqual('fake_serial', tree.find('./serial').text)
class LibvirtAOEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
    """Tests for the libvirt ATA-over-Ethernet volume driver."""

    @mock.patch('os.path.exists', return_value=True)
    def test_libvirt_aoe_driver(self, exists):
        # The driver must wire up an os-brick AoE connector; os.path.exists
        # is patched because the connector probes for the aoe tools.
        drv = aoe.LibvirtAOEVolumeDriver(self.fake_conn)
        self.assertIsInstance(drv.connector, connector.AoEConnector)
import mock
from oslo_concurrency import processutils

from nova import test
from nova import utils
from nova.virt.libvirt.volume import remotefs


class RemoteFSTestCase(test.NoDBTestCase):
    """Remote filesystem operations test case."""

    @mock.patch.object(utils, 'execute')
    def _test_mount_share(self, mock_execute, already_mounted=False):
        # Shared helper for the mount tests.  When already_mounted is True
        # the second execute() call (the actual 'mount') raises EBUSY,
        # which mount_share() is expected to tolerate silently.
        if already_mounted:
            err_msg = 'Device or resource busy'
            mock_execute.side_effect = [
                None, processutils.ProcessExecutionError(err_msg)]

        remotefs.mount_share(
            mock.sentinel.mount_path, mock.sentinel.export_path,
            mock.sentinel.export_type,
            options=[mock.sentinel.mount_options])

        # mount_share() must first create the mount point, then mount
        # the export on it as root.
        mock_execute.assert_any_call('mkdir', '-p',
                                     mock.sentinel.mount_path)
        mock_execute.assert_any_call('mount', '-t',
                                     mock.sentinel.export_type,
                                     mock.sentinel.mount_options,
                                     mock.sentinel.export_path,
                                     mock.sentinel.mount_path,
                                     run_as_root=True)

    def test_mount_new_share(self):
        self._test_mount_share()

    def test_mount_already_mounted_share(self):
        self._test_mount_share(already_mounted=True)

    @mock.patch.object(utils, 'execute')
    def test_unmount_share(self, mock_execute):
        # unmount_share() retries the umount a few times as root.
        remotefs.unmount_share(
            mock.sentinel.mount_path, mock.sentinel.export_path)

        mock_execute.assert_any_call('umount', mock.sentinel.mount_path,
                                     run_as_root=True, attempts=3,
                                     delay_on_retry=True)

    @mock.patch('tempfile.mkdtemp', return_value='/tmp/Mercury')
    @mock.patch('nova.utils.execute')
    def test_remove_remote_file_rsync(self, mock_execute, mock_mkdtemp):
        # RsyncDriver removes a remote file by rsync'ing an empty staging
        # dir with --delete for just that file, then cleans the staging dir.
        remotefs.RsyncDriver().remove_file('host', 'dest', None, None)
        rsync_call_args = mock.call('rsync', '--archive', '--delete',
                                    '--include', 'dest', '--exclude', '*',
                                    '/tmp/Mercury/', 'host:',
                                    on_completion=None, on_execute=None)
        self.assertEqual(mock_execute.mock_calls[0], rsync_call_args)
        rm_call_args = mock.call('rm', '-rf', '/tmp/Mercury')
        self.assertEqual(mock_execute.mock_calls[1], rm_call_args)
        self.assertEqual(2, mock_execute.call_count)
        self.assertEqual(1, mock_mkdtemp.call_count)

    @mock.patch('nova.utils.execute')
    def test_remove_remote_file_ssh(self, mock_execute):
        # SshDriver simply runs 'rm' over ssh.
        remotefs.SshDriver().remove_file('host', 'dest', None, None)
        mock_execute.assert_called_once_with(
            'ssh', 'host', 'rm', 'dest',
            on_completion=None, on_execute=None)

    @mock.patch('tempfile.mkdtemp', return_value='/tmp/Venus')
    @mock.patch('nova.utils.execute')
    def test_remove_remote_dir_rsync(self, mock_execute, mock_mkdtemp):
        # Removing a directory needs two rsync passes: first empty the
        # directory contents (--delete-excluded), then delete the directory
        # entry itself.
        remotefs.RsyncDriver().remove_dir('host', 'dest', None, None)
        rsync_call_args = mock.call('rsync', '--archive',
                                    '--delete-excluded', '/tmp/Venus/',
                                    'host:dest',
                                    on_completion=None, on_execute=None)
        self.assertEqual(mock_execute.mock_calls[0], rsync_call_args)
        rsync_call_args = mock.call('rsync', '--archive', '--delete',
                                    '--include', 'dest', '--exclude', '*',
                                    '/tmp/Venus/', 'host:',
                                    on_completion=None, on_execute=None)
        self.assertEqual(mock_execute.mock_calls[1], rsync_call_args)
        rm_call_args = mock.call('rm', '-rf', '/tmp/Venus')
        self.assertEqual(mock_execute.mock_calls[2], rm_call_args)
        self.assertEqual(3, mock_execute.call_count)
        self.assertEqual(1, mock_mkdtemp.call_count)

    @mock.patch('nova.utils.execute')
    def test_remove_remote_dir_ssh(self, mock_execute):
        remotefs.SshDriver().remove_dir('host', 'dest', None, None)
        mock_execute.assert_called_once_with(
            'ssh', 'host', 'rm', '-rf', 'dest', on_completion=None,
            on_execute=None)

    @mock.patch('tempfile.mkdtemp', return_value='/tmp/Mars')
    @mock.patch('nova.utils.execute')
    def test_create_remote_file_rsync(self, mock_execute, mock_mkdtemp):
        # RsyncDriver creates a remote file by touching it in a local
        # staging dir and rsync'ing that one path (--relative keeps the
        # destination path structure).
        remotefs.RsyncDriver().create_file('host', 'dest_dir', None, None)
        mkdir_call_args = mock.call('mkdir', '-p', '/tmp/Mars/',
                                    on_completion=None, on_execute=None)
        self.assertEqual(mock_execute.mock_calls[0], mkdir_call_args)
        touch_call_args = mock.call('touch', '/tmp/Mars/dest_dir',
                                    on_completion=None, on_execute=None)
        self.assertEqual(mock_execute.mock_calls[1], touch_call_args)
        rsync_call_args = mock.call('rsync', '--archive', '--relative',
                                    '--no-implied-dirs',
                                    '/tmp/Mars/./dest_dir', 'host:/',
                                    on_completion=None, on_execute=None)
        self.assertEqual(mock_execute.mock_calls[2], rsync_call_args)
        rm_call_args = mock.call('rm', '-rf', '/tmp/Mars')
        self.assertEqual(mock_execute.mock_calls[3], rm_call_args)
        self.assertEqual(4, mock_execute.call_count)
        self.assertEqual(1, mock_mkdtemp.call_count)

    @mock.patch('nova.utils.execute')
    def test_create_remote_file_ssh(self, mock_execute):
        remotefs.SshDriver().create_file('host', 'dest_dir', None, None)
        mock_execute.assert_called_once_with('ssh', 'host', 'touch',
                                             'dest_dir',
                                             on_completion=None,
                                             on_execute=None)

    @mock.patch('tempfile.mkdtemp', return_value='/tmp/Jupiter')
    @mock.patch('nova.utils.execute')
    def test_create_remote_dir_rsync(self, mock_execute, mock_mkdtemp):
        # Same staging-dir trick as create_file, but mkdir'ing the target
        # directory locally before rsync'ing it across.
        remotefs.RsyncDriver().create_dir('host', 'dest_dir', None, None)
        mkdir_call_args = mock.call('mkdir', '-p', '/tmp/Jupiter/dest_dir',
                                    on_completion=None, on_execute=None)
        self.assertEqual(mock_execute.mock_calls[0], mkdir_call_args)
        rsync_call_args = mock.call('rsync', '--archive', '--relative',
                                    '--no-implied-dirs',
                                    '/tmp/Jupiter/./dest_dir', 'host:/',
                                    on_completion=None, on_execute=None)
        self.assertEqual(mock_execute.mock_calls[1], rsync_call_args)
        rm_call_args = mock.call('rm', '-rf', '/tmp/Jupiter')
        self.assertEqual(mock_execute.mock_calls[2], rm_call_args)
        self.assertEqual(3, mock_execute.call_count)
        self.assertEqual(1, mock_mkdtemp.call_count)

    @mock.patch('nova.utils.execute')
    def test_create_remote_dir_ssh(self, mock_execute):
        remotefs.SshDriver().create_dir('host', 'dest_dir', None, None)
        mock_execute.assert_called_once_with('ssh', 'host', 'mkdir',
                                             '-p', 'dest_dir',
                                             on_completion=None,
                                             on_execute=None)

    @mock.patch('nova.utils.execute')
    def test_remote_copy_file_rsync(self, mock_execute):
        # compression=True must append '--compress' to the rsync command.
        remotefs.RsyncDriver().copy_file('1.2.3.4:/home/star_wars',
                                         '/home/favourite', None, None,
                                         compression=True)
        mock_execute.assert_called_once_with('rsync', '--sparse',
                                             '1.2.3.4:/home/star_wars',
                                             '/home/favourite',
                                             '--compress',
                                             on_completion=None,
                                             on_execute=None)

    @mock.patch('nova.utils.execute')
    def test_remote_copy_file_rsync_without_compression(self, mock_execute):
        remotefs.RsyncDriver().copy_file('1.2.3.4:/home/star_wars',
                                         '/home/favourite', None, None,
                                         compression=False)
        mock_execute.assert_called_once_with('rsync', '--sparse',
                                             '1.2.3.4:/home/star_wars',
                                             '/home/favourite',
                                             on_completion=None,
                                             on_execute=None)

    @mock.patch('nova.utils.execute')
    def test_remote_copy_file_ssh(self, mock_execute):
        # The ssh driver always uses scp; the compression flag is ignored.
        remotefs.SshDriver().copy_file('1.2.3.4:/home/SpaceOdyssey',
                                       '/home/favourite', None, None, True)
        mock_execute.assert_called_once_with('scp',
                                             '1.2.3.4:/home/SpaceOdyssey',
                                             '/home/favourite',
                                             on_completion=None,
                                             on_execute=None)
import mock

from nova.compute import power_state
from nova import test
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt.libvirt import compat
from nova.virt.libvirt import host


class CompatTestCase(test.NoDBTestCase):
    """Tests for the libvirt compat shims."""

    def setUp(self):
        super(CompatTestCase, self).setUp()
        self.useFixture(fakelibvirt.FakeLibvirtFixture())

    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_domain_info(self, mock_has_min_version):
        """Exercise get_domain_info() with and without the libvirt race.

        'race' simulates the libvirt OPERATION_FAILED error raised when
        domain cputime cannot be read.  With a new-enough libvirt
        (has_min_version True) the error must propagate unchanged; with an
        older libvirt the call is retried once before giving up.
        """
        test_host = host.Host("qemu:///system")
        domain = mock.MagicMock()
        expected = [power_state.RUNNING, 512, 512, None, None]
        race = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'ERR',
            error_code=fakelibvirt.VIR_ERR_OPERATION_FAILED,
            error_message='cannot read cputime for domain')

        # New libvirt, no error: single info() call, value passed through.
        mock_has_min_version.return_value = True
        domain.info.return_value = expected
        actual = compat.get_domain_info(fakelibvirt, test_host, domain)
        self.assertEqual(actual, expected)
        self.assertEqual(domain.info.call_count, 1)
        domain.info.reset_mock()

        # New libvirt, race raised: no retry, the error propagates.
        domain.info.side_effect = race
        self.assertRaises(fakelibvirt.libvirtError,
                          compat.get_domain_info,
                          fakelibvirt, test_host, domain)
        self.assertEqual(domain.info.call_count, 1)
        domain.info.reset_mock()

        # Old libvirt, race then success: one retry recovers the value.
        mock_has_min_version.return_value = False
        domain.info.side_effect = [race, expected]
        actual = compat.get_domain_info(fakelibvirt, test_host, domain)
        self.assertEqual(actual, expected)
        self.assertEqual(domain.info.call_count, 2)
        domain.info.reset_mock()

        # Old libvirt, persistent race: error raised after the retry.
        domain.info.side_effect = race
        self.assertRaises(fakelibvirt.libvirtError,
                          compat.get_domain_info,
                          fakelibvirt, test_host, domain)
        self.assertEqual(domain.info.call_count, 2)
import mock

from nova.pci import utils as pci_utils
from nova import test
from nova.tests.unit import matchers
from nova.virt.libvirt import config
from nova.virt.libvirt import designer


class DesignerTestCase(test.NoDBTestCase):
    """Tests for the libvirt guest-interface designer helpers.

    Each test builds a fresh LibvirtConfigGuestInterface, runs one
    designer.set_vif_* helper on it with placeholder values, and checks
    the fields the helper is supposed to populate.
    """

    def test_set_vif_bandwidth_config_no_extra_specs(self):
        # set_vif_bandwidth_config() must not raise when its second
        # parameter has no 'extra_specs' field.
        try:
            # The conf will never be used, so 'None' is fine.  An empty
            # dictionary is fine too: all that matters is that there is
            # no 'extra_specs' field.
            designer.set_vif_bandwidth_config(None, {})
        except KeyError as e:
            self.fail('KeyError: %s' % e)

    def test_set_vif_guest_frontend_config(self):
        conf = config.LibvirtConfigGuestInterface()
        designer.set_vif_guest_frontend_config(conf, 'fake-mac',
                                               'fake-model', 'fake-driver',
                                               'fake-queues')
        self.assertEqual('fake-mac', conf.mac_addr)
        self.assertEqual('fake-model', conf.model)
        self.assertEqual('fake-driver', conf.driver_name)
        self.assertEqual('fake-queues', conf.vhost_queues)

    def test_set_vif_host_backend_bridge_config(self):
        conf = config.LibvirtConfigGuestInterface()
        designer.set_vif_host_backend_bridge_config(conf, 'fake-bridge',
                                                    'fake-tap')
        self.assertEqual('bridge', conf.net_type)
        self.assertEqual('fake-bridge', conf.source_dev)
        self.assertEqual('fake-tap', conf.target_dev)

    def test_set_vif_host_backend_ethernet_config(self):
        conf = config.LibvirtConfigGuestInterface()
        designer.set_vif_host_backend_ethernet_config(conf, 'fake-tap')
        self.assertEqual('ethernet', conf.net_type)
        self.assertEqual('fake-tap', conf.target_dev)
        # No ifup script is expected for plain ethernet backends.
        self.assertEqual('', conf.script)

    def test_set_vif_host_backend_ovs_config(self):
        conf = config.LibvirtConfigGuestInterface()
        designer.set_vif_host_backend_ovs_config(conf, 'fake-bridge',
                                                 'fake-interface',
                                                 'fake-tap')
        self.assertEqual('bridge', conf.net_type)
        self.assertEqual('fake-bridge', conf.source_dev)
        self.assertEqual('openvswitch', conf.vporttype)
        self.assertEqual('fake-tap', conf.target_dev)

    def test_set_vif_host_backend_802qbg_config(self):
        conf = config.LibvirtConfigGuestInterface()
        designer.set_vif_host_backend_802qbg_config(conf, 'fake-devname',
                                                    'fake-managerid',
                                                    'fake-typeid',
                                                    'fake-typeidversion',
                                                    'fake-instanceid',
                                                    'fake-tap')
        self.assertEqual('direct', conf.net_type)
        self.assertEqual('fake-devname', conf.source_dev)
        self.assertEqual('vepa', conf.source_mode)
        self.assertEqual('802.1Qbg', conf.vporttype)
        # The 802.1Qbg virtualport parameters are emitted as a list of
        # key/value dicts.
        expected = [{'key': 'managerid', 'value': 'fake-managerid'},
                    {'key': 'typeid', 'value': 'fake-typeid'},
                    {'key': 'typeidversion',
                     'value': 'fake-typeidversion'},
                    {'key': 'instanceid', 'value': 'fake-instanceid'}]
        self.assertThat(expected,
                        matchers.DictListMatches(conf.vportparams))
        self.assertEqual('fake-tap', conf.target_dev)

    @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
                       return_value='fake-devname')
    def test_set_vif_host_backend_802qbh_config_direct(self, mock_pci):
        # In 'direct' mode the PCI address is resolved to an interface
        # name via pci_utils.
        conf = config.LibvirtConfigGuestInterface()
        designer.set_vif_host_backend_802qbh_config(conf, 'direct',
                                                    'fake-pci-dev',
                                                    'fake-profileid',
                                                    'fake-tap')
        self.assertEqual('direct', conf.net_type)
        self.assertEqual('fake-devname', conf.source_dev)
        self.assertEqual('passthrough', conf.source_mode)
        self.assertEqual('vhost', conf.driver_name)
        mock_pci.assert_called_with('fake-pci-dev')
        self.assertEqual('802.1Qbh', conf.vporttype)
        self.assertEqual('fake-tap', conf.target_dev)

    def test_set_vif_host_backend_802qbh_config_hostdev(self):
        # In 'hostdev' mode the device name is used as-is and no model
        # is set.
        conf = config.LibvirtConfigGuestInterface()
        designer.set_vif_host_backend_802qbh_config(conf, 'hostdev',
                                                    'fake-devname',
                                                    'fake-profileid',
                                                    'fake-tap')
        self.assertEqual('hostdev', conf.net_type)
        self.assertEqual('fake-devname', conf.source_dev)
        self.assertIsNone(conf.model)
        self.assertEqual('802.1Qbh', conf.vporttype)
        self.assertEqual('fake-tap', conf.target_dev)

    @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
                       return_value='fake-devname')
    def test_set_vif_host_backend_hw_veb_direct(self, mock_pci):
        conf = config.LibvirtConfigGuestInterface()
        designer.set_vif_host_backend_hw_veb(conf, 'direct',
                                             'fake-pci-dev', 'fake-vlan',
                                             'fake-tap')
        self.assertEqual('direct', conf.net_type)
        self.assertEqual('fake-devname', conf.source_dev)
        self.assertEqual('passthrough', conf.source_mode)
        self.assertEqual('vhost', conf.driver_name)
        self.assertEqual('fake-tap', conf.target_dev)
        mock_pci.assert_called_with('fake-pci-dev')

    def test_set_vif_host_backend_hw_veb_hostdev(self):
        conf = config.LibvirtConfigGuestInterface()
        designer.set_vif_host_backend_hw_veb(conf, 'hostdev',
                                             'fake-devname', 'fake-vlan',
                                             'fake-tap')
        self.assertEqual('hostdev', conf.net_type)
        self.assertEqual('fake-devname', conf.source_dev)
        self.assertIsNone(conf.model)
        self.assertEqual('fake-vlan', conf.vlan)
        self.assertEqual('fake-tap', conf.target_dev)

    @mock.patch.object(pci_utils, 'get_pci_address_fields',
                       return_value=('fake-domain', 'fake-bus',
                                     'fake-slot', 'fake-function'))
    def test_set_vif_host_backend_ib_hostdev_config(self, mock_pci_fields):
        # The PCI address string is decomposed into its four fields.
        conf = config.LibvirtConfigGuestInterface()
        designer.set_vif_host_backend_ib_hostdev_config(conf,
                                                        'fake-pci-slot')
        self.assertEqual('fake-domain', conf.domain)
        self.assertEqual('fake-bus', conf.bus)
        self.assertEqual('fake-slot', conf.slot)
        self.assertEqual('fake-function', conf.function)
        mock_pci_fields.assert_called_with('fake-pci-slot')

    def test_set_vif_host_backend_direct_config(self):
        conf = config.LibvirtConfigGuestInterface()
        designer.set_vif_host_backend_direct_config(conf, 'fake-devname',
                                                    mode="passthrough")
        self.assertEqual('direct', conf.net_type)
        self.assertEqual('fake-devname', conf.source_dev)
        self.assertEqual('passthrough', conf.source_mode)
        self.assertEqual('virtio', conf.model)

    def test_set_vif_host_backend_vhostuser_config(self):
        conf = config.LibvirtConfigGuestInterface()
        designer.set_vif_host_backend_vhostuser_config(conf, 'fake-mode',
                                                       'fake-path')
        self.assertEqual('vhostuser', conf.net_type)
        self.assertEqual('unix', conf.vhostuser_type)
        self.assertEqual('fake-mode', conf.vhostuser_mode)
        self.assertEqual('fake-path', conf.vhostuser_path)
import os

from six.moves import StringIO

from nova.virt.libvirt import utils as libvirt_utils


# Mutable module state: tests seed these to control the fake's answers.
files = {'console.log': True}
disk_sizes = {}
disk_backing_files = {}
disk_type = "qcow2"

RESIZE_SNAPSHOT_NAME = libvirt_utils.RESIZE_SNAPSHOT_NAME


def create_image(disk_format, path, size):
    """No-op stand-in for image creation."""
    pass


def create_cow_image(backing_file, path):
    """No-op stand-in for COW image creation."""
    pass


def get_disk_size(path, format=None):
    # The fake reports every disk as empty.
    return 0


def get_disk_backing_file(path, format=None):
    return disk_backing_files.get(path, None)


def get_disk_type_from_path(path):
    # Mirror the real helper: raw/qcow2 report no special type.
    return None if disk_type in ('raw', 'qcow2') else disk_type


def copy_image(src, dest):
    pass


def resize2fs(path):
    pass


def create_lvm_image(vg, lv, size, sparse=False):
    pass


def volume_group_free_space(vg):
    pass


def remove_logical_volumes(*paths):
    pass


def write_to_file(path, contents, umask=None):
    pass


def chown(path, owner):
    pass


def update_mtime(path):
    pass


def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
    # Record the extracted snapshot as an (empty) in-memory file.
    files[out_path] = ''


class File(object):
    """In-memory file object backed by the module-level ``files`` dict."""

    def __init__(self, path, mode=None):
        # Fall back to the basename when the full path isn't registered.
        key = path if path in files else os.path.split(path)[-1]
        self.fp = StringIO(files[key])

    def __enter__(self):
        return self.fp

    def __exit__(self, *args):
        return

    def close(self, *args, **kwargs):
        self.fp.close()


def file_open(path, mode=None):
    return File(path, mode)


def find_disk(virt_dom):
    # Answer depends on the module-level disk_type knob.
    if disk_type == 'lvm':
        return ("/dev/nova-vg/lv", "raw")
    if disk_type in ('raw', 'qcow2'):
        return ("filename", disk_type)
    return ("unknown_type_disk", None)


def load_file(path):
    if not os.path.exists(path):
        return ''
    with open(path, 'r') as fp:
        return fp.read()


def logical_volume_info(path):
    return {}


def file_delete(path):
    return True


def get_fs_info(path):
    # Fixed 128 GiB filesystem: 44 GiB used, 84 GiB free.
    gib = 1024 ** 3
    return {'total': 128 * gib,
            'used': 44 * gib,
            'free': 84 * gib}


def fetch_image(context, target, image_id, user_id, project_id,
                max_size=0):
    pass


def fetch_raw_image(context, target, image_id, user_id, project_id,
                    max_size=0):
    pass


def get_instance_path(instance, forceold=False, relative=False):
    # Delegate to the real helper: path computation has no side effects.
    return libvirt_utils.get_instance_path(instance, forceold=forceold,
                                           relative=relative)


def get_instance_path_at_destination(instance, migrate_data=None):
    return libvirt_utils.get_instance_path_at_destination(instance,
                                                          migrate_data)


def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
    return "qemu"


def is_valid_hostname(name):
    return True


def chown_for_id_maps(path, id_maps):
    pass


def get_arch(image_meta):
    return libvirt_utils.get_arch(image_meta)
import sys import mock from oslo_config import cfg from oslo_utils import encodeutils from nova import context from nova import exception from nova import test from nova.tests.unit.virt.libvirt import fakelibvirt from nova import utils from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import guest as libvirt_guest from nova.virt.libvirt import host host.libvirt = fakelibvirt libvirt_guest.libvirt = fakelibvirt CONF = cfg.CONF if sys.version_info > (3,): long = int class GuestTestCase(test.NoDBTestCase): def setUp(self): super(GuestTestCase, self).setUp() self.useFixture(fakelibvirt.FakeLibvirtFixture()) self.host = host.Host("qemu:///system") self.context = context.get_admin_context() self.domain = mock.Mock(spec=fakelibvirt.virDomain) self.guest = libvirt_guest.Guest(self.domain) def test_repr(self): self.domain.ID.return_value = 99 self.domain.UUIDString.return_value = "UUID" self.domain.name.return_value = "foo" self.assertEqual("", repr(self.guest)) @mock.patch.object(fakelibvirt.Connection, 'defineXML') def test_create(self, mock_define): libvirt_guest.Guest.create("xml", self.host) mock_define.assert_called_once_with("xml") @mock.patch.object(fakelibvirt.Connection, 'defineXML') def test_create_exception(self, mock_define): mock_define.side_effect = test.TestingException self.assertRaises(test.TestingException, libvirt_guest.Guest.create, "foo", self.host) def test_launch(self): self.guest.launch() self.domain.createWithFlags.assert_called_once_with(0) def test_launch_and_pause(self): self.guest.launch(pause=True) self.domain.createWithFlags.assert_called_once_with( fakelibvirt.VIR_DOMAIN_START_PAUSED) def test_shutdown(self): self.domain.shutdown = mock.MagicMock() self.guest.shutdown() self.domain.shutdown.assert_called_once_with() @mock.patch.object(encodeutils, 'safe_decode') def test_launch_exception(self, mock_safe_decode): self.domain.createWithFlags.side_effect = test.TestingException mock_safe_decode.return_value = "" 
self.assertRaises(test.TestingException, self.guest.launch) self.assertEqual(1, mock_safe_decode.called) @mock.patch.object(utils, 'execute') @mock.patch.object(libvirt_guest.Guest, 'get_interfaces') def test_enable_hairpin(self, mock_get_interfaces, mock_execute): mock_get_interfaces.return_value = ["vnet0", "vnet1"] self.guest.enable_hairpin() mock_execute.assert_has_calls([ mock.call( 'tee', '/sys/class/net/vnet0/brport/hairpin_mode', run_as_root=True, process_input='1', check_exit_code=[0, 1]), mock.call( 'tee', '/sys/class/net/vnet1/brport/hairpin_mode', run_as_root=True, process_input='1', check_exit_code=[0, 1])]) @mock.patch.object(encodeutils, 'safe_decode') @mock.patch.object(utils, 'execute') @mock.patch.object(libvirt_guest.Guest, 'get_interfaces') def test_enable_hairpin_exception(self, mock_get_interfaces, mock_execute, mock_safe_decode): mock_get_interfaces.return_value = ["foo"] mock_execute.side_effect = test.TestingException('oops') self.assertRaises(test.TestingException, self.guest.enable_hairpin) self.assertEqual(1, mock_safe_decode.called) def test_get_interfaces(self): self.domain.XMLDesc.return_value = """ """ self.assertEqual(["vnet0", "vnet1"], self.guest.get_interfaces()) def test_get_interfaces_exception(self): self.domain.XMLDesc.return_value = "" self.assertEqual([], self.guest.get_interfaces()) def test_poweroff(self): self.guest.poweroff() self.domain.destroy.assert_called_once_with() def test_resume(self): self.guest.resume() self.domain.resume.assert_called_once_with() def test_get_vcpus_info(self): self.domain.vcpus.return_value = ([(0, 1, int(10290000000), 2)], [(True, True)]) vcpus = list(self.guest.get_vcpus_info()) self.assertEqual(0, vcpus[0].id) self.assertEqual(2, vcpus[0].cpu) self.assertEqual(1, vcpus[0].state) self.assertEqual(int(10290000000), vcpus[0].time) def test_delete_configuration(self): self.guest.delete_configuration() self.domain.undefineFlags.assert_called_once_with( 
fakelibvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE) def test_delete_configuration_exception(self): self.domain.undefineFlags.side_effect = fakelibvirt.libvirtError( 'oops') self.domain.ID.return_value = 1 self.guest.delete_configuration() self.domain.undefine.assert_called_once_with() def test_attach_device(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "" self.guest.attach_device(conf) self.domain.attachDeviceFlags.assert_called_once_with( "", flags=0) def test_attach_device_persistent(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "" self.guest.attach_device(conf, persistent=True) self.domain.attachDeviceFlags.assert_called_once_with( "", flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG) def test_attach_device_live(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "" self.guest.attach_device(conf, live=True) self.domain.attachDeviceFlags.assert_called_once_with( "", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE) def test_attach_device_persistent_live(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "" self.guest.attach_device(conf, persistent=True, live=True) self.domain.attachDeviceFlags.assert_called_once_with( "", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_detach_device(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "" self.guest.detach_device(conf) self.domain.detachDeviceFlags.assert_called_once_with( "", flags=0) def test_detach_device_persistent(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "" self.guest.detach_device(conf, persistent=True) self.domain.detachDeviceFlags.assert_called_once_with( "", flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG) def test_detach_device_live(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "" 
self.guest.detach_device(conf, live=True) self.domain.detachDeviceFlags.assert_called_once_with( "", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE) def test_detach_device_persistent_live(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "" self.guest.detach_device(conf, persistent=True, live=True) self.domain.detachDeviceFlags.assert_called_once_with( "", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_detach_device_with_retry_detach_success(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "" get_config = mock.Mock() # Force multiple retries of detach get_config.side_effect = [conf, conf, conf, None] dev_path = "/dev/vdb" retry_detach = self.guest.detach_device_with_retry( get_config, dev_path, persistent=True, live=True, inc_sleep_time=.01) # Ensure we've only done the initial detach call self.domain.detachDeviceFlags.assert_called_once_with( "", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) get_config.assert_called_with(dev_path) # Some time later, we can do the wait/retry to ensure detach succeeds self.domain.detachDeviceFlags.reset_mock() retry_detach() # Should have two retries before we pretend device is detached self.assertEqual(2, self.domain.detachDeviceFlags.call_count) def test_detach_device_with_retry_detach_failure(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "" # Continue to return some value for the disk config get_config = mock.Mock(return_value=conf) retry_detach = self.guest.detach_device_with_retry( get_config, "/dev/vdb", persistent=True, live=True, inc_sleep_time=.01, max_retry_count=3) # Ensure we've only done the initial detach call self.domain.detachDeviceFlags.assert_called_once_with( "", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) # Some time later, we can do the wait/retry to ensure detach 
self.domain.detachDeviceFlags.reset_mock() # Should hit max # of retries self.assertRaises(exception.DeviceDetachFailed, retry_detach) self.assertEqual(4, self.domain.detachDeviceFlags.call_count) def test_detach_device_with_retry_device_not_found(self): get_config = mock.Mock(return_value=None) self.assertRaises( exception.DeviceNotFound, self.guest.detach_device_with_retry, get_config, "/dev/vdb", persistent=True, live=True) def test_get_xml_desc(self): self.guest.get_xml_desc() self.domain.XMLDesc.assert_called_once_with(flags=0) def test_get_xml_desc_dump_inactive(self): self.guest.get_xml_desc(dump_inactive=True) self.domain.XMLDesc.assert_called_once_with( flags=fakelibvirt.VIR_DOMAIN_XML_INACTIVE) def test_get_xml_desc_dump_sensitive(self): self.guest.get_xml_desc(dump_sensitive=True) self.domain.XMLDesc.assert_called_once_with( flags=fakelibvirt.VIR_DOMAIN_XML_SECURE) def test_get_xml_desc_dump_inactive_dump_sensitive(self): self.guest.get_xml_desc(dump_inactive=True, dump_sensitive=True) self.domain.XMLDesc.assert_called_once_with( flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE | fakelibvirt.VIR_DOMAIN_XML_SECURE)) def test_get_xml_desc_dump_migratable(self): self.guest.get_xml_desc(dump_migratable=True) self.domain.XMLDesc.assert_called_once_with( flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE) def test_has_persistent_configuration(self): self.assertTrue( self.guest.has_persistent_configuration()) self.domain.isPersistent.assert_called_once_with() def test_save_memory_state(self): self.guest.save_memory_state() self.domain.managedSave.assert_called_once_with(0) def test_get_block_device(self): disk = 'vda' gblock = self.guest.get_block_device(disk) self.assertEqual(disk, gblock._disk) self.assertEqual(self.guest, gblock._guest) def test_set_user_password(self): self.guest.set_user_password("foo", "123") self.domain.setUserPassword.assert_called_once_with("foo", "123", 0) def test_get_devices(self): xml = """ QEMUGuest1 c7a5fdbd-edaf-9455-926a-d65c16db1809 219136 
219136 1 hvm destroy restart destroy /usr/bin/qemu
""" self.domain.XMLDesc.return_value = xml devs = self.guest.get_all_devices() # Only currently parse , and elements # hence we're not counting the controller/memballoon self.assertEqual(6, len(devs)) self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(devs[3], vconfig.LibvirtConfigGuestHostdev) self.assertIsInstance(devs[4], vconfig.LibvirtConfigGuestHostdev) self.assertIsInstance(devs[5], vconfig.LibvirtConfigGuestInterface) devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestDisk) self.assertEqual(3, len(devs)) self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk) devs = self.guest.get_all_disks() self.assertEqual(3, len(devs)) self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk) devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestHostdev) self.assertEqual(2, len(devs)) self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestHostdev) self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestHostdev) devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestInterface) self.assertEqual(1, len(devs)) self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestInterface) self.assertIsNotNone( self.guest.get_interface_by_mac('fa:16:3e:f9:af:ae')) self.assertIsNone(self.guest.get_interface_by_mac(None)) def test_get_info(self): self.domain.info.return_value = (1, 2, 3, 4, 5) self.domain.ID.return_value = 6 info = self.guest.get_info(self.host) self.domain.info.assert_called_once_with() self.assertEqual(1, info.state) self.assertEqual(2, info.max_mem_kb) self.assertEqual(3, info.mem_kb) self.assertEqual(4, info.num_cpu) 
self.assertEqual(5, info.cpu_time_ns) self.assertEqual(6, info.id) def test_get_power_state(self): self.domain.info.return_value = (1, 2, 3, 4, 5) power = self.guest.get_power_state(self.host) self.assertEqual(1, power) def test_is_active_when_domain_is_active(self): with mock.patch.object(self.domain, "isActive", return_value=True): self.assertTrue(self.guest.is_active()) def test_is_active_when_domain_not_active(self): with mock.patch.object(self.domain, "isActive", return_value=False): self.assertFalse(self.guest.is_active()) def test_freeze_filesystems(self): self.guest.freeze_filesystems() self.domain.fsFreeze.assert_called_once_with() def test_thaw_filesystems(self): self.guest.thaw_filesystems() self.domain.fsThaw.assert_called_once_with() def _conf_snapshot(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestSnapshotDisk) conf.to_xml.return_value = '' return conf def test_snapshot(self): conf = self._conf_snapshot() self.guest.snapshot(conf) self.domain.snapshotCreateXML('', flags=0) conf.to_xml.assert_called_once_with() def test_snapshot_no_metadata(self): conf = self._conf_snapshot() self.guest.snapshot(conf, no_metadata=True) self.domain.snapshotCreateXML( '', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA) conf.to_xml.assert_called_once_with() def test_snapshot_disk_only(self): conf = self._conf_snapshot() self.guest.snapshot(conf, disk_only=True) self.domain.snapshotCreateXML( '', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY) conf.to_xml.assert_called_once_with() def test_snapshot_reuse_ext(self): conf = self._conf_snapshot() self.guest.snapshot(conf, reuse_ext=True) self.domain.snapshotCreateXML( '', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) conf.to_xml.assert_called_once_with() def test_snapshot_quiesce(self): conf = self._conf_snapshot() self.guest.snapshot(conf, quiesce=True) self.domain.snapshotCreateXML( '', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) conf.to_xml.assert_called_once_with() def 
test_snapshot_all(self): conf = self._conf_snapshot() self.guest.snapshot(conf, no_metadata=True, disk_only=True, reuse_ext=True, quiesce=True) self.domain.snapshotCreateXML( '', flags=( fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)) conf.to_xml.assert_called_once_with() def test_pause(self): self.guest.pause() self.domain.suspend.assert_called_once_with() class GuestBlockTestCase(test.NoDBTestCase): def setUp(self): super(GuestBlockTestCase, self).setUp() self.useFixture(fakelibvirt.FakeLibvirtFixture()) self.host = host.Host("qemu:///system") self.context = context.get_admin_context() self.domain = mock.Mock(spec=fakelibvirt.virDomain) self.guest = libvirt_guest.Guest(self.domain) self.gblock = self.guest.get_block_device('vda') def test_abort_job(self): self.gblock.abort_job() self.domain.blockJobAbort.assert_called_once_with('vda', flags=0) def test_abort_job_async(self): self.gblock.abort_job(async=True) self.domain.blockJobAbort.assert_called_once_with( 'vda', flags=fakelibvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC) def test_abort_job_pivot(self): self.gblock.abort_job(pivot=True) self.domain.blockJobAbort.assert_called_once_with( 'vda', flags=fakelibvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) def test_get_job_info(self): self.domain.blockJobInfo.return_value = { "type": 1, "bandwidth": 18, "cur": 66, "end": 100} info = self.gblock.get_job_info() self.assertEqual(1, info.job) self.assertEqual(18, info.bandwidth) self.assertEqual(66, info.cur) self.assertEqual(100, info.end) self.domain.blockJobInfo.assert_called_once_with('vda', flags=0) def test_resize(self): self.gblock.resize(10) self.domain.blockResize.assert_called_once_with('vda', 10) def test_rebase(self): self.gblock.rebase("foo") self.domain.blockRebase.assert_called_once_with( 'vda', "foo", 0, flags=0) def test_rebase_shallow(self): 
self.gblock.rebase("foo", shallow=True) self.domain.blockRebase.assert_called_once_with( 'vda', "foo", 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW) def test_rebase_reuse_ext(self): self.gblock.rebase("foo", reuse_ext=True) self.domain.blockRebase.assert_called_once_with( 'vda', "foo", 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT) def test_rebase_copy(self): self.gblock.rebase("foo", copy=True) self.domain.blockRebase.assert_called_once_with( 'vda', "foo", 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY) def test_rebase_relative(self): self.gblock.rebase("foo", relative=True) self.domain.blockRebase.assert_called_once_with( 'vda', "foo", 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE) def test_commit(self): self.gblock.commit("foo", "top") self.domain.blockCommit.assert_called_once_with( 'vda', "foo", "top", 0, flags=0) def test_commit_relative(self): self.gblock.commit("foo", "top", relative=True) self.domain.blockCommit.assert_called_once_with( 'vda', "foo", "top", 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE) def test_wait_for_job(self): self.domain.blockJobInfo.return_value = { "type": 4, "bandwidth": 18, "cur": 95, "end": 100} in_progress = self.gblock.wait_for_job() self.assertTrue(in_progress) self.domain.blockJobInfo.return_value = { "type": 4, "bandwidth": 18, "cur": 100, "end": 100} in_progress = self.gblock.wait_for_job() self.assertFalse(in_progress) self.domain.blockJobInfo.return_value = {"type": 0} in_progress = self.gblock.wait_for_job(wait_for_job_clean=True) self.assertFalse(in_progress) def test_wait_for_job_arbort_on_error(self): self.domain.blockJobInfo.return_value = -1 self.assertRaises( exception.NovaException, self.gblock.wait_for_job, abort_on_error=True) nova-13.0.0/nova/tests/unit/virt/libvirt/test_utils.py0000664000567000056710000007124312701407773024212 0ustar jenkinsjenkins00000000000000# Copyright 2012 NTT Data. All Rights Reserved. # Copyright 2012 Yahoo! Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import os import tempfile import mock from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import fileutils import six from nova.compute import arch from nova import context from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_instance from nova import utils from nova.virt.disk import api as disk from nova.virt import images from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import utils as libvirt_utils CONF = cfg.CONF class LibvirtUtilsTestCase(test.NoDBTestCase): @mock.patch('nova.utils.execute') def test_copy_image_local(self, mock_execute): libvirt_utils.copy_image('src', 'dest') mock_execute.assert_called_once_with('cp', 'src', 'dest') @mock.patch('nova.virt.libvirt.volume.remotefs.SshDriver.copy_file') def test_copy_image_remote_ssh(self, mock_rem_fs_remove): self.flags(remote_filesystem_transport='ssh', group='libvirt') libvirt_utils.copy_image('src', 'dest', host='host') mock_rem_fs_remove.assert_called_once_with('src', 'host:dest', on_completion=None, on_execute=None, compression=True) @mock.patch('nova.virt.libvirt.volume.remotefs.RsyncDriver.copy_file') def test_copy_image_remote_rsync(self, mock_rem_fs_remove): self.flags(remote_filesystem_transport='rsync', group='libvirt') libvirt_utils.copy_image('src', 'dest', host='host') mock_rem_fs_remove.assert_called_once_with('src', 'host:dest', 
on_completion=None, on_execute=None, compression=True) @mock.patch('os.path.exists', return_value=True) def test_disk_type_from_path(self, mock_exists): # Seems like lvm detection # if its in /dev ?? for p in ['/dev/b', '/dev/blah/blah']: d_type = libvirt_utils.get_disk_type_from_path(p) self.assertEqual('lvm', d_type) # Try rbd detection d_type = libvirt_utils.get_disk_type_from_path('rbd:pool/instance') self.assertEqual('rbd', d_type) # Try the other types path = '/myhome/disk.config' d_type = libvirt_utils.get_disk_type_from_path(path) self.assertIsNone(d_type) @mock.patch('os.path.exists', return_value=True) @mock.patch('os.path.isdir', return_value=True) def test_disk_type_ploop(self, mock_isdir, mock_exists): path = '/some/path' d_type = libvirt_utils.get_disk_type_from_path(path) mock_isdir.assert_called_once_with(path) mock_exists.assert_called_once_with("%s/DiskDescriptor.xml" % path) self.assertEqual('ploop', d_type) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_disk_backing(self, mock_execute, mock_exists): path = '/myhome/disk.config' template_output = """image: %(path)s file format: raw virtual size: 2K (2048 bytes) cluster_size: 65536 disk size: 96K """ output = template_output % ({ 'path': path, }) mock_execute.return_value = (output, '') d_backing = libvirt_utils.get_disk_backing_file(path) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path) mock_exists.assert_called_once_with(path) self.assertIsNone(d_backing) def _test_disk_size(self, mock_execute, path, expected_size): d_size = libvirt_utils.get_disk_size(path) self.assertEqual(expected_size, d_size) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path) @mock.patch('os.path.exists', return_value=True) def test_disk_size(self, mock_exists): path = '/myhome/disk.config' template_output = """image: %(path)s file format: raw virtual size: %(v_size)s (%(vsize_b)s bytes) 
cluster_size: 65536 disk size: 96K """ for i in range(0, 128): bytes = i * 65336 kbytes = bytes / 1024 mbytes = kbytes / 1024 output = template_output % ({ 'v_size': "%sM" % (mbytes), 'vsize_b': i, 'path': path, }) with mock.patch('nova.utils.execute', return_value=(output, '')) as mock_execute: self._test_disk_size(mock_execute, path, i) output = template_output % ({ 'v_size': "%sK" % (kbytes), 'vsize_b': i, 'path': path, }) with mock.patch('nova.utils.execute', return_value=(output, '')) as mock_execute: self._test_disk_size(mock_execute, path, i) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_qemu_info_canon(self, mock_execute, mock_exists): path = "disk.config" example_output = """image: disk.config file format: raw virtual size: 64M (67108864 bytes) cluster_size: 65536 disk size: 96K blah BLAH: bb """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) self.assertEqual(67108864, image_info.virtual_size) self.assertEqual(98304, image_info.disk_size) self.assertEqual(65536, image_info.cluster_size) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_qemu_info_canon2(self, mock_execute, mock_exists): path = "disk.config" example_output = """image: disk.config file format: QCOW2 virtual size: 67108844 cluster_size: 65536 disk size: 963434 backing file: /var/lib/nova/a328c7998805951a_2 """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('qcow2', image_info.file_format) 
self.assertEqual(67108844, image_info.virtual_size) self.assertEqual(963434, image_info.disk_size) self.assertEqual(65536, image_info.cluster_size) self.assertEqual('/var/lib/nova/a328c7998805951a_2', image_info.backing_file) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_qemu_backing_file_actual(self, mock_execute, mock_exists): path = "disk.config" example_output = """image: disk.config file format: raw virtual size: 64M (67108864 bytes) cluster_size: 65536 disk size: 96K Snapshot list: ID TAG VM SIZE DATE VM CLOCK 1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2) """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) self.assertEqual(67108864, image_info.virtual_size) self.assertEqual(98304, image_info.disk_size) self.assertEqual(1, len(image_info.snapshots)) self.assertEqual('/b/3a988059e51a_2', image_info.backing_file) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_qemu_info_convert(self, mock_execute, mock_exists): path = "disk.config" example_output = """image: disk.config file format: raw virtual size: 64M disk size: 96K Snapshot list: ID TAG VM SIZE DATE VM CLOCK 1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 junk stuff: bbb """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path) 
mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) self.assertEqual(67108864, image_info.virtual_size) self.assertEqual(98304, image_info.disk_size) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_qemu_info_snaps(self, mock_execute, mock_exists): path = "disk.config" example_output = """image: disk.config file format: raw virtual size: 64M (67108864 bytes) disk size: 96K Snapshot list: ID TAG VM SIZE DATE VM CLOCK 1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) self.assertEqual(67108864, image_info.virtual_size) self.assertEqual(98304, image_info.disk_size) self.assertEqual(3, len(image_info.snapshots)) def test_valid_hostname_normal(self): self.assertTrue(libvirt_utils.is_valid_hostname("hello.world.com")) def test_valid_hostname_ipv4addr(self): self.assertTrue(libvirt_utils.is_valid_hostname("10.0.2.1")) def test_valid_hostname_ipv6addr(self): self.assertTrue(libvirt_utils.is_valid_hostname("240:2ac3::2")) def test_valid_hostname_bad(self): self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh")) @mock.patch('nova.utils.execute') def test_create_image(self, mock_execute): libvirt_utils.create_image('raw', '/some/path', '10G') libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234') expected_args = [(('qemu-img', 'create', '-f', 'raw', '/some/path', '10G'),), (('qemu-img', 'create', '-f', 'qcow2', '/some/stuff', '1234567891234'),)] 
self.assertEqual(expected_args, mock_execute.call_args_list) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_create_cow_image(self, mock_execute, mock_exists): mock_execute.return_value = ('stdout', None) libvirt_utils.create_cow_image('/some/path', '/the/new/cow') expected_args = [(('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', '/some/path'),), (('qemu-img', 'create', '-f', 'qcow2', '-o', 'backing_file=/some/path', '/the/new/cow'),)] self.assertEqual(expected_args, mock_execute.call_args_list) def test_pick_disk_driver_name(self): type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']), 'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']), 'uml': ([True, None], [False, None], [None, None]), 'lxc': ([True, None], [False, None], [None, None])} # NOTE(aloga): Xen is tested in test_pick_disk_driver_name_xen version = 1005001 for (virt_type, checks) in six.iteritems(type_map): self.flags(virt_type=virt_type, group='libvirt') for (is_block_dev, expected_result) in checks: result = libvirt_utils.pick_disk_driver_name(version, is_block_dev) self.assertEqual(result, expected_result) @mock.patch('nova.utils.execute') def test_pick_disk_driver_name_xen(self, mock_execute): def side_effect(*args, **kwargs): if args == ('tap-ctl', 'check'): if mock_execute.blktap is True: return ('ok\n', '') elif mock_execute.blktap is False: return ('some error\n', '') else: raise OSError(2, "No such file or directory") elif args == ('xend', 'status'): if mock_execute.xend is True: return ('', '') elif mock_execute.xend is False: raise processutils.ProcessExecutionError("error") else: raise OSError(2, "No such file or directory") raise Exception('Unexpected call') mock_execute.side_effect = side_effect self.flags(virt_type="xen", group='libvirt') versions = [4000000, 4001000, 4002000, 4003000, 4005000] for version in versions: # block dev result = libvirt_utils.pick_disk_driver_name(version, True) self.assertEqual(result, "phy") 
self.assertFalse(mock_execute.called) mock_execute.reset_mock() # file dev for blktap in True, False, None: mock_execute.blktap = blktap for xend in True, False, None: mock_execute.xend = xend result = libvirt_utils.pick_disk_driver_name(version, False) # qemu backend supported only by libxl which is # production since xen 4.2. libvirt use libxl if # xend service not started. if version >= 4002000 and xend is not True: self.assertEqual(result, 'qemu') elif blktap: if version == 4000000: self.assertEqual(result, 'tap') else: self.assertEqual(result, 'tap2') else: self.assertEqual(result, 'file') # default is_block_dev False self.assertEqual(result, libvirt_utils.pick_disk_driver_name(version)) mock_execute.reset_mock() @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_get_disk_size(self, mock_execute, mock_exists): path = '/some/path' example_output = """image: 00000001 file format: raw virtual size: 4.4M (4592640 bytes) disk size: 4.4M """ mock_execute.return_value = (example_output, '') self.assertEqual(4592640, disk.get_disk_size('/some/path')) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path) mock_exists.assert_called_once_with(path) def test_copy_image(self): dst_fd, dst_path = tempfile.mkstemp() try: os.close(dst_fd) src_fd, src_path = tempfile.mkstemp() try: with os.fdopen(src_fd, 'w') as fp: fp.write('canary') libvirt_utils.copy_image(src_path, dst_path) with open(dst_path, 'r') as fp: self.assertEqual(fp.read(), 'canary') finally: os.unlink(src_path) finally: os.unlink(dst_path) def test_write_to_file(self): dst_fd, dst_path = tempfile.mkstemp() try: os.close(dst_fd) libvirt_utils.write_to_file(dst_path, 'hello') with open(dst_path, 'r') as fp: self.assertEqual(fp.read(), 'hello') finally: os.unlink(dst_path) def test_write_to_file_with_umask(self): dst_fd, dst_path = tempfile.mkstemp() try: os.close(dst_fd) os.unlink(dst_path) libvirt_utils.write_to_file(dst_path, 'hello', 
umask=0o277) with open(dst_path, 'r') as fp: self.assertEqual(fp.read(), 'hello') mode = os.stat(dst_path).st_mode self.assertEqual(mode & 0o277, 0) finally: os.unlink(dst_path) @mock.patch.object(utils, 'execute') def test_chown(self, mock_execute): libvirt_utils.chown('/some/path', 'soren') mock_execute.assert_called_once_with('chown', 'soren', '/some/path', run_as_root=True) @mock.patch.object(utils, 'execute') def test_chown_for_id_maps(self, mock_execute): id_maps = [vconfig.LibvirtConfigGuestUIDMap(), vconfig.LibvirtConfigGuestUIDMap(), vconfig.LibvirtConfigGuestGIDMap(), vconfig.LibvirtConfigGuestGIDMap()] id_maps[0].target = 10000 id_maps[0].count = 2000 id_maps[1].start = 2000 id_maps[1].target = 40000 id_maps[1].count = 2000 id_maps[2].target = 10000 id_maps[2].count = 2000 id_maps[3].start = 2000 id_maps[3].target = 40000 id_maps[3].count = 2000 libvirt_utils.chown_for_id_maps('/some/path', id_maps) execute_args = ('nova-idmapshift', '-i', '-u', '0:10000:2000,2000:40000:2000', '-g', '0:10000:2000,2000:40000:2000', '/some/path') mock_execute.assert_called_once_with(*execute_args, run_as_root=True) def _do_test_extract_snapshot(self, mock_execute, src_format='qcow2', dest_format='raw', out_format='raw'): libvirt_utils.extract_snapshot('/path/to/disk/image', src_format, '/extracted/snap', dest_format) mock_execute.assert_called_once_with( 'qemu-img', 'convert', '-f', src_format, '-O', out_format, '/path/to/disk/image', '/extracted/snap') @mock.patch.object(utils, 'execute') def test_extract_snapshot_raw(self, mock_execute): self._do_test_extract_snapshot(mock_execute) @mock.patch.object(utils, 'execute') def test_extract_snapshot_iso(self, mock_execute): self._do_test_extract_snapshot(mock_execute, dest_format='iso') @mock.patch.object(utils, 'execute') def test_extract_snapshot_qcow2(self, mock_execute): self._do_test_extract_snapshot(mock_execute, dest_format='qcow2', out_format='qcow2') @mock.patch.object(utils, 'execute') def 
test_extract_snapshot_parallels(self, mock_execute): self._do_test_extract_snapshot(mock_execute, src_format='raw', dest_format='ploop', out_format='parallels') def test_load_file(self): dst_fd, dst_path = tempfile.mkstemp() try: os.close(dst_fd) # We have a test for write_to_file. If that is sound, this suffices libvirt_utils.write_to_file(dst_path, 'hello') self.assertEqual(libvirt_utils.load_file(dst_path), 'hello') finally: os.unlink(dst_path) def test_file_open(self): dst_fd, dst_path = tempfile.mkstemp() try: os.close(dst_fd) # We have a test for write_to_file. If that is sound, this suffices libvirt_utils.write_to_file(dst_path, 'hello') with libvirt_utils.file_open(dst_path, 'r') as fp: self.assertEqual(fp.read(), 'hello') finally: os.unlink(dst_path) def test_get_fs_info(self): class FakeStatResult(object): def __init__(self): self.f_bsize = 4096 self.f_frsize = 4096 self.f_blocks = 2000 self.f_bfree = 1000 self.f_bavail = 900 self.f_files = 2000 self.f_ffree = 1000 self.f_favail = 900 self.f_flag = 4096 self.f_namemax = 255 self.path = None def fake_statvfs(path): self.path = path return FakeStatResult() self.stub_out('os.statvfs', fake_statvfs) fs_info = libvirt_utils.get_fs_info('/some/file/path') self.assertEqual('/some/file/path', self.path) self.assertEqual(8192000, fs_info['total']) self.assertEqual(3686400, fs_info['free']) self.assertEqual(4096000, fs_info['used']) @mock.patch('nova.virt.images.fetch_to_raw') def test_fetch_image(self, mock_images): context = 'opaque context' target = '/tmp/targetfile' image_id = '4' user_id = 'fake' project_id = 'fake' libvirt_utils.fetch_image(context, target, image_id, user_id, project_id) mock_images.assert_called_once_with( context, image_id, target, user_id, project_id, max_size=0) @mock.patch('nova.virt.images.fetch') def test_fetch_initrd_image(self, mock_images): _context = context.RequestContext(project_id=123, project_name="aubergine", user_id=456, user_name="pie") target = '/tmp/targetfile' image_id = 
'4' user_id = 'fake' project_id = 'fake' libvirt_utils.fetch_raw_image(_context, target, image_id, user_id, project_id) mock_images.assert_called_once_with( _context, image_id, target, user_id, project_id, max_size=0) def test_fetch_raw_image(self): def fake_execute(*cmd, **kwargs): self.executes.append(cmd) return None, None def fake_rename(old, new): self.executes.append(('mv', old, new)) def fake_unlink(path): self.executes.append(('rm', path)) def fake_rm_on_error(path, remove=None): self.executes.append(('rm', '-f', path)) def fake_qemu_img_info(path): class FakeImgInfo(object): pass file_format = path.split('.')[-1] if file_format == 'part': file_format = path.split('.')[-2] elif file_format == 'converted': file_format = 'raw' if 'backing' in path: backing_file = 'backing' else: backing_file = None if 'big' in path: virtual_size = 2 else: virtual_size = 1 FakeImgInfo.file_format = file_format FakeImgInfo.backing_file = backing_file FakeImgInfo.virtual_size = virtual_size return FakeImgInfo() self.stubs.Set(utils, 'execute', fake_execute) self.stub_out('os.rename', fake_rename) self.stub_out('os.unlink', fake_unlink) self.stubs.Set(images, 'fetch', lambda *_, **__: None) self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info) self.stubs.Set(fileutils, 'delete_if_exists', fake_rm_on_error) # Since the remove param of fileutils.remove_path_on_error() # is initialized at load time, we must provide a wrapper # that explicitly resets it to our fake delete_if_exists() old_rm_path_on_error = fileutils.remove_path_on_error f = functools.partial(old_rm_path_on_error, remove=fake_rm_on_error) self.stubs.Set(fileutils, 'remove_path_on_error', f) context = 'opaque context' image_id = '4' user_id = 'fake' project_id = 'fake' target = 't.qcow2' self.executes = [] expected_commands = [('qemu-img', 'convert', '-O', 'raw', 't.qcow2.part', 't.qcow2.converted', '-f', 'qcow2'), ('rm', 't.qcow2.part'), ('mv', 't.qcow2.converted', 't.qcow2')] images.fetch_to_raw(context, 
image_id, target, user_id, project_id, max_size=1) self.assertEqual(self.executes, expected_commands) target = 't.raw' self.executes = [] expected_commands = [('mv', 't.raw.part', 't.raw')] images.fetch_to_raw(context, image_id, target, user_id, project_id) self.assertEqual(self.executes, expected_commands) target = 'backing.qcow2' self.executes = [] expected_commands = [('rm', '-f', 'backing.qcow2.part')] self.assertRaises(exception.ImageUnacceptable, images.fetch_to_raw, context, image_id, target, user_id, project_id) self.assertEqual(self.executes, expected_commands) target = 'big.qcow2' self.executes = [] expected_commands = [('rm', '-f', 'big.qcow2.part')] self.assertRaises(exception.FlavorDiskSmallerThanImage, images.fetch_to_raw, context, image_id, target, user_id, project_id, max_size=1) self.assertEqual(self.executes, expected_commands) del self.executes def test_get_disk_backing_file(self): with_actual_path = False def fake_execute(*args, **kwargs): if with_actual_path: return ("some: output\n" "backing file: /foo/bar/baz (actual path: /a/b/c)\n" "...: ...\n"), '' else: return ("some: output\n" "backing file: /foo/bar/baz\n" "...: ...\n"), '' def return_true(*args, **kwargs): return True self.stubs.Set(utils, 'execute', fake_execute) self.stub_out('os.path.exists', return_true) out = libvirt_utils.get_disk_backing_file('') self.assertEqual(out, 'baz') with_actual_path = True out = libvirt_utils.get_disk_backing_file('') self.assertEqual(out, 'c') def test_get_instance_path_at_destination(self): instance = fake_instance.fake_instance_obj(None, name='fake_inst', uuid='fake_uuid') migrate_data = None inst_path_at_dest = libvirt_utils.get_instance_path_at_destination( instance, migrate_data) expected_path = os.path.join(CONF.instances_path, instance['uuid']) self.assertEqual(expected_path, inst_path_at_dest) migrate_data = {} inst_path_at_dest = libvirt_utils.get_instance_path_at_destination( instance, migrate_data) expected_path = 
os.path.join(CONF.instances_path, instance['uuid']) self.assertEqual(expected_path, inst_path_at_dest) migrate_data = objects.LibvirtLiveMigrateData( instance_relative_path='fake_relative_path') inst_path_at_dest = libvirt_utils.get_instance_path_at_destination( instance, migrate_data) expected_path = os.path.join(CONF.instances_path, 'fake_relative_path') self.assertEqual(expected_path, inst_path_at_dest) def test_get_arch(self): image_meta = objects.ImageMeta.from_dict( {'properties': {'architecture': "X86_64"}}) image_arch = libvirt_utils.get_arch(image_meta) self.assertEqual(arch.X86_64, image_arch) nova-13.0.0/nova/tests/unit/virt/libvirt/fakelibvirt.py0000664000567000056710000012006312701407773024310 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time import uuid import fixtures from lxml import etree import six from nova.compute import arch from nova.virt.libvirt import config as vconfig # Allow passing None to the various connect methods # (i.e. 
allow the client to rely on default URLs) allow_default_uri_connection = True # Has libvirt connection been used at least once connection_used = False def _reset(): global allow_default_uri_connection allow_default_uri_connection = True # virDomainState VIR_DOMAIN_NOSTATE = 0 VIR_DOMAIN_RUNNING = 1 VIR_DOMAIN_BLOCKED = 2 VIR_DOMAIN_PAUSED = 3 VIR_DOMAIN_SHUTDOWN = 4 VIR_DOMAIN_SHUTOFF = 5 VIR_DOMAIN_CRASHED = 6 # NOTE(mriedem): These values come from include/libvirt/libvirt-domain.h VIR_DOMAIN_XML_SECURE = 1 VIR_DOMAIN_XML_INACTIVE = 2 VIR_DOMAIN_XML_UPDATE_CPU = 4 VIR_DOMAIN_XML_MIGRATABLE = 8 VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1 VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2 VIR_DOMAIN_BLOCK_REBASE_COPY = 8 VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC = 1 VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2 VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0 VIR_DOMAIN_EVENT_DEFINED = 0 VIR_DOMAIN_EVENT_UNDEFINED = 1 VIR_DOMAIN_EVENT_STARTED = 2 VIR_DOMAIN_EVENT_SUSPENDED = 3 VIR_DOMAIN_EVENT_RESUMED = 4 VIR_DOMAIN_EVENT_STOPPED = 5 VIR_DOMAIN_EVENT_SHUTDOWN = 6 VIR_DOMAIN_EVENT_PMSUSPENDED = 7 VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1 VIR_DOMAIN_AFFECT_CURRENT = 0 VIR_DOMAIN_AFFECT_LIVE = 1 VIR_DOMAIN_AFFECT_CONFIG = 2 VIR_CPU_COMPARE_ERROR = -1 VIR_CPU_COMPARE_INCOMPATIBLE = 0 VIR_CPU_COMPARE_IDENTICAL = 1 VIR_CPU_COMPARE_SUPERSET = 2 VIR_CRED_USERNAME = 1 VIR_CRED_AUTHNAME = 2 VIR_CRED_LANGUAGE = 3 VIR_CRED_CNONCE = 4 VIR_CRED_PASSPHRASE = 5 VIR_CRED_ECHOPROMPT = 6 VIR_CRED_NOECHOPROMPT = 7 VIR_CRED_REALM = 8 VIR_CRED_EXTERNAL = 9 VIR_MIGRATE_LIVE = 1 VIR_MIGRATE_PEER2PEER = 2 VIR_MIGRATE_TUNNELLED = 4 VIR_MIGRATE_PERSIST_DEST = 8 VIR_MIGRATE_UNDEFINE_SOURCE = 16 VIR_MIGRATE_NON_SHARED_INC = 128 VIR_NODE_CPU_STATS_ALL_CPUS = -1 VIR_DOMAIN_START_PAUSED = 1 # libvirtError enums # (Intentionally different from what's in libvirt. 
We do this to check, # that consumers of the library are using the symbolic names rather than # hardcoding the numerical values) VIR_FROM_QEMU = 100 VIR_FROM_DOMAIN = 200 VIR_FROM_NWFILTER = 330 VIR_FROM_REMOTE = 340 VIR_FROM_RPC = 345 VIR_FROM_NODEDEV = 666 VIR_ERR_INVALID_ARG = 8 VIR_ERR_NO_SUPPORT = 3 VIR_ERR_XML_DETAIL = 350 VIR_ERR_NO_DOMAIN = 420 VIR_ERR_OPERATION_FAILED = 510 VIR_ERR_OPERATION_INVALID = 55 VIR_ERR_OPERATION_TIMEOUT = 68 VIR_ERR_NO_NWFILTER = 620 VIR_ERR_SYSTEM_ERROR = 900 VIR_ERR_INTERNAL_ERROR = 950 VIR_ERR_CONFIG_UNSUPPORTED = 951 VIR_ERR_NO_NODE_DEVICE = 667 VIR_ERR_NO_SECRET = 66 # Readonly VIR_CONNECT_RO = 1 # virConnectBaselineCPU flags VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES = 1 # snapshotCreateXML flags VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA = 4 VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY = 16 VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 # blockCommit flags VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4 # blockRebase flags VIR_DOMAIN_BLOCK_REBASE_RELATIVE = 8 VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1 VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2 # secret type VIR_SECRET_USAGE_TYPE_NONE = 0 VIR_SECRET_USAGE_TYPE_VOLUME = 1 VIR_SECRET_USAGE_TYPE_CEPH = 2 VIR_SECRET_USAGE_TYPE_ISCSI = 3 # Libvirt version FAKE_LIBVIRT_VERSION = 10002 class HostInfo(object): def __init__(self, arch=arch.X86_64, kB_mem=4096, cpus=2, cpu_mhz=800, cpu_nodes=1, cpu_sockets=1, cpu_cores=2, cpu_threads=1, cpu_model="Penryn", cpu_vendor="Intel", numa_topology='', cpu_disabled=None): """Create a new Host Info object :param arch: (string) indicating the CPU arch (eg 'i686' or whatever else uname -m might return) :param kB_mem: (int) memory size in KBytes :param cpus: (int) the number of active CPUs :param cpu_mhz: (int) expected CPU frequency :param cpu_nodes: (int) the number of NUMA cell, 1 for unusual NUMA topologies or uniform :param cpu_sockets: (int) number of CPU sockets per node if nodes > 1, total number of CPU sockets otherwise :param cpu_cores: 
(int) number of cores per socket :param cpu_threads: (int) number of threads per core :param cpu_model: CPU model :param cpu_vendor: CPU vendor :param numa_topology: Numa topology :param cpu_disabled: List of disabled cpus """ self.arch = arch self.kB_mem = kB_mem self.cpus = cpus self.cpu_mhz = cpu_mhz self.cpu_nodes = cpu_nodes self.cpu_cores = cpu_cores self.cpu_threads = cpu_threads self.cpu_sockets = cpu_sockets self.cpu_model = cpu_model self.cpu_vendor = cpu_vendor self.numa_topology = numa_topology self.disabled_cpus_list = cpu_disabled or [] @classmethod def _gen_numa_topology(self, cpu_nodes, cpu_sockets, cpu_cores, cpu_threads, kb_mem, numa_mempages_list=None): topology = vconfig.LibvirtConfigCapsNUMATopology() cpu_count = 0 for cell_count in range(cpu_nodes): cell = vconfig.LibvirtConfigCapsNUMACell() cell.id = cell_count cell.memory = kb_mem / cpu_nodes for socket_count in range(cpu_sockets): for cpu_num in range(cpu_cores * cpu_threads): cpu = vconfig.LibvirtConfigCapsNUMACPU() cpu.id = cpu_count cpu.socket_id = cell_count cpu.core_id = cpu_num // cpu_threads cpu.siblings = set([cpu_threads * (cpu_count // cpu_threads) + thread for thread in range(cpu_threads)]) cell.cpus.append(cpu) cpu_count += 1 # Set mempages per numa cell. if numa_mempages_list is empty # we will set only the default 4K pages. 
if numa_mempages_list: mempages = numa_mempages_list[cell_count] else: mempages = vconfig.LibvirtConfigCapsNUMAPages() mempages.size = 4 mempages.total = cell.memory / mempages.size mempages = [mempages] cell.mempages = mempages topology.cells.append(cell) return topology def get_numa_topology(self): return self.numa_topology VIR_DOMAIN_JOB_NONE = 0 VIR_DOMAIN_JOB_BOUNDED = 1 VIR_DOMAIN_JOB_UNBOUNDED = 2 VIR_DOMAIN_JOB_COMPLETED = 3 VIR_DOMAIN_JOB_FAILED = 4 VIR_DOMAIN_JOB_CANCELLED = 5 def _parse_disk_info(element): disk_info = {} disk_info['type'] = element.get('type', 'file') disk_info['device'] = element.get('device', 'disk') driver = element.find('./driver') if driver is not None: disk_info['driver_name'] = driver.get('name') disk_info['driver_type'] = driver.get('type') source = element.find('./source') if source is not None: disk_info['source'] = source.get('file') if not disk_info['source']: disk_info['source'] = source.get('dev') if not disk_info['source']: disk_info['source'] = source.get('path') target = element.find('./target') if target is not None: disk_info['target_dev'] = target.get('dev') disk_info['target_bus'] = target.get('bus') return disk_info def disable_event_thread(self): """Disable nova libvirt driver event thread. The Nova libvirt driver includes a native thread which monitors the libvirt event channel. In a testing environment this becomes problematic because it means we've got a floating thread calling sleep(1) over the life of the unit test. Seems harmless? It's not, because we sometimes want to test things like retry loops that should have specific sleep paterns. An unlucky firing of the libvirt thread will cause a test failure. """ # because we are patching a method in a class MonkeyPatch doesn't # auto import correctly. Import explicitly otherwise the patching # may silently fail. 
import nova.virt.libvirt.host # noqa def evloop(*args, **kwargs): pass self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.host.Host._init_events', evloop)) class libvirtError(Exception): """This class was copied and slightly modified from `libvirt-python:libvirt-override.py`. Since a test environment will use the real `libvirt-python` version of `libvirtError` if it's installed and not this fake, we need to maintain strict compatibility with the original class, including `__init__` args and instance-attributes. To create a libvirtError instance you should: # Create an unsupported error exception exc = libvirtError('my message') exc.err = (libvirt.VIR_ERR_NO_SUPPORT,) self.err is a tuple of form: (error_code, error_domain, error_message, error_level, str1, str2, str3, int1, int2) Alternatively, you can use the `make_libvirtError` convenience function to allow you to specify these attributes in one shot. """ def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None, vol=None): Exception.__init__(self, defmsg) self.err = None def get_error_code(self): if self.err is None: return None return self.err[0] def get_error_domain(self): if self.err is None: return None return self.err[1] def get_error_message(self): if self.err is None: return None return self.err[2] def get_error_level(self): if self.err is None: return None return self.err[3] def get_str1(self): if self.err is None: return None return self.err[4] def get_str2(self): if self.err is None: return None return self.err[5] def get_str3(self): if self.err is None: return None return self.err[6] def get_int1(self): if self.err is None: return None return self.err[7] def get_int2(self): if self.err is None: return None return self.err[8] class NWFilter(object): def __init__(self, connection, xml): self._connection = connection self._xml = xml self._parse_xml(xml) def _parse_xml(self, xml): tree = etree.fromstring(xml) root = tree.find('.') self._name = root.get('name') def undefine(self): 
self._connection._remove_filter(self) class NodeDevice(object): def __init__(self, connection, xml=None): self._connection = connection self._xml = xml if xml is not None: self._parse_xml(xml) def _parse_xml(self, xml): tree = etree.fromstring(xml) root = tree.find('.') self._name = root.get('name') def attach(self): pass def dettach(self): pass def reset(self): pass class Domain(object): def __init__(self, connection, xml, running=False, transient=False): self._connection = connection if running: connection._mark_running(self) self._state = running and VIR_DOMAIN_RUNNING or VIR_DOMAIN_SHUTOFF self._transient = transient self._def = self._parse_definition(xml) self._has_saved_state = False self._snapshots = {} self._id = self._connection._id_counter def _parse_definition(self, xml): try: tree = etree.fromstring(xml) except etree.ParseError: raise make_libvirtError( libvirtError, "Invalid XML.", error_code=VIR_ERR_XML_DETAIL, error_domain=VIR_FROM_DOMAIN) definition = {} name = tree.find('./name') if name is not None: definition['name'] = name.text uuid_elem = tree.find('./uuid') if uuid_elem is not None: definition['uuid'] = uuid_elem.text else: definition['uuid'] = str(uuid.uuid4()) vcpu = tree.find('./vcpu') if vcpu is not None: definition['vcpu'] = int(vcpu.text) memory = tree.find('./memory') if memory is not None: definition['memory'] = int(memory.text) os = {} os_type = tree.find('./os/type') if os_type is not None: os['type'] = os_type.text os['arch'] = os_type.get('arch', self._connection.host_info.arch) os_kernel = tree.find('./os/kernel') if os_kernel is not None: os['kernel'] = os_kernel.text os_initrd = tree.find('./os/initrd') if os_initrd is not None: os['initrd'] = os_initrd.text os_cmdline = tree.find('./os/cmdline') if os_cmdline is not None: os['cmdline'] = os_cmdline.text os_boot = tree.find('./os/boot') if os_boot is not None: os['boot_dev'] = os_boot.get('dev') definition['os'] = os features = {} acpi = tree.find('./features/acpi') if acpi is 
not None: features['acpi'] = True definition['features'] = features devices = {} device_nodes = tree.find('./devices') if device_nodes is not None: disks_info = [] disks = device_nodes.findall('./disk') for disk in disks: disks_info += [_parse_disk_info(disk)] devices['disks'] = disks_info nics_info = [] nics = device_nodes.findall('./interface') for nic in nics: nic_info = {} nic_info['type'] = nic.get('type') mac = nic.find('./mac') if mac is not None: nic_info['mac'] = mac.get('address') source = nic.find('./source') if source is not None: if nic_info['type'] == 'network': nic_info['source'] = source.get('network') elif nic_info['type'] == 'bridge': nic_info['source'] = source.get('bridge') nics_info += [nic_info] devices['nics'] = nics_info definition['devices'] = devices return definition def create(self): self.createWithFlags(0) def createWithFlags(self, flags): # FIXME: Not handling flags at the moment self._state = VIR_DOMAIN_RUNNING self._connection._mark_running(self) self._has_saved_state = False def isActive(self): return int(self._state == VIR_DOMAIN_RUNNING) def undefine(self): self._connection._undefine(self) def isPersistent(self): return True def undefineFlags(self, flags): self.undefine() if flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE: if self.hasManagedSaveImage(0): self.managedSaveRemove() def destroy(self): self._state = VIR_DOMAIN_SHUTOFF self._connection._mark_not_running(self) def ID(self): return self._id def name(self): return self._def['name'] def UUIDString(self): return self._def['uuid'] def interfaceStats(self, device): return [10000242400, 1234, 0, 2, 213412343233, 34214234, 23, 3] def blockStats(self, device): return [2, 10000242400, 234, 2343424234, 34] def suspend(self): self._state = VIR_DOMAIN_PAUSED def shutdown(self): self._state = VIR_DOMAIN_SHUTDOWN self._connection._mark_not_running(self) def reset(self, flags): # FIXME: Not handling flags at the moment self._state = VIR_DOMAIN_RUNNING self._connection._mark_running(self) def 
info(self): return [self._state, int(self._def['memory']), int(self._def['memory']), self._def['vcpu'], 123456789] def migrateToURI(self, desturi, flags, dname, bandwidth): raise make_libvirtError( libvirtError, "Migration always fails for fake libvirt!", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) def migrateToURI2(self, dconnuri, miguri, dxml, flags, dname, bandwidth): raise make_libvirtError( libvirtError, "Migration always fails for fake libvirt!", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) def migrateToURI3(self, dconnuri, params, logical_sum): raise make_libvirtError( libvirtError, "Migration always fails for fake libvirt!", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) def migrateSetMaxDowntime(self, downtime): pass def attachDevice(self, xml): disk_info = _parse_disk_info(etree.fromstring(xml)) disk_info['_attached'] = True self._def['devices']['disks'] += [disk_info] return True def attachDeviceFlags(self, xml, flags): if (flags & VIR_DOMAIN_AFFECT_LIVE and self._state != VIR_DOMAIN_RUNNING): raise make_libvirtError( libvirtError, "AFFECT_LIVE only allowed for running domains!", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) self.attachDevice(xml) def detachDevice(self, xml): disk_info = _parse_disk_info(etree.fromstring(xml)) disk_info['_attached'] = True return disk_info in self._def['devices']['disks'] def detachDeviceFlags(self, xml, flags): self.detachDevice(xml) def setUserPassword(self, user, password, flags=0): pass def XMLDesc(self, flags): disks = '' for disk in self._def['devices']['disks']: disks += '''
''' % disk nics = '' for nic in self._def['devices']['nics']: nics += '''
''' % nic return ''' %(name)s %(uuid)s %(memory)s %(memory)s %(vcpu)s hvm destroy restart restart /usr/bin/kvm %(disks)s
%(nics)s